Merge tag 'dma-mapping-6.10-2024-05-20' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - optimize DMA sync calls when they are no-ops (Alexander Lobakin)

 - fix swiotlb padding for untrusted devices (Michael Kelley)

 - add documentation for swiotlb (Michael Kelley)

* tag 'dma-mapping-6.10-2024-05-20' of git://git.infradead.org/users/hch/dma-mapping:
  dma: fix DMA sync for drivers not calling dma_set_mask*()
  xsk: use generic DMA sync shortcut instead of a custom one
  page_pool: check for DMA sync shortcut earlier
  page_pool: don't use driver-set flags field directly
  page_pool: make sure frag API fields don't span between cachelines
  iommu/dma: avoid expensive indirect calls for sync operations
  dma: avoid redundant calls for sync operations
  dma: compile-out DMA sync op calls when not used
  iommu/dma: fix zeroing of bounce buffer padding used by untrusted devices
  swiotlb: remove alloc_size argument to swiotlb_tbl_map_single()
  Documentation/core-api: add swiotlb documentation
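The common thread in this series is that dma_sync_*() calls are pure overhead on cache-coherent systems that never bounce through SWIOTLB, and the new per-device shortcut lets the core skip them outright. The pattern the xsk and page_pool patches converge on is caching dma_need_sync() once per mapping; a minimal sketch of that pattern in a hypothetical driver (rx_buf, rx_buf_map and rx_buf_refill are illustrative names, not from this merge):

    #include <linux/dma-mapping.h>

    /* Hypothetical receive-buffer state: cache whether this mapping
     * ever needs explicit syncs, so the hot path can skip the calls.
     */
    struct rx_buf {
    	dma_addr_t	dma;
    	bool		needs_sync;
    };

    static int rx_buf_map(struct device *dev, struct rx_buf *buf,
    		      struct page *page, size_t len)
    {
    	buf->dma = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, buf->dma))
    		return -ENOMEM;

    	/* false for coherent devices that never bounce via SWIOTLB */
    	buf->needs_sync = dma_need_sync(dev, buf->dma);
    	return 0;
    }

    static void rx_buf_refill(struct device *dev, struct rx_buf *buf,
    			  size_t len)
    {
    	if (buf->needs_sync)
    		dma_sync_single_for_device(dev, buf->dma, len,
    					   DMA_FROM_DEVICE);
    }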
include/linux/device.h

@@ -691,6 +691,7 @@ struct device_physical_location {
  *		and optionally (if the coherent mask is large enough) also
  *		for dma allocations.  This flag is managed by the dma ops
  *		instance from ->dma_supported.
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -803,6 +804,9 @@ struct device {
 #ifdef CONFIG_DMA_OPS_BYPASS
 	bool			dma_ops_bypass : 1;
 #endif
+#ifdef CONFIG_DMA_NEED_SYNC
+	bool			dma_skip_sync:1;
+#endif
 };
 
 /**
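The new bit is populated by the DMA core when the device's mask is configured, and the "dma: fix DMA sync for drivers not calling dma_set_mask*()" follow-up ensures it also happens for devices that never call dma_set_mask(). Roughly paraphrased (a sketch of the mapping.c logic, not a verbatim quote; the real direct-mapping check is more involved than the !ops test shown here):

    /* Sketch: decide at dma_set_mask() time whether syncs can be
     * skipped. The bit is revoked again if the device ever receives
     * a SWIOTLB bounce buffer (see dma_reset_need_sync() below).
     */
    static void dma_setup_need_sync(struct device *dev)
    {
    	const struct dma_map_ops *ops = get_dma_ops(dev);

    	if (!ops || (ops->flags & DMA_F_CAN_SKIP_SYNC))
    		/* Only DMA coherence matters at setup time */
    		dev->dma_skip_sync = dev_is_dma_coherent(dev);
    	else
    		dev->dma_skip_sync = false;
    }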
include/linux/dma-map-ops.h

@@ -18,8 +18,11 @@ struct iommu_ops;
  *
  * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
  * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
+ * coherent and it's not an SWIOTLB buffer.
  */
 #define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)
+#define DMA_F_CAN_SKIP_SYNC		(1 << 1)
 
 struct dma_map_ops {
 	unsigned int flags;
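A dma_map_ops implementation opts in by setting the new bit in its flags field; in this series the iommu/dma ops do so. A sketch of such a declaration (callbacks elided, names hypothetical):

    static const struct dma_map_ops example_dma_ops = {
    	.flags	= DMA_F_CAN_SKIP_SYNC,	/* syncs may be skipped */
    	/* ... .alloc, .map_page, .sync_single_for_cpu, etc. ... */
    };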
@@ -273,6 +276,15 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
 
+static inline void dma_reset_need_sync(struct device *dev)
+{
+#ifdef CONFIG_DMA_NEED_SYNC
+	/* Reset it only once so that the function can be called on hotpath */
+	if (unlikely(dev->dma_skip_sync))
+		dev->dma_skip_sync = false;
+#endif
+}
+
 /*
  * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
  */
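dma_reset_need_sync() is intended for the SWIOTLB mapping path: the first time a device gets a bounce buffer, the shortcut is revoked, since the bounce copies happen inside the sync calls. A hedged sketch of the call site (function name hypothetical; error handling abbreviated):

    dma_addr_t example_swiotlb_map(struct device *dev, phys_addr_t paddr,
    			       size_t size, enum dma_data_direction dir,
    			       unsigned long attrs)
    {
    	phys_addr_t tlb_addr;

    	/* Revoke the sync shortcut before handing out a bounce buffer */
    	dma_reset_need_sync(dev);

    	tlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs);
    	if (tlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
    		return DMA_MAPPING_ERROR;
    	return phys_to_dma(dev, tlb_addr);
    }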
include/linux/dma-mapping.h

@@ -117,14 +117,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
-		enum dma_data_direction dir);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -147,7 +139,6 @@ u64 dma_get_required_mask(struct device *dev);
 bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
@@ -195,22 +186,6 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return -ENOMEM;
@@ -277,10 +252,6 @@ static inline size_t dma_opt_mapping_size(struct device *dev)
 {
 	return 0;
 }
-static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	return 0;
@@ -310,6 +281,82 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
 }
 #endif /* CONFIG_HAS_DMA */
 
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir);
+void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir);
+void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+	/* Always call DMA sync operations when debugging is enabled */
+	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
+}
+#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+	return false;
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return false;
+}
+#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+
 struct page *dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
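The payoff is that unmodified driver code gets the shortcut for free: each dma_sync_* call above compiles down to a single branch on dev->dma_skip_sync instead of an indirect call into the DMA ops. For example (hypothetical rx-completion fragment; the driver source is unchanged by this merge):

    static void example_rx_poll(struct device *dev, dma_addr_t dma,
    			    void *data, unsigned int len)
    {
    	/* A plain branch when the device is coherent and never bounced */
    	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
    	/* ... parse the received frame via 'data' ... */
    	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
    }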
include/linux/iova.h

@@ -65,6 +65,11 @@ static inline size_t iova_align(struct iova_domain *iovad, size_t size)
 	return ALIGN(size, iovad->granule);
 }
 
+static inline size_t iova_align_down(struct iova_domain *iovad, size_t size)
+{
+	return ALIGN_DOWN(size, iovad->granule);
+}
+
 static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
 {
 	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
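iova_align_down() rounds down to the IOVA granule, mirroring the existing iova_align(); the untrusted-device fix uses it to locate the granule-aligned region around a bounce buffer whose padding must be zeroed. With a 4 KiB granule, for instance:

    /* iovad->granule == 4096:
     *   iova_align(iovad, 5000)      == 8192   (round up)
     *   iova_align_down(iovad, 5000) == 4096   (round down)
     */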
include/linux/swiotlb.h

@@ -43,7 +43,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
-		size_t mapping_size, size_t alloc_size,
+		size_t mapping_size,
 		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
 		unsigned long attrs);
 
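Callers no longer compute the granule-padded allocation size themselves; SWIOTLB derives it from mapping_size and the alignment mask. A hedged before/after sketch modeled on the iommu/dma caller (surrounding code elided):

    /* Before: caller passed a precomputed alloc_size */
    phys = swiotlb_tbl_map_single(dev, phys, size,
    			      iova_align(iovad, size),
    			      iova_mask(iovad), dir, attrs);

    /* After: only the alignment mask; the slot size is derived inside */
    phys = swiotlb_tbl_map_single(dev, phys, size,
    			      iova_mask(iovad), dir, attrs);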