Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 12:21:22 -05:00)
Merge tag 'dma-mapping-6.18-2025-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux
Pull dma-mapping updates from Marek Szyprowski:

 - Refactoring of the DMA mapping API to use physical addresses as the
   primary interface instead of page+offset parameters

   This gets much closer to Matthew Wilcox's long-term wish for
   struct-pageless IO to cacheable DRAM and supports the memdesc
   project, which seeks to substantially transform how struct page
   works.

   An advantage of this approach is the possibility of introducing
   DMA_ATTR_MMIO, which covers the existing 'dma_map_resource' flow in
   the common paths, which in turn allows the recently introduced
   dma_iova_link() API to map PCI P2P MMIO without creating struct page.

   Developed by Leon Romanovsky and Jason Gunthorpe

 - Minor clean-ups by Petr Tesarik and Qianfeng Rong

* tag 'dma-mapping-6.18-2025-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  kmsan: fix missed kmsan_handle_dma() signature conversion
  mm/hmm: properly take MMIO path
  mm/hmm: migrate to physical address-based DMA mapping API
  dma-mapping: export new dma_*map_phys() interface
  xen: swiotlb: Open code map_resource callback
  dma-mapping: implement DMA_ATTR_MMIO for dma_(un)map_page_attrs()
  kmsan: convert kmsan_handle_dma to use physical addresses
  dma-mapping: convert dma_direct_*map_page to be phys_addr_t based
  iommu/dma: implement DMA_ATTR_MMIO for iommu_dma_(un)map_phys()
  iommu/dma: rename iommu_dma_*map_page to iommu_dma_*map_phys
  dma-mapping: rename trace_dma_*map_page to trace_dma_*map_phys
  dma-debug: refactor to use physical addresses for page mapping
  iommu/dma: implement DMA_ATTR_MMIO for dma_iova_link().
  dma-mapping: introduce new DMA attribute to indicate MMIO memory
  swiotlb: Remove redundant __GFP_NOWARN
  dma-direct: clean up the logic in __dma_direct_alloc_pages()
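The headline change of this pull is visible in the new driver-facing pair dma_map_phys()/dma_unmap_phys(), which take a phys_addr_t directly instead of a struct page plus offset. A minimal sketch contrasting the two styles, assuming a hypothetical driver with a kmalloc()'ed buffer 'buf' and a 'struct device *dev' (the sketch is not taken from the patches themselves):

    /*
     * Sketch only: contrasts the page+offset interface with the new
     * phys_addr_t interface exported by this series. Error handling is
     * trimmed to the essentials; 'buf', 'len' and 'dev' are assumptions.
     */
    #include <linux/dma-mapping.h>

    static int example_map(struct device *dev, void *buf, size_t len)
    {
            /* Old style: struct page + offset within the page. */
            dma_addr_t a = dma_map_page_attrs(dev, virt_to_page(buf),
                                              offset_in_page(buf), len,
                                              DMA_TO_DEVICE, 0);

            if (dma_mapping_error(dev, a))
                    return -ENOMEM;
            dma_unmap_page_attrs(dev, a, len, DMA_TO_DEVICE, 0);

            /* New style: the physical address is the primary handle. */
            a = dma_map_phys(dev, virt_to_phys(buf), len, DMA_TO_DEVICE, 0);
            if (dma_mapping_error(dev, a))
                    return -ENOMEM;
            dma_unmap_phys(dev, a, len, DMA_TO_DEVICE, 0);
            return 0;
    }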
@@ -761,7 +761,7 @@ example warning message may look like this::
 	[<ffffffff80235177>] find_busiest_group+0x207/0x8a0
 	[<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50
 	[<ffffffff803c7ea3>] check_unmap+0x203/0x490
-	[<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50
+	[<ffffffff803c8259>] debug_dma_unmap_phys+0x49/0x50
 	[<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0
 	[<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0
 	[<ffffffff8026df84>] handle_IRQ_event+0x34/0x70
@@ -855,7 +855,7 @@ that a driver may be leaking mappings.
 dma-debug interface debug_dma_mapping_error() to debug drivers that fail
 to check DMA mapping errors on addresses returned by dma_map_single() and
 dma_map_page() interfaces. This interface clears a flag set by
-debug_dma_map_page() to indicate that dma_mapping_error() has been called by
+debug_dma_map_phys() to indicate that dma_mapping_error() has been called by
 the driver. When driver does unmap, debug_dma_unmap() checks the flag and if
 this flag is still set, prints warning message that includes call trace that
 leads up to the unmap. This interface can be called from dma_mapping_error()
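The paragraph above documents the driver-side contract that dma-debug enforces; a short sketch of the expected pattern (a hypothetical driver fragment, not part of the patch):

    /* Every handle returned by dma_map_single()/dma_map_page() must be
     * checked with dma_mapping_error() before it is used or unmapped.
     */
    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, handle))
            return -ENOMEM; /* do not use or unmap 'handle' */

    /* ... hand 'handle' to the hardware ... */
    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);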
@@ -130,3 +130,21 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
 subsystem that the buffer is fully accessible at the elevated privilege
 level (and ideally inaccessible or at least read-only at the
 lesser-privileged levels).
+
+DMA_ATTR_MMIO
+-------------
+
+This attribute indicates the physical address is not normal system
+memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
+functions, it may not be cacheable, and access using CPU load/store
+instructions may not be allowed.
+
+Usually this will be used to describe MMIO addresses, or other non-cacheable
+register addresses. When DMA mapping this sort of address we call
+the operation Peer to Peer as a one device is DMA'ing to another device.
+For PCI devices the p2pdma APIs must be used to determine if
+DMA_ATTR_MMIO is appropriate.
+
+For architectures that require cache flushing for DMA coherence
+DMA_ATTR_MMIO will not perform any cache flushing. The address
+provided must never be mapped cacheable into the CPU.
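The attribute plugs into the same dma_map_phys() call used for regular memory; only the address source and the flag differ. A hedged sketch, where 'bar_phys' stands in for a peer device's MMIO physical address and the p2pdma APIs are assumed to have already confirmed this path is usable:

    /* Sketch: DMA-mapping an MMIO physical address. No struct page, no
     * kmap(), and no cache maintenance is involved when DMA_ATTR_MMIO
     * is set; 'bar_phys' and 'len' are assumptions for illustration.
     */
    dma_addr_t dma = dma_map_phys(dev, bar_phys, len, DMA_FROM_DEVICE,
                                  DMA_ATTR_MMIO);

    if (dma_mapping_error(dev, dma))
            return -EIO;
    /* ... program the DMA engine with 'dma' ... */
    dma_unmap_phys(dev, dma, len, DMA_FROM_DEVICE, DMA_ATTR_MMIO);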
@@ -14,7 +14,7 @@
 #define can_map_direct(dev, addr) \
 	((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))
 
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr)
 {
 	if (likely(!dev->bus_dma_limit))
 		return false;
@@ -24,7 +24,7 @@ bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
 
 #define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)
 
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle)
 {
 	if (likely(!dev->bus_dma_limit))
 		return false;
@@ -724,7 +724,12 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
 {
-	int prot = coherent ? IOMMU_CACHE : 0;
+	int prot;
+
+	if (attrs & DMA_ATTR_MMIO)
+		prot = IOMMU_MMIO;
+	else
+		prot = coherent ? IOMMU_CACHE : 0;
 
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
@@ -1190,11 +1195,9 @@ static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
 	return iova_offset(iovad, phys | size);
 }
 
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
 	int prot = dma_info_to_prot(dir, coherent, attrs);
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1208,27 +1211,34 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	 */
 	if (dev_use_swiotlb(dev, size, dir) &&
 	    iova_unaligned(iovad, phys, size)) {
+		if (attrs & DMA_ATTR_MMIO)
+			return DMA_MAPPING_ERROR;
+
 		phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
 		if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
 			return DMA_MAPPING_ERROR;
 	}
 
-	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+	if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
 		arch_sync_dma_for_device(phys, size, dir);
 
 	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
-	if (iova == DMA_MAPPING_ERROR)
+	if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))
 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 	return iova;
 }
 
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	phys_addr_t phys;
 
-	phys = iommu_iova_to_phys(domain, dma_handle);
+	if (attrs & DMA_ATTR_MMIO) {
+		__iommu_dma_unmap(dev, dma_handle, size);
+		return;
+	}
+
+	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
 	if (WARN_ON(!phys))
 		return;
 
@@ -1341,7 +1351,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		iommu_dma_unmap_page(dev, sg_dma_address(s),
+		iommu_dma_unmap_phys(dev, sg_dma_address(s),
 				sg_dma_len(s), dir, attrs);
 }
 
@@ -1354,8 +1364,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 	sg_dma_mark_swiotlb(sg);
 
 	for_each_sg(sg, s, nents, i) {
-		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
-				s->offset, s->length, dir, attrs);
+		sg_dma_address(s) = iommu_dma_map_phys(dev, sg_phys(s),
+				s->length, dir, attrs);
 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
 			goto out_unmap;
 		sg_dma_len(s) = s->length;
@@ -1546,20 +1556,6 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	__iommu_dma_unmap(dev, start, end - start);
 }
 
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
-			dma_get_mask(dev));
-}
-
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	__iommu_dma_unmap(dev, handle, size);
-}
-
 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 {
 	size_t alloc_size = PAGE_ALIGN(size);
@@ -1838,12 +1834,13 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
 		unsigned long attrs)
 {
 	bool coherent = dev_is_dma_coherent(dev);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
 
-	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+	if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
 		arch_sync_dma_for_device(phys, size, dir);
 
 	return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
-			dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+			prot, GFP_ATOMIC);
 }
 
 static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
@@ -1949,9 +1946,13 @@ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
 		return -EIO;
 
 	if (dev_use_swiotlb(dev, size, dir) &&
-	    iova_unaligned(iovad, phys, size))
+	    iova_unaligned(iovad, phys, size)) {
+		if (attrs & DMA_ATTR_MMIO)
+			return -EPERM;
+
 		return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
 				size, dir, attrs);
+	}
 
 	return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
 			phys - iova_start_pad,
@@ -378,7 +378,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
 	 * is initialized by the hardware. Explicitly check/unpoison it
 	 * depending on the direction.
 	 */
-	kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
+	kmsan_handle_dma(sg_phys(sg), sg->length, direction);
 	*addr = (dma_addr_t)sg_phys(sg);
 	return 0;
 }
@@ -3157,7 +3157,7 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	if (!vq->use_dma_api) {
-		kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+		kmsan_handle_dma(virt_to_phys(ptr), size, dir);
 		return (dma_addr_t)virt_to_phys(ptr);
 	}
 
@@ -392,6 +392,25 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 	}
 }
 
+static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
+						  phys_addr_t paddr,
+						  size_t size,
+						  enum dma_data_direction dir,
+						  unsigned long attrs)
+{
+	dma_addr_t dma_addr = paddr;
+
+	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
+		dev_err_once(dev,
+			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+		WARN_ON_ONCE(1);
+		return DMA_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
@@ -426,5 +445,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc_pages_op = dma_common_alloc_pages,
 	.free_pages = dma_common_free_pages,
 	.max_mapping_size = swiotlb_max_mapping_size,
-	.map_resource = dma_direct_map_resource,
+	.map_resource = xen_swiotlb_direct_map_resource,
 };
@@ -149,7 +149,5 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir);
 int dma_direct_supported(struct device *dev, u64 mask);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
 
 #endif /* _LINUX_DMA_DIRECT_H */
@@ -395,15 +395,15 @@ void *arch_dma_set_uncached(void *addr, size_t size);
 void arch_dma_clear_uncached(void *addr, size_t size);
 
 #ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
 bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
 		int nents);
 bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 		int nents);
 #else
-#define arch_dma_map_page_direct(d, a)		(false)
-#define arch_dma_unmap_page_direct(d, a)	(false)
+#define arch_dma_map_phys_direct(d, a)		(false)
+#define arch_dma_unmap_phys_direct(d, a)	(false)
 #define arch_dma_map_sg_direct(d, s, n)		(false)
 #define arch_dma_unmap_sg_direct(d, s, n)	(false)
 #endif
@@ -58,6 +58,26 @@
  */
 #define DMA_ATTR_PRIVILEGED		(1UL << 9)
 
+/*
+ * DMA_ATTR_MMIO - Indicates memory-mapped I/O (MMIO) region for DMA mapping
+ *
+ * This attribute indicates the physical address is not normal system
+ * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
+ * functions, it may not be cacheable, and access using CPU load/store
+ * instructions may not be allowed.
+ *
+ * Usually this will be used to describe MMIO addresses, or other non-cacheable
+ * register addresses. When DMA mapping this sort of address we call
+ * the operation Peer to Peer as a one device is DMA'ing to another device.
+ * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
+ * is appropriate.
+ *
+ * For architectures that require cache flushing for DMA coherence
+ * DMA_ATTR_MMIO will not perform any cache flushing. The address
+ * provided must never be mapped cacheable into the CPU.
+ */
+#define DMA_ATTR_MMIO		(1UL << 10)
+
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
  * be given to a device to use as a DMA source or target. It is specific to a
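The mm/hmm.c hunk further down shows how callers are expected to pick this attribute for PCI P2P pages; a condensed sketch of that decision, with 'map_type' assumed to come from the PCI p2pdma state lookup (the helper name is hypothetical):

    /* Condensed from the mm/hmm.c conversion below: translate the p2pdma
     * mapping type into DMA attributes before calling dma_map_phys().
     */
    static unsigned long p2p_map_type_to_attrs(enum pci_p2pdma_map_type map_type)
    {
            unsigned long attrs = 0;

            switch (map_type) {
            case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
                    attrs |= DMA_ATTR_MMIO; /* peer MMIO routed via the host bridge */
                    break;
            case PCI_P2PDMA_MAP_BUS_ADDR:
                    /* direct bus addressing: no DMA API mapping is created */
                    break;
            default:
                    break;                  /* normal system memory */
            }
            return attrs;
    }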
@@ -118,6 +138,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		unsigned long attrs);
 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -172,6 +196,15 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
+static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
 static inline unsigned int dma_map_sg_attrs(struct device *dev,
 		struct scatterlist *sg, int nents, enum dma_data_direction dir,
 		unsigned long attrs)
@@ -21,10 +21,9 @@ static inline bool use_dma_iommu(struct device *dev)
 }
 #endif /* CONFIG_IOMMU_DMA */
 
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
@@ -43,10 +42,6 @@ size_t iommu_dma_opt_mapping_size(void);
 size_t iommu_dma_max_mapping_size(struct device *dev);
 void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t handle, unsigned long attrs);
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
@@ -182,8 +182,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
 
 /**
  * kmsan_handle_dma() - Handle a DMA data transfer.
- * @page: first page of the buffer.
- * @offset: offset of the buffer within the first page.
+ * @phys: physical address of the buffer.
  * @size: buffer size.
  * @dir: one of possible dma_data_direction values.
  *
@@ -192,7 +191,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
  * * initializes the buffer, if it is copied from device;
  * * does both, if this is a DMA_BIDIRECTIONAL transfer.
  */
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
 		      enum dma_data_direction dir);
 
 /**
@@ -372,8 +371,8 @@ static inline void kmsan_iounmap_page_range(unsigned long start,
 {
 }
 
-static inline void kmsan_handle_dma(struct page *page, size_t offset,
-				    size_t size, enum dma_data_direction dir)
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+				    enum dma_data_direction dir)
 {
 }
 
@@ -618,6 +618,7 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
 #else
 PAGEFLAG_FALSE(HighMem, highmem)
 #endif
+#define PhysHighMem(__p)	(PageHighMem(phys_to_page(__p)))
 
 /* Does kmap_local_folio() only allow access to one page of the folio? */
 #ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
@@ -31,7 +31,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
 		{ DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
 		{ DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
 		{ DMA_ATTR_NO_WARN, "NO_WARN" }, \
-		{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" })
+		{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
+		{ DMA_ATTR_MMIO, "MMIO" })
 
 DECLARE_EVENT_CLASS(dma_map,
 	TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
@@ -71,8 +72,7 @@ DEFINE_EVENT(dma_map, name, \
 		 size_t size, enum dma_data_direction dir, unsigned long attrs), \
 	TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs))
 
-DEFINE_MAP_EVENT(dma_map_page);
-DEFINE_MAP_EVENT(dma_map_resource);
+DEFINE_MAP_EVENT(dma_map_phys);
 
 DECLARE_EVENT_CLASS(dma_unmap,
 	TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
@@ -109,8 +109,7 @@ DEFINE_EVENT(dma_unmap, name, \
 		 enum dma_data_direction dir, unsigned long attrs), \
 	TP_ARGS(dev, addr, size, dir, attrs))
 
-DEFINE_UNMAP_EVENT(dma_unmap_page);
-DEFINE_UNMAP_EVENT(dma_unmap_resource);
+DEFINE_UNMAP_EVENT(dma_unmap_phys);
 
 DECLARE_EVENT_CLASS(dma_alloc_class,
 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
@@ -38,8 +38,8 @@ enum {
 	dma_debug_single,
 	dma_debug_sg,
 	dma_debug_coherent,
-	dma_debug_resource,
 	dma_debug_noncoherent,
+	dma_debug_phy,
 };
 
 enum map_err_types {
@@ -141,8 +141,8 @@ static const char *type2name[] = {
 	[dma_debug_single] = "single",
 	[dma_debug_sg] = "scatter-gather",
 	[dma_debug_coherent] = "coherent",
-	[dma_debug_resource] = "resource",
 	[dma_debug_noncoherent] = "noncoherent",
+	[dma_debug_phy] = "phy",
 };
 
 static const char *dir2name[] = {
@@ -1054,17 +1054,16 @@ static void check_unmap(struct dma_debug_entry *ref)
 	dma_entry_free(entry);
 }
 
-static void check_for_stack(struct device *dev,
-			    struct page *page, size_t offset)
+static void check_for_stack(struct device *dev, phys_addr_t phys)
 {
 	void *addr;
 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
 
 	if (!stack_vm_area) {
 		/* Stack is direct-mapped. */
-		if (PageHighMem(page))
+		if (PhysHighMem(phys))
 			return;
-		addr = page_address(page) + offset;
+		addr = phys_to_virt(phys);
 		if (object_is_on_stack(addr))
 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
 	} else {
@@ -1072,10 +1071,12 @@ static void check_for_stack(struct device *dev,
 		int i;
 
 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
-			if (page != stack_vm_area->pages[i])
+			if (__phys_to_pfn(phys) !=
+			    page_to_pfn(stack_vm_area->pages[i]))
 				continue;
 
-			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
+			addr = (u8 *)current->stack + i * PAGE_SIZE +
+			       (phys % PAGE_SIZE);
 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
 			break;
 		}
@@ -1204,9 +1205,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 }
 EXPORT_SYMBOL(debug_dma_map_single);
 
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-			size_t size, int direction, dma_addr_t dma_addr,
-			unsigned long attrs)
+void debug_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+			int direction, dma_addr_t dma_addr, unsigned long attrs)
 {
 	struct dma_debug_entry *entry;
 
@@ -1221,19 +1221,18 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 		return;
 
 	entry->dev       = dev;
-	entry->type      = dma_debug_single;
-	entry->paddr     = page_to_phys(page) + offset;
+	entry->type      = dma_debug_phy;
+	entry->paddr     = phys;
 	entry->dev_addr  = dma_addr;
 	entry->size      = size;
 	entry->direction = direction;
 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
-	check_for_stack(dev, page, offset);
+	if (!(attrs & DMA_ATTR_MMIO)) {
+		check_for_stack(dev, phys);
 
-	if (!PageHighMem(page)) {
-		void *addr = page_address(page) + offset;
-
-		check_for_illegal_area(dev, addr, size);
+		if (!PhysHighMem(phys))
+			check_for_illegal_area(dev, phys_to_virt(phys), size);
 	}
 
 	add_dma_entry(entry, attrs);
@@ -1277,11 +1276,11 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
 
-void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+void debug_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr,
 			  size_t size, int direction)
 {
 	struct dma_debug_entry ref = {
-		.type           = dma_debug_single,
+		.type           = dma_debug_phy,
 		.dev            = dev,
 		.dev_addr       = dma_addr,
 		.size           = size,
@@ -1305,7 +1304,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		return;
 
 	for_each_sg(sg, s, nents, i) {
-		check_for_stack(dev, sg_page(s), s->offset);
+		check_for_stack(dev, sg_phys(s));
 		if (!PageHighMem(sg_page(s)))
 			check_for_illegal_area(dev, sg_virt(s), s->length);
 	}
@@ -1445,47 +1444,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 	check_unmap(&ref);
 }
 
-void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-			    int direction, dma_addr_t dma_addr,
-			    unsigned long attrs)
-{
-	struct dma_debug_entry *entry;
-
-	if (unlikely(dma_debug_disabled()))
-		return;
-
-	entry = dma_entry_alloc();
-	if (!entry)
-		return;
-
-	entry->type		= dma_debug_resource;
-	entry->dev		= dev;
-	entry->paddr		= addr;
-	entry->size		= size;
-	entry->dev_addr		= dma_addr;
-	entry->direction	= direction;
-	entry->map_err_type	= MAP_ERR_NOT_CHECKED;
-
-	add_dma_entry(entry, attrs);
-}
-
-void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
-			      size_t size, int direction)
-{
-	struct dma_debug_entry ref = {
-		.type           = dma_debug_resource,
-		.dev            = dev,
-		.dev_addr       = dma_addr,
-		.size           = size,
-		.direction      = direction,
-	};
-
-	if (unlikely(dma_debug_disabled()))
-		return;
-
-	check_unmap(&ref);
-}
-
 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				   size_t size, int direction)
 {
@@ -9,12 +9,11 @@
 #define _KERNEL_DMA_DEBUG_H
 
 #ifdef CONFIG_DMA_API_DEBUG
-extern void debug_dma_map_page(struct device *dev, struct page *page,
-			       size_t offset, size_t size,
-			       int direction, dma_addr_t dma_addr,
+extern void debug_dma_map_phys(struct device *dev, phys_addr_t phys,
+			       size_t size, int direction, dma_addr_t dma_addr,
 			       unsigned long attrs);
 
-extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+extern void debug_dma_unmap_phys(struct device *dev, dma_addr_t addr,
 				 size_t size, int direction);
 
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
@@ -31,14 +30,6 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
 extern void debug_dma_free_coherent(struct device *dev, size_t size,
 				    void *virt, dma_addr_t addr);
 
-extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
-				   size_t size, int direction,
-				   dma_addr_t dma_addr,
-				   unsigned long attrs);
-
-extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
-				     size_t size, int direction);
-
 extern void debug_dma_sync_single_for_cpu(struct device *dev,
 					  dma_addr_t dma_handle, size_t size,
 					  int direction);
@@ -62,14 +53,13 @@ extern void debug_dma_free_pages(struct device *dev, struct page *page,
 				 size_t size, int direction,
 				 dma_addr_t dma_addr);
 #else /* CONFIG_DMA_API_DEBUG */
-static inline void debug_dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      int direction, dma_addr_t dma_addr,
-				      unsigned long attrs)
+static inline void debug_dma_map_phys(struct device *dev, phys_addr_t phys,
+				      size_t size, int direction,
+				      dma_addr_t dma_addr, unsigned long attrs)
 {
 }
 
-static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+static inline void debug_dma_unmap_phys(struct device *dev, dma_addr_t addr,
 					size_t size, int direction)
 {
 }
@@ -97,19 +87,6 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
 {
 }
 
-static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
-					  size_t size, int direction,
-					  dma_addr_t dma_addr,
-					  unsigned long attrs)
-{
-}
-
-static inline void debug_dma_unmap_resource(struct device *dev,
-					    dma_addr_t dma_addr, size_t size,
-					    int direction)
-{
-}
-
 static inline void debug_dma_sync_single_for_cpu(struct device *dev,
 						 dma_addr_t dma_handle,
 						 size_t size, int direction)
@@ -120,7 +120,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, bool allow_highmem)
 {
 	int node = dev_to_node(dev);
-	struct page *page = NULL;
+	struct page *page;
 	u64 phys_limit;
 
 	WARN_ON_ONCE(!PAGE_ALIGNED(size));
@@ -131,30 +131,25 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
 	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page) {
-		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
-		    (!allow_highmem && PageHighMem(page))) {
-			dma_free_contiguous(dev, page, size);
-			page = NULL;
-		}
+		if (dma_coherent_ok(dev, page_to_phys(page), size) &&
+		    (allow_highmem || !PageHighMem(page)))
+			return page;
+
+		dma_free_contiguous(dev, page, size);
 	}
-again:
-	if (!page)
-		page = alloc_pages_node(node, gfp, get_order(size));
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+
+	while ((page = alloc_pages_node(node, gfp, get_order(size)))
+	       && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		__free_pages(page, get_order(size));
-		page = NULL;
 
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 		    phys_limit < DMA_BIT_MASK(64) &&
-		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
+		    !(gfp & (GFP_DMA32 | GFP_DMA)))
 			gfp |= GFP_DMA32;
-			goto again;
-		}
-
-		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
+		else if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA))
 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-			goto again;
-		}
+		else
+			return NULL;
 	}
 
 	return page;
@@ -453,7 +448,7 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		if (sg_dma_is_bus_address(sg))
 			sg_dma_unmark_bus_address(sg);
 		else
-			dma_direct_unmap_page(dev, sg->dma_address,
+			dma_direct_unmap_phys(dev, sg->dma_address,
 					      sg_dma_len(sg), dir, attrs);
 	}
 }
@@ -476,8 +471,8 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 			 */
 			break;
 		case PCI_P2PDMA_MAP_NONE:
-			sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
-					sg->offset, sg->length, dir, attrs);
+			sg->dma_address = dma_direct_map_phys(dev, sg_phys(sg),
+					sg->length, dir, attrs);
 			if (sg->dma_address == DMA_MAPPING_ERROR) {
 				ret = -EIO;
 				goto out_unmap;
@@ -502,22 +497,6 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	return ret;
 }
 
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_addr_t dma_addr = paddr;
-
-	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-		dev_err_once(dev,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		WARN_ON_ONCE(1);
-		return DMA_MAPPING_ERROR;
-	}
-
-	return dma_addr;
-}
-
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
@@ -80,42 +80,57 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 		arch_dma_mark_clean(paddr, size);
 }
 
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
-		struct page *page, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
+static inline dma_addr_t dma_direct_map_phys(struct device *dev,
+		phys_addr_t phys, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
-	dma_addr_t dma_addr = phys_to_dma(dev, phys);
+	dma_addr_t dma_addr;
 
 	if (is_swiotlb_force_bounce(dev)) {
-		if (is_pci_p2pdma_page(page))
-			return DMA_MAPPING_ERROR;
+		if (attrs & DMA_ATTR_MMIO)
+			goto err_overflow;
 
 		return swiotlb_map(dev, phys, size, dir, attrs);
 	}
 
-	if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
-	    dma_kmalloc_needs_bounce(dev, size, dir)) {
-		if (is_pci_p2pdma_page(page))
-			return DMA_MAPPING_ERROR;
-		if (is_swiotlb_active(dev))
-			return swiotlb_map(dev, phys, size, dir, attrs);
+	if (attrs & DMA_ATTR_MMIO) {
+		dma_addr = phys;
+		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+			goto err_overflow;
+	} else {
+		dma_addr = phys_to_dma(dev, phys);
+		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
+		    dma_kmalloc_needs_bounce(dev, size, dir)) {
+			if (is_swiotlb_active(dev))
+				return swiotlb_map(dev, phys, size, dir, attrs);
 
-		dev_WARN_ONCE(dev, 1,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		return DMA_MAPPING_ERROR;
+			goto err_overflow;
 		}
+	}
 
-	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+	if (!dev_is_dma_coherent(dev) &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
 		arch_sync_dma_for_device(phys, size, dir);
 	return dma_addr;
+
+err_overflow:
+	dev_WARN_ONCE(
+		dev, 1,
+		"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+		&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+	return DMA_MAPPING_ERROR;
 }
 
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	phys_addr_t phys = dma_to_phys(dev, addr);
+	phys_addr_t phys;
 
+	if (attrs & DMA_ATTR_MMIO)
+		/* nothing to do: uncached and no swiotlb */
+		return;
+
+	phys = dma_to_phys(dev, addr);
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
@@ -152,11 +152,11 @@ static inline bool dma_map_direct(struct device *dev,
 	return dma_go_direct(dev, *dev->dma_mask, ops);
 }
 
-dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
-		size_t offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
+dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	bool is_mmio = attrs & DMA_ATTR_MMIO;
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -165,36 +165,81 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	if (dma_map_direct(dev, ops) ||
-	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
-		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	    (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
+		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
 	else if (use_dma_iommu(dev))
-		addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
-	else
+		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
+	else if (is_mmio) {
+		if (!ops->map_resource)
+			return DMA_MAPPING_ERROR;
+
+		addr = ops->map_resource(dev, phys, size, dir, attrs);
+	} else {
+		struct page *page = phys_to_page(phys);
+		size_t offset = offset_in_page(phys);
+
+		/*
+		 * The dma_ops API contract for ops->map_page() requires
+		 * kmappable memory, while ops->map_resource() does not.
+		 */
 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	kmsan_handle_dma(page, offset, size, dir);
-	trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
-			   attrs);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
+	}
+
+	if (!is_mmio)
+		kmsan_handle_dma(phys, size, dir);
+	trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
+	debug_dma_map_phys(dev, phys, size, dir, addr, attrs);
 
 	return addr;
 }
+EXPORT_SYMBOL_GPL(dma_map_phys);
+
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+		size_t offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	phys_addr_t phys = page_to_phys(page) + offset;
+
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return DMA_MAPPING_ERROR;
+
+	if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
+	    WARN_ON_ONCE(is_zone_device_page(page)))
+		return DMA_MAPPING_ERROR;
+
+	return dma_map_phys(dev, phys, size, dir, attrs);
+}
 EXPORT_SYMBOL(dma_map_page_attrs);
 
-void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	bool is_mmio = attrs & DMA_ATTR_MMIO;
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops) ||
-	    arch_dma_unmap_page_direct(dev, addr + size))
-		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	    (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
		dma_direct_unmap_phys(dev, addr, size, dir, attrs);
 	else if (use_dma_iommu(dev))
-		iommu_dma_unmap_page(dev, addr, size, dir, attrs);
-	else
+		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
+	else if (is_mmio) {
+		if (ops->unmap_resource)
+			ops->unmap_resource(dev, addr, size, dir, attrs);
+	} else
 		ops->unmap_page(dev, addr, size, dir, attrs);
-	trace_dma_unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir);
+	trace_dma_unmap_phys(dev, addr, size, dir, attrs);
+	debug_dma_unmap_phys(dev, addr, size, dir);
 }
+EXPORT_SYMBOL_GPL(dma_unmap_phys);
+
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return;
+
+	dma_unmap_phys(dev, addr, size, dir, attrs);
+}
 EXPORT_SYMBOL(dma_unmap_page_attrs);
 
@@ -321,41 +366,18 @@ EXPORT_SYMBOL(dma_unmap_sg_attrs);
 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr = DMA_MAPPING_ERROR;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	if (WARN_ON_ONCE(!dev->dma_mask))
+	if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
+	    WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
 		return DMA_MAPPING_ERROR;
 
-	if (dma_map_direct(dev, ops))
-		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
-	else if (use_dma_iommu(dev))
-		addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
-	else if (ops->map_resource)
-		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
-	trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
-	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
-	return addr;
+	return dma_map_phys(dev, phys_addr, size, dir, attrs | DMA_ATTR_MMIO);
 }
 EXPORT_SYMBOL(dma_map_resource);
 
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
-		; /* nothing to do: uncached and no swiotlb */
-	else if (use_dma_iommu(dev))
-		iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
-	else if (ops->unmap_resource)
-		ops->unmap_resource(dev, addr, size, dir, attrs);
-	trace_dma_unmap_resource(dev, addr, size, dir, attrs);
-	debug_dma_unmap_resource(dev, addr, size, dir);
+	dma_unmap_phys(dev, addr, size, dir, attrs | DMA_ATTR_MMIO);
 }
 EXPORT_SYMBOL(dma_unmap_resource);
 
@@ -72,8 +72,8 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	if (use_dma_iommu(dev))
-		*dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
-						 DMA_ATTR_SKIP_CPU_SYNC);
+		*dma_handle = iommu_dma_map_phys(dev, page_to_phys(page), size,
+						 dir, DMA_ATTR_SKIP_CPU_SYNC);
 	else
 		*dma_handle = ops->map_page(dev, page, 0, size, dir,
 					    DMA_ATTR_SKIP_CPU_SYNC);
@@ -92,7 +92,7 @@ void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (use_dma_iommu(dev))
-		iommu_dma_unmap_page(dev, dma_handle, size, dir,
+		iommu_dma_unmap_phys(dev, dma_handle, size, dir,
 				     DMA_ATTR_SKIP_CPU_SYNC);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, dma_handle, size, dir,
@@ -1209,7 +1209,7 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	nslabs = nr_slots(alloc_size);
 	phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
 	pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
-				  GFP_NOWAIT | __GFP_NOWARN);
+				  GFP_NOWAIT);
 	if (!pool)
 		return -1;
 
mm/hmm.c (19 lines changed)
@@ -806,7 +806,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
 	case PCI_P2PDMA_MAP_NONE:
 		break;
 	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
-		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+		attrs |= DMA_ATTR_MMIO;
 		pfns[idx] |= HMM_PFN_P2PDMA;
 		break;
 	case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -835,8 +835,8 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
 	if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs))
 		goto error;
 
-	dma_addr = dma_map_page(dev, page, 0, map->dma_entry_size,
-				DMA_BIDIRECTIONAL);
+	dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
+				DMA_BIDIRECTIONAL, attrs);
 	if (dma_mapping_error(dev, dma_addr))
 		goto error;
 
@@ -871,16 +871,17 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
 	if ((pfns[idx] & valid_dma) != valid_dma)
 		return false;
 
+	if (pfns[idx] & HMM_PFN_P2PDMA)
+		attrs |= DMA_ATTR_MMIO;
+
 	if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
 		; /* no need to unmap bus address P2P mappings */
-	else if (dma_use_iova(state)) {
-		if (pfns[idx] & HMM_PFN_P2PDMA)
-			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	else if (dma_use_iova(state))
 		dma_iova_unlink(dev, state, idx * map->dma_entry_size,
 				map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
-	} else if (dma_need_unmap(dev))
-		dma_unmap_page(dev, dma_addrs[idx], map->dma_entry_size,
-			       DMA_BIDIRECTIONAL);
+	else if (dma_need_unmap(dev))
+		dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
+			       DMA_BIDIRECTIONAL, attrs);
 
 	pfns[idx] &=
 		~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
 
@@ -336,14 +336,16 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
 }
 
 /* Helper function to handle DMA data transfers. */
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
 		      enum dma_data_direction dir)
 {
-	u64 page_offset, to_go, addr;
+	struct page *page = phys_to_page(phys);
+	u64 page_offset, to_go;
+	void *addr;
 
-	if (PageHighMem(page))
+	if (PhysHighMem(phys))
 		return;
-	addr = (u64)page_address(page) + offset;
+	addr = page_to_virt(page);
 	/*
 	 * The kernel may occasionally give us adjacent DMA pages not belonging
 	 * to the same allocation. Process them separately to avoid triggering
@@ -366,8 +368,7 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
 	int i;
 
 	for_each_sg(sg, item, nents, i)
-		kmsan_handle_dma(sg_page(item), item->offset, item->length,
-				 dir);
+		kmsan_handle_dma(sg_phys(item), item->length, dir);
 }
 
 /* Functions from kmsan-checks.h follow. */
@@ -252,6 +252,9 @@ pub mod attrs {
     /// Indicates that the buffer is fully accessible at an elevated privilege level (and
     /// ideally inaccessible or at least read-only at lesser-privileged levels).
     pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
+
+    /// Indicates that the buffer is MMIO memory.
+    pub const DMA_ATTR_MMIO: Attrs = Attrs(bindings::DMA_ATTR_MMIO);
 }
 
 /// DMA data direction.
@@ -4,7 +4,7 @@
 
 #include <linux/gfp.h>
 
-inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
 			     enum dma_data_direction dir)
 {
 }