Merge tag 'dma-mapping-6.17-2025-09-09' into HEAD

dma-mapping fix for Linux 6.17

- one more fix for DMA API debugging infrastructure (Baochen Qiang)
This commit is contained in:
Marek Szyprowski
2025-09-12 00:04:09 +02:00
7 changed files with 86 additions and 11 deletions

View File

@@ -25,6 +25,7 @@
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include <linux/dma-map-ops.h>
#include "of_private.h"
@@ -175,13 +176,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
early_init_dt_reserve_memory(base, size, nomap) == 0)
if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
/* Architecture specific contiguous memory fixup. */
if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
of_get_flat_dt_prop(node, "reusable", NULL))
dma_contiguous_early_fixup(base, size);
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
else
} else {
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
}
len -= t_len;
}
@@ -472,7 +477,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
/* Architecture specific contiguous memory fixup. */
if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
of_get_flat_dt_prop(node, "reusable", NULL))
dma_contiguous_early_fixup(base, size);
/* Save region in the reserved_mem array */
fdt_reserved_mem_save_node(node, uname, base, size);
return 0;

View File

@@ -153,6 +153,9 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
{
__free_pages(page, get_order(size));
}
/* No-op stub when CONFIG_DMA_CMA is disabled: there is no CMA region to fix up. */
static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}
#endif /* CONFIG_DMA_CMA*/
#ifdef CONFIG_DMA_DECLARE_COHERENT

View File

@@ -483,8 +483,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
}
/* Architecture specific contiguous memory fixup. */
dma_contiguous_early_fixup(rmem->base, rmem->size);
if (default_cma)
dma_contiguous_default_area = cma;

View File

@@ -39,6 +39,7 @@ enum {
dma_debug_sg,
dma_debug_coherent,
dma_debug_resource,
dma_debug_noncoherent,
};
enum map_err_types {
@@ -141,6 +142,7 @@ static const char *type2name[] = {
[dma_debug_sg] = "scatter-gather",
[dma_debug_coherent] = "coherent",
[dma_debug_resource] = "resource",
[dma_debug_noncoherent] = "noncoherent",
};
static const char *dir2name[] = {
@@ -993,7 +995,8 @@ static void check_unmap(struct dma_debug_entry *ref)
"[mapped as %s] [unmapped as %s]\n",
ref->dev_addr, ref->size,
type2name[entry->type], type2name[ref->type]);
} else if (entry->type == dma_debug_coherent &&
} else if ((entry->type == dma_debug_coherent ||
entry->type == dma_debug_noncoherent) &&
ref->paddr != entry->paddr) {
err_printk(ref->dev, entry, "device driver frees "
"DMA memory with different CPU address "
@@ -1581,6 +1584,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
}
/*
 * Record a dma_alloc_pages() allocation in the dma-debug tracking table so
 * the matching debug_dma_free_pages() call can be validated later.
 */
void debug_dma_alloc_pages(struct device *dev, struct page *page,
			   size_t size, int direction,
			   dma_addr_t dma_addr,
			   unsigned long attrs)
{
	struct dma_debug_entry *entry;

	/* Nothing to track when dma-debug is disabled. */
	if (unlikely(dma_debug_disabled()))
		return;

	/* Best effort: silently skip tracking if no entry is available. */
	entry = dma_entry_alloc();
	if (!entry)
		return;

	/* Typed "noncoherent" so the free path can match both type and paddr. */
	entry->type = dma_debug_noncoherent;
	entry->dev = dev;
	entry->paddr = page_to_phys(page);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;

	add_dma_entry(entry, attrs);
}
/*
 * Validate a dma_free_pages() call against the entry recorded by
 * debug_dma_alloc_pages(), reporting mismatched type/address/size.
 */
void debug_dma_free_pages(struct device *dev, struct page *page,
			  size_t size, int direction,
			  dma_addr_t dma_addr)
{
	/* Reference entry used to look up the stored allocation. */
	struct dma_debug_entry ref = {
		.type = dma_debug_noncoherent,
		.dev = dev,
		.paddr = page_to_phys(page),
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	/* check_unmap() compares ref against the stored entry (incl. paddr). */
	check_unmap(&ref);
}
static int __init dma_debug_driver_setup(char *str)
{
int i;

View File

@@ -54,6 +54,13 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
extern void debug_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
size_t size, int direction,
dma_addr_t dma_addr,
unsigned long attrs);
extern void debug_dma_free_pages(struct device *dev, struct page *page,
size_t size, int direction,
dma_addr_t dma_addr);
#else /* CONFIG_DMA_API_DEBUG */
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
@@ -126,5 +133,18 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
int nelems, int direction)
{
}
/* No-op stub when CONFIG_DMA_API_DEBUG is disabled. */
static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,
					 size_t size, int direction,
					 dma_addr_t dma_addr,
					 unsigned long attrs)
{
}
/* No-op stub when CONFIG_DMA_API_DEBUG is disabled. */
static inline void debug_dma_free_pages(struct device *dev, struct page *page,
					size_t size, int direction,
					dma_addr_t dma_addr)
{
}
#endif /* CONFIG_DMA_API_DEBUG */
#endif /* _KERNEL_DMA_DEBUG_H */

View File

@@ -712,7 +712,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
if (page) {
trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
size, dir, gfp, 0);
debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
} else {
trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
}
@@ -738,7 +738,7 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
{
trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
debug_dma_unmap_page(dev, dma_handle, size, dir);
debug_dma_free_pages(dev, page, size, dir, dma_handle);
__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

View File

@@ -102,8 +102,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
#ifdef CONFIG_DMA_DIRECT_REMAP
addr = dma_common_contiguous_remap(page, pool_size,
pgprot_dmacoherent(PAGE_KERNEL),
__builtin_return_address(0));
pgprot_decrypted(pgprot_dmacoherent(PAGE_KERNEL)),
__builtin_return_address(0));
if (!addr)
goto free_page;
#else