dma-mapping: add a new dma_alloc_pages API
This API is the equivalent of alloc_pages, except that the returned memory is guaranteed to be DMA addressable by the passed-in device. The implementation will also be used to provide a more sensible replacement for the DMA_ATTR_NON_CONSISTENT flag.

Additionally, dma_alloc_noncoherent is switched over to use dma_alloc_pages as its backend.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> (MIPS part)
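Usage sketch (not part of the commit): a hypothetical driver allocating a DMA-addressable ring buffer with the new API. The my_dev_* helpers and the 64K size are illustrative assumptions; only the dma_alloc_pages and dma_free_pages calls come from this patch.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/* Hypothetical driver code, for illustration only. */
static int my_dev_alloc_ring(struct device *dev, struct page **pagep,
			     dma_addr_t *dma)
{
	/*
	 * The returned pages are DMA addressable by @dev, but not
	 * guaranteed to be cache coherent: non-coherent devices must
	 * bracket access with the dma_sync_single_* helpers.
	 */
	*pagep = dma_alloc_pages(dev, SZ_64K, dma, DMA_BIDIRECTIONAL,
				 GFP_KERNEL);
	return *pagep ? 0 : -ENOMEM;
}

static void my_dev_free_ring(struct device *dev, struct page *page,
			     dma_addr_t dma)
{
	dma_free_pages(dev, SZ_64K, page, dma, DMA_BIDIRECTIONAL);
}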
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2018 Christoph Hellwig.
+ * Copyright (C) 2018-2020 Christoph Hellwig.
  *
  * DMA operations that map physical memory directly without using an IOMMU.
  */
@@ -292,6 +292,56 @@ void dma_direct_free(struct device *dev, size_t size,
 	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
+struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	struct page *page;
+	void *ret;
+
+	if (dma_should_alloc_from_pool(dev, gfp, 0)) {
+		page = dma_alloc_from_pool(dev, size, &ret, gfp,
+				dma_coherent_ok);
+		if (!page)
+			return NULL;
+		goto done;
+	}
+
+	page = __dma_direct_alloc_pages(dev, size, gfp);
+	if (!page)
+		return NULL;
+	ret = page_address(page);
+	if (force_dma_unencrypted(dev)) {
+		if (set_memory_decrypted((unsigned long)ret,
+				1 << get_order(size)))
+			goto out_free_pages;
+	}
+	memset(ret, 0, size);
+done:
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	return page;
+out_free_pages:
+	dma_free_contiguous(dev, page, size);
+	return NULL;
+}
+
+void dma_direct_free_pages(struct device *dev, size_t size,
+		struct page *page, dma_addr_t dma_addr,
+		enum dma_data_direction dir)
+{
+	unsigned int page_order = get_order(size);
+	void *vaddr = page_address(page);
+
+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, 0) &&
+	    dma_free_from_pool(dev, vaddr, size))
+		return;
+
+	if (force_dma_unencrypted(dev))
+		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+
+	dma_free_contiguous(dev, page, size);
+}
+
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_SWIOTLB)
 void dma_direct_sync_sg_for_device(struct device *dev,
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -341,9 +341,7 @@ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
 	if (force_dma_unencrypted(dev))
 		prot = pgprot_decrypted(prot);
-	if (dev_is_dma_coherent(dev) ||
-	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
-	     (attrs & DMA_ATTR_NON_CONSISTENT)))
+	if (dev_is_dma_coherent(dev))
 		return prot;
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
 	if (attrs & DMA_ATTR_WRITE_COMBINE)
@@ -472,6 +470,65 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	struct page *page;
+
+	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
+		return NULL;
+	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+	if (dma_alloc_direct(dev, ops))
+		page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
+	else if (ops->alloc_pages)
+		page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
+	else
+		return NULL;
+
+	debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+
+	return page;
+}
+EXPORT_SYMBOL_GPL(dma_alloc_pages);
+
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+		dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	size = PAGE_ALIGN(size);
+	debug_dma_unmap_page(dev, dma_handle, size, dir);
+
+	if (dma_alloc_direct(dev, ops))
+		dma_direct_free_pages(dev, size, page, dma_handle, dir);
+	else if (ops->free_pages)
+		ops->free_pages(dev, size, page, dma_handle, dir);
+}
+EXPORT_SYMBOL_GPL(dma_free_pages);
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	struct page *page;
+
+	page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+	if (!page)
+		return NULL;
+	return page_address(page);
+}
+EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
+}
+EXPORT_SYMBOL_GPL(dma_free_noncoherent);
+
 int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -3,6 +3,7 @@
  * Helpers for DMA ops implementations. These generally rely on the fact that
  * the allocated memory contains normal pages in the direct kernel mapping.
  */
+#include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 
 /*
@@ -49,3 +50,37 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	return -ENXIO;
 #endif /* CONFIG_MMU */
 }
+
+struct page *dma_common_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	struct page *page;
+
+	page = dma_alloc_contiguous(dev, size, gfp);
+	if (!page)
+		page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
+	if (!page)
+		return NULL;
+
+	*dma_handle = ops->map_page(dev, page, 0, size, dir,
+			DMA_ATTR_SKIP_CPU_SYNC);
+	if (*dma_handle == DMA_MAPPING_ERROR) {
+		dma_free_contiguous(dev, page, size);
+		return NULL;
+	}
+
+	memset(page_address(page), 0, size);
+	return page;
+}
+
+void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
+		dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (ops->unmap_page)
+		ops->unmap_page(dev, dma_handle, size, dir,
+				DMA_ATTR_SKIP_CPU_SYNC);
+	dma_free_contiguous(dev, page, size);
+}
--- a/kernel/dma/virt.c
+++ b/kernel/dma/virt.c
@@ -55,5 +55,7 @@ const struct dma_map_ops dma_virt_ops = {
 	.free = dma_virt_free,
 	.map_page = dma_virt_map_page,
 	.map_sg = dma_virt_map_sg,
+	.alloc_pages = dma_common_alloc_pages,
+	.free_pages = dma_common_free_pages,
 };
 EXPORT_SYMBOL(dma_virt_ops);
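A second illustrative sketch (not from the commit): the kernel-virtual-address variant, dma_alloc_noncoherent, which the commit message notes is now backed by dma_alloc_pages. The my_dev_send helper is hypothetical; the explicit dma_sync_single_for_device call is needed because the returned memory is not guaranteed to be cache coherent.

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical caller, for illustration only. */
static int my_dev_send(struct device *dev, const void *buf, size_t len)
{
	size_t alloc_len = PAGE_ALIGN(len);
	dma_addr_t dma;
	void *vaddr;

	vaddr = dma_alloc_noncoherent(dev, alloc_len, &dma, DMA_TO_DEVICE,
				      GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	memcpy(vaddr, buf, len);
	/* Hand ownership of the buffer to the device before the transfer. */
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);

	/* ... start the transfer and wait for it to complete ... */

	dma_free_noncoherent(dev, alloc_len, vaddr, dma, DMA_TO_DEVICE);
	return 0;
}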