mm: cma: add cma_alloc_frozen{_compound}()

Introduce the cma_alloc_frozen{_compound}() helpers to allocate pages
without incrementing their refcount, then convert hugetlb CMA to use
cma_alloc_frozen_compound() and cma_release_frozen(), and remove the
now-unused cma_{alloc,free}_folio().  Also move cma_validate_zones() into
mm/internal.h, since it has no users outside mm/.

After the above changes, set_pages_refcounted() is only called on
non-compound pages, so drop its PageHead() handling.

Link: https://lkml.kernel.org/r/20260109093136.1491549-6-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Cc: Mark Brown <broonie@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9bda131c60 (parent e0c1326779)
Author:	Kefeng Wang
Date:	2026-01-09 17:31:35 +08:00
Committer: Andrew Morton

4 changed files with 97 additions and 70 deletions
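Usage sketch (not part of the patch): a hypothetical caller of the new
frozen-compound API, mirroring the hugetlb conversion below.  The pages
come back with a refcount of zero, so the caller gives the compound head
its first reference before use and drops it again before the frozen
release:

	struct page *page;
	struct folio *folio;

	/* allocate 1 << order pages as a frozen (refcount == 0) compound page */
	page = cma_alloc_frozen_compound(cma, order);
	if (!page)
		return NULL;

	set_page_refcounted(page);	/* refcount 0 -> 1 */
	folio = page_folio(page);

	/* ... use the folio ... */

	folio_ref_dec(folio);		/* refcount 1 -> 0 */
	cma_release_frozen(cma, &folio->page, folio_nr_pages(folio));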

include/linux/cma.h

@@ -51,29 +51,15 @@ extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int
 			      bool no_warn);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+			      unsigned int align, bool no_warn);
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order);
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+			unsigned long count);
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
 extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
 extern void cma_reserve_pages_on_error(struct cma *cma);
 
-#ifdef CONFIG_CMA
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
-bool cma_free_folio(struct cma *cma, const struct folio *folio);
-bool cma_validate_zones(struct cma *cma);
-#else
-static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
-{
-	return NULL;
-}
-static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
-{
-	return false;
-}
-static inline bool cma_validate_zones(struct cma *cma)
-{
-	return false;
-}
-#endif
 #endif

mm/cma.c

@@ -856,8 +856,8 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
 	return ret;
 }
 
-static struct page *__cma_alloc(struct cma *cma, unsigned long count,
-				unsigned int align, gfp_t gfp)
+static struct page *__cma_alloc_frozen(struct cma *cma,
+		unsigned long count, unsigned int align, gfp_t gfp)
 {
 	struct page *page = NULL;
 	int ret = -ENOMEM, r;
@@ -904,7 +904,6 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 	trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
 			       page, count, align, ret);
 	if (page) {
-		set_pages_refcounted(page, count);
 		count_vm_event(CMA_ALLOC_SUCCESS);
 		cma_sysfs_account_success_pages(cma, count);
 	} else {
@@ -915,6 +914,21 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
 	return page;
 }
 
+struct page *cma_alloc_frozen(struct cma *cma, unsigned long count,
+			      unsigned int align, bool no_warn)
+{
+	gfp_t gfp = GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0);
+
+	return __cma_alloc_frozen(cma, count, align, gfp);
+}
+
+struct page *cma_alloc_frozen_compound(struct cma *cma, unsigned int order)
+{
+	gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN;
+
+	return __cma_alloc_frozen(cma, 1 << order, order, gfp);
+}
+
 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma: Contiguous memory region for which the allocation is performed.
@@ -927,43 +941,27 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
  */
 struct page *cma_alloc(struct cma *cma, unsigned long count,
 		       unsigned int align, bool no_warn)
 {
-	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-}
-
-struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
-{
 	struct page *page;
 
-	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
-		return NULL;
+	page = cma_alloc_frozen(cma, count, align, no_warn);
+	if (page)
+		set_pages_refcounted(page, count);
 
-	page = __cma_alloc(cma, 1 << order, order, gfp);
-
-	return page ? page_folio(page) : NULL;
+	return page;
 }
 
-/**
- * cma_release() - release allocated pages
- * @cma: Contiguous memory region for which the allocation is performed.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
- *
- * This function releases memory allocated by cma_alloc().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
- */
-bool cma_release(struct cma *cma, const struct page *pages,
-		 unsigned long count)
+static struct cma_memrange *find_cma_memrange(struct cma *cma,
+		const struct page *pages, unsigned long count)
 {
-	struct cma_memrange *cmr;
+	struct cma_memrange *cmr = NULL;
 	unsigned long pfn, end_pfn;
 	int r;
 
-	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
-
 	if (!cma || !pages || count > cma->count)
-		return false;
+		return NULL;
 
 	pfn = page_to_pfn(pages);
@@ -981,27 +979,66 @@ bool cma_release(struct cma *cma, const struct page *pages,
 	if (r == cma->nranges) {
 		pr_debug("%s(page %p, count %lu, no cma range matches the page range)\n",
 			 __func__, (void *)pages, count);
-		return false;
+		return NULL;
 	}
 
-	if (PageHead(pages))
-		__free_pages((struct page *)pages, compound_order(pages));
-	else
-		free_contig_range(pfn, count);
+	return cmr;
+}
+
+static void __cma_release_frozen(struct cma *cma, struct cma_memrange *cmr,
+		const struct page *pages, unsigned long count)
+{
+	unsigned long pfn = page_to_pfn(pages);
+
+	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
 
+	free_contig_frozen_range(pfn, count);
 	cma_clear_bitmap(cma, cmr, pfn, count);
 	cma_sysfs_account_release_pages(cma, count);
 	trace_cma_release(cma->name, pfn, pages, count);
+}
+
+/**
+ * cma_release() - release allocated pages
+ * @cma: Contiguous memory region for which the allocation is performed.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by cma_alloc().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool cma_release(struct cma *cma, const struct page *pages,
+		 unsigned long count)
+{
+	struct cma_memrange *cmr;
+	unsigned long i, pfn;
+
+	cmr = find_cma_memrange(cma, pages, count);
+	if (!cmr)
+		return false;
+
+	pfn = page_to_pfn(pages);
+	for (i = 0; i < count; i++, pfn++)
+		VM_WARN_ON(!put_page_testzero(pfn_to_page(pfn)));
+
+	__cma_release_frozen(cma, cmr, pages, count);
 
 	return true;
 }
 
-bool cma_free_folio(struct cma *cma, const struct folio *folio)
+bool cma_release_frozen(struct cma *cma, const struct page *pages,
+			unsigned long count)
 {
-	if (WARN_ON(!folio_test_large(folio)))
+	struct cma_memrange *cmr;
+
+	cmr = find_cma_memrange(cma, pages, count);
+	if (!cmr)
 		return false;
-	return cma_release(cma, &folio->page, folio_nr_pages(folio));
+
+	__cma_release_frozen(cma, cmr, pages, count);
+
+	return true;
 }
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
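The put_page_testzero() loop in the reworked cma_release() encodes the
pairing rule: cma_alloc() hands out pages with one reference each, while
the frozen variants hand out refcount-zero pages.  A sketch of the two
pairings (hypothetical callers; cma, count and align are assumed locals,
function names are from this patch):

	/* refcounted pairing: each page arrives with refcount == 1 */
	page = cma_alloc(cma, count, align, false);
	if (page)
		cma_release(cma, page, count);	/* drops each page's last reference */

	/* frozen pairing: pages arrive with refcount == 0 */
	page = cma_alloc_frozen(cma, count, align, false);
	if (page)
		cma_release_frozen(cma, page, count);	/* no references to drop */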

mm/hugetlb_cma.c

@@ -20,35 +20,39 @@ static unsigned long hugetlb_cma_size __initdata;
 
 void hugetlb_cma_free_folio(struct folio *folio)
 {
-	int nid = folio_nid(folio);
-
-	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+	folio_ref_dec(folio);
+	WARN_ON_ONCE(!cma_release_frozen(hugetlb_cma[folio_nid(folio)],
+					 &folio->page, folio_nr_pages(folio)));
 }
 
 struct folio *hugetlb_cma_alloc_folio(int order, gfp_t gfp_mask,
 				      int nid, nodemask_t *nodemask)
 {
 	int node;
-	struct folio *folio = NULL;
+	struct folio *folio;
+	struct page *page = NULL;
 
 	if (hugetlb_cma[nid])
-		folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
+		page = cma_alloc_frozen_compound(hugetlb_cma[nid], order);
 
-	if (!folio && !(gfp_mask & __GFP_THISNODE)) {
+	if (!page && !(gfp_mask & __GFP_THISNODE)) {
 		for_each_node_mask(node, *nodemask) {
 			if (node == nid || !hugetlb_cma[node])
 				continue;
 
-			folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
-			if (folio)
+			page = cma_alloc_frozen_compound(hugetlb_cma[node], order);
+			if (page)
 				break;
 		}
 	}
 
-	if (folio)
-		folio_set_hugetlb_cma(folio);
+	if (!page)
+		return NULL;
 
+	set_page_refcounted(page);
+	folio = page_folio(page);
+	folio_set_hugetlb_cma(folio);
 	return folio;
 }
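From the caller's point of view nothing changes: the folio still leaves
hugetlb_cma_alloc_folio() with a compound refcount of 1, now set by a
single set_page_refcounted() call once an allocation has succeeded.  A
minimal sketch of the resulting lifecycle (assumed invariants, not code
from the patch):

	folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask);
	/* on success: folio_ref_count(folio) == 1 */

	/* ... folio in use as a hugetlb folio ... */

	hugetlb_cma_free_folio(folio);
	/* folio_ref_dec() drops the last reference, then the refcount-zero
	   range goes back to CMA via cma_release_frozen() */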

mm/internal.h

@@ -584,11 +584,6 @@ static inline void set_pages_refcounted(struct page *page, unsigned long nr_pages)
 {
 	unsigned long pfn = page_to_pfn(page);
 
-	if (PageHead(page)) {
-		set_page_refcounted(page);
-		return;
-	}
-
 	for (; nr_pages--; pfn++)
 		set_page_refcounted(pfn_to_page(pfn));
 }
@@ -1014,9 +1009,14 @@ void init_cma_reserved_pageblock(struct page *page);
 struct cma;
 
 #ifdef CONFIG_CMA
+bool cma_validate_zones(struct cma *cma);
 void *cma_reserve_early(struct cma *cma, unsigned long size);
 void init_cma_pageblock(struct page *page);
 #else
+static inline bool cma_validate_zones(struct cma *cma)
+{
+	return false;
+}
 static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
 {
 	return NULL;