mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-15 07:12:07 -04:00
It can be desirable to reserve memory in a CMA area before it is activated, early in boot. Such reservations would effectively be memblock allocations, but they can be returned to the CMA area later. This functionality can be used to allow hugetlb bootmem allocations from a hugetlb CMA area. A new interface, cma_reserve_early is introduced. This allows for pageblock-aligned reservations. These reservations are skipped during the initial handoff of pages in a CMA area to the buddy allocator. The caller is responsible for making sure that the page structures are set up, and that the migrate type is set correctly, as with other memblock allocations that stick around. If the CMA area fails to activate (because it intersects with multiple zones), the reserved memory is not given to the buddy allocator, the caller needs to take care of that. Link: https://lkml.kernel.org/r/20250228182928.2645936-25-fvdl@google.com Signed-off-by: Frank van der Linden <fvdl@google.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Dan Carpenter <dan.carpenter@linaro.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Joao Martins <joao.m.martins@oracle.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Muchun Song <muchun.song@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev> Cc: Usama Arif <usamaarif642@gmail.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Yu Zhao <yuzhao@google.com> Cc: Zi Yan <ziy@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
93 lines
2.6 KiB
C
93 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_CMA_H__
#define __MM_CMA_H__

#include <linux/debugfs.h>
#include <linux/kobject.h>
struct cma_kobject {
|
|
struct kobject kobj;
|
|
struct cma *cma;
|
|
};
|
|
|
|
/*
 * Multi-range support. This can be useful if the size of the allocation
 * is not expected to be larger than the alignment (like with hugetlb_cma),
 * and the total amount of memory requested, while smaller than the total
 * amount of memory available, is large enough that it doesn't fit in a
 * single physical memory range because of memory holes.
 *
 * Fields:
 * @base_pfn: physical address of range
 * @early_pfn: first PFN not reserved through cma_reserve_early
 * @count: size of range
 * @bitmap: bitmap of allocated (1 << order_per_bit)-sized chunks.
 */
struct cma_memrange {
	unsigned long base_pfn;
	unsigned long early_pfn;
	unsigned long count;
	unsigned long *bitmap;
#ifdef CONFIG_CMA_DEBUGFS
	/* debugfs view onto @bitmap (u32 array helper) */
	struct debugfs_u32_array dfs_bitmap;
#endif
};

/* Maximum number of discontiguous physical ranges one CMA area may span. */
#define CMA_MAX_RANGES 8
struct cma {
|
|
unsigned long count;
|
|
unsigned long available_count;
|
|
unsigned int order_per_bit; /* Order of pages represented by one bit */
|
|
spinlock_t lock;
|
|
#ifdef CONFIG_CMA_DEBUGFS
|
|
struct hlist_head mem_head;
|
|
spinlock_t mem_head_lock;
|
|
#endif
|
|
char name[CMA_MAX_NAME];
|
|
int nranges;
|
|
struct cma_memrange ranges[CMA_MAX_RANGES];
|
|
#ifdef CONFIG_CMA_SYSFS
|
|
/* the number of CMA page successful allocations */
|
|
atomic64_t nr_pages_succeeded;
|
|
/* the number of CMA page allocation failures */
|
|
atomic64_t nr_pages_failed;
|
|
/* the number of CMA page released */
|
|
atomic64_t nr_pages_released;
|
|
/* kobject requires dynamic object */
|
|
struct cma_kobject *cma_kobj;
|
|
#endif
|
|
unsigned long flags;
|
|
/* NUMA node (NUMA_NO_NODE if unspecified) */
|
|
int nid;
|
|
};
|
|
|
|
/*
 * Per-area state flags, used as bit numbers in cma->flags.
 */
enum cma_flags {
	CMA_RESERVE_PAGES_ON_ERROR,
	CMA_ZONES_VALID,	/* zone check done, area usable */
	CMA_ZONES_INVALID,	/* area spans multiple zones, cannot activate */
	CMA_ACTIVATED,		/* pages have been handed to the buddy allocator */
};
/* All CMA areas on the system; cma_area_count entries are valid. */
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned int cma_area_count;
static inline unsigned long cma_bitmap_maxno(struct cma *cma,
|
|
struct cma_memrange *cmr)
|
|
{
|
|
return cmr->count >> cma->order_per_bit;
|
|
}
|
|
|
|
/*
 * sysfs allocation statistics hooks (update the nr_pages_* counters in
 * struct cma); compiled out to empty inline stubs without CONFIG_CMA_SYSFS.
 */
#ifdef CONFIG_CMA_SYSFS
void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
void cma_sysfs_account_release_pages(struct cma *cma, unsigned long nr_pages);
#else
/* Stray ';' after each stub body removed: an empty file-scope declaration
 * is not valid ISO C. */
static inline void cma_sysfs_account_success_pages(struct cma *cma,
						   unsigned long nr_pages) {}
static inline void cma_sysfs_account_fail_pages(struct cma *cma,
						unsigned long nr_pages) {}
static inline void cma_sysfs_account_release_pages(struct cma *cma,
						   unsigned long nr_pages) {}
#endif

#endif /* __MM_CMA_H__ */