mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-08 09:12:39 -04:00
Merge tag 'memblock-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock
Pull memblock updates from Mike Rapoport: - 'reserve_mem' command line parameter to allow creation of named memory reservation at boot time. The driving use-case is to improve the ability of pstore to retain ramoops data across reboots. - cleanups and small improvements in memblock and mm_init - new tests cases in memblock test suite * tag 'memblock-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock: memblock tests: fix implicit declaration of function 'numa_valid_node' memblock: Move late alloc warning down to phys alloc pstore/ramoops: Add ramoops.mem_name= command line option mm/memblock: Add "reserve_mem" to reserved named memory at boot up mm/mm_init.c: don't initialize page->lru again mm/mm_init.c: not always search next deferred_init_pfn from very beginning mm/mm_init.c: use deferred_init_mem_pfn_range_in_zone() to decide loop condition mm/mm_init.c: get the highest zone directly mm/mm_init.c: move nr_initialised reset down a bit mm/memblock: fix a typo in description of for_each_mem_region() mm/mm_init.c: use memblock_region_memory_base_pfn() to get startpfn mm/memblock: use PAGE_ALIGN_DOWN to get pgend in free_memmap mm/memblock: return true directly on finding overlap region memblock tests: add memblock_overlaps_region_checks mm/memblock: fix comment for memblock_isolate_range() memblock tests: add memblock_reserve_many_may_conflict_check() memblock tests: add memblock_reserve_all_locations_check() mm/memblock: remove empty dummy entry
This commit is contained in:
@@ -299,25 +299,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
-/**
- * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
- * memblock areas
- * @i: u64 used as loop variable
- * @zone: zone in which all of the memory blocks reside
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- *
- * Walks over free (memory && !reserved) areas of memblock in a specific
- * zone. Available once memblock and an empty zone is initialized. The main
- * assumption is that the zone start, end, and pgdat have been associated.
- * This way we can use the zone to determine NUMA node, and if a given part
- * of the memblock is valid for the zone.
- */
-#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
-	for (i = 0,							\
-	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
-	     i != U64_MAX;						\
-	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

 /**
  * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
@@ -565,7 +546,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
 }

 /**
- * for_each_mem_region - itereate over memory regions
+ * for_each_mem_region - iterate over memory regions
  * @region: loop variable
  */
 #define for_each_mem_region(region)					\
@@ -4261,4 +4261,6 @@ static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
 void vma_pgtable_walk_begin(struct vm_area_struct *vma);
 void vma_pgtable_walk_end(struct vm_area_struct *vma);

+int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+
 #endif /* _LINUX_MM_H */
Reference in New Issue
Block a user