migrate: replace RMP_ flags with TTU_ flags
Instead of translating between RMP_ and TTU_ flags, remove the RMP_ flags
and just use the TTU_ flag space; there's plenty available.  Possibly we
should rename these to RMAP_ flags, and maybe even pass them in through
rmap_walk_arg, but that can be done later.

Link: https://lkml.kernel.org/r/20260109041345.3863089-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Jann Horn <jannh@google.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 832d95b531
parent 3d702678f5
committed by Andrew Morton
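As an aside, the shape of the change can be shown with a small standalone C sketch. This is hypothetical userspace code, not the kernel implementation: the 0x2 value of TTU_USE_SHARED_ZEROPAGE comes from the rmap.h hunk below, while the 0x1 value for TTU_RMAP_LOCKED and the remove_migration_ptes_stub() helper are assumptions made only so the example compiles and runs on its own.

#include <stdio.h>

/* Sketch only: TTU_USE_SHARED_ZEROPAGE (0x2) matches the rmap.h hunk
 * in this commit; the 0x1 value for TTU_RMAP_LOCKED is assumed. */
enum ttu_flags {
	TTU_RMAP_LOCKED         = 0x1,
	TTU_USE_SHARED_ZEROPAGE = 0x2,
	TTU_SPLIT_HUGE_PMD      = 0x4,
};

/* Stand-in for remove_migration_ptes(): with a single flag space,
 * callers hand their ttu_flags straight through instead of translating
 * each TTU_ bit into a parallel RMP_ bit. */
static void remove_migration_ptes_stub(enum ttu_flags flags)
{
	if (flags & TTU_RMAP_LOCKED)
		printf("rmap lock already held by caller\n");
	if (flags & TTU_USE_SHARED_ZEROPAGE)
		printf("map unused subpages to the shared zeropage\n");
}

int main(void)
{
	/* Mirrors the unmap_and_move_huge_page() hunk below: the caller's
	 * flags no longer need the "ttu ? RMP_LOCKED : 0" translation. */
	remove_migration_ptes_stub(TTU_RMAP_LOCKED | TTU_USE_SHARED_ZEROPAGE);
	return 0;
}

The design point is that callers already hold their state as TTU_ bits, so keeping one flag namespace removes a translation step (and a class of mismatch bugs) at every call site.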
include/linux/rmap.h

@@ -92,6 +92,7 @@ struct anon_vma_chain {
 };
 
 enum ttu_flags {
+	TTU_USE_SHARED_ZEROPAGE = 0x2,	/* for unused pages of large folios */
 	TTU_SPLIT_HUGE_PMD = 0x4,	/* split huge PMD if any */
 	TTU_IGNORE_MLOCK = 0x8,		/* ignore mlock */
 	TTU_SYNC = 0x10,		/* avoid racy checks with PVMW_SYNC */
@@ -933,12 +934,8 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
 int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 		      struct vm_area_struct *vma);
 
-enum rmp_flags {
-	RMP_LOCKED = 1 << 0,
-	RMP_USE_SHARED_ZEROPAGE = 1 << 1,
-};
-
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags);
 
 /*
  * rmap_walk_control: To control rmap traversing for specific needs
mm/huge_memory.c

@@ -3431,7 +3431,7 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
 	if (!folio_test_anon(folio))
 		return;
 	for (;;) {
-		remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
+		remove_migration_ptes(folio, folio, TTU_RMAP_LOCKED | flags);
 		i += folio_nr_pages(folio);
 		if (i >= nr)
 			break;
@@ -3944,7 +3944,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
-	int remap_flags = 0;
+	enum ttu_flags ttu_flags = 0;
 	int ret;
 	pgoff_t end = 0;
@@ -4064,9 +4064,9 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		shmem_uncharge(mapping->host, nr_shmem_dropped);
 
 	if (!ret && is_anon && !folio_is_device_private(folio))
-		remap_flags = RMP_USE_SHARED_ZEROPAGE;
+		ttu_flags = TTU_USE_SHARED_ZEROPAGE;
 
-	remap_page(folio, 1 << old_order, remap_flags);
+	remap_page(folio, 1 << old_order, ttu_flags);
 
 	/*
 	 * Unlock all after-split folios except the one containing
mm/migrate.c
@@ -452,11 +452,12 @@ static bool remove_migration_pte(struct folio *folio,
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
  */
-void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
+void remove_migration_ptes(struct folio *src, struct folio *dst,
+		enum ttu_flags flags)
 {
 	struct rmap_walk_arg rmap_walk_arg = {
 		.folio = src,
-		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
+		.map_unused_to_zeropage = flags & TTU_USE_SHARED_ZEROPAGE,
 	};
 
 	struct rmap_walk_control rwc = {
@@ -464,9 +465,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
 		.arg = &rmap_walk_arg,
 	};
 
-	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
+	VM_BUG_ON_FOLIO((flags & TTU_USE_SHARED_ZEROPAGE) && (src != dst), src);
 
-	if (flags & RMP_LOCKED)
+	if (flags & TTU_RMAP_LOCKED)
 		rmap_walk_locked(dst, &rwc);
 	else
 		rmap_walk(dst, &rwc);
@@ -1521,8 +1522,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
-		remove_migration_ptes(src, !rc ? dst : src,
-				ttu ? RMP_LOCKED : 0);
+		remove_migration_ptes(src, !rc ? dst : src, ttu);
 
 	if (ttu & TTU_RMAP_LOCKED)
 		i_mmap_unlock_write(mapping);