mm/rmap: convert "enum rmap_level" to "enum pgtable_level"
Let's factor it out, and convert all checks for unsupported levels to
BUILD_BUG(). The code is written in a way such that force-inlining will
optimize out the levels.

[nathan@kernel.org: always inline __folio_rmap_sanity_checks()]
Link: https://lkml.kernel.org/r/20250814-rmap-fix-build_bug-conversion-v1-1-fb7b10a0b362@kernel.org
Link: https://lkml.kernel.org/r/20250811112631.759341-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent 30680d5ef0
commit b22cc9a9c7
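Editor's note: the conversion works because every caller hands the level in as a compile-time constant. Once the __always_inline helpers are inlined, the compiler folds the switch (level), deletes the unreachable arms, and the BUILD_BUG() in the default: arm disappears; a caller passing an unsupported level becomes a build failure instead of a runtime VM_WARN_ON_ONCE(). Below is a minimal userspace sketch of that mechanism, not kernel code: the helper and the undefined sketch_build_bug() stand-in are invented for illustration, and the kernel's real BUILD_BUG() additionally uses a compiler error attribute so the failure is reported at compile time rather than link time. Build with optimizations enabled (e.g. gcc -O2) so dead-code elimination actually runs:

#include <stdio.h>

enum pgtable_level {
	PGTABLE_LEVEL_PTE = 0,
	PGTABLE_LEVEL_PMD,
	PGTABLE_LEVEL_PUD,
	PGTABLE_LEVEL_P4D,
	PGTABLE_LEVEL_PGD,
};

/* Stand-in for BUILD_BUG(): deliberately never defined, so the link fails
 * if the optimizer cannot prove the call dead. */
extern void sketch_build_bug(void);

static inline __attribute__((always_inline))
const char *level_name(enum pgtable_level level)
{
	switch (level) {
	case PGTABLE_LEVEL_PTE:
		return "pte";
	case PGTABLE_LEVEL_PMD:
		return "pmd";
	default:
		/* Dead for every supported constant level; only a caller
		 * passing an unsupported level keeps this call alive. */
		sketch_build_bug();
		return NULL;
	}
}

int main(void)
{
	/* Constant argument + forced inlining: the default arm is removed. */
	printf("%s\n", level_name(PGTABLE_LEVEL_PTE));
	/* level_name(PGTABLE_LEVEL_PGD) here would break the build. */
	return 0;
}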
include/linux/pgtable.h

@@ -1975,6 +1975,14 @@ static inline bool arch_has_pfn_modify_check(void)
 /* Page-Table Modification Mask */
 typedef unsigned int pgtbl_mod_mask;
 
+enum pgtable_level {
+	PGTABLE_LEVEL_PTE = 0,
+	PGTABLE_LEVEL_PMD,
+	PGTABLE_LEVEL_PUD,
+	PGTABLE_LEVEL_P4D,
+	PGTABLE_LEVEL_PGD,
+};
+
 #endif /* !__ASSEMBLY__ */
 
 #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
include/linux/rmap.h

@@ -394,18 +394,8 @@ typedef int __bitwise rmap_t;
 /* The anonymous (sub)page is exclusive to a single process. */
 #define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))
 
-/*
- * Internally, we're using an enum to specify the granularity. We make the
- * compiler emit specialized code for each granularity.
- */
-enum rmap_level {
-	RMAP_LEVEL_PTE = 0,
-	RMAP_LEVEL_PMD,
-	RMAP_LEVEL_PUD,
-};
-
-static inline void __folio_rmap_sanity_checks(const struct folio *folio,
-		const struct page *page, int nr_pages, enum rmap_level level)
+static __always_inline void __folio_rmap_sanity_checks(const struct folio *folio,
+		const struct page *page, int nr_pages, enum pgtable_level level)
 {
 	/* hugetlb folios are handled separately. */
 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -427,18 +417,18 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
 	VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
 
 	switch (level) {
-	case RMAP_LEVEL_PTE:
+	case PGTABLE_LEVEL_PTE:
 		break;
-	case RMAP_LEVEL_PMD:
+	case PGTABLE_LEVEL_PMD:
 		/*
 		 * We don't support folios larger than a single PMD yet. So
-		 * when RMAP_LEVEL_PMD is set, we assume that we are creating
+		 * when PGTABLE_LEVEL_PMD is set, we assume that we are creating
 		 * a single "entire" mapping of the folio.
 		 */
 		VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
 		VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
 		break;
-	case RMAP_LEVEL_PUD:
+	case PGTABLE_LEVEL_PUD:
 		/*
 		 * Assume that we are creating a single "entire" mapping of the
 		 * folio.
@@ -447,7 +437,7 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
 		VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
 		break;
 	default:
-		VM_WARN_ON_ONCE(true);
+		BUILD_BUG();
 	}
 
 	/*
@@ -567,14 +557,14 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
 
 static __always_inline void __folio_dup_file_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
-		enum rmap_level level)
+		enum pgtable_level level)
 {
 	const int orig_nr_pages = nr_pages;
 
 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
 	switch (level) {
-	case RMAP_LEVEL_PTE:
+	case PGTABLE_LEVEL_PTE:
 		if (!folio_test_large(folio)) {
 			atomic_inc(&folio->_mapcount);
 			break;
@@ -587,11 +577,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
 		}
 		folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
 		break;
-	case RMAP_LEVEL_PMD:
-	case RMAP_LEVEL_PUD:
+	case PGTABLE_LEVEL_PMD:
+	case PGTABLE_LEVEL_PUD:
 		atomic_inc(&folio->_entire_mapcount);
 		folio_inc_large_mapcount(folio, dst_vma);
 		break;
+	default:
+		BUILD_BUG();
 	}
 }
 
@@ -609,13 +601,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
 static inline void folio_dup_file_rmap_ptes(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
 {
-	__folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE);
+	__folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
 }
 
 static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
 		struct page *page, struct vm_area_struct *dst_vma)
 {
-	__folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE);
+	__folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -632,7 +624,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
 		struct page *page, struct vm_area_struct *dst_vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE);
+	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);
 #else
 	WARN_ON_ONCE(true);
 #endif
@@ -640,7 +632,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
 
 static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
-		struct vm_area_struct *src_vma, enum rmap_level level)
+		struct vm_area_struct *src_vma, enum pgtable_level level)
 {
 	const int orig_nr_pages = nr_pages;
 	bool maybe_pinned;
@@ -665,7 +657,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
 	 * copying if the folio maybe pinned.
 	 */
 	switch (level) {
-	case RMAP_LEVEL_PTE:
+	case PGTABLE_LEVEL_PTE:
 		if (unlikely(maybe_pinned)) {
 			for (i = 0; i < nr_pages; i++)
 				if (PageAnonExclusive(page + i))
@@ -687,8 +679,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
 		} while (page++, --nr_pages > 0);
 		folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
 		break;
-	case RMAP_LEVEL_PMD:
-	case RMAP_LEVEL_PUD:
+	case PGTABLE_LEVEL_PMD:
+	case PGTABLE_LEVEL_PUD:
 		if (PageAnonExclusive(page)) {
 			if (unlikely(maybe_pinned))
 				return -EBUSY;
@@ -697,6 +689,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
 		atomic_inc(&folio->_entire_mapcount);
 		folio_inc_large_mapcount(folio, dst_vma);
 		break;
+	default:
+		BUILD_BUG();
 	}
 	return 0;
 }
@@ -730,7 +724,7 @@ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
 		struct vm_area_struct *src_vma)
 {
 	return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
-					 src_vma, RMAP_LEVEL_PTE);
+					 src_vma, PGTABLE_LEVEL_PTE);
 }
 
 static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
@@ -738,7 +732,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
 		struct vm_area_struct *src_vma)
 {
 	return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
-					 RMAP_LEVEL_PTE);
+					 PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -770,7 +764,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
-					 src_vma, RMAP_LEVEL_PMD);
+					 src_vma, PGTABLE_LEVEL_PMD);
 #else
 	WARN_ON_ONCE(true);
 	return -EBUSY;
@@ -778,7 +772,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
 }
 
 static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
-		struct page *page, int nr_pages, enum rmap_level level)
+		struct page *page, int nr_pages, enum pgtable_level level)
 {
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 	VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
@@ -873,7 +867,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
 static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
 		struct page *page)
 {
-	return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+	return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -904,7 +898,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
-					   RMAP_LEVEL_PMD);
+					   PGTABLE_LEVEL_PMD);
 #else
 	WARN_ON_ONCE(true);
 	return -EBUSY;
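Editor's note on the pattern the rmap.h hunks preserve: each public wrapper (folio_dup_file_rmap_pte()/_ptes()/_pmd() and friends) passes a compile-time constant level into one shared __always_inline core, which is how the compiler ends up emitting specialized code per granularity, exactly what the removed enum rmap_level comment promised. A hedged, kernel-style sketch of that shape follows; the demo_* names are invented, __always_inline and BUILD_BUG() are the real macros from <linux/compiler.h> and <linux/build_bug.h>, and 512 stands in for HPAGE_PMD_NR on a 4K-page x86-64 config:

static __always_inline void __demo_adjust(long *ctr, enum pgtable_level level)
{
	switch (level) {
	case PGTABLE_LEVEL_PTE:
		*ctr += 1;	/* per-page bookkeeping */
		break;
	case PGTABLE_LEVEL_PMD:
		*ctr += 512;	/* whole-PMD bookkeeping */
		break;
	default:
		BUILD_BUG();	/* any other constant level fails the build */
	}
}

/* After inlining, each wrapper body contains only its own case arm. */
static inline void demo_adjust_pte(long *ctr)
{
	__demo_adjust(ctr, PGTABLE_LEVEL_PTE);
}

static inline void demo_adjust_pmd(long *ctr)
{
	__demo_adjust(ctr, PGTABLE_LEVEL_PMD);
}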
mm/rmap.c (56 changed lines)
@@ -1265,7 +1265,7 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
 
 static __always_inline void __folio_add_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
-		enum rmap_level level)
+		enum pgtable_level level)
 {
 	atomic_t *mapped = &folio->_nr_pages_mapped;
 	const int orig_nr_pages = nr_pages;
@@ -1274,7 +1274,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
 	switch (level) {
-	case RMAP_LEVEL_PTE:
+	case PGTABLE_LEVEL_PTE:
 		if (!folio_test_large(folio)) {
 			nr = atomic_inc_and_test(&folio->_mapcount);
 			break;
@@ -1300,11 +1300,11 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 
 		folio_add_large_mapcount(folio, orig_nr_pages, vma);
 		break;
-	case RMAP_LEVEL_PMD:
-	case RMAP_LEVEL_PUD:
+	case PGTABLE_LEVEL_PMD:
+	case PGTABLE_LEVEL_PUD:
 		first = atomic_inc_and_test(&folio->_entire_mapcount);
 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			if (level == RMAP_LEVEL_PMD && first)
+			if (level == PGTABLE_LEVEL_PMD && first)
 				nr_pmdmapped = folio_large_nr_pages(folio);
 			nr = folio_inc_return_large_mapcount(folio, vma);
 			if (nr == 1)
@@ -1323,7 +1323,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			 * We only track PMD mappings of PMD-sized
 			 * folios separately.
 			 */
-			if (level == RMAP_LEVEL_PMD)
+			if (level == PGTABLE_LEVEL_PMD)
 				nr_pmdmapped = nr_pages;
 			nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
 			/* Raced ahead of a remove and another add? */
@@ -1336,6 +1336,8 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 		}
 		folio_inc_large_mapcount(folio, vma);
 		break;
+	default:
+		BUILD_BUG();
 	}
 	__folio_mod_stat(folio, nr, nr_pmdmapped);
 }
@@ -1427,7 +1429,7 @@ static void __page_check_anon_rmap(const struct folio *folio,
 
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
-		unsigned long address, rmap_t flags, enum rmap_level level)
+		unsigned long address, rmap_t flags, enum pgtable_level level)
 {
 	int i;
 
@@ -1440,20 +1442,22 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 
 	if (flags & RMAP_EXCLUSIVE) {
 		switch (level) {
-		case RMAP_LEVEL_PTE:
+		case PGTABLE_LEVEL_PTE:
 			for (i = 0; i < nr_pages; i++)
 				SetPageAnonExclusive(page + i);
 			break;
-		case RMAP_LEVEL_PMD:
+		case PGTABLE_LEVEL_PMD:
 			SetPageAnonExclusive(page);
 			break;
-		case RMAP_LEVEL_PUD:
+		case PGTABLE_LEVEL_PUD:
 			/*
 			 * Keep the compiler happy, we don't support anonymous
 			 * PUD mappings.
 			 */
 			WARN_ON_ONCE(1);
 			break;
+		default:
+			BUILD_BUG();
 		}
 	}
 
@@ -1507,7 +1511,7 @@ void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
 		rmap_t flags)
 {
 	__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
-			      RMAP_LEVEL_PTE);
+			      PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -1528,7 +1532,7 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
-			      RMAP_LEVEL_PMD);
+			      PGTABLE_LEVEL_PMD);
 #else
 	WARN_ON_ONCE(true);
 #endif
@@ -1609,7 +1613,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 
 static __always_inline void __folio_add_file_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
-		enum rmap_level level)
+		enum pgtable_level level)
 {
 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
@@ -1634,7 +1638,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
 void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
 		int nr_pages, struct vm_area_struct *vma)
 {
-	__folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
+	__folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -1651,7 +1655,7 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
 		struct vm_area_struct *vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
 #else
 	WARN_ON_ONCE(true);
 #endif
@@ -1672,7 +1676,7 @@ void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
 {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
+	__folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
#else
 	WARN_ON_ONCE(true);
 #endif
@@ -1680,7 +1684,7 @@ void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
 
 static __always_inline void __folio_remove_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
-		enum rmap_level level)
+		enum pgtable_level level)
 {
 	atomic_t *mapped = &folio->_nr_pages_mapped;
 	int last = 0, nr = 0, nr_pmdmapped = 0;
@@ -1689,7 +1693,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
 
 	switch (level) {
-	case RMAP_LEVEL_PTE:
+	case PGTABLE_LEVEL_PTE:
 		if (!folio_test_large(folio)) {
 			nr = atomic_add_negative(-1, &folio->_mapcount);
 			break;
@@ -1719,11 +1723,11 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 
 		partially_mapped = nr && atomic_read(mapped);
 		break;
-	case RMAP_LEVEL_PMD:
-	case RMAP_LEVEL_PUD:
+	case PGTABLE_LEVEL_PMD:
+	case PGTABLE_LEVEL_PUD:
 		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
 			last = atomic_add_negative(-1, &folio->_entire_mapcount);
-			if (level == RMAP_LEVEL_PMD && last)
+			if (level == PGTABLE_LEVEL_PMD && last)
 				nr_pmdmapped = folio_large_nr_pages(folio);
 			nr = folio_dec_return_large_mapcount(folio, vma);
 			if (!nr) {
@@ -1743,7 +1747,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
 			if (likely(nr < ENTIRELY_MAPPED)) {
 				nr_pages = folio_large_nr_pages(folio);
-				if (level == RMAP_LEVEL_PMD)
+				if (level == PGTABLE_LEVEL_PMD)
 					nr_pmdmapped = nr_pages;
 				nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
 				/* Raced ahead of another remove and an add? */
@@ -1757,6 +1761,8 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 
 		partially_mapped = nr && nr < nr_pmdmapped;
 		break;
+	default:
+		BUILD_BUG();
 	}
 
 	/*
@@ -1796,7 +1802,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
 		int nr_pages, struct vm_area_struct *vma)
 {
-	__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
+	__folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE);
 }
 
 /**
@@ -1813,7 +1819,7 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
 		struct vm_area_struct *vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+	__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
 #else
 	WARN_ON_ONCE(true);
 #endif
@@ -1834,7 +1840,7 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
 {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
+	__folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD);
 #else
 	WARN_ON_ONCE(true);
 #endif
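Editor's note, to make the behavioral change concrete: a hypothetical out-of-tree caller (not in this patch) that passed an unsupported level used to trip the runtime VM_WARN_ON_ONCE(true) in __folio_rmap_sanity_checks(); after this patch it cannot be built at all, because the default: arm of the force-inlined switch is now BUILD_BUG():

/* Hypothetical misuse, for illustration only. */
void bogus_rmap_check(const struct folio *folio, const struct page *page)
{
	/* PGTABLE_LEVEL_PGD has no case arm, so the BUILD_BUG() in the
	 * default: arm survives dead-code elimination and the build fails. */
	__folio_rmap_sanity_checks(folio, page, 1, PGTABLE_LEVEL_PGD);
}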