mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 06:41:39 -04:00
mm: add a batched helper to clear the young flag for large folios
Currently, MGLRU calls ptep_test_and_clear_young_notify() to check and clear the young flag for each PTE sequentially, which is inefficient for large folio reclamation. Moreover, on the Arm64 architecture, which supports contiguous PTEs, the Arm64-specific ptep_test_and_clear_young() already implements an optimization to clear the young flags for PTEs within a contiguous range. However, this is not sufficient. Similar to the Arm64-specific clear_flush_young_ptes(), we can extend this to perform batched operations for the entire large folio (which might exceed the contiguous range: CONT_PTE_SIZE). Thus, we can introduce a new batched helper, test_and_clear_young_ptes(), and its wrapper test_and_clear_young_ptes_notify(), which are consistent with the existing functions, to perform batched checking of the young flags for large folios. This can help improve performance during large folio reclamation when MGLRU is enabled. It will be overridden by architectures that implement a more efficient batch operation in the following patches.
Link: https://lkml.kernel.org/r/23ec671bfcc06cd24ee0fbff8e329402742274a0.1772778858.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com> Reviewed-by: Andrew Morton <akpm@linux-foundation.org> Cc: Alistair Popple <apopple@nvidia.com> Cc: Axel Rasmussen <axelrasmussen@google.com> Cc: Barry Song <baohua@kernel.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: David Hildenbrand (Arm) <david@kernel.org> Cc: Dev Jain <dev.jain@arm.com> Cc: Jann Horn <jannh@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Qi Zheng <zhengqi.arch@bytedance.com> Cc: Rik van Riel <riel@surriel.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Shakeel Butt <shakeel.butt@linux.dev> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Wei Xu <weixugc@google.com> Cc: Will Deacon <will@kernel.org> Cc: Yuanchu Xie <yuanchu@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
83ec1286b1
commit
6d7237dda4
@@ -1103,6 +1103,43 @@ static inline int clear_flush_young_ptes(struct vm_area_struct *vma,
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef test_and_clear_young_ptes
|
||||
/**
|
||||
* test_and_clear_young_ptes - Mark PTEs that map consecutive pages of the same
|
||||
* folio as old
|
||||
* @vma: The virtual memory area the pages are mapped into.
|
||||
* @addr: Address the first page is mapped at.
|
||||
* @ptep: Page table pointer for the first entry.
|
||||
* @nr: Number of entries to clear access bit.
|
||||
*
|
||||
* May be overridden by the architecture; otherwise, implemented as a simple
|
||||
* loop over ptep_test_and_clear_young().
|
||||
*
|
||||
* Note that PTE bits in the PTE range besides the PFN can differ. For example,
|
||||
* some PTEs might be write-protected.
|
||||
*
|
||||
* Context: The caller holds the page table lock. The PTEs map consecutive
|
||||
* pages that belong to the same folio. The PTEs are all in the same PMD.
|
||||
*
|
||||
* Returns: whether any PTE was young.
|
||||
*/
|
||||
static inline int test_and_clear_young_ptes(struct vm_area_struct *vma,
|
||||
unsigned long addr, pte_t *ptep, unsigned int nr)
|
||||
{
|
||||
int young = 0;
|
||||
|
||||
for (;;) {
|
||||
young |= ptep_test_and_clear_young(vma, addr, ptep);
|
||||
if (--nr == 0)
|
||||
break;
|
||||
ptep++;
|
||||
addr += PAGE_SIZE;
|
||||
}
|
||||
|
||||
return young;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* On some architectures hardware does not set page access bit when accessing
|
||||
* memory page, it is responsibility of software setting this bit. It brings
|
||||
|
||||
@@ -1819,13 +1819,13 @@ static inline int pmdp_clear_flush_young_notify(struct vm_area_struct *vma,
|
||||
return young;
|
||||
}
|
||||
|
||||
/*
 * Batched variant of ptep_test_and_clear_young_notify(): test-and-clear the
 * young flag on @nr consecutive PTEs of the same folio, then invoke the MMU
 * notifiers once over the whole [addr, addr + nr * PAGE_SIZE) range.
 *
 * Note: this span of the diff contained both the stale single-PTE version and
 * the new batched one interleaved; only the final batched implementation is
 * kept here.
 */
static inline int test_and_clear_young_ptes_notify(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	int young;

	young = test_and_clear_young_ptes(vma, addr, ptep, nr);
	young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + nr * PAGE_SIZE);

	return young;
}
|
||||
|
||||
@@ -1843,9 +1843,15 @@ static inline int pmdp_test_and_clear_young_notify(struct vm_area_struct *vma,
|
||||
|
||||
#define clear_flush_young_ptes_notify clear_flush_young_ptes
|
||||
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
|
||||
#define ptep_test_and_clear_young_notify ptep_test_and_clear_young
|
||||
#define test_and_clear_young_ptes_notify test_and_clear_young_ptes
|
||||
#define pmdp_test_and_clear_young_notify pmdp_test_and_clear_young
|
||||
|
||||
#endif /* CONFIG_MMU_NOTIFIER */
|
||||
|
||||
/*
 * Single-PTE convenience wrapper: identical to calling the batched
 * notify helper with a range of exactly one entry.
 */
static inline int ptep_test_and_clear_young_notify(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
	int young = test_and_clear_young_ptes_notify(vma, addr, ptep, 1);

	return young;
}
|
||||
|
||||
#endif /* __MM_INTERNAL_H */
|
||||
|
||||
Reference in New Issue
Block a user