mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-02-21 22:19:22 -05:00)
mm/rmap: pass dst_vma to folio_dup_file_rmap_pte() and friends
We'll need access to the destination MM when modifying the large mapcount of non-hugetlb large folios next. So pass in the destination VMA.

Link: https://lkml.kernel.org/r/20250303163014.1128035-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zefan Li <lizefan.x@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 405c4ef769 (parent 845d2be6d4), committed by Andrew Morton
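The motivation in the log — "we'll need access to the destination MM" — works because an MM is reachable from any of its VMAs. A minimal hypothetical sketch (this helper is not part of the patch; it only illustrates why passing dst_vma suffices where a dst_mm would otherwise have to be threaded through):

#include <linux/mm_types.h>

/* Hypothetical illustration only: the destination MM that the follow-up
 * change needs is one pointer dereference away from the destination VMA. */
static inline struct mm_struct *rmap_dup_dst_mm(struct vm_area_struct *dst_vma)
{
        return dst_vma->vm_mm;
}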
include/linux/rmap.h

@@ -335,7 +335,8 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
 }
 
 static __always_inline void __folio_dup_file_rmap(struct folio *folio,
-		struct page *page, int nr_pages, enum rmap_level level)
+		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+		enum rmap_level level)
 {
 	const int orig_nr_pages = nr_pages;
@@ -366,45 +367,47 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
  * @folio: The folio to duplicate the mappings of
  * @page: The first page to duplicate the mappings of
  * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
  *
  * The page range of the folio is defined by [page, page + nr_pages)
  *
  * The caller needs to hold the page table lock.
  */
 static inline void folio_dup_file_rmap_ptes(struct folio *folio,
-		struct page *page, int nr_pages)
+		struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
 {
-	__folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+	__folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE);
 }
 
 static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
-		struct page *page)
+		struct page *page, struct vm_area_struct *dst_vma)
 {
-	__folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+	__folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE);
 }
 
 /**
  * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
  * @folio: The folio to duplicate the mapping of
  * @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
  *
  * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
  *
  * The caller needs to hold the page table lock.
  */
 static inline void folio_dup_file_rmap_pmd(struct folio *folio,
-		struct page *page)
+		struct page *page, struct vm_area_struct *dst_vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE);
+	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE);
 #else
 	WARN_ON_ONCE(true);
 #endif
 }
 
 static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
-		struct page *page, int nr_pages, struct vm_area_struct *src_vma,
-		enum rmap_level level)
+		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma, enum rmap_level level)
 {
 	const int orig_nr_pages = nr_pages;
 	bool maybe_pinned;
@@ -470,6 +473,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
  * @folio: The folio to duplicate the mappings of
  * @page: The first page to duplicate the mappings of
  * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
  * @src_vma: The vm area from which the mappings are duplicated
  *
  * The page range of the folio is defined by [page, page + nr_pages)
@@ -488,16 +492,18 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
  * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
  */
 static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
-		struct page *page, int nr_pages, struct vm_area_struct *src_vma)
+		struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma)
 {
-	return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
-					 RMAP_LEVEL_PTE);
+	return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
+					 src_vma, RMAP_LEVEL_PTE);
 }
 
 static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
-		struct page *page, struct vm_area_struct *src_vma)
+		struct page *page, struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma)
 {
-	return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,
+	return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
 					 RMAP_LEVEL_PTE);
 }
 
@@ -506,6 +512,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
  * of a folio
  * @folio: The folio to duplicate the mapping of
  * @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
  * @src_vma: The vm area from which the mapping is duplicated
  *
  * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
@@ -524,11 +531,12 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
  * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
  */
 static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
-		struct page *page, struct vm_area_struct *src_vma)
+		struct page *page, struct vm_area_struct *dst_vma,
+		struct vm_area_struct *src_vma)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
-					 RMAP_LEVEL_PMD);
+	return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
+					 src_vma, RMAP_LEVEL_PMD);
 #else
 	WARN_ON_ONCE(true);
 	return -EBUSY;
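For quick reference, the duplication helpers in include/linux/rmap.h end up with the following signatures after this patch (collected from the hunks above; static/inline qualifiers and bodies omitted):

void folio_dup_file_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *dst_vma);
void folio_dup_file_rmap_pte(struct folio *folio, struct page *page,
		struct vm_area_struct *dst_vma);
void folio_dup_file_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *dst_vma);
int folio_try_dup_anon_rmap_ptes(struct folio *folio, struct page *page,
		int nr_pages, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma);
int folio_try_dup_anon_rmap_pte(struct folio *folio, struct page *page,
		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int folio_try_dup_anon_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);

Note the convention: dst_vma consistently precedes src_vma, matching the destination-before-source argument order already used by copy_huge_pmd() and copy_present_ptes() below.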
mm/huge_memory.c

@@ -1782,7 +1782,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	src_folio = page_folio(src_page);
 
 	folio_get(src_folio);
-	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
+	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
 		/* Page maybe pinned: split and retry the fault on PTEs. */
 		folio_put(src_folio);
 		pte_free(dst_mm, pgtable);
mm/memory.c (10 changed lines)
@@ -864,7 +864,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			folio_get(folio);
 			rss[mm_counter(folio)]++;
 			/* Cannot fail as these pages cannot get pinned. */
-			folio_try_dup_anon_rmap_pte(folio, page, src_vma);
+			folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);
 
 			/*
 			 * We do not preserve soft-dirty information, because so
@@ -1018,14 +1018,14 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	folio_ref_add(folio, nr);
 	if (folio_test_anon(folio)) {
 		if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
-							  nr, src_vma))) {
+							  nr, dst_vma, src_vma))) {
 			folio_ref_sub(folio, nr);
 			return -EAGAIN;
 		}
 		rss[MM_ANONPAGES] += nr;
 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
 	} else {
-		folio_dup_file_rmap_ptes(folio, page, nr);
+		folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
 		rss[mm_counter_file(folio)] += nr;
 	}
 	if (any_writable)
@@ -1043,7 +1043,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	 * guarantee the pinned page won't be randomly replaced in the
 	 * future.
 	 */
-	if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
+	if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
 		/* Page may be pinned, we have to copy. */
 		folio_put(folio);
 		err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
@@ -1053,7 +1053,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 		rss[MM_ANONPAGES]++;
 		VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
 	} else {
-		folio_dup_file_rmap_pte(folio, page);
+		folio_dup_file_rmap_pte(folio, page, dst_vma);
 		rss[mm_counter_file(folio)]++;
 	}
 
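Putting the call sites together, here is a condensed sketch of the duplication step shown in the copy_present_ptes() hunks above (a simplification, not a drop-in copy of the kernel code; reference counting and RSS accounting are elided, and the helper name is made up):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Condensed sketch of the fork-time duplication step: anon folios may
 * fail to duplicate (the folio might be pinned), file folios cannot.
 * With this patch, both paths carry dst_vma. */
static int dup_rmap_sketch(struct folio *folio, struct page *page, int nr,
		struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	if (folio_test_anon(folio)) {
		if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, nr,
							  dst_vma, src_vma)))
			return -EAGAIN;	/* caller falls back to copying */
	} else {
		/* Cannot fail; file pages cannot get pinned this way. */
		folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
	}
	return 0;
}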