Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/huge_memory: convert split_huge_pages_pid() from follow_page() to folio_walk
Let's remove yet another follow_page() user.  Note that we have to do the
split without holding the PTL, after folio_walk_end().  We don't care about
losing the secretmem check in follow_page().

[david@redhat.com: teach can_split_folio() that we are not holding an additional reference]
  Link: https://lkml.kernel.org/r/c75d1c6c-8ea6-424f-853c-1ccda6c77ba2@redhat.com
Link: https://lkml.kernel.org/r/20240802155524.517137-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 8710f6ed34
parent b1d3e9bbcc
committed by Andrew Morton
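
The pattern at the heart of this conversion: folio_walk_start() looks up the
folio mapped at an address and returns it with the page table lock (PTL)
held, but, unlike follow_page() with FOLL_GET, without taking a reference.
Because the split may sleep, the folio has to be pinned with folio_get() and
the walk ended with folio_walk_end() first. A minimal sketch of that
pattern, assuming the folio_walk API from <linux/pagewalk.h>;
sketch_split_at() is a hypothetical helper, but the calls inside it are the
ones the diff below uses:

#include <linux/pagewalk.h>
#include <linux/huge_mm.h>

/* Hypothetical helper sketching the folio_walk pattern this patch adopts.
 * The caller must hold the mmap lock, as split_huge_pages_pid() does. */
static int sketch_split_at(struct vm_area_struct *vma, unsigned long addr,
			   unsigned int new_order)
{
	struct folio_walk fw;
	struct folio *folio;
	int ret;

	/* On success the PTL is held; the folio comes without a reference. */
	folio = folio_walk_start(&fw, vma, addr, 0);
	if (!folio)
		return -ENOENT;

	if (!folio_trylock(folio)) {
		folio_walk_end(&fw, vma);
		return -EAGAIN;
	}

	/* Pin the folio so it survives folio_walk_end(), which drops the PTL. */
	folio_get(folio);
	folio_walk_end(&fw, vma);

	/* The split may sleep, so it runs only after the PTL is dropped. */
	ret = split_folio_to_order(folio, new_order);

	folio_unlock(folio);
	folio_put(folio);
	return ret;
}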
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -314,7 +314,7 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
 		unsigned long len, unsigned long pgoff, unsigned long flags,
 		vm_flags_t vm_flags);
 
-bool can_split_folio(struct folio *folio, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
 static inline int split_huge_page(struct page *page)
@@ -470,7 +470,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
 }
 
 static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
 	return false;
 }
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -40,6 +40,7 @@
 #include <linux/memory-tiers.h>
 #include <linux/compat.h>
 #include <linux/pgalloc_tag.h>
+#include <linux/pagewalk.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -3017,7 +3018,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 }
 
 /* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int *pextra_pins)
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
 {
 	int extra_pins;
 
@@ -3029,7 +3030,8 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
 		extra_pins = folio_nr_pages(folio);
 	if (pextra_pins)
 		*pextra_pins = extra_pins;
-	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
+	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+				       caller_pins;
 }
 
 /*
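
The rewritten return statement makes the caller's own references explicit.
Rearranged, the check says a folio is splittable only when every reference
is accounted for, where the old hard-coded 1 assumed exactly one caller
reference. An annotated summary (the annotations are interpretation, not
commit text):

/*
 * Interpretation of the new @caller_pins argument: can_split_folio()
 * succeeds only when all folio references are accounted for, i.e.
 *
 *   folio_ref_count(folio) == folio_mapcount(folio) + extra_pins + caller_pins
 *
 * Call sites updated by this patch:
 *   can_split_folio(folio, 1, &extra_pins) - split_huge_page_to_list_to_order(),
 *                                            which holds one folio reference
 *   can_split_folio(folio, 0, NULL)        - the debugfs walker below, where the
 *                                            folio is kept stable only by the
 *                                            folio_walk / page table lock
 *   can_split_folio(folio, 1, NULL)        - shrink_folio_list() in mm/vmscan.c,
 *                                            which holds one folio reference
 */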
@@ -3197,7 +3199,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	 * Racy check if we can split the page, before unmap_folio() will
 	 * split PMDs
 	 */
-	if (!can_split_folio(folio, &extra_pins)) {
+	if (!can_split_folio(folio, 1, &extra_pins)) {
 		ret = -EAGAIN;
 		goto out_unlock;
 	}
@@ -3504,7 +3506,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 	 */
 	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
 		struct vm_area_struct *vma = vma_lookup(mm, addr);
-		struct page *page;
+		struct folio_walk fw;
 		struct folio *folio;
 
 		if (!vma)
@@ -3516,13 +3518,10 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 			continue;
 		}
 
-		/* FOLL_DUMP to ignore special (like zero) pages */
-		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
-
-		if (IS_ERR_OR_NULL(page))
+		folio = folio_walk_start(&fw, vma, addr, 0);
+		if (!folio)
 			continue;
 
-		folio = page_folio(page);
 		if (!is_transparent_hugepage(folio))
 			goto next;
 
@@ -3536,18 +3535,24 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 		 * can be split or not. So skip the check here.
 		 */
 		if (!folio_test_private(folio) &&
-		    !can_split_folio(folio, NULL))
+		    !can_split_folio(folio, 0, NULL))
 			goto next;
 
 		if (!folio_trylock(folio))
 			goto next;
+		folio_get(folio);
+		folio_walk_end(&fw, vma);
 
 		if (!split_folio_to_order(folio, new_order))
 			split++;
 
 		folio_unlock(folio);
-next:
 		folio_put(folio);
+
+		cond_resched();
+		continue;
+next:
+		folio_walk_end(&fw, vma);
 		cond_resched();
 	}
 	mmap_read_unlock(mm);
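
Note why the next: label moves in the hunk above. With follow_page(FOLL_GET),
every iteration held a reference, so the shared next: path had to
folio_put(). With folio_walk, only the success path takes a reference
(folio_get() right before folio_walk_end()), so folio_put() now lives on the
success path and the bail-out path merely ends the walk. A condensed view of
the two paths (annotation mine, surrounding code elided):

		/* Success path: pin, drop the PTL, then split (may sleep). */
		folio_get(folio);
		folio_walk_end(&fw, vma);
		/* ... split_folio_to_order(), folio_unlock() ... */
		folio_put(folio);
		cond_resched();
		continue;
next:
		/* Bail-out path: no reference taken, only the walk is active. */
		folio_walk_end(&fw, vma);
		cond_resched();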
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1227,7 +1227,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 					goto keep_locked;
 				if (folio_test_large(folio)) {
 					/* cannot split folio, skip it */
-					if (!can_split_folio(folio, NULL))
+					if (!can_split_folio(folio, 1, NULL))
 						goto activate_locked;
 					/*
 					 * Split partially mapped folios right away.