hugetlb: convert hugetlb_vma_maps_page() to hugetlb_vma_maps_pfn()

pte_page() is more expensive than pte_pfn() (often it's defined as
pfn_to_page(pte_pfn())), so it makes sense to do the conversion to pfn
once (by calling folio_pfn()) rather than convert the pfn to a page each
time.

While this is a very small advantage, the main motivation is removing a
reference to folio->page.

Link: https://lkml.kernel.org/r/20250226163131.3795869-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle)
2025-02-26 16:31:29 +00:00
committed by Andrew Morton
parent d9a04a2615
commit fa17ad58f8

View File

@@ -338,8 +338,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio)
* mutex for the page in the mapping. So, we can not race with page being
* faulted into the vma.
*/
-static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
-		unsigned long addr, struct page *page)
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long pfn)
{
pte_t *ptep, pte;
@@ -351,7 +351,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
if (huge_pte_none(pte) || !pte_present(pte))
return false;
-	if (pte_page(pte) == page)
+	if (pte_pfn(pte) == pfn)
return true;
return false;
@@ -396,7 +396,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
{
struct rb_root_cached *root = &mapping->i_mmap;
struct hugetlb_vma_lock *vma_lock;
-	struct page *page = &folio->page;
+	unsigned long pfn = folio_pfn(folio);
struct vm_area_struct *vma;
unsigned long v_start;
unsigned long v_end;
@@ -412,7 +412,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
-		if (!hugetlb_vma_maps_page(vma, v_start, page))
+		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
continue;
if (!hugetlb_vma_trylock_write(vma)) {
@@ -462,7 +462,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
*/
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
-		if (hugetlb_vma_maps_page(vma, v_start, page))
+		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
unmap_hugepage_range(vma, v_start, v_end, NULL,
ZAP_FLAG_DROP_MARKER);