mm/huge_memory: replace can_split_folio() with direct refcount calculation

can_split_folio() is just a refcount comparison that makes sure only the
split caller holds an extra pin.  Open code it as the check
folio_expected_ref_count() != folio_ref_count() - 1.  For the extra_pins
value used by folio_ref_freeze(), add folio_cache_ref_count() to
calculate it.  Also replace folio_expected_ref_count() with
folio_cache_ref_count() at the folio_ref_unfreeze() call sites: the two
return the same value when a folio is frozen, and folio_cache_ref_count()
avoids the unnecessary folio_mapcount() call in its implementation.
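
As a quick illustration of why the open-coded form agrees with the old
check (a userspace sketch, not kernel code: struct folio_model and its
fields are simplified stand-ins for the real folio accounting, and
details such as PG_private references are ignored):

/* Build: cc -o refcheck refcheck.c && ./refcheck */
#include <assert.h>
#include <stdbool.h>

struct folio_model {
	int mapcount;	/* page table mappings (folio_mapcount()) */
	int cache_refs;	/* pagecache/swapcache refs */
	int ref_count;	/* total references held (folio_ref_count()) */
};

/* Old form: can_split_folio(folio, 1, &extra_pins). */
static bool old_check(const struct folio_model *f)
{
	int extra_pins = f->cache_refs;	/* what *pextra_pins reported */

	return f->mapcount == f->ref_count - extra_pins - 1;
}

/* New form: split only when folio_expected_ref_count(), modeled here
 * as mapcount plus cache refs, equals folio_ref_count() - 1. */
static bool new_check(const struct folio_model *f)
{
	return f->mapcount + f->cache_refs == f->ref_count - 1;
}

int main(void)
{
	/* Mapped twice, one pagecache ref, plus the split caller's pin. */
	struct folio_model f = { .mapcount = 2, .cache_refs = 1,
				 .ref_count = 2 + 1 + 1 };

	assert(old_check(&f) && new_check(&f));

	/* An unexpected extra pin fails both forms the same way. */
	f.ref_count++;
	assert(!old_check(&f) && !new_check(&f));
	return 0;
}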

Link: https://lkml.kernel.org/r/20251126210618.1971206-3-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h

@@ -369,7 +369,6 @@ enum split_type {
 	SPLIT_TYPE_NON_UNIFORM,
 };
 
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
 int folio_split_unmapped(struct folio *folio, unsigned int new_order);

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c

@@ -3455,23 +3455,6 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
 	}
 }
 
-/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
-{
-	int extra_pins;
-
-	/* Additional pins from page cache */
-	if (folio_test_anon(folio))
-		extra_pins = folio_test_swapcache(folio) ?
-				folio_nr_pages(folio) : 0;
-	else
-		extra_pins = folio_nr_pages(folio);
-	if (pextra_pins)
-		*pextra_pins = extra_pins;
-	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
-					caller_pins;
-}
-
 static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
 {
 	for (; nr_pages; page++, nr_pages--)
@@ -3767,11 +3750,19 @@ int folio_check_splittable(struct folio *folio, unsigned int new_order,
 	return 0;
 }
 
+/* Number of folio references from the pagecache or the swapcache. */
+static unsigned int folio_cache_ref_count(const struct folio *folio)
+{
+	if (folio_test_anon(folio) && !folio_test_swapcache(folio))
+		return 0;
+	return folio_nr_pages(folio);
+}
+
 static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 		struct page *split_at, struct xa_state *xas,
 		struct address_space *mapping, bool do_lru,
 		struct list_head *list, enum split_type split_type,
-		pgoff_t end, int *nr_shmem_dropped, int extra_pins)
+		pgoff_t end, int *nr_shmem_dropped)
 {
 	struct folio *end_folio = folio_next(folio);
 	struct folio *new_folio, *next;
@@ -3782,10 +3773,9 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 	VM_WARN_ON_ONCE(!mapping && end);
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	ds_queue = folio_split_queue_lock(folio);
-	if (folio_ref_freeze(folio, 1 + extra_pins)) {
+	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
 		struct swap_cluster_info *ci = NULL;
 		struct lruvec *lruvec;
-		int expected_refs;
 
 		if (old_order > 1) {
 			if (!list_empty(&folio->_deferred_list)) {
@@ -3853,8 +3843,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 			zone_device_private_split_cb(folio, new_folio);
 
-			expected_refs = folio_expected_ref_count(new_folio) + 1;
-			folio_ref_unfreeze(new_folio, expected_refs);
+			folio_ref_unfreeze(new_folio,
+					   folio_cache_ref_count(new_folio) + 1);
 
 			if (do_lru)
 				lru_add_split_folio(folio, new_folio, lruvec, list);
@@ -3897,8 +3887,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 		 * Otherwise, a parallel folio_try_get() can grab @folio
 		 * and its caller can see stale page cache entries.
 		 */
-		expected_refs = folio_expected_ref_count(folio) + 1;
-		folio_ref_unfreeze(folio, expected_refs);
+		folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
 
 		if (do_lru)
 			unlock_page_lruvec(lruvec);
@@ -3947,7 +3936,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
 	int remap_flags = 0;
-	int extra_pins, ret;
+	int ret;
 	pgoff_t end = 0;
 
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
@@ -4028,7 +4017,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	 * Racy check if we can split the page, before unmap_folio() will
 	 * split PMDs
 	 */
-	if (!can_split_folio(folio, 1, &extra_pins)) {
+	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
 		ret = -EAGAIN;
 		goto out_unlock;
 	}
@@ -4051,8 +4040,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	}
 
 	ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
-			true, list, split_type, end, &nr_shmem_dropped,
-			extra_pins);
+			true, list, split_type, end, &nr_shmem_dropped);
 fail:
 	if (mapping)
 		xas_unlock(&xas);
@@ -4126,20 +4114,20 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
  */
 int folio_split_unmapped(struct folio *folio, unsigned int new_order)
 {
-	int extra_pins, ret = 0;
+	int ret = 0;
 
 	VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
 
-	if (!can_split_folio(folio, 1, &extra_pins))
+	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
 		return -EAGAIN;
 
 	local_irq_disable();
 	ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
 					NULL, false, NULL, SPLIT_TYPE_UNIFORM,
-					0, NULL, extra_pins);
+					0, NULL);
 	local_irq_enable();
 	return ret;
 }
@@ -4632,7 +4620,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
 	 * can be split or not. So skip the check here.
 	 */
 	if (!folio_test_private(folio) &&
-	    !can_split_folio(folio, 0, NULL))
+	    folio_expected_ref_count(folio) != folio_ref_count(folio))
 		goto next;
 
 	if (!folio_trylock(folio))

--- a/mm/vmscan.c
+++ b/mm/vmscan.c

@@ -1284,7 +1284,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 			goto keep_locked;
 
 		if (folio_test_large(folio)) {
 			/* cannot split folio, skip it */
-			if (!can_split_folio(folio, 1, NULL))
+			if (folio_expected_ref_count(folio) !=
+			    folio_ref_count(folio) - 1)
 				goto activate_locked;
 
 			/*
 			 * Split partially mapped folios right away.
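
The freeze/unfreeze arithmetic in __folio_freeze_and_split_unmapped()
follows the same idea: by the time the folio is frozen it has been fully
unmapped, so the expected reference count degenerates to the cache
references alone, which is exactly what folio_cache_ref_count() returns.
A worked example (a userspace sketch with assumed, illustrative values:
an order-2 folio in the swapcache, uniformly split to order 0):

/* Build: cc -o split_refs split_refs.c && ./split_refs */
#include <assert.h>

/* Stand-in for folio_cache_ref_count(): one reference per page while
 * the folio is in the pagecache or the swapcache, zero otherwise. */
static int cache_ref_count(int nr_pages, int in_cache)
{
	return in_cache ? nr_pages : 0;
}

int main(void)
{
	int old_nr = 4;		/* assumed: order-2 folio in the swapcache */

	/* folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1):
	 * at freeze time the only legitimate references are the cache's
	 * per-page refs plus the split caller's single pin. */
	assert(cache_ref_count(old_nr, 1) + 1 == 5);

	/* folio_ref_unfreeze(new_folio, folio_cache_ref_count(new_folio) + 1):
	 * each order-0 result covers one page and is unfrozen at its own
	 * cache ref plus the reference the split code still holds. */
	assert(cache_ref_count(1, 1) + 1 == 2);
	return 0;
}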