diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d98283164eda..19e69704fcff 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3731,6 +3731,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (folio_ref_freeze(folio, 1 + extra_pins)) {
 		struct address_space *swap_cache = NULL;
 		struct lruvec *lruvec;
+		int expected_refs;
 
 		if (folio_order(folio) > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
@@ -3794,11 +3795,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 				new_folio = next) {
 			next = folio_next(new_folio);
 
-			folio_ref_unfreeze(
-				new_folio,
-				1 + ((mapping || swap_cache) ?
-					     folio_nr_pages(new_folio) :
-					     0));
+			expected_refs = folio_expected_ref_count(new_folio) + 1;
+			folio_ref_unfreeze(new_folio, expected_refs);
 
 			lru_add_split_folio(folio, new_folio, lruvec, list);
 
@@ -3828,8 +3826,8 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		 * Otherwise, a parallel folio_try_get() can grab @folio
 		 * and its caller can see stale page cache entries.
 		 */
-		folio_ref_unfreeze(folio, 1 +
-			((mapping || swap_cache) ? folio_nr_pages(folio) : 0));
+		expected_refs = folio_expected_ref_count(folio) + 1;
+		folio_ref_unfreeze(folio, expected_refs);
 
 		unlock_page_lruvec(lruvec);
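
For reference, the patch replaces the open-coded expected-refcount
expression, 1 + ((mapping || swap_cache) ? folio_nr_pages(folio) : 0),
with folio_expected_ref_count() plus one reference held by the caller
across the freeze. Below is a simplified sketch of what that helper
computes; the exact upstream definition (include/linux/mm.h) covers
additional cases such as slab folios, so treat this as illustrative,
not authoritative:

/*
 * Simplified sketch of folio_expected_ref_count(): the number of
 * references a folio is expected to have from the page/swap cache,
 * PG_private, and page table mappings.
 */
static inline int folio_expected_ref_count_sketch(const struct folio *folio)
{
	const int order = folio_order(folio);
	int ref_count = 0;

	if (folio_test_anon(folio)) {
		/* One reference per page from the swap cache, if any. */
		ref_count += folio_test_swapcache(folio) << order;
	} else {
		/* One reference per page from the page cache. */
		ref_count += !!folio->mapping << order;
		/* One reference from PG_private. */
		ref_count += folio_test_private(folio);
	}

	/* One reference per page table mapping. */
	return ref_count + folio_mapcount(folio);
}

At these call sites in __folio_split() the folio has already been
unmapped, so folio_mapcount() is zero and the helper's result matches
the old open-coded expression; the gain is that the refcount
bookkeeping lives in one shared helper instead of being duplicated at
each folio_ref_unfreeze() site.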