mm, swap: simplify swap table sanity range check

The newly introduced helper, which checks for bad slots and verifies
that a cluster is empty, covers the older sanity check with a more
rigorous condition, so merge them.

Link: https://lkml.kernel.org/r/20260218-swap-table-p3-v3-8-f4e34be021a7@tencent.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -459,9 +459,11 @@ static void swap_table_free(struct swap_table *table)
  * One special case is that bad slots can't be freed, so check the number of
  * bad slots for swapoff, and non-swapoff path must never free bad slots.
  */
-static void swap_cluster_assert_empty(struct swap_cluster_info *ci, bool swapoff)
+static void swap_cluster_assert_empty(struct swap_cluster_info *ci,
+				      unsigned int ci_off, unsigned int nr,
+				      bool swapoff)
 {
-	unsigned int ci_off = 0, ci_end = SWAPFILE_CLUSTER;
+	unsigned int ci_end = ci_off + nr;
 	unsigned long swp_tb;
 	int bad_slots = 0;
 
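The full body of the merged helper lies outside this hunk's context. What
follows is a plausible sketch only, reconstructed from the declarations
above and the loop of the removed swap_cluster_assert_table_empty() shown
further down; swp_tb_is_bad() is an assumed predicate name not confirmed
by this diff, and the real body may differ:

	/*
	 * Sketch, not the actual implementation: walks nr slots starting
	 * at ci_off and warns on any non-empty slot, except that bad
	 * slots are tolerated (and counted) during swapoff.
	 */
	static void swap_cluster_assert_empty(struct swap_cluster_info *ci,
					      unsigned int ci_off, unsigned int nr,
					      bool swapoff)
	{
		unsigned int ci_end = ci_off + nr;
		unsigned long swp_tb;
		int bad_slots = 0;

		if (!IS_ENABLED(CONFIG_DEBUG_VM))
			return;

		do {
			swp_tb = __swap_table_get(ci, ci_off);
			/* Bad slots can't be freed; only swapoff may see them. */
			if (swapoff && swp_tb_is_bad(swp_tb))	/* assumed helper */
				bad_slots++;
			else
				VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));
		} while (++ci_off < ci_end);

		/* Per the comment above, the real helper also checks bad_slots. */
	}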
@@ -588,7 +590,7 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 
 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
 {
-	swap_cluster_assert_empty(ci, false);
+	swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, false);
 	swap_cluster_free_table(ci);
 	move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
 	ci->order = 0;
@@ -898,26 +900,6 @@ static bool cluster_scan_range(struct swap_info_struct *si,
 	return true;
 }
 
-/*
- * Currently, the swap table is not used for count tracking, just
- * do a sanity check here to ensure nothing leaked, so the swap
- * table should be empty upon freeing.
- */
-static void swap_cluster_assert_table_empty(struct swap_cluster_info *ci,
-					    unsigned int start, unsigned int nr)
-{
-	unsigned int ci_off = start % SWAPFILE_CLUSTER;
-	unsigned int ci_end = ci_off + nr;
-	unsigned long swp_tb;
-
-	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
-		do {
-			swp_tb = __swap_table_get(ci, ci_off);
-			VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));
-		} while (++ci_off < ci_end);
-	}
-}
-
 static bool cluster_alloc_range(struct swap_info_struct *si,
 				struct swap_cluster_info *ci,
 				struct folio *folio,
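Note the call-site change this removal implies: the old helper took an
absolute swap offset and applied the % SWAPFILE_CLUSTER conversion
internally, while the merged helper expects a cluster-relative slot
index, so the conversion moves to each caller. Both lines below are
taken from the hunks in this patch:

	/* Before: the helper converted the swap offset internally. */
	swap_cluster_assert_table_empty(ci, offset, nr_pages);

	/* After: the caller passes a cluster-relative slot index. */
	swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, nr_pages, false);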
@@ -943,13 +925,14 @@ static bool cluster_alloc_range(struct swap_info_struct *si,
 	if (likely(folio)) {
 		order = folio_order(folio);
 		nr_pages = 1 << order;
+		swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, nr_pages, false);
 		__swap_cache_add_folio(ci, folio, swp_entry(si->type, offset));
 	} else if (IS_ENABLED(CONFIG_HIBERNATION)) {
 		order = 0;
 		nr_pages = 1;
 		WARN_ON_ONCE(si->swap_map[offset]);
 		si->swap_map[offset] = 1;
-		swap_cluster_assert_table_empty(ci, offset, 1);
+		swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, 1, false);
 	} else {
 		/* Allocation without folio is only possible with hibernation */
 		WARN_ON_ONCE(1);
@@ -1768,7 +1751,7 @@ void swap_entries_free(struct swap_info_struct *si,
 	mem_cgroup_uncharge_swap(entry, nr_pages);
 	swap_range_free(si, offset, nr_pages);
-	swap_cluster_assert_table_empty(ci, offset, nr_pages);
+	swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, nr_pages, false);
 
 	if (!ci->count)
 		free_cluster(si, ci);
@@ -2780,7 +2763,7 @@ static void free_swap_cluster_info(struct swap_cluster_info *cluster_info,
 		/* Cluster with bad marks count will have a remaining table */
 		spin_lock(&ci->lock);
 		if (rcu_dereference_protected(ci->table, true)) {
-			swap_cluster_assert_empty(ci, true);
+			swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, true);
 			swap_cluster_free_table(ci);
 		}
 		spin_unlock(&ci->lock);
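
Taken together, the call sites in this patch exercise the merged helper
in three patterns. The snippet below only collects calls already visible
in the hunks above:

	/* Whole-cluster check when a cluster is freed (no bad slots allowed): */
	swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, false);

	/* Sub-range check when nr_pages slots are allocated or freed: */
	swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, nr_pages, false);

	/* Swapoff teardown, where unfreeable bad slots may remain: */
	swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, true);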