Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm: swap: enable swap_entry_range_free() to drop any kind of last ref
The original VM_BUG_ON() only allows swap_entry_range_free() to drop the last SWAP_HAS_CACHE ref. By allowing other kinds of last refs in the VM_BUG_ON(), swap_entry_range_free() becomes a general-purpose function able to handle any kind of last ref. Following this change, also rename swap_entry_range_free() to swap_entries_free() and update its comment accordingly.

This is a preparation to use swap_entries_free() to drop more kinds of last refs other than SWAP_HAS_CACHE.

[shikemeng@huaweicloud.com: add __maybe_unused attribute for swap_is_last_ref() and update comment]
Link: https://lkml.kernel.org/r/20250410153908.612984-1-shikemeng@huaweicloud.com
Link: https://lkml.kernel.org/r/20250325162528.68385-3-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Tested-by: SeongJae Park <sj@kernel.org>
Cc: Kairui Song <kasong@tencent.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
committed by Andrew Morton
parent 9c1c38bcdc
commit 64944ef6a1
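The relaxed check at the heart of this patch is easy to exercise on its own. Below is a minimal, out-of-tree sketch of the swap_is_last_ref() predicate the diff introduces; the constant values are the ones from include/linux/swap.h (SWAP_HAS_CACHE is 0x40, SWAP_MAP_SHMEM is 0xbf), but they are redefined locally, and the main() harness is purely illustrative, so the example compiles as ordinary userspace C.

/* Standalone sketch of the relaxed last-ref check. */
#include <assert.h>
#include <stdbool.h>

#define SWAP_HAS_CACHE	0x40	/* entry is held only by the swap cache */
#define SWAP_MAP_SHMEM	0xbf	/* entry is owned by shmem/tmpfs */

static inline bool swap_is_last_ref(unsigned char count)
{
	return (count == SWAP_HAS_CACHE) || (count == 1) ||
	       (count == SWAP_MAP_SHMEM);
}

int main(void)
{
	/* Before this patch, only SWAP_HAS_CACHE could reach the free path. */
	assert(swap_is_last_ref(SWAP_HAS_CACHE));	/* last swap-cache ref */
	assert(swap_is_last_ref(1));			/* last map count */
	assert(swap_is_last_ref(SWAP_MAP_SHMEM));	/* shmem marker */
	assert(!swap_is_last_ref(2));			/* still referenced */
	return 0;
}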
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -52,9 +52,9 @@
 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 				 unsigned char);
 static void free_swap_count_continuations(struct swap_info_struct *);
-static void swap_entry_range_free(struct swap_info_struct *si,
-				  struct swap_cluster_info *ci,
-				  swp_entry_t entry, unsigned int nr_pages);
+static void swap_entries_free(struct swap_info_struct *si,
+			      struct swap_cluster_info *ci,
+			      swp_entry_t entry, unsigned int nr_pages);
 static void swap_range_alloc(struct swap_info_struct *si,
 			     unsigned int nr_entries);
 static bool folio_swapcache_freeable(struct folio *folio);
@@ -1471,7 +1471,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
 	ci = lock_cluster(si, offset);
 	usage = swap_entry_put_locked(si, offset, 1);
 	if (!usage)
-		swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+		swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
 	unlock_cluster(ci);
 
 	return usage;
@@ -1501,7 +1501,7 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
 		for (i = 0; i < nr; i++)
 			WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
 		if (!has_cache)
-			swap_entry_range_free(si, ci, entry, nr);
+			swap_entries_free(si, ci, entry, nr);
 		unlock_cluster(ci);
 
 		return has_cache;
@@ -1520,12 +1520,22 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
 }
 
 /*
- * Drop the last HAS_CACHE flag of swap entries, caller have to
- * ensure all entries belong to the same cgroup.
+ * Check if it's the last ref of swap entry in the freeing path.
+ * Qualified value includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM.
+ */
+static inline bool __maybe_unused swap_is_last_ref(unsigned char count)
+{
+	return (count == SWAP_HAS_CACHE) || (count == 1) ||
+	       (count == SWAP_MAP_SHMEM);
+}
+
+/*
+ * Drop the last ref of swap entries, caller have to ensure all entries
+ * belong to the same cgroup and cluster.
  */
-static void swap_entry_range_free(struct swap_info_struct *si,
-				  struct swap_cluster_info *ci,
-				  swp_entry_t entry, unsigned int nr_pages)
+static void swap_entries_free(struct swap_info_struct *si,
+			      struct swap_cluster_info *ci,
+			      swp_entry_t entry, unsigned int nr_pages)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned char *map = si->swap_map + offset;
@@ -1538,7 +1548,7 @@ static void swap_entry_range_free(struct swap_info_struct *si,
 
 	ci->count -= nr_pages;
 	do {
-		VM_BUG_ON(*map != SWAP_HAS_CACHE);
+		VM_BUG_ON(!swap_is_last_ref(*map));
 		*map = 0;
 	} while (++map < map_end);
 
@@ -1561,7 +1571,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
 	ci = lock_cluster(si, offset);
 	do {
 		if (!swap_entry_put_locked(si, offset, usage))
-			swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+			swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
 	} while (++offset < end);
 	unlock_cluster(ci);
 }
@@ -1604,11 +1614,11 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
 
 	ci = lock_cluster(si, offset);
 	if (swap_only_has_cache(si, offset, size))
-		swap_entry_range_free(si, ci, entry, size);
+		swap_entries_free(si, ci, entry, size);
 	else {
 		for (int i = 0; i < size; i++, entry.val++) {
 			if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
-				swap_entry_range_free(si, ci, entry, 1);
+				swap_entries_free(si, ci, entry, 1);
 		}
 	}
 	unlock_cluster(ci);