Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/shmem, swap: remove redundant error handling for replacing folio
Shmem may replace a folio in the swap cache if the cached one doesn't fit
the swapin's GFP zone.  When doing so, shmem has already double-checked
that the swap cache folio is locked, still has the swap cache flag set,
and contains the wanted swap entry, so it is impossible to fail due to an
XArray mismatch.  There is even a comment to that effect.

Delete the defensive error handling path and add a WARN_ON instead: if
that ever happens, something has broken the basic principle of how the
swap cache works, and we should catch and fix it.

Link: https://lkml.kernel.org/r/20250916160100.31545-10-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Suggested-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: kernel test robot <oliver.sang@intel.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
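To make the reasoning concrete, the core of the change can be condensed into
the sketch below. This is illustrative only, not the verbatim kernel source:
the XA_STATE setup line and the swap_index variable are assumed from context,
while the loop body mirrors the hunk further down.

/*
 * Condensed sketch of the replacement loop in shmem_replace_folio()
 * after this patch.  "old" is the folio currently in the swap cache,
 * "new" is its replacement allocated from the required GFP zone.
 */
XA_STATE(xas, &swap_mapping->i_pages, swap_index);  /* setup assumed from context */

xa_lock_irq(&swap_mapping->i_pages);
for (i = 0; i < nr_pages; i++) {
        /*
         * The caller holds the folio lock and has already verified that
         * "old" still has the swap cache flag and the wanted swap entry,
         * so each of the N slots must point to "old".  A mismatch means
         * the swap cache invariants are broken: warn instead of trying
         * to recover.
         */
        WARN_ON_ONCE(xas_store(&xas, new) != old);
        xas_next(&xas);
}
xa_unlock_irq(&swap_mapping->i_pages);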
Committed by: Andrew Morton
Parent: fd8d4f862f
Commit: 84a7a9823e
mm/shmem.c | 32
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2121,35 +2121,17 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
        /* Swap cache still stores N entries instead of a high-order entry */
        xa_lock_irq(&swap_mapping->i_pages);
        for (i = 0; i < nr_pages; i++) {
-               void *item = xas_load(&xas);
-
-               if (item != old) {
-                       error = -ENOENT;
-                       break;
-               }
-
-               xas_store(&xas, new);
+               WARN_ON_ONCE(xas_store(&xas, new) != old);
                xas_next(&xas);
        }
-       if (!error) {
-               mem_cgroup_replace_folio(old, new);
-               shmem_update_stats(new, nr_pages);
-               shmem_update_stats(old, -nr_pages);
-       }
+
+       mem_cgroup_replace_folio(old, new);
+       shmem_update_stats(new, nr_pages);
+       shmem_update_stats(old, -nr_pages);
        xa_unlock_irq(&swap_mapping->i_pages);

-       if (unlikely(error)) {
-               /*
-                * Is this possible?  I think not, now that our callers
-                * check both the swapcache flag and folio->private
-                * after getting the folio lock; but be defensive.
-                * Reverse old to newpage for clear and free.
-                */
-               old = new;
-       } else {
-               folio_add_lru(new);
-               *foliop = new;
-       }
+       folio_add_lru(new);
+       *foliop = new;

        folio_clear_swapcache(old);
        old->private = NULL;
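A note on the WARN_ON_ONCE() usage, which is general macro behaviour rather
than anything specific to this commit: the condition is evaluated on every
iteration, the warning and backtrace are emitted only the first time it is
true, and the macro returns the condition, so the xas_store() above still runs
unconditionally. A purely hypothetical variant gating extra handling on the
result is sketched below; the patch deliberately keeps no such recovery path.

/*
 * Hypothetical illustration only -- not part of the patch.  It shows
 * that WARN_ON_ONCE() returns its (always evaluated) condition.
 */
if (WARN_ON_ONCE(xas_store(&xas, new) != old))
        pr_err("shmem: swap cache slot did not contain the expected folio\n");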