mm: tidy up swap_writeout

Use a goto label to consolidate the folio unlock and return pattern, and
don't bother with an else after a return / goto.
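
As an illustrative sketch (condensed from the diff below; "cond" merely
stands in for the individual early-exit checks), instead of repeating the
exit sequence at every early return:

	if (cond) {
		folio_unlock(folio);
		return 0;
	}

each early exit now jumps to a single shared label:

	if (cond)
		goto out_unlock;
	...
out_unlock:
	folio_unlock(folio);
	return ret;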

Link: https://lkml.kernel.org/r/20250610054959.2057526-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Christoph Hellwig <hch@lst.de>
Date: 2025-06-10 07:49:39 +02:00
Committer: Andrew Morton <akpm@linux-foundation.org>
Parent: 44b1b073eb
Commit: 2d1844cdbe

mm/page_io.c
@@ -239,12 +239,11 @@ static void swap_zeromap_folio_clear(struct folio *folio)
  */
 int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 {
-	int ret;
+	int ret = 0;
+
+	if (folio_free_swap(folio))
+		goto out_unlock;
 
-	if (folio_free_swap(folio)) {
-		folio_unlock(folio);
-		return 0;
-	}
 	/*
 	 * Arch code may have to preserve more data than just the page
 	 * contents, e.g. memory tags.
@@ -252,8 +251,7 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 	ret = arch_prepare_to_swap(folio);
 	if (ret) {
 		folio_mark_dirty(folio);
-		folio_unlock(folio);
-		return ret;
+		goto out_unlock;
 	}
 
 	/*
@@ -264,20 +262,19 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 	 */
 	if (is_folio_zero_filled(folio)) {
 		swap_zeromap_folio_set(folio);
-		folio_unlock(folio);
-		return 0;
-	} else {
-		/*
-		 * Clear bits this folio occupies in the zeromap to prevent
-		 * zero data being read in from any previous zero writes that
-		 * occupied the same swap entries.
-		 */
-		swap_zeromap_folio_clear(folio);
+		goto out_unlock;
 	}
+
+	/*
+	 * Clear bits this folio occupies in the zeromap to prevent zero data
+	 * being read in from any previous zero writes that occupied the same
+	 * swap entries.
+	 */
+	swap_zeromap_folio_clear(folio);
+
 	if (zswap_store(folio)) {
 		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
-		folio_unlock(folio);
-		return 0;
+		goto out_unlock;
 	}
 	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
 		folio_mark_dirty(folio);
@@ -286,6 +283,9 @@ int swap_writeout(struct folio *folio, struct writeback_control *wbc)
 
 	__swap_writepage(folio, wbc);
 	return 0;
+out_unlock:
+	folio_unlock(folio);
+	return ret;
 }
 
 static inline void count_swpout_vm_event(struct folio *folio)