mm/page-writeback: drop usage of folio_index

folio_index() is only needed for mixed usage of the page cache and the
swap cache.  The remaining three callers in page-writeback are for page
cache tag marking.  The swap cache space doesn't use tags (it explicitly
sets mapping_set_no_writeback_tags), so use folio->index here directly.
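
For context, folio_index() only differs from folio->index when the folio
sits in the swap cache.  Below is a paraphrased sketch modelled on the
helper in include/linux/pagemap.h; it is not the verbatim definition, and
the swap_cache_index() name is the recent form and may differ between
kernel versions:

	/* Paraphrased sketch of folio_index(), not the verbatim source. */
	static inline pgoff_t folio_index(struct folio *folio)
	{
		/* Swap cache folios are indexed by their swap slot... */
		if (unlikely(folio_test_swapcache(folio)))
			return swap_cache_index(folio);
		/* ...everything else by its page cache index. */
		return folio->index;
	}

Since the callers changed below only ever tag page cache folios, the swap
cache branch is dead there, which is why plain folio->index is enough.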

Link: https://lkml.kernel.org/r/20250825163721.17734-1-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
@@ -38,10 +38,10 @@
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/mm_inline.h>
#include <linux/shmem_fs.h>
#include <trace/events/writeback.h>
#include "internal.h"
#include "swap.h"
/*
* Sleep at most 200ms at a time in balance_dirty_pages().
@@ -2705,12 +2705,18 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
 {
 	unsigned long flags;
 
+	/*
+	 * Shmem writeback relies on swap, and swap writeback is LRU based,
+	 * not using the dirty mark.
+	 */
+	VM_WARN_ON_ONCE(folio_test_swapcache(folio) || shmem_mapping(mapping));
+
 	xa_lock_irqsave(&mapping->i_pages, flags);
 	if (folio->mapping) { /* Race with truncate? */
 		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
 		folio_account_dirtied(folio, mapping);
-		__xa_set_mark(&mapping->i_pages, folio_index(folio),
-				PAGECACHE_TAG_DIRTY);
+		__xa_set_mark(&mapping->i_pages, folio->index,
+				PAGECACHE_TAG_DIRTY);
 	}
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
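
The PAGECACHE_TAG_DIRTY mark set at folio->index above is what tag-based
lookups walk later when writeback gathers dirty folios.  A minimal sketch
of such a consumer, assuming filemap_get_folios_tag() and the folio_batch
helpers; the for_each_dirty_folio() wrapper itself is hypothetical and
only for illustration:

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	/* Hypothetical example: visit each folio tagged dirty in [start, end]. */
	static void for_each_dirty_folio(struct address_space *mapping,
					 pgoff_t start, pgoff_t end)
	{
		struct folio_batch fbatch;
		unsigned int i;

		folio_batch_init(&fbatch);
		while (filemap_get_folios_tag(mapping, &start, end,
					      PAGECACHE_TAG_DIRTY, &fbatch)) {
			for (i = 0; i < folio_batch_count(&fbatch); i++) {
				/*
				 * Tag lookups go by the page cache index,
				 * i.e. the same folio->index tagged above.
				 */
				struct folio *folio = fbatch.folios[i];

				(void)folio;	/* a real caller would write it back */
			}
			folio_batch_release(&fbatch);
		}
	}
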
@@ -2989,7 +2995,7 @@ bool __folio_end_writeback(struct folio *folio)
 		xa_lock_irqsave(&mapping->i_pages, flags);
 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
-		__xa_clear_mark(&mapping->i_pages, folio_index(folio),
+		__xa_clear_mark(&mapping->i_pages, folio->index,
 					PAGECACHE_TAG_WRITEBACK);
 		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
 			struct bdi_writeback *wb = inode_to_wb(inode);
@@ -3026,7 +3032,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
 	if (mapping && mapping_use_writeback_tags(mapping)) {
-		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
+		XA_STATE(xas, &mapping->i_pages, folio->index);
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
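
The mapping_use_writeback_tags() check above is the switch the commit
message refers to: swap cache address spaces opt out of writeback tags by
calling mapping_set_no_writeback_tags() when they are set up.  A sketch of
the two helpers, paraphrased from include/linux/pagemap.h rather than
quoted verbatim:

	/* Paraphrased sketch of the AS_NO_WRITEBACK_TAGS helpers. */
	static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
	{
		set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
	}

	static inline bool mapping_use_writeback_tags(struct address_space *mapping)
	{
		return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
	}

Because the swap cache sets AS_NO_WRITEBACK_TAGS, the tag updates in this
file only ever run for page cache folios, so indexing the xarray marks by
folio->index is sufficient.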