mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-14 00:39:30 -04:00
iomap: add public helpers for uptodate state manipulation
Add a new iomap_start_folio_write helper to abstract away the
write_bytes_pending handling, and export it and the existing
iomap_finish_folio_write for non-iomap writeback in fuse.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
[hch: split from a larger patch]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/20250710133343.399917-7-hch@lst.de
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
committed by: Christian Brauner
parent: f4fa7981fa
commit: 9caf1ea80c
@@ -1527,7 +1527,18 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
 
-static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+void iomap_start_folio_write(struct inode *inode, struct folio *folio,
+		size_t len)
+{
+	struct iomap_folio_state *ifs = folio->private;
+
+	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+	if (ifs)
+		atomic_add(len, &ifs->write_bytes_pending);
+}
+EXPORT_SYMBOL_GPL(iomap_start_folio_write);
+
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
 		size_t len)
 {
 	struct iomap_folio_state *ifs = folio->private;
@@ -1538,6 +1549,7 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
 	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
 		folio_end_writeback(folio);
 }
+EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
 
 /*
  * We're now finished for good with this ioend structure. Update the page
@@ -1660,7 +1672,6 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
 		loff_t pos, loff_t end_pos, unsigned int dirty_len)
 {
 	struct iomap_ioend *ioend = wpc->wb_ctx;
-	struct iomap_folio_state *ifs = folio->private;
 	size_t poff = offset_in_folio(folio, pos);
 	unsigned int ioend_flags = 0;
 	unsigned int map_len = min_t(u64, dirty_len,
@@ -1703,8 +1714,7 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
 	if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
 		goto new_ioend;
 
-	if (ifs)
-		atomic_add(map_len, &ifs->write_bytes_pending);
+	iomap_start_folio_write(wpc->inode, folio, map_len);
 
 	/*
 	 * Clamp io_offset and io_size to the incore EOF so that ondisk
@@ -1877,7 +1887,7 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		 * all blocks.
		 */
		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
-		atomic_inc(&ifs->write_bytes_pending);
+		iomap_start_folio_write(inode, folio, 1);
	}
 
	/*
@@ -461,6 +461,11 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
 		loff_t pos, loff_t end_pos, unsigned int dirty_len);
 int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
 
+void iomap_start_folio_write(struct inode *inode, struct folio *folio,
+		size_t len);
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+		size_t len);
+
 int iomap_writepages(struct iomap_writepage_ctx *wpc);
 
 /*
Reference in New Issue
Block a user