btrfs: remove btrfs_fs_info::compressed_write_workers

The reason end_bbio_compressed_write() queues a work item onto the
compressed_write_workers workqueue is the end_compressed_writeback()
call, which grabs all the involved folios and clears their writeback
flags, and may therefore sleep.

However, btrfs_bio::end_io() now always runs in task context, so there
is no need to queue the work anymore.

Just remove btrfs_fs_info::compressed_write_workers and
compressed_bio::write_end_work.

There is also a comment (and the corresponding flush) in close_ctree()
about the works queued onto compressed_write_workers; change it to
flush the endio workqueue instead, which is responsible for handling
all data endio functions.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author: Qu Wenruo
Date: 2025-10-23 18:32:34 +10:30
Committed by: David Sterba
Parent: 4591c3ef75
Commit: 4bbdce8417
4 changed files with 12 additions and 32 deletions
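
The net effect on the write end_io handler, pieced together from the
hunks below (the diff itself is authoritative):

/* Before: completion deferred, because end_compressed_writeback() may sleep. */
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}

/* After: bbio->end_io() always runs in task context, so finish inline. */
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);
	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now. */
	btrfs_free_compressed_folios(cb);
	bio_put(&cb->bbio.bio);
}

Since end_compressed_writeback() only clears folio writeback flags and
may sleep, calling it directly is safe once end_io is guaranteed to run
in task context.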


@@ -319,22 +319,6 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
/* the inode may be gone now */
}
static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
struct compressed_bio *cb =
container_of(work, struct compressed_bio, write_end_work);
btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
cb->bbio.bio.bi_status == BLK_STS_OK);
if (cb->writeback)
end_compressed_writeback(cb);
/* Note, our inode could be gone now */
btrfs_free_compressed_folios(cb);
bio_put(&cb->bbio.bio);
}
/*
* Do the cleanup once all the compressed pages hit the disk. This will clear
* writeback on the file pages and free the compressed pages.
@@ -345,9 +329,15 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
struct compressed_bio *cb = to_compressed_bio(bbio);
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
cb->bbio.bio.bi_status == BLK_STS_OK);
if (cb->writeback)
end_compressed_writeback(cb);
/* Note, our inode could be gone now. */
btrfs_free_compressed_folios(cb);
bio_put(&cb->bbio.bio);
}
static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
@@ -400,7 +390,6 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
cb->writeback = writeback;
INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;


@@ -63,11 +63,8 @@ struct compressed_bio {
/* Whether this is a write for writeback. */
bool writeback;
union {
/* For reads, this is the bio we are copying the data into */
struct btrfs_bio *orig_bbio;
struct work_struct write_end_work;
};
/* For reads, this is the bio we are copying the data into. */
struct btrfs_bio *orig_bbio;
/* Must be last. */
struct btrfs_bio bbio;


@@ -1774,8 +1774,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
destroy_workqueue(fs_info->endio_workers);
if (fs_info->rmw_workers)
destroy_workqueue(fs_info->rmw_workers);
if (fs_info->compressed_write_workers)
destroy_workqueue(fs_info->compressed_write_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
btrfs_destroy_workqueue(fs_info->delayed_workers);
@@ -1987,8 +1985,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
fs_info->endio_write_workers =
btrfs_alloc_workqueue(fs_info, "endio-write", flags,
max_active, 2);
fs_info->compressed_write_workers =
alloc_workqueue("btrfs-compressed-write", flags, max_active);
fs_info->endio_freespace_worker =
btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
max_active, 0);
@@ -2004,7 +2000,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
if (!(fs_info->workers &&
fs_info->delalloc_workers && fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->compressed_write_workers &&
fs_info->endio_write_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->fixup_workers &&
@@ -4291,7 +4286,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
/*
* When finishing a compressed write bio we schedule a work queue item
* to finish an ordered extent - btrfs_finish_compressed_write_work()
* to finish an ordered extent - end_bbio_compressed_write()
* calls btrfs_finish_ordered_extent() which in turns does a call to
* btrfs_queue_ordered_fn(), and that queues the ordered extent
* completion either in the endio_write_workers work queue or in the
@@ -4299,7 +4294,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
* below, so before we flush them we must flush this queue for the
* workers of compressed writes.
*/
flush_workqueue(fs_info->compressed_write_workers);
flush_workqueue(fs_info->endio_workers);
/*
* After we parked the cleaner kthread, ordered extents may have


@@ -654,7 +654,6 @@ struct btrfs_fs_info {
struct workqueue_struct *endio_workers;
struct workqueue_struct *endio_meta_workers;
struct workqueue_struct *rmw_workers;
struct workqueue_struct *compressed_write_workers;
struct btrfs_workqueue *endio_write_workers;
struct btrfs_workqueue *endio_freespace_worker;
struct btrfs_workqueue *caching_workers;