btrfs: allow folios to be released while ordered extent is finishing

When the release_folio callback (from struct address_space_operations) is
invoked we don't allow the folio to be released if its range is currently
locked in the inode's io_tree, since the lock may indicate that the folio
is needed by the task that locked the range.

However, if the range is locked only because an ordered extent is
finishing, then we can safely allow the folio to be released, because
ordered extent completion doesn't need to use the folio at all.

When we are under memory pressure, the kernel starts writeback of dirty
pages (folios) with the goal of releasing them from the page cache once
writeback completes. However, this often is not possible on btrfs
because:

  * Once the writeback completes we queue the ordered extent completion;

  * Once the ordered extent completion starts, we lock the range in the
    inode's io_tree (at btrfs_finish_one_ordered());

  * If the release_folio callback is called while the folio's range is
    locked in the inode's io_tree, we don't allow the folio to be
    released, so the kernel has to try to release memory elsewhere,
    which may trigger more writeback or evict other pages from the page
    cache that would be more useful to keep around for applications.

In contrast, we do allow the folio to be released when the release_folio
callback is invoked after writeback finishes but before ordered extent
completion starts or locks the range, or after ordered extent completion
unlocks the range.

Improve on this by detecting whether the range is locked for ordered
extent completion and, if it is, allowing the folio to be released. The
detection is achieved with a new bit (EXTENT_FINISHING_ORDERED) that is
set in the io_tree when the range is locked during ordered extent
completion.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 32c523c578
parent cbfb4cbf45
Author:     Filipe Manana
AuthorDate: 2025-03-25 12:55:54 +00:00
Commit:     David Sterba

4 files changed, 60 insertions(+), 26 deletions(-)
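
To make the rule concrete, here is a minimal userspace C model of the
release decision this commit introduces. It is a sketch, not kernel code:
folio_releasable() and the MODEL_* constants are hypothetical stand-ins
for the EXTENT_LOCKED and EXTENT_FINISHING_ORDERED io_tree bits.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the io_tree bits used by this commit. */
#define MODEL_EXTENT_LOCKED            (1u << 0)
#define MODEL_EXTENT_FINISHING_ORDERED (1u << 1)

/*
 * Model of the new try_release_extent_state() rule: a folio may be
 * released unless its range is locked for a reason other than ordered
 * extent completion (which never touches the folio itself).
 */
static bool folio_releasable(unsigned int range_bits)
{
        if ((range_bits & MODEL_EXTENT_LOCKED) &&
            !(range_bits & MODEL_EXTENT_FINISHING_ORDERED))
                return false; /* locked by a task that may need the folio */
        return true;
}

int main(void)
{
        /* Unlocked range: releasable (same as before the change). */
        printf("%d\n", folio_releasable(0));
        /* Locked for ordered extent completion: now releasable. */
        printf("%d\n", folio_releasable(MODEL_EXTENT_LOCKED |
                                        MODEL_EXTENT_FINISHING_ORDERED));
        /* Locked by some other task: still not releasable. */
        printf("%d\n", folio_releasable(MODEL_EXTENT_LOCKED));
        return 0;
}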

--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c

@@ -1752,6 +1752,28 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32
 	return bitset;
 }
 
+void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits)
+{
+	struct extent_state *state;
+
+	*bits = 0;
+
+	spin_lock(&tree->lock);
+	state = tree_search(tree, start);
+	while (state) {
+		if (state->start > end)
+			break;
+		*bits |= state->state;
+		if (state->end >= end)
+			break;
+		state = next_state(state);
+	}
+	spin_unlock(&tree->lock);
+}
+
 /*
  * Check if the whole range [@start,@end) contains the single @bit set.
  */
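
A note on the helper above: get_range_bits() returns the union (bitwise
OR) of the state bits of every extent_state record that overlaps the
queried range, walked under tree->lock. The following userspace sketch
models that aggregation; struct model_state, model_get_range_bits() and
the MODEL_* constants are illustrative stand-ins, not kernel API (the
kernel keeps the states sorted in an rb-tree, not an array).

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of an extent_state record. */
struct model_state {
        uint64_t start;
        uint64_t end;   /* inclusive, as in the io_tree */
        uint32_t state;
};

#define MODEL_EXTENT_LOCKED            (1u << 0)
#define MODEL_EXTENT_FINISHING_ORDERED (1u << 1)

/* Model of get_range_bits(): OR together the bits of every state that
 * overlaps [start, end]. */
static uint32_t model_get_range_bits(const struct model_state *states,
                                     int nr, uint64_t start, uint64_t end)
{
        uint32_t bits = 0;

        for (int i = 0; i < nr; i++) {
                if (states[i].end < start || states[i].start > end)
                        continue;
                bits |= states[i].state;
        }
        return bits;
}

int main(void)
{
        const struct model_state finishing = {
                0, 4095, MODEL_EXTENT_LOCKED | MODEL_EXTENT_FINISHING_ORDERED
        };
        const struct model_state plain = { 0, 4095, MODEL_EXTENT_LOCKED };

        /* Locked for ordered extent completion: both bits visible. */
        printf("0x%x\n", (unsigned)model_get_range_bits(&finishing, 1, 0, 4095));
        /* Locked by some other task: only the locked bit visible. */
        printf("0x%x\n", (unsigned)model_get_range_bits(&plain, 1, 0, 4095));
        return 0;
}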

--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h

@@ -37,6 +37,11 @@ enum {
 	 * that is left for the ordered extent completion.
 	 */
 	ENUM_BIT(EXTENT_DELALLOC_NEW),
+	/*
+	 * Mark that a range is being locked for finishing an ordered extent.
+	 * Used together with EXTENT_LOCKED.
+	 */
+	ENUM_BIT(EXTENT_FINISHING_ORDERED),
 	/*
 	 * When an ordered extent successfully completes for a region marked as
 	 * a new delalloc range, use this flag when clearing a new delalloc
@@ -165,6 +170,7 @@ void free_extent_state(struct extent_state *state);
 bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
 		    struct extent_state *cached_state);
 bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
+void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			     u32 bits, struct extent_changeset *changeset);
 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,

--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c

@@ -2623,33 +2623,37 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
 {
 	u64 start = folio_pos(folio);
 	u64 end = start + folio_size(folio) - 1;
-	bool ret;
+	u32 range_bits;
+	u32 clear_bits;
+	int ret;
 
-	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
-		ret = false;
-	} else {
-		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
-				   EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
-				   EXTENT_QGROUP_RESERVED);
-		int ret2;
+	get_range_bits(tree, start, end, &range_bits);
 
-		/*
-		 * At this point we can safely clear everything except the
-		 * locked bit, the nodatasum bit and the delalloc new bit.
-		 * The delalloc new bit will be cleared by ordered extent
-		 * completion.
-		 */
-		ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
+	/*
+	 * We can release the folio if it's locked only for ordered extent
+	 * completion, since that doesn't require using the folio.
+	 */
+	if ((range_bits & EXTENT_LOCKED) &&
+	    !(range_bits & EXTENT_FINISHING_ORDERED))
+		return false;
 
-		/* if clear_extent_bit failed for enomem reasons,
-		 * we can't allow the release to continue.
-		 */
-		if (ret2 < 0)
-			ret = false;
-		else
-			ret = true;
-	}
-	return ret;
+	clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW |
+		       EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED |
+		       EXTENT_FINISHING_ORDERED);
+	/*
+	 * At this point we can safely clear everything except the locked,
+	 * nodatasum, delalloc new and finishing ordered bits. The delalloc new
+	 * bit will be cleared by ordered extent completion.
+	 */
+	ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
+
+	/*
+	 * If clear_extent_bit failed for enomem reasons, we can't allow the
+	 * release to continue.
+	 */
+	if (ret < 0)
+		return false;
+	return true;
 }
 
 /*

--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c

@@ -3129,8 +3129,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
 	 * depending on their current state).
 	 */
 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
-		clear_bits |= EXTENT_LOCKED;
-		lock_extent(io_tree, start, end, &cached_state);
+		clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
+		__lock_extent(io_tree, start, end,
+			      EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
+			      &cached_state);
 	}
 
 	if (freespace_inode)
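
For context on this last hunk: lock_extent() locks a range with only the
EXTENT_LOCKED bit, so the ordered extent completion path switches to
__lock_extent() in order to set EXTENT_FINISHING_ORDERED atomically with
taking the lock. A sketch of the wrapper relationship, paraphrased from
fs/btrfs/extent-io-tree.h (treat the exact signature as an assumption):

/* Paraphrased sketch: lock_extent() is the plain wrapper that passes
 * only EXTENT_LOCKED to __lock_extent(); callers that need extra bits
 * set atomically with the lock, like btrfs_finish_one_ordered(), call
 * __lock_extent() directly. */
static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                              struct extent_state **cached)
{
        return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}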