btrfs: rename the functions to count, test and get bit ranges in io trees

These functions are exported so they should have a 'btrfs_' prefix by
convention, to make it clear they are btrfs specific and to avoid
collisions with functions from elsewhere in the kernel.

So add a 'btrfs_' prefix to their names to make it clear they are from
btrfs.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Author: Filipe Manana
Authored: 2025-04-04 16:07:19 +01:00
Committed by: David Sterba
parent e965835c98
commit f81c2aea71
7 changed files with 38 additions and 37 deletions

View File

@@ -1024,8 +1024,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* very likely resulting in a larger extent after writeback is
* triggered (except in a case of free space fragmentation).
*/
if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
EXTENT_DELALLOC))
if (btrfs_test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
EXTENT_DELALLOC))
goto next;
/*

View File

@@ -1612,10 +1612,10 @@ void btrfs_find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
* all given bits set. If the returned number of bytes is greater than zero
* then @start is updated with the offset of the first byte with the bits set.
*/
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end, u64 max_bytes,
u32 bits, int contig,
struct extent_state **cached_state)
u64 btrfs_count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end, u64 max_bytes,
u32 bits, int contig,
struct extent_state **cached_state)
{
struct extent_state *state = NULL;
struct extent_state *cached;
@@ -1700,7 +1700,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
/*
* Check if the single @bit exists in the given range.
*/
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
{
struct extent_state *state;
bool bitset = false;
@@ -1726,8 +1726,8 @@ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32
return bitset;
}
void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
struct extent_state **cached_state)
void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
struct extent_state **cached_state)
{
struct extent_state *state;
@@ -1763,8 +1763,8 @@ void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
/*
* Check if the whole range [@start,@end) contains the single @bit set.
*/
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached)
bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached)
{
struct extent_state *state;
bool bitset = true;

View File

@@ -160,17 +160,17 @@ static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
u64 max_bytes, u32 bits, int contig,
struct extent_state **cached_state);
u64 btrfs_count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
u64 max_bytes, u32 bits, int contig,
struct extent_state **cached_state);
void free_extent_state(struct extent_state *state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
void get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
struct extent_state **cached_state);
bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached_state);
bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
void btrfs_get_range_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 *bits,
struct extent_state **cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_changeset *changeset);
int btrfs_clear_extent_bit_changeset(struct extent_io_tree *tree, u64 start, u64 end,

View File

@@ -374,8 +374,8 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, cached_state);
ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, cached_state);
btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
@@ -2618,7 +2618,7 @@ static bool try_release_extent_state(struct extent_io_tree *tree,
bool ret = false;
int ret2;
get_range_bits(tree, start, end, &range_bits, &cached_state);
btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state);
/*
* We can release the folio if it's locked only for ordered extent
@@ -2678,8 +2678,8 @@ bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
free_extent_map(em);
break;
}
if (test_range_bit_exists(io_tree, em->start,
extent_map_end(em) - 1, EXTENT_LOCKED))
if (btrfs_test_range_bit_exists(io_tree, em->start,
extent_map_end(em) - 1, EXTENT_LOCKED))
goto next;
/*
* If it's not in the list of modified extents, used by a fast

View File

@@ -3252,10 +3252,10 @@ static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end
if (inode->delalloc_bytes > 0) {
spin_unlock(&inode->lock);
*delalloc_start_ret = start;
delalloc_len = count_range_bits(&inode->io_tree,
delalloc_start_ret, end,
len, EXTENT_DELALLOC, 1,
cached_state);
delalloc_len = btrfs_count_range_bits(&inode->io_tree,
delalloc_start_ret, end,
len, EXTENT_DELALLOC, 1,
cached_state);
} else {
spin_unlock(&inode->lock);
}

View File

@@ -1742,8 +1742,8 @@ static int fallback_to_cow(struct btrfs_inode *inode,
* when starting writeback.
*/
btrfs_lock_extent(io_tree, start, end, &cached_state);
count = count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0, NULL);
count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0, NULL);
if (count > 0 || is_space_ino || is_reloc_ino) {
u64 bytes = count;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -2309,7 +2309,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
if (inode->defrag_bytes &&
test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
return false;
return true;
}
@@ -3377,8 +3377,8 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
return true;
if (btrfs_is_data_reloc_root(inode->root) &&
test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
NULL)) {
btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
NULL)) {
/* Skip the range without csum for data reloc inode */
btrfs_clear_extent_bits(&inode->io_tree, file_offset, end,
EXTENT_NODATASUM);
@@ -7155,7 +7155,8 @@ noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
range_end = round_up(offset + nocow_args.file_extent.num_bytes,
root->fs_info->sectorsize) - 1;
ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
EXTENT_DELALLOC);
if (ret)
return -EAGAIN;
}

View File

@@ -2409,8 +2409,8 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
u32 blocksize = rc->extent_root->fs_info->nodesize;
if (test_range_bit(&rc->processed_blocks, bytenr,
bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
if (btrfs_test_range_bit(&rc->processed_blocks, bytenr,
bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
return 1;
return 0;
}