Merge tag 'for-7.1-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - fix a warning when allocating memory for readahead: __GFP_NOWARN was
   accidentally dropped when setting mapping constraints

 - in tracepoint of file sync, fix sleeping in atomic context when
   handling dentries

 - harden initial loading of block group on crafted/fuzzed images,
   iterate all chunk mapping entries unconditionally

 - fix release of the dirty pages io tree so it happens only after the
   submitted io has been checked for errors

 - fix incorrect inode size after remount when using fallocate KEEP_SIZE
   mode (only affects filesystems with the 'no-holes' feature disabled)

* tag 'for-7.1-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix incorrect i_size after remount caused by KEEP_SIZE prealloc gap
  btrfs: only release the dirty pages io tree after successful writes
  btrfs: tracepoints: fix sleep while in atomic context in btrfs_sync_file()
  btrfs: always pass __GFP_NOWARN from add_ra_bio_pages()
  btrfs: fix check_chunk_block_group_mappings() to iterate all chunk maps
This commit is contained in:
Linus Torvalds
2026-05-15 13:22:07 -07:00
6 changed files with 56 additions and 35 deletions

View File

@@ -2412,29 +2412,25 @@ static struct btrfs_block_group *btrfs_create_block_group(
*/
static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
u64 start = 0;
struct rb_node *node;
int ret = 0;
while (1) {
/*
* This is called during mount from btrfs_read_block_groups(), before
* any background threads are started, so no concurrent writers can
* modify the mapping_tree. No lock is needed here.
*/
for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
struct btrfs_chunk_map *map;
struct btrfs_block_group *bg;
/*
* btrfs_find_chunk_map() will return the first chunk map
* intersecting the range, so setting @length to 1 is enough to
* get the first chunk.
*/
map = btrfs_find_chunk_map(fs_info, start, 1);
if (!map)
break;
map = rb_entry(node, struct btrfs_chunk_map, rb_node);
bg = btrfs_lookup_block_group(fs_info, map->start);
if (unlikely(!bg)) {
btrfs_err(fs_info,
"chunk start=%llu len=%llu doesn't have corresponding block group",
map->start, map->chunk_len);
ret = -EUCLEAN;
btrfs_free_chunk_map(map);
break;
}
if (unlikely(bg->start != map->start || bg->length != map->chunk_len ||
@@ -2447,12 +2443,9 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
bg->start, bg->length,
bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
ret = -EUCLEAN;
btrfs_free_chunk_map(map);
btrfs_put_block_group(bg);
break;
}
start = map->start + map->chunk_len;
btrfs_free_chunk_map(map);
btrfs_put_block_group(bg);
}
return ret;

View File

@@ -407,22 +407,18 @@ static noinline int add_ra_bio_pages(struct inode *inode,
end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
/*
* Avoid direct reclaim when the caller does not allow it. Since
* add_ra_bio_pages() is always speculative, suppress allocation warnings
* in either case.
*/
/* Avoid direct reclaim when the caller does not allow it. */
constraint_gfp = ~__GFP_FS;
cache_gfp = GFP_NOFS | __GFP_NOWARN;
if (!direct_reclaim) {
constraint_gfp = ~(__GFP_FS | __GFP_DIRECT_RECLAIM) | __GFP_NOWARN;
cache_gfp = (GFP_NOFS & ~__GFP_DIRECT_RECLAIM) | __GFP_NOWARN;
} else {
constraint_gfp = (~__GFP_FS) | __GFP_NOWARN;
cache_gfp = GFP_NOFS | __GFP_NOWARN;
constraint_gfp &= ~__GFP_DIRECT_RECLAIM;
cache_gfp &= ~__GFP_DIRECT_RECLAIM;
}
while (cur < compressed_end) {
pgoff_t page_end;
pgoff_t pg_index = cur >> PAGE_SHIFT;
gfp_t masked_constraint_gfp;
u32 add_size;
if (pg_index > end_index)
@@ -449,8 +445,14 @@ static noinline int add_ra_bio_pages(struct inode *inode,
continue;
}
folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, constraint_gfp),
0, NULL);
/*
* Since add_ra_bio_pages() is always speculative, suppress
* allocation warnings.
*/
masked_constraint_gfp = mapping_gfp_constraint(mapping, constraint_gfp);
masked_constraint_gfp |= __GFP_NOWARN;
folio = filemap_alloc_folio(masked_constraint_gfp, 0, NULL);
if (!folio)
break;

View File

@@ -4686,6 +4686,7 @@ static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
free_extent_buffer_stale(eb);
}
}
btrfs_extent_io_tree_release(dirty_pages);
}
static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,

View File

@@ -9299,10 +9299,38 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
u64 range_start;
u64 range_end;
if (cur_offset > actual_len)
i_size = actual_len;
else
i_size = cur_offset;
/*
* Make sure the file_extent_tree covers the entire
* range [old_i_size, new_i_size) before we update
* disk_i_size. Without this, a previous KEEP_SIZE
* prealloc that extended past i_size (and was lost
* across umount/mount because file_extent_tree is
* only populated up to round_up(i_size) on inode
* load) can leave a gap inside this range. That gap
* would cause btrfs_inode_safe_disk_i_size_write()
* (via find_contiguous_extent_bit() starting at 0)
* to truncate disk_i_size to the start of the gap,
* making the persisted size smaller than i_size.
*/
range_start = round_down(inode->i_size, fs_info->sectorsize);
range_end = round_up(i_size, fs_info->sectorsize);
ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode),
range_start, range_end - range_start);
if (ret) {
btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans);
break;
}
i_size_write(inode, i_size);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}

View File

@@ -1293,14 +1293,13 @@ static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
blk_finish_plug(&plug);
ret2 = btrfs_wait_extents(fs_info, dirty_pages);
btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);
if (ret)
return ret;
else if (ret2)
if (ret2)
return ret2;
else
return 0;
btrfs_extent_io_tree_release(&trans->transaction->dirty_pages);
return 0;
}
/*

View File

@@ -771,10 +771,8 @@ TRACE_EVENT(btrfs_sync_file,
TP_fast_assign(
struct dentry *dentry = file_dentry(file);
struct inode *inode = file_inode(file);
struct dentry *parent = dget_parent(dentry);
struct inode *parent_inode = d_inode(parent);
struct inode *parent_inode = d_inode(dentry->d_parent);
dput(parent);
TP_fast_assign_fsid(btrfs_sb(inode->i_sb));
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->parent = btrfs_ino(BTRFS_I(parent_inode));