Merge tag 'xfs-fixes-6.19-rc2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Carlos Maiolino:
 "This contains a few fixes for zoned device support, a UAF, a compiler
  warning, and some cleanups"

* tag 'xfs-fixes-6.19-rc2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: fix the zoned RT growfs check for zone alignment
  xfs: validate that zoned RT devices are zone aligned
  xfs: fix XFS_ERRTAG_FORCE_ZERO_RANGE for zoned file system
  xfs: fix a memory leak in xfs_buf_item_init()
  xfs: fix stupid compiler warning
  xfs: fix a UAF problem in xattr repair
  xfs: ignore discard return value
Merged by Linus Torvalds, 2025-12-20 12:45:35 -08:00
8 changed files with 80 additions and 41 deletions


@@ -301,6 +301,21 @@ xfs_validate_rt_geometry(
 	    sbp->sb_rbmblocks != xfs_expected_rbmblocks(sbp))
 		return false;
 
+	if (xfs_sb_is_v5(sbp) &&
+	    (sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_ZONED)) {
+		uint32_t	mod;
+
+		/*
+		 * Zoned RT devices must be aligned to the RT group size,
+		 * because garbage collection assumes that all zones have the
+		 * same size to avoid insane complexity if that weren't the
+		 * case.
+		 */
+		div_u64_rem(sbp->sb_rextents, sbp->sb_rgextents, &mod);
+		if (mod)
+			return false;
+	}
+
 	return true;
 }
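The check above reduces to requiring that the RT extent count be an exact
multiple of the RT group extent count, so every zone comes out the same size.
A minimal userspace sketch of the same rule (plain C with hypothetical
geometry values; the kernel uses the div_u64_rem helper because open-coded
64-bit division is not available there):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* A zoned RT device is valid only if rextents splits into whole groups. */
	static bool rt_zone_aligned(uint64_t rextents, uint32_t rgextents)
	{
		return rextents % rgextents == 0;
	}

	int main(void)
	{
		/* Hypothetical geometry: 65536 extents per RT group (zone). */
		printf("%d\n", rt_zone_aligned(1048576, 65536));	/* 1: aligned */
		printf("%d\n", rt_zone_aligned(1048577, 65536));	/* 0: rejected */
		return 0;
	}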


@@ -333,7 +333,6 @@ xrep_xattr_salvage_remote_attr(
 		.attr_filter		= ent->flags & XFS_ATTR_NSP_ONDISK_MASK,
 		.namelen		= rentry->namelen,
 		.name			= rentry->name,
-		.value			= ab->value,
 		.valuelen		= be32_to_cpu(rentry->valuelen),
 	};
 	unsigned int			namesize;
@@ -363,6 +362,7 @@ xrep_xattr_salvage_remote_attr(
 		error = -EDEADLOCK;
 	if (error)
 		return error;
+	args.value = ab->value;
 
 	/* Look up the remote value and stash it for reconstruction. */
 	error = xfs_attr3_leaf_getvalue(leaf_bp, &args);
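The use-after-free fixed here is a classic stale-pointer pattern: args.value
latched ab->value while the xattr value buffer could still be reallocated to
fit the remote value, leaving args.value dangling. Taking the pointer only
after the buffer has reached its final size fixes it. A minimal sketch of the
pattern, with hypothetical grow_buf/salvage helpers standing in for the xfs
code:

	#include <stdlib.h>
	#include <string.h>

	struct valuebuf { char *value; size_t len; };

	/* May realloc vb->value, invalidating any pointer taken from it earlier. */
	static int grow_buf(struct valuebuf *vb, size_t len)
	{
		char *p = realloc(vb->value, len);

		if (!p)
			return -1;
		vb->value = p;
		vb->len = len;
		return 0;
	}

	static int salvage(struct valuebuf *vb, size_t valuelen)
	{
		char *dst;	/* BUG pattern: dst = vb->value taken here... */

		if (grow_buf(vb, valuelen))	/* ...dangles after this realloc */
			return -1;

		dst = vb->value;	/* fix: take the pointer after growing */
		memset(dst, 0, valuelen);
		return 0;
	}

	int main(void)
	{
		struct valuebuf vb = { 0 };

		return salvage(&vb, 4096);
	}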


@@ -737,7 +737,7 @@ xfs_attr_recover_work(
 	struct xfs_attri_log_item	*attrip = ATTRI_ITEM(lip);
 	struct xfs_attr_intent		*attr;
 	struct xfs_mount		*mp = lip->li_log->l_mp;
-	struct xfs_inode		*ip;
+	struct xfs_inode		*ip = NULL;
 	struct xfs_da_args		*args;
 	struct xfs_trans		*tp;
 	struct xfs_trans_res		resv;
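Initializing the pointer is the usual cure for a maybe-uninitialized warning
in functions that clean up through a common exit label: the compiler cannot
always prove the failure paths never read the variable. A hypothetical
reduction of the shape (lookup/release are stand-ins, not the xfs functions):

	struct inode;
	int lookup(struct inode **ipp);		/* sets *ipp only on success */
	void release(struct inode *ip);

	int recover(void)
	{
		struct inode *ip = NULL;	/* without "= NULL", gcc may warn
						   that ip is used uninitialized */
		int error = lookup(&ip);

		if (error)
			goto out;		/* ip never assigned on this path */
		/* ... work with ip ... */
	out:
		if (ip)				/* NULL keeps this test well defined */
			release(ip);
		return error;
	}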


@@ -896,6 +896,7 @@ xfs_buf_item_init(
 	map_size = DIV_ROUND_UP(chunks, NBWORD);
 
 	if (map_size > XFS_BLF_DATAMAP_SIZE) {
 		xfs_buf_item_free_format(bip);
+		kmem_cache_free(xfs_buf_item_cache, bip);
 		xfs_err(mp,
 "buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",


@@ -108,7 +108,7 @@ xfs_discard_endio(
  * list. We plug and chain the bios so that we only need a single completion
  * call to clear all the busy extents once the discards are complete.
  */
-int
+void
 xfs_discard_extents(
 	struct xfs_mount	*mp,
 	struct xfs_busy_extents	*extents)
@@ -116,7 +116,6 @@ xfs_discard_extents(
 	struct xfs_extent_busy	*busyp;
 	struct bio		*bio = NULL;
 	struct blk_plug		plug;
-	int			error = 0;
 
 	blk_start_plug(&plug);
 	list_for_each_entry(busyp, &extents->extent_list, list) {
@@ -126,18 +125,10 @@ xfs_discard_extents(
 
 		trace_xfs_discard_extent(xg, busyp->bno, busyp->length);
 
-		error = __blkdev_issue_discard(btp->bt_bdev,
+		__blkdev_issue_discard(btp->bt_bdev,
 				xfs_gbno_to_daddr(xg, busyp->bno),
 				XFS_FSB_TO_BB(mp, busyp->length),
 				GFP_KERNEL, &bio);
-		if (error && error != -EOPNOTSUPP) {
-			xfs_info(mp,
- "discard failed for extent [0x%llx,%u], error %d",
-				(unsigned long long)busyp->bno,
-				busyp->length,
-				error);
-			break;
-		}
 	}
 
 	if (bio) {
@@ -148,8 +139,6 @@ xfs_discard_extents(
 		xfs_discard_endio_work(&extents->endio_work);
 	}
 	blk_finish_plug(&plug);
-
-	return error;
 }
 
 /*
@@ -385,9 +374,7 @@ xfs_trim_perag_extents(
 		 * list after this function call, as it may have been freed by
 		 * the time control returns to us.
 		 */
-		error = xfs_discard_extents(pag_mount(pag), extents);
-		if (error)
-			break;
+		xfs_discard_extents(pag_mount(pag), extents);
 
 		if (xfs_trim_should_stop())
 			break;
@@ -496,12 +483,10 @@ xfs_discard_rtdev_extents(
 
 		trace_xfs_discard_rtextent(mp, busyp->bno, busyp->length);
 
-		error = __blkdev_issue_discard(bdev,
+		__blkdev_issue_discard(bdev,
 				xfs_rtb_to_daddr(mp, busyp->bno),
 				XFS_FSB_TO_BB(mp, busyp->length),
 				GFP_NOFS, &bio);
-		if (error)
-			break;
 	}
 
 	xfs_discard_free_rtdev_extents(tr);
@@ -741,9 +726,7 @@ xfs_trim_rtgroup_extents(
 		 * list after this function call, as it may have been freed by
 		 * the time control returns to us.
 		 */
-		error = xfs_discard_extents(rtg_mount(rtg), tr.extents);
-		if (error)
-			break;
+		xfs_discard_extents(rtg_mount(rtg), tr.extents);
 
 		low = tr.restart_rtx;
 	} while (!xfs_trim_should_stop() && low <= high);
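The plug-and-chain pattern the comment describes batches all discard bios
behind one blk_plug and threads them onto a single chain, so one completion
handler covers the whole busy-extent list. A hedged sketch of that submission
shape (kernel-flavored, not buildable standalone; struct extent and
batch_done are stand-ins for the xfs types and endio handler):

	struct extent { struct list_head list; sector_t sector; sector_t nr_sects; };

	static void issue_discards(struct block_device *bdev, struct list_head *head)
	{
		struct blk_plug plug;
		struct bio *bio = NULL;
		struct extent *e;

		blk_start_plug(&plug);
		list_for_each_entry(e, head, list)
			/* Each call chains another discard onto *bio; the return
			 * value is ignored since discard is purely advisory. */
			__blkdev_issue_discard(bdev, e->sector, e->nr_sects,
					GFP_KERNEL, &bio);
		if (bio) {
			bio->bi_end_io = batch_done;	/* one completion for all */
			submit_bio(bio);
		}
		blk_finish_plug(&plug);
	}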


@@ -6,7 +6,7 @@
 struct fstrim_range;
 struct xfs_mount;
 struct xfs_busy_extents;
 
-int xfs_discard_extents(struct xfs_mount *mp, struct xfs_busy_extents *busy);
+void xfs_discard_extents(struct xfs_mount *mp, struct xfs_busy_extents *busy);
 int xfs_ioc_trim(struct xfs_mount *mp, struct fstrim_range __user *fstrim);
 #endif /* XFS_DISCARD_H */


@@ -1240,6 +1240,38 @@ xfs_falloc_insert_range(
 	return xfs_insert_file_space(XFS_I(inode), offset, len);
 }
 
+/*
+ * For various operations we need to zero up to one block at each end of
+ * the affected range. For zoned file systems this will require a space
+ * allocation, for which we need a reservation ahead of time.
+ */
+#define XFS_ZONED_ZERO_EDGE_SPACE_RES	2
+
+/*
+ * Zero range implements a full zeroing mechanism but is only used in limited
+ * situations. It is more efficient to allocate unwritten extents than to
+ * perform zeroing here, so use an errortag to randomly force zeroing on DEBUG
+ * kernels for added test coverage.
+ *
+ * On zoned file systems, the error is already injected by
+ * xfs_file_zoned_fallocate, which then reserves the additional space needed.
+ * We only check for this extra space reservation here.
+ */
+static inline bool
+xfs_falloc_force_zero(
+	struct xfs_inode		*ip,
+	struct xfs_zone_alloc_ctx	*ac)
+{
+	if (xfs_is_zoned_inode(ip)) {
+		if (ac->reserved_blocks > XFS_ZONED_ZERO_EDGE_SPACE_RES) {
+			ASSERT(IS_ENABLED(CONFIG_XFS_DEBUG));
+			return true;
+		}
+		return false;
+	}
+
+	return XFS_TEST_ERROR(ip->i_mount, XFS_ERRTAG_FORCE_ZERO_RANGE);
+}
+
 /*
  * Punch a hole and prealloc the range. We use a hole punch rather than
  * unwritten extent conversion for two reasons:
@@ -1268,14 +1300,7 @@ xfs_falloc_zero_range(
 	if (error)
 		return error;
 
-	/*
-	 * Zero range implements a full zeroing mechanism but is only used in
-	 * limited situations. It is more efficient to allocate unwritten
-	 * extents than to perform zeroing here, so use an errortag to randomly
-	 * force zeroing on DEBUG kernels for added test coverage.
-	 */
-	if (XFS_TEST_ERROR(ip->i_mount,
-			XFS_ERRTAG_FORCE_ZERO_RANGE)) {
+	if (xfs_falloc_force_zero(ip, ac)) {
 		error = xfs_zero_range(ip, offset, len, ac, NULL);
 	} else {
 		error = xfs_free_file_space(ip, offset, len, ac);
@@ -1423,13 +1448,26 @@ xfs_file_zoned_fallocate(
 {
 	struct xfs_zone_alloc_ctx	ac = { };
 	struct xfs_inode		*ip = XFS_I(file_inode(file));
+	struct xfs_mount		*mp = ip->i_mount;
+	xfs_filblks_t			count_fsb;
 	int				error;
 
-	error = xfs_zoned_space_reserve(ip->i_mount, 2, XFS_ZR_RESERVED, &ac);
+	/*
+	 * If full zeroing is forced by the error injection knob, we need a
+	 * space reservation that covers the entire range. See the comment in
+	 * xfs_zoned_write_space_reserve for the rationale for the calculation.
+	 * Otherwise just reserve space for the two boundary blocks.
+	 */
+	count_fsb = XFS_ZONED_ZERO_EDGE_SPACE_RES;
+	if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ZERO_RANGE &&
+	    XFS_TEST_ERROR(mp, XFS_ERRTAG_FORCE_ZERO_RANGE))
+		count_fsb += XFS_B_TO_FSB(mp, len) + 1;
+
+	error = xfs_zoned_space_reserve(mp, count_fsb, XFS_ZR_RESERVED, &ac);
 	if (error)
 		return error;
 
 	error = __xfs_file_fallocate(file, mode, offset, len, &ac);
-	xfs_zoned_space_unreserve(ip->i_mount, &ac);
+	xfs_zoned_space_unreserve(mp, &ac);
 	return error;
 }
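The reservation arithmetic above is worth a worked example. With the errortag
off, only the two possible edge blocks need space; with it on, the whole
range may be physically zeroed, so the reservation grows by the range rounded
up to whole blocks, plus one extra block for a range that straddles an
additional block boundary. A quick userspace check of the numbers
(hypothetical 4k block size):

	#include <stdint.h>
	#include <stdio.h>

	#define BLOCKSIZE	4096u			/* hypothetical fs block size */
	#define B_TO_FSB(len)	(((len) + BLOCKSIZE - 1) / BLOCKSIZE)
	#define ZERO_EDGE_RES	2u			/* one block at each range end */

	int main(void)
	{
		uint64_t len = 1048576;			/* 1 MiB zero range */
		uint64_t count_fsb = ZERO_EDGE_RES;
		int force_zeroing = 1;			/* pretend the errortag fired */

		if (force_zeroing)
			count_fsb += B_TO_FSB(len) + 1;	/* 2 + 256 + 1 */

		printf("reserve %llu blocks\n",
				(unsigned long long)count_fsb);	/* 259 */
		return 0;
	}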


@@ -1255,12 +1255,10 @@ xfs_growfs_check_rtgeom(
 
 	min_logfsbs = min_t(xfs_extlen_t, xfs_log_calc_minimum_size(nmp),
 			nmp->m_rsumblocks * 2);
-	kfree(nmp);
-
 	trace_xfs_growfs_check_rtgeom(mp, min_logfsbs);
 
 	if (min_logfsbs > mp->m_sb.sb_logblocks)
-		return -EINVAL;
+		goto out_inval;
 
 	if (xfs_has_zoned(mp)) {
 		uint32_t	gblocks = mp->m_groups[XG_TYPE_RTG].blocks;
@@ -1268,16 +1266,20 @@ xfs_growfs_check_rtgeom(
 		if (rextsize != 1)
 			return -EINVAL;
 
-		div_u64_rem(mp->m_sb.sb_rblocks, gblocks, &rem);
+		div_u64_rem(nmp->m_sb.sb_rblocks, gblocks, &rem);
 		if (rem) {
 			xfs_warn(mp,
 "new RT volume size (%lld) not aligned to RT group size (%d)",
-				mp->m_sb.sb_rblocks, gblocks);
-			return -EINVAL;
+				nmp->m_sb.sb_rblocks, gblocks);
+			goto out_inval;
 		}
 	}
 
+	kfree(nmp);
 	return 0;
+out_inval:
+	kfree(nmp);
+	return -EINVAL;
 }
 
 /*
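The restructuring follows the kernel's standard goto-cleanup idiom: once nmp
must stay alive until every check has run, all failure exits funnel through
one label so the kfree can be neither missed nor doubled. In miniature
(check_geometry and its fields are hypothetical, not the xfs code):

	#include <stdlib.h>

	struct mount { long logblocks; long rblocks; };

	static int check_geometry(const struct mount *mp, long min_logfsbs)
	{
		struct mount *nmp = malloc(sizeof(*nmp));

		if (!nmp)
			return -12;			/* -ENOMEM */
		nmp->rblocks = mp->rblocks + 1024;	/* fake grown geometry */

		if (min_logfsbs > mp->logblocks)
			goto out_inval;			/* every failure path... */
		if (nmp->rblocks % 256)
			goto out_inval;			/* ...shares the cleanup */

		free(nmp);
		return 0;

	out_inval:
		free(nmp);				/* freed exactly once */
		return -22;				/* -EINVAL */
	}

	int main(void)
	{
		struct mount mp = { .logblocks = 64, .rblocks = 4096 };

		return check_geometry(&mp, 32) ? 1 : 0;
	}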