Merge tag 'xfs-fixes-7.1-rc4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Carlos Maiolino:
 "A few bug fixes, nothing really special stands out"

* tag 'xfs-fixes-7.1-rc4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: Fix typo in comment
  xfs: fix the "limiting open zones" message
  xfs: flush delalloc blocks on ENOSPC in xfs_trans_alloc_icreate
  xfs: check da node block pad field during scrub
  xfs: fix memory leak for data allocated by xfs_zone_gc_data_alloc()
  xfs: fix memory leak on error in xfs_alloc_zone_info()
  xfs: check directory data block header padding in scrub
  xfs: zero directory data block padding on write verification
  xfs: zero entire directory data block header region at init
  xfs: remove the meaningless XFS_ALLOC_FLAG_FREEING
This commit is contained in:
Linus Torvalds
2026-05-15 13:17:46 -07:00
11 changed files with 52 additions and 21 deletions

View File

@@ -382,6 +382,7 @@ xfs_dir3_data_write_verify(
struct xfs_mount *mp = bp->b_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
struct xfs_dir3_data_hdr *datahdr3 = bp->b_addr;
xfs_failaddr_t fa;
fa = xfs_dir3_data_verify(bp);
@@ -396,6 +397,11 @@ xfs_dir3_data_write_verify(
if (bip)
hdr3->lsn = cpu_to_be64(bip->bli_item.li_lsn);
/*
* Zero padding that may be stale from old kernels.
*/
datahdr3->pad = 0;
xfs_buf_update_cksum(bp, XFS_DIR3_DATA_CRC_OFF);
}
@@ -728,7 +734,6 @@ xfs_dir3_data_init(
struct xfs_dir2_data_unused *dup;
struct xfs_dir2_data_free *bf;
int error;
int i;
/*
* Get the buffer set up for the block.
@@ -741,13 +746,16 @@ xfs_dir3_data_init(
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_DATA_BUF);
/*
* Initialize the header.
* Initialize the whole directory header region to zero
* so that all padding, bestfree entries, and any
* future header fields are clean.
*/
hdr = bp->b_addr;
memset(hdr, 0, geo->data_entry_offset);
if (xfs_has_crc(mp)) {
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
memset(hdr3, 0, sizeof(*hdr3));
hdr3->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp));
hdr3->owner = cpu_to_be64(args->owner);
@@ -759,10 +767,6 @@ xfs_dir3_data_init(
bf = xfs_dir2_data_bestfree_p(mp, hdr);
bf[0].offset = cpu_to_be16(geo->data_entry_offset);
bf[0].length = cpu_to_be16(geo->blksize - geo->data_entry_offset);
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
bf[i].length = 0;
bf[i].offset = 0;
}
/*
* Set up an unused entry for the block's body.

View File

@@ -1414,8 +1414,7 @@ xfs_refcount_finish_one(
if (rcur == NULL) {
struct xfs_perag *pag = to_perag(ri->ri_group);
error = xfs_alloc_read_agf(pag, tp,
XFS_ALLOC_FLAG_FREEING, &agbp);
error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
if (error)
return error;

View File

@@ -251,6 +251,17 @@ xchk_ino_set_preen(
trace_xchk_ino_preen(sc, ino, __return_address);
}
/*
 * Record that a block indexed by a file fork could be optimized ("preened"):
 * set XFS_SCRUB_OFLAG_PREEN in the scrub result flags so userspace knows the
 * metadata is correct but suboptimal, and emit a tracepoint identifying the
 * fork, the file offset, and the caller's return address.
 */
void
xchk_fblock_set_preen(
struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset)
{
/* Preen is advisory only: no corruption flag, repair is optional. */
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
trace_xchk_fblock_preen(sc, whichfork, offset, __return_address);
}
/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(

View File

@@ -25,6 +25,8 @@ bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
void xchk_block_set_preen(struct xfs_scrub *sc,
struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_preen(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset);
void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,

View File

@@ -454,7 +454,12 @@ xchk_da_btree_block(
}
}
/* XXX: Check hdr3.pad32 once we know how to fix it. */
if (xfs_has_crc(ip->i_mount)) {
struct xfs_da3_node_hdr *nodehdr3 = blk->bp->b_addr;
if (nodehdr3->__pad32)
xchk_da_set_preen(ds, level);
}
break;
default:
xchk_da_set_corrupt(ds, level);

View File

@@ -492,7 +492,12 @@ xchk_directory_data_bestfree(
goto out;
xchk_buffer_recheck(sc, bp);
/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
if (xfs_has_crc(sc->mp)) {
struct xfs_dir3_data_hdr *hdr3 = bp->b_addr;
if (hdr3->pad)
xchk_fblock_set_preen(sc, XFS_DATA_FORK, lblk);
}
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_buf;

View File

@@ -699,12 +699,6 @@ xfs_create(
*/
error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
&tp);
if (error == -ENOSPC) {
/* flush outstanding delalloc blocks and retry */
xfs_flush_inodes(mp);
error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
resblks, &tp);
}
if (error)
goto out_parent;

View File

@@ -350,7 +350,7 @@ xfs_dax_notify_dev_failure(
/*
* Shutdown fs from a force umount in pre-remove case which won't fail,
* so errors can be ignored. Otherwise, shutdown the filesystem with
* CORRUPT flag if error occured or notify.want_shutdown was set during
* CORRUPT flag if error occurred or notify.want_shutdown was set during
* RMAP querying.
*/
if (mf_flags & MF_MEM_PRE_REMOVE)

View File

@@ -1199,10 +1199,21 @@ xfs_trans_alloc_icreate(
{
struct xfs_trans *tp;
bool retried = false;
bool flushed = false;
int error;
retry:
error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
if (error == -ENOSPC && !flushed) {
/*
* Flush all delalloc blocks to reclaim space from speculative
* preallocation. This is similar to the quota retry below
* but targets FS-wide ENOSPC.
*/
xfs_flush_inodes(mp);
flushed = true;
goto retry;
}
if (error)
return error;

View File

@@ -1170,7 +1170,7 @@ xfs_calc_open_zones(
if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
mp->m_max_open_zones = bdev_open_zones;
xfs_info(mp, "limiting open zones to %u due to hardware limit.\n",
xfs_info(mp, "limiting open zones to %u due to hardware limit.",
bdev_open_zones);
}
@@ -1217,7 +1217,7 @@ xfs_alloc_zone_info(
return zi;
out_free_bitmaps:
while (--i > 0)
while (--i >= 0)
kvfree(zi->zi_used_bucket_bitmap[i]);
kfree(zi);
return NULL;

View File

@@ -1221,7 +1221,7 @@ xfs_zone_gc_mount(
if (data->oz)
xfs_open_zone_put(data->oz);
out_free_gc_data:
kfree(data);
xfs_zone_gc_data_free(data);
return error;
}