diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index a0c46aadb97d..e5e7127a4e92 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1641,7 +1641,7 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 		    srcmap->type == IOMAP_UNWRITTEN)) {
 			s64 status;
 
-			if (range_dirty) {
+			if (range_dirty && srcmap->type == IOMAP_UNWRITTEN) {
 				range_dirty = false;
 				status = iomap_zero_iter_flush_and_stale(&iter);
 			} else {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d3b8c018c883..2ace8b8ffc86 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1811,6 +1811,7 @@ xfs_buffered_write_iomap_begin(
 	if (error)
 		return error;
 
+restart:
 	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
 	if (error)
 		return error;
@@ -1838,9 +1839,27 @@ xfs_buffered_write_iomap_begin(
 	if (eof)
 		imap.br_startoff = end_fsb; /* fake hole until the end */
 
-	/* We never need to allocate blocks for zeroing or unsharing a hole. */
-	if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
-	    imap.br_startoff > offset_fsb) {
+	/* We never need to allocate blocks for unsharing a hole. */
+	if ((flags & IOMAP_UNSHARE) && imap.br_startoff > offset_fsb) {
+		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
+		goto out_unlock;
+	}
+
+	/*
+	 * We may need to zero over a hole in the data fork if it's fronted by
+	 * COW blocks and dirty pagecache. To make sure zeroing occurs, force
+	 * writeback to remap pending blocks and restart the lookup.
+	 */
+	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
+		if (filemap_range_needs_writeback(inode->i_mapping, offset,
+				offset + count - 1)) {
+			xfs_iunlock(ip, lockmode);
+			error = filemap_write_and_wait_range(inode->i_mapping,
+					offset, offset + count - 1);
+			if (error)
+				return error;
+			goto restart;
+		}
 		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
 		goto out_unlock;
 	}
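
The second xfs_iomap.c hunk follows a common kernel idiom: a condition requiring
blocking work (writeback) is detected while holding a lock, so the code drops the
lock, does the work, and jumps back to redo the lookup from scratch, since the
mapping may have changed underneath. Below is a minimal userspace sketch of that
drop-lock/flush/retry control flow, not kernel code: range_lock, needs_writeback(),
flush_range(), and zero_range() are hypothetical stand-ins for the inode lock taken
by xfs_ilock_for_iomap()/xfs_iunlock() and for filemap_range_needs_writeback()/
filemap_write_and_wait_range() in the patch.

/*
 * Minimal userspace sketch of the drop-lock/flush/retry pattern used by
 * xfs_buffered_write_iomap_begin() above. All names are hypothetical
 * stand-ins, not kernel APIs. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static bool range_dirty = true;	/* simulated dirty pagecache over the range */

static bool needs_writeback(void)
{
	return range_dirty;
}

static int flush_range(void)
{
	range_dirty = false;	/* pretend writeback remapped pending blocks */
	return 0;
}

static int zero_range(void)
{
	int error;

restart:
	pthread_mutex_lock(&range_lock);
	if (needs_writeback()) {
		/*
		 * Flushing may block and can change the mapping, so drop the
		 * lock first, write the range back, then redo the whole
		 * lookup -- mirroring the "goto restart" in the patch.
		 */
		pthread_mutex_unlock(&range_lock);
		error = flush_range();
		if (error)
			return error;
		goto restart;
	}
	printf("range is stable, safe to report as a hole and zero\n");
	pthread_mutex_unlock(&range_lock);
	return 0;
}

int main(void)
{
	return zero_range();
}

The retry, rather than flushing in place under the lock, matches the rationale in
the patch's own comment: writeback can remap pending COW blocks into the data
fork, so the extent lookup done under the lock is stale after a flush and must be
repeated, which is why the restart label sits before xfs_ilock_for_iomap().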