btrfs: raid56: prepare verify_one_sector() to support bs > ps cases
The function verify_one_sector() assumes each fs block can be mapped by
one page, blocking bs > ps support for raid56.

Prepare it for bs > ps cases by:

- Introduce helpers to get a paddrs pointer

  Thankfully all the higher layer bios should still be aligned to the fs
  block size, thus a fs block should still be fully covered by the bio.

  Introduce sector_paddrs_in_rbio() and rbio_stripe_paddrs(), which
  return a paddrs pointer inside btrfs_raid_bio::bio_paddrs[] or
  stripe_paddrs[]. The pointer can be directly passed to
  btrfs_calculate_block_csum_pages() to verify the checksum.

- Open code btrfs_check_block_csum()

  btrfs_check_block_csum() only supports fs blocks backed by large
  folios, but for raid56 we can have fs blocks backed by multiple
  non-contiguous pages, e.g. for direct IO and encoded read/write/send.
  So instead of using btrfs_check_block_csum(), open code it to use
  btrfs_calculate_block_csum_pages().

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
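As context for the diff below: the verification path now works from an array of per-step physical addresses rather than a single address per block. The following is a minimal, self-contained user-space sketch of that idea, not btrfs code; the PAGE_SIZE/BLOCK_SIZE values, the toy csum_update() hash, and every name in it are illustrative assumptions standing in for what a helper like btrfs_calculate_block_csum_pages() is expected to do over a paddrs array.

/*
 * Standalone sketch only: a "block" larger than the page size is described
 * by one pointer per page-sized step, and the steps need not be contiguous.
 * The checksum is accumulated step by step over the whole block.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE   4096u            /* assumed ps */
#define BLOCK_SIZE  16384u           /* assumed bs > ps */
#define STEPS       (BLOCK_SIZE / PAGE_SIZE)

/* Toy rolling checksum standing in for the real csum algorithm. */
static uint32_t csum_update(uint32_t csum, const uint8_t *data, size_t len)
{
        for (size_t i = 0; i < len; i++)
                csum = (csum << 5) + csum + data[i];
        return csum;
}

/* Checksum one fs block given one pointer per page-sized step. */
static uint32_t block_csum_pages(uint8_t *const pages[STEPS])
{
        uint32_t csum = 5381;

        for (unsigned int step = 0; step < STEPS; step++)
                csum = csum_update(csum, pages[step], PAGE_SIZE);
        return csum;
}

int main(void)
{
        uint8_t *pages[STEPS];

        /* Separate allocations model non-contiguous pages (direct IO etc.). */
        for (unsigned int i = 0; i < STEPS; i++) {
                pages[i] = malloc(PAGE_SIZE);
                memset(pages[i], i + 1, PAGE_SIZE);
        }

        printf("block csum: 0x%08x\n", block_csum_pages(pages));

        for (unsigned int i = 0; i < STEPS; i++)
                free(pages[i]);
        return 0;
}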
@@ -732,6 +732,13 @@ static phys_addr_t rbio_qstripe_step_paddr(const struct btrfs_raid_bio *rbio,
         return rbio_stripe_step_paddr(rbio, rbio->nr_data + 1, sector_nr, step_nr);
 }
 
+/* Return a paddr pointer into the rbio::stripe_paddrs[] for the specified sector. */
+static phys_addr_t *rbio_stripe_paddrs(const struct btrfs_raid_bio *rbio,
+                                       unsigned int stripe_nr, unsigned int sector_nr)
+{
+        return &rbio->stripe_paddrs[rbio_paddr_index(rbio, stripe_nr, sector_nr, 0)];
+}
+
 /*
  * The first stripe in the table for a logical address
  * has the lock. rbios are added in one of three ways:
@@ -1003,6 +1010,41 @@ static phys_addr_t sector_paddr_in_rbio(struct btrfs_raid_bio *rbio,
         return rbio->stripe_paddrs[index];
 }
 
+/*
+ * Get paddr pointer for the sector specified by its @stripe_nr and @sector_nr.
+ *
+ * @rbio:          The raid bio
+ * @stripe_nr:     Stripe number, valid range [0, real_stripe)
+ * @sector_nr:     Sector number inside the stripe,
+ *                 valid range [0, stripe_nsectors)
+ * @bio_list_only: Whether to use sectors inside the bio list only.
+ *
+ * The read/modify/write code wants to reuse the original bio page as much
+ * as possible, and only use stripe_sectors as fallback.
+ *
+ * Return NULL if bio_list_only is set but the specified sector has no
+ * corresponding bio.
+ */
+static phys_addr_t *sector_paddrs_in_rbio(struct btrfs_raid_bio *rbio,
+                                          int stripe_nr, int sector_nr,
+                                          bool bio_list_only)
+{
+        phys_addr_t *ret = NULL;
+        const int index = rbio_paddr_index(rbio, stripe_nr, sector_nr, 0);
+
+        ASSERT(index >= 0 && index < rbio->nr_sectors * rbio->sector_nsteps);
+
+        scoped_guard(spinlock, &rbio->bio_list_lock) {
+                if (rbio->bio_paddrs[index] != INVALID_PADDR || bio_list_only) {
+                        /* Don't return sector without a valid page pointer */
+                        if (rbio->bio_paddrs[index] != INVALID_PADDR)
+                                ret = &rbio->bio_paddrs[index];
+                        return ret;
+                }
+        }
+        return &rbio->stripe_paddrs[index];
+}
+
 /*
  * Similar to sector_paddr_in_rbio(), but with extra consideration for
  * bs > ps cases, where we can have multiple steps for a fs block.
@@ -1832,10 +1874,9 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio,
                              int stripe_nr, int sector_nr)
 {
         struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
-        phys_addr_t paddr;
+        phys_addr_t *paddrs;
         u8 csum_buf[BTRFS_CSUM_SIZE];
         u8 *csum_expected;
-        int ret;
 
         if (!rbio->csum_bitmap || !rbio->csum_buf)
                 return 0;
@@ -1848,16 +1889,18 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio,
          * bio list if possible.
          */
         if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
-                paddr = sector_paddr_in_rbio(rbio, stripe_nr, sector_nr, 0);
+                paddrs = sector_paddrs_in_rbio(rbio, stripe_nr, sector_nr, 0);
         } else {
-                paddr = rbio_stripe_paddr(rbio, stripe_nr, sector_nr);
+                paddrs = rbio_stripe_paddrs(rbio, stripe_nr, sector_nr);
         }
 
         csum_expected = rbio->csum_buf +
                         (stripe_nr * rbio->stripe_nsectors + sector_nr) *
                         fs_info->csum_size;
-        ret = btrfs_check_block_csum(fs_info, paddr, csum_buf, csum_expected);
-        return ret;
+        btrfs_calculate_block_csum_pages(fs_info, paddrs, csum_buf);
+        if (unlikely(memcmp(csum_buf, csum_expected, fs_info->csum_size) != 0))
+                return -EIO;
+        return 0;
 }
 
 static void recover_vertical_step(struct btrfs_raid_bio *rbio,
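The sector_paddrs_in_rbio() comment in the second hunk describes a lookup order: prefer the page supplied by the submitted bio, fall back to the rbio's own stripe page, and return NULL when bio_list_only is set but the bio has no page for that sector. Below is a tiny standalone sketch of that selection logic only; toy_rbio, toy_sector_addrs, INVALID and the addresses used are all invented for illustration and are not btrfs code.

/* Standalone illustration of the bio-first / stripe-fallback lookup. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID ((uintptr_t)0)          /* stands in for INVALID_PADDR */

struct toy_rbio {
        uintptr_t bio_addrs[8];         /* pages supplied by the submitted bios */
        uintptr_t stripe_addrs[8];      /* pages owned by the rbio itself */
};

/*
 * Prefer the bio copy of a sector; fall back to the stripe copy.
 * With only_bio set, never fall back: return NULL when the bio has no
 * page for this index, mirroring the bio_list_only behaviour.
 */
static uintptr_t *toy_sector_addrs(struct toy_rbio *rbio, int index, bool only_bio)
{
        if (rbio->bio_addrs[index] != INVALID)
                return &rbio->bio_addrs[index];
        if (only_bio)
                return NULL;
        return &rbio->stripe_addrs[index];
}

int main(void)
{
        struct toy_rbio rbio = {
                .bio_addrs    = { 0x1000, INVALID },
                .stripe_addrs = { 0x8000, 0x9000 },
        };

        printf("sector 0 -> %#lx\n", (unsigned long)*toy_sector_addrs(&rbio, 0, false));
        printf("sector 1 -> %#lx\n", (unsigned long)*toy_sector_addrs(&rbio, 1, false));
        printf("sector 1 (bio only) -> %p\n", (void *)toy_sector_addrs(&rbio, 1, true));
        return 0;
}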