md: factor out sync completion update into helper

Repeatedly reading 'mddev->recovery' flags in md_do_sync() is risky:
if the flags are modified during sync, the completion path may update
the wrong offset. Therefore, replace the direct 'mddev->recovery'
checks with the cached 'action'.
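
As a minimal userspace sketch (illustrative only: FLAG_SYNC,
finish_racy() and finish_snapshot() are invented for the demo and are
not kernel code), the difference between re-reading shared flags and
deciding from a one-time snapshot:

	#include <stdio.h>
	#include <stdatomic.h>

	enum sync_action { ACTION_RESYNC, ACTION_RECOVER };	/* illustrative */

	static _Atomic unsigned long recovery;	/* stands in for mddev->recovery */
	#define FLAG_SYNC 0x1UL

	/* Racy: re-reads the shared word at completion time, so the branch
	 * taken may disagree with the action that actually ran. */
	static void finish_racy(void)
	{
		if (atomic_load(&recovery) & FLAG_SYNC)
			puts("update resync_offset");
		else
			puts("update recovery_offset");
	}

	/* Safe: decides from the snapshot taken when the sync started,
	 * which is what passing the cached 'action' achieves. */
	static void finish_snapshot(enum sync_action action)
	{
		puts(action == ACTION_RESYNC ? "update resync_offset"
					     : "update recovery_offset");
	}

	int main(void)
	{
		enum sync_action action;

		atomic_store(&recovery, FLAG_SYNC);
		action = (atomic_load(&recovery) & FLAG_SYNC) ?
			 ACTION_RESYNC : ACTION_RECOVER;

		atomic_store(&recovery, 0);	/* writer clears flags mid-sync */

		finish_racy();			/* wrong: "update recovery_offset" */
		finish_snapshot(action);	/* consistent: "update resync_offset" */
		return 0;
	}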

Move the sync completion update logic into a new helper,
md_finish_sync(), which improves readability and maintainability.

The reshape completion update remains safe, as it is only performed
after a successful reshape, when MD_RECOVERY_INTR is not set and
'curr_resync' equals 'max_sectors'.
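
A condensed control-flow sketch (illustrative only, not the literal
md_do_sync() loop) of why the reshape update only happens after a
clean, complete pass:

	while (mddev->curr_resync < max_sectors) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;			/* stop early */
		/* ... sync a stripe, advance mddev->curr_resync ... */
	}
	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
		md_finish_sync(mddev, action);
	/* inside md_finish_sync(), the ACTION_RESHAPE branch re-checks
	 * MD_RECOVERY_INTR, so the capacity is only updated when the
	 * loop above completed the whole range. */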

Link: https://lore.kernel.org/linux-raid/20260105110300.1442509-9-linan666@huaweicloud.com
Signed-off-by: Li Nan <linan122@huawei.com>
Reviewed-by: Yu Kuai <yukuai@fnnas.com>
Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Li Nan
2026-01-05 19:02:56 +08:00
committed by Yu Kuai
parent af9c40ff5a
commit 6dd3aa08e8

drivers/md/md.c

@@ -9438,6 +9438,51 @@ static bool sync_io_within_limit(struct mddev *mddev)
 	       (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
 }
 
+/*
+ * Update sync offset and mddev status when sync completes
+ */
+static void md_finish_sync(struct mddev *mddev, enum sync_action action)
+{
+	struct md_rdev *rdev;
+
+	switch (action) {
+	case ACTION_RESYNC:
+	case ACTION_REPAIR:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+			mddev->curr_resync = MaxSector;
+		mddev->resync_offset = mddev->curr_resync;
+		break;
+	case ACTION_RECOVER:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+			mddev->curr_resync = MaxSector;
+		rcu_read_lock();
+		rdev_for_each_rcu(rdev, mddev)
+			if (mddev->delta_disks >= 0 &&
+			    rdev_needs_recovery(rdev, mddev->curr_resync))
+				rdev->recovery_offset = mddev->curr_resync;
+		rcu_read_unlock();
+		break;
+	case ACTION_RESHAPE:
+		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+		    mddev->delta_disks > 0 &&
+		    mddev->pers->finish_reshape &&
+		    mddev->pers->size &&
+		    !mddev_is_dm(mddev)) {
+			mddev_lock_nointr(mddev);
+			md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+			mddev_unlock(mddev);
+			if (!mddev_is_clustered(mddev))
+				set_capacity_and_notify(mddev->gendisk,
+							mddev->array_sectors);
+		}
+		break;
+	/* */
+	case ACTION_CHECK:
+	default:
+		break;
+	}
+}
+
 #define SYNC_MARKS	10
 #define SYNC_MARK_STEP	(3*HZ)
 #define UPDATE_FREQUENCY (5*60*HZ)
@@ -9453,7 +9498,6 @@ void md_do_sync(struct md_thread *thread)
 	int last_mark,m;
 	sector_t last_check;
 	int skipped = 0;
-	struct md_rdev *rdev;
 	enum sync_action action;
 	const char *desc;
 	struct blk_plug plug;
@@ -9746,46 +9790,14 @@ void md_do_sync(struct md_thread *thread)
 	}
 	mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped);
 
-	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
-	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
-		if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-			mddev->curr_resync = MaxSector;
-		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-			mddev->resync_offset = mddev->curr_resync;
-		} else {
-			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
-				rcu_read_lock();
-				rdev_for_each_rcu(rdev, mddev)
-					if (mddev->delta_disks >= 0 &&
-					    rdev_needs_recovery(rdev, mddev->curr_resync))
-						rdev->recovery_offset = mddev->curr_resync;
-				rcu_read_unlock();
-			}
-		}
-	}
+	if (mddev->curr_resync > MD_RESYNC_ACTIVE)
+		md_finish_sync(mddev, action);
 skip:
 	/* set CHANGE_PENDING here since maybe another update is needed,
 	 * so other nodes are informed. It should be harmless for normal
 	 * raid */
 	set_mask_bits(&mddev->sb_flags, 0,
 		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
 
-	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    mddev->delta_disks > 0 &&
-	    mddev->pers->finish_reshape &&
-	    mddev->pers->size &&
-	    !mddev_is_dm(mddev)) {
-		mddev_lock_nointr(mddev);
-		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
-		mddev_unlock(mddev);
-		if (!mddev_is_clustered(mddev))
-			set_capacity_and_notify(mddev->gendisk,
-						mddev->array_sectors);
-	}
-
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 		/* We completed so min/max setting can be forgotten if used. */