bcachefs: Refactor bch2_run_recovery_passes()

Don't use a continue statement; this simplifies the next patch, where
bch2_run_recovery_passes() will be responsible for waking up copygc and
rebalance at the appropriate time.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet
2025-04-21 11:52:35 -04:00
parent 10e42b6f25
commit d64e8e842b

View File

@@ -12,6 +12,7 @@
 #include "journal.h"
 #include "lru.h"
 #include "logged_ops.h"
+#include "movinggc.h"
 #include "rebalance.h"
 #include "recovery.h"
 #include "recovery_passes.h"
@@ -262,49 +263,45 @@ int bch2_run_recovery_passes(struct bch_fs *c)
 	 */
 	c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw;
 
-	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns) && !ret) {
-		c->next_recovery_pass = c->curr_recovery_pass + 1;
-
-		spin_lock_irq(&c->recovery_pass_lock);
+	spin_lock_irq(&c->recovery_pass_lock);
+	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns) && !ret) {
 		unsigned pass = c->curr_recovery_pass;
 
+		c->next_recovery_pass = pass + 1;
+
 		if (c->opts.recovery_pass_last &&
-		    c->curr_recovery_pass > c->opts.recovery_pass_last) {
-			spin_unlock_irq(&c->recovery_pass_lock);
+		    c->curr_recovery_pass > c->opts.recovery_pass_last)
 			break;
-		}
 
-		if (!should_run_recovery_pass(c, pass)) {
-			c->curr_recovery_pass++;
-			c->recovery_pass_done = max(c->recovery_pass_done, pass);
+		if (should_run_recovery_pass(c, pass)) {
 			spin_unlock_irq(&c->recovery_pass_lock);
-			continue;
-		}
-		spin_unlock_irq(&c->recovery_pass_lock);
-
-		ret =   bch2_run_recovery_pass(c, pass) ?:
-			bch2_journal_flush(&c->journal);
-
-		if (!ret && !test_bit(BCH_FS_error, &c->flags))
-			bch2_clear_recovery_pass_required(c, pass);
-
-		spin_lock_irq(&c->recovery_pass_lock);
-		if (c->next_recovery_pass < c->curr_recovery_pass) {
-			/*
-			 * bch2_run_explicit_recovery_pass() was called: we
-			 * can't always catch -BCH_ERR_restart_recovery because
-			 * it may have been called from another thread (btree
-			 * node read completion)
-			 */
-			ret = 0;
-			c->recovery_passes_complete &= ~(~0ULL << c->curr_recovery_pass);
-		} else {
-			c->recovery_passes_complete |= BIT_ULL(pass);
-			c->recovery_pass_done = max(c->recovery_pass_done, pass);
+			ret =   bch2_run_recovery_pass(c, pass) ?:
+				bch2_journal_flush(&c->journal);
+			if (!ret && !test_bit(BCH_FS_error, &c->flags))
+				bch2_clear_recovery_pass_required(c, pass);
+			spin_lock_irq(&c->recovery_pass_lock);
+
+			if (c->next_recovery_pass < c->curr_recovery_pass) {
+				/*
+				 * bch2_run_explicit_recovery_pass() was called: we
+				 * can't always catch -BCH_ERR_restart_recovery because
+				 * it may have been called from another thread (btree
+				 * node read completion)
+				 */
+				ret = 0;
+				c->recovery_passes_complete &= ~(~0ULL << c->curr_recovery_pass);
+			} else {
+				c->recovery_passes_complete |= BIT_ULL(pass);
+				c->recovery_pass_done = max(c->recovery_pass_done, pass);
+			}
 		}
+
 		c->curr_recovery_pass = c->next_recovery_pass;
-		spin_unlock_irq(&c->recovery_pass_lock);
 	}
+	spin_unlock_irq(&c->recovery_pass_lock);
 
 	return ret;
 }