bcachefs: debug_check_iterators no longer requires BCACHEFS_DEBUG

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet
2025-05-10 15:25:56 -04:00
parent 110bb6cb8b
commit c4e3889440
3 changed files with 69 additions and 47 deletions

View File

@@ -393,8 +393,6 @@ do { \
"Disables rewriting of btree nodes during mark and sweep")\
BCH_DEBUG_PARAM(btree_shrinker_disabled, \
"Disables the shrinker callback for the btree node cache")\
BCH_DEBUG_PARAM(verify_btree_locking, \
"Enable additional asserts for btree locking") \
BCH_DEBUG_PARAM(verify_btree_ondisk, \
"Reread btree nodes at various points to verify the " \
"mergesort in the read path against modifications " \
@@ -404,15 +402,17 @@ do { \
"compare them") \
BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \
"Don't use the write buffer for backpointers, enabling "\
"extra runtime checks")
"extra runtime checks") \
BCH_DEBUG_PARAM(debug_check_btree_locking, \
"Enable additional asserts for btree locking") \
BCH_DEBUG_PARAM(debug_check_iterators, \
"Enables extra verification for btree iterators")
/* Parameters that should only be compiled in debug mode: */
#define BCH_DEBUG_PARAMS_DEBUG() \
BCH_DEBUG_PARAM(expensive_debug_checks, \
"Enables various runtime debugging checks that " \
"significantly affect performance") \
BCH_DEBUG_PARAM(debug_check_iterators, \
"Enables extra verification for btree iterators") \
BCH_DEBUG_PARAM(debug_check_btree_accounting, \
"Verify btree accounting for keys within a node") \
BCH_DEBUG_PARAM(journal_seq_verify, \

View File

@@ -114,11 +114,9 @@ static inline bool btree_path_pos_in_node(struct btree_path *path,
!btree_path_pos_after_node(path, b);
}
/* Btree iterator: */
/* Debug: */
#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_path_verify_cached(struct btree_trans *trans,
static void __bch2_btree_path_verify_cached(struct btree_trans *trans,
struct btree_path *path)
{
struct bkey_cached *ck;
@@ -135,7 +133,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
btree_node_unlock(trans, path, 0);
}
static void bch2_btree_path_verify_level(struct btree_trans *trans,
static void __bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_path *path, unsigned level)
{
struct btree_path_level *l;
@@ -147,16 +145,13 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
struct printbuf buf3 = PRINTBUF;
const char *msg;
if (!static_branch_unlikely(&bch2_debug_check_iterators))
return;
l = &path->l[level];
tmp = l->iter;
locked = btree_node_locked(path, level);
if (path->cached) {
if (!level)
bch2_btree_path_verify_cached(trans, path);
__bch2_btree_path_verify_cached(trans, path);
return;
}
@@ -217,7 +212,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
msg, level, buf1.buf, buf2.buf, buf3.buf);
}
static void bch2_btree_path_verify(struct btree_trans *trans,
static void __bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path)
{
struct bch_fs *c = trans->c;
@@ -229,22 +224,22 @@ static void bch2_btree_path_verify(struct btree_trans *trans,
break;
}
bch2_btree_path_verify_level(trans, path, i);
__bch2_btree_path_verify_level(trans, path, i);
}
bch2_btree_path_verify_locks(path);
}
void bch2_trans_verify_paths(struct btree_trans *trans)
void __bch2_trans_verify_paths(struct btree_trans *trans)
{
struct btree_path *path;
unsigned iter;
trans_for_each_path(trans, path, iter)
bch2_btree_path_verify(trans, path);
__bch2_btree_path_verify(trans, path);
}
static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
{
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
@@ -256,11 +251,11 @@ static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter
!btree_type_has_snapshot_field(iter->btree_id));
if (iter->update_path)
bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
__bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
__bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
!iter->pos.snapshot);
@@ -274,16 +269,13 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
bkey_gt(iter->pos, iter->k.p)));
}
static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
struct btree_iter *iter, struct bkey_s_c k)
static int __bch2_btree_iter_verify_ret(struct btree_trans *trans,
struct btree_iter *iter, struct bkey_s_c k)
{
struct btree_iter copy;
struct bkey_s_c prev;
int ret = 0;
if (!static_branch_unlikely(&bch2_debug_check_iterators))
return 0;
if (!(iter->flags & BTREE_ITER_filter_snapshots))
return 0;
@@ -324,7 +316,7 @@ static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
return ret;
}
void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
struct bpos pos)
{
bch2_trans_verify_not_unlocked_or_in_restart(trans);
@@ -357,19 +349,40 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
}
#else
static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_trans *trans,
struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k) { return 0; }
struct btree_path *path, unsigned l)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
__bch2_btree_path_verify_level(trans, path, l);
}
#endif
/*
 * Fast-path wrapper: only calls the out-of-line path verification when the
 * bch2_debug_check_iterators static key is enabled (per this commit, the key
 * no longer requires BCACHEFS_DEBUG), so the common case costs one
 * predicted-not-taken branch.
 */
static inline void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
__bch2_btree_path_verify(trans, path);
}
/*
 * Wrapper gating iterator verification behind the bch2_debug_check_iterators
 * static key; a no-op unless the key is enabled at runtime.
 */
static inline void bch2_btree_iter_verify(struct btree_trans *trans,
struct btree_iter *iter)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
__bch2_btree_iter_verify(trans, iter);
}
/*
 * Wrapper gating the iterator entry/exit invariant checks behind the
 * bch2_debug_check_iterators static key.
 */
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
__bch2_btree_iter_verify_entry_exit(iter);
}
/*
 * Wrapper for the returned-key verification: runs the out-of-line check only
 * when the bch2_debug_check_iterators static key is enabled, otherwise
 * returns 0 (success) without a call.
 */
static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k)
{
return static_branch_unlikely(&bch2_debug_check_iterators)
? __bch2_btree_iter_verify_ret(trans, iter, k)
: 0;
}
/* Btree path: fixups after btree updates */

View File

@@ -285,14 +285,23 @@ static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex
: __bch2_trans_mutex_lock(trans, lock);
}
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
struct bpos pos) {}
#endif
/* Debug: */
void __bch2_trans_verify_paths(struct btree_trans *);
void __bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
/*
 * Verify every path in the transaction, but only when the
 * bch2_debug_check_iterators static key is enabled — replaces the old
 * CONFIG_BCACHEFS_DEBUG-only compile-time stub with a runtime toggle.
 */
static inline void bch2_trans_verify_paths(struct btree_trans *trans)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
__bch2_trans_verify_paths(trans);
}
/*
 * Assert that @pos in @btree is covered by a held lock, gated behind the
 * bch2_debug_check_iterators static key; a no-op when the key is disabled.
 */
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id btree,
struct bpos pos)
{
if (static_branch_unlikely(&bch2_debug_check_iterators))
__bch2_assert_pos_locked(trans, btree, pos);
}
void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
struct btree *, struct bkey_packed *);