mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-09 14:56:54 -04:00
bcachefs: Implement a new gc that only recalcs oldest gen
Full mark and sweep gc doesn't (yet?) work with the new btree key cache code, but it also blocks updates to interior btree nodes for the duration and isn't really necessary in practice; we aren't currently attempting to repair errors in allocation info at runtime. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
committed by
Kent Overstreet
parent
1ada160618
commit
451570a5bc
@@ -887,6 +887,82 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
 * node pointers currently never have cached pointers that can become stale:
 */
static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id id)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;	/* set by for_each_btree_key() on iteration error */

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, id, POS_MIN, BTREE_ITER_PREFETCH, k, ret) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const struct bch_extent_ptr *ptr;

		/*
		 * mark_lock guards bucket marks while we read/update the
		 * per-bucket gc_gen for every pointer in this key.
		 */
		percpu_down_read(&c->mark_lock);
		bkey_for_each_ptr(ptrs, ptr) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
			struct bucket *g = PTR_BUCKET(ca, ptr, false);

			/*
			 * Pull gc_gen back to the oldest gen still referenced
			 * by a live pointer; bch2_gc_gens() then copies it to
			 * oldest_gen once the walk completes.
			 * NOTE(review): assumes gen_after(a, b) is nonzero when
			 * a is newer than b — confirm against its definition.
			 */
			if (gen_after(g->gc_gen, ptr->gen))
				g->gc_gen = ptr->gen;

			/*
			 * NOTE(review): presumably gen_after() returns the gen
			 * delta, so this triggers when the pointer is > 32
			 * gens stale; the rewrite is not implemented yet.
			 */
			if (gen_after(g->mark.gen, ptr->gen) > 32) {
				/* rewrite btree node */

			}
		}
		percpu_up_read(&c->mark_lock);
	}

	bch2_trans_exit(&trans);
	return ret;
}
|
||||
|
||||
int bch2_gc_gens(struct bch_fs *c)
|
||||
{
|
||||
struct bch_dev *ca;
|
||||
struct bucket_array *buckets;
|
||||
struct bucket *g;
|
||||
unsigned i;
|
||||
int ret;
|
||||
|
||||
down_read(&c->state_lock);
|
||||
|
||||
for_each_member_device(ca, c, i) {
|
||||
down_read(&ca->bucket_lock);
|
||||
buckets = bucket_array(ca);
|
||||
|
||||
for_each_bucket(g, buckets)
|
||||
g->gc_gen = g->mark.gen;
|
||||
up_read(&ca->bucket_lock);
|
||||
}
|
||||
|
||||
for (i = 0; i < BTREE_ID_NR; i++)
|
||||
if (btree_node_type_needs_gc(i)) {
|
||||
ret = bch2_gc_btree_gens(c, i);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
for_each_member_device(ca, c, i) {
|
||||
down_read(&ca->bucket_lock);
|
||||
buckets = bucket_array(ca);
|
||||
|
||||
for_each_bucket(g, buckets)
|
||||
g->oldest_gen = g->gc_gen;
|
||||
up_read(&ca->bucket_lock);
|
||||
}
|
||||
err:
|
||||
up_read(&c->state_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Btree coalescing */
|
||||
|
||||
static void recalc_packed_keys(struct btree *b)
|
||||
@@ -1262,7 +1338,14 @@ static int bch2_gc_thread(void *arg)
|
||||
last = atomic_long_read(&clock->now);
|
||||
last_kick = atomic_read(&c->kick_gc);
|
||||
|
||||
/*
|
||||
* Full gc is currently incompatible with btree key cache:
|
||||
*/
|
||||
#if 0
|
||||
ret = bch2_gc(c, NULL, false, false);
|
||||
#else
|
||||
ret = bch2_gc_gens(c);
|
||||
#endif
|
||||
if (ret)
|
||||
bch_err(c, "btree gc failed: %i", ret);
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ void bch2_coalesce(struct bch_fs *);
|
||||
|
||||
struct journal_keys;
|
||||
int bch2_gc(struct bch_fs *, struct journal_keys *, bool, bool);
|
||||
int bch2_gc_gens(struct bch_fs *);
|
||||
void bch2_gc_thread_stop(struct bch_fs *);
|
||||
int bch2_gc_thread_start(struct bch_fs *);
|
||||
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);
|
||||
|
||||
@@ -39,6 +39,7 @@ struct bucket {
|
||||
|
||||
u16 io_time[2];
|
||||
u8 oldest_gen;
|
||||
u8 gc_gen;
|
||||
unsigned gen_valid:1;
|
||||
};
|
||||
|
||||
|
||||
@@ -486,9 +486,16 @@ STORE(bch2_fs)
|
||||
bch2_coalesce(c);
|
||||
|
||||
if (attr == &sysfs_trigger_gc) {
|
||||
/*
|
||||
* Full gc is currently incompatible with btree key cache:
|
||||
*/
|
||||
#if 0
|
||||
down_read(&c->state_lock);
|
||||
bch2_gc(c, NULL, false, false);
|
||||
up_read(&c->state_lock);
|
||||
#else
|
||||
bch2_gc_gens(c);
|
||||
#endif
|
||||
}
|
||||
|
||||
if (attr == &sysfs_trigger_alloc_write) {
|
||||
|
||||
Reference in New Issue
Block a user