mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-02-24 05:25:18 -05:00
bcachefs: bch2_trigger_stripe_ptr() no longer uses ec_stripes_heap_lock
Introduce per-entry locks, like with struct bucket - the stripes heap is going away. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
@@ -674,10 +674,10 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
|
||||
return -BCH_ERR_ENOMEM_mark_stripe_ptr;
|
||||
}
|
||||
|
||||
mutex_lock(&c->ec_stripes_heap_lock);
|
||||
gc_stripe_lock(m);
|
||||
|
||||
if (!m || !m->alive) {
|
||||
mutex_unlock(&c->ec_stripes_heap_lock);
|
||||
gc_stripe_unlock(m);
|
||||
struct printbuf buf = PRINTBUF;
|
||||
bch2_bkey_val_to_text(&buf, c, k);
|
||||
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
|
||||
@@ -693,7 +693,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
|
||||
.type = BCH_DISK_ACCOUNTING_replicas,
|
||||
};
|
||||
memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
|
||||
mutex_unlock(&c->ec_stripes_heap_lock);
|
||||
gc_stripe_unlock(m);
|
||||
|
||||
acc.replicas.data_type = data_type;
|
||||
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
|
||||
|
||||
@@ -39,33 +39,6 @@ static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t
|
||||
for (_b = (_buckets)->b + (_buckets)->first_bucket; \
|
||||
_b < (_buckets)->b + (_buckets)->nbuckets; _b++)
|
||||
|
||||
/*
|
||||
* Ugly hack alert:
|
||||
*
|
||||
* We need to cram a spinlock in a single byte, because that's what we have left
|
||||
* in struct bucket, and we care about the size of these - during fsck, we need
|
||||
* in memory state for every single bucket on every device.
|
||||
*
|
||||
* We used to do
|
||||
 * while (xchg(&b->lock, 1)) cpu_relax();
|
||||
* but, it turns out not all architectures support xchg on a single byte.
|
||||
*
|
||||
* So now we use bit_spin_lock(), with fun games since we can't burn a whole
|
||||
* ulong for this - we just need to make sure the lock bit always ends up in the
|
||||
* first byte.
|
||||
*/
|
||||
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
#define BUCKET_LOCK_BITNR 0
|
||||
#else
|
||||
#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
|
||||
#endif
|
||||
|
||||
/*
 * Compile-time helper: punning a ulong against a u8 lets BUILD_BUG_ON verify
 * that bit BUCKET_LOCK_BITNR of a ulong lands in the first byte, i.e. within
 * the single u8 lock field the bit-spinlock actually occupies.
 */
union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};
|
||||
|
||||
static inline void bucket_unlock(struct bucket *b)
|
||||
{
|
||||
BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
|
||||
|
||||
@@ -7,6 +7,33 @@
|
||||
|
||||
#define BUCKET_JOURNAL_SEQ_BITS 16
|
||||
|
||||
/*
|
||||
* Ugly hack alert:
|
||||
*
|
||||
* We need to cram a spinlock in a single byte, because that's what we have left
|
||||
* in struct bucket, and we care about the size of these - during fsck, we need
|
||||
* in memory state for every single bucket on every device.
|
||||
*
|
||||
* We used to do
|
||||
 * while (xchg(&b->lock, 1)) cpu_relax();
|
||||
* but, it turns out not all architectures support xchg on a single byte.
|
||||
*
|
||||
* So now we use bit_spin_lock(), with fun games since we can't burn a whole
|
||||
* ulong for this - we just need to make sure the lock bit always ends up in the
|
||||
* first byte.
|
||||
*/
|
||||
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
#define BUCKET_LOCK_BITNR 0
|
||||
#else
|
||||
#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
|
||||
#endif
|
||||
|
||||
/*
 * Compile-time helper: punning a ulong against a u8 lets BUILD_BUG_ON verify
 * that bit BUCKET_LOCK_BITNR of a ulong lands in the first byte, i.e. within
 * the single u8 lock field the bit-spinlock actually occupies.
 */
union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};
|
||||
|
||||
struct bucket {
|
||||
u8 lock;
|
||||
u8 gen_valid:1;
|
||||
|
||||
@@ -132,6 +132,20 @@ static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
|
||||
m->sectors);
|
||||
}
|
||||
|
||||
/*
 * Release the per-stripe bit-spinlock taken by gc_stripe_lock(), then wake
 * any waiters sleeping on the lock bit.
 */
static inline void gc_stripe_unlock(struct gc_stripe *s)
{
	/* Prove the lock bit falls within the one-byte s->lock field. */
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock);
	wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR);
}
|
||||
|
||||
/*
 * Acquire the per-stripe lock: sleeps (uninterruptibly) until the lock bit
 * in s->lock can be set.  Pairs with gc_stripe_unlock().
 */
static inline void gc_stripe_lock(struct gc_stripe *s)
{
	wait_on_bit_lock((void *) &s->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
|
||||
|
||||
struct bch_read_bio;
|
||||
|
||||
struct ec_stripe_buf {
|
||||
|
||||
@@ -20,12 +20,11 @@ struct stripe {
|
||||
};
|
||||
|
||||
struct gc_stripe {
|
||||
u8 lock;
|
||||
unsigned alive:1; /* does a corresponding key exist in stripes btree? */
|
||||
u16 sectors;
|
||||
|
||||
u8 nr_blocks;
|
||||
u8 nr_redundant;
|
||||
|
||||
unsigned alive:1; /* does a corresponding key exist in stripes btree? */
|
||||
u16 block_sectors[BCH_BKEY_PTRS_MAX];
|
||||
struct bch_extent_ptr ptrs[BCH_BKEY_PTRS_MAX];
|
||||
|
||||
|
||||
Reference in New Issue
Block a user