Merge tag 'bcachefs-2025-03-13' of git://evilpiepirate.org/bcachefs

Pull bcachefs fixes from Kent Overstreet:
 "Roxana caught an uninitialized value that might explain some of the
  rebalance weirdness we're still tracking down - cool.

  Otherwise pretty minor"

* tag 'bcachefs-2025-03-13' of git://evilpiepirate.org/bcachefs:
  bcachefs: bch2_get_random_u64_below()
  bcachefs: target_congested -> get_random_u32_below()
  bcachefs: fix tiny leak in bch2_dev_add()
  bcachefs: Make sure trans is unlocked when submitting read IO
  bcachefs: Initialize from_inode members for bch_io_opts
  bcachefs: Fix b->written overflow
@@ -1186,7 +1186,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			     le64_to_cpu(i->journal_seq),
 			     b->written, b->written + sectors, ptr_written);
 
-		b->written += sectors;
+		b->written = min(b->written + sectors, btree_sectors(c));
 
 		if (blacklisted && !first)
			continue;
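The clamp above keeps b->written from running past the end of the btree node when an on-disk bset claims more sectors than the node has left. A toy illustration of why an unclamped counter is dangerous (stand-in constants and types, not bcachefs code):

/*
 * Illustrative only: models the hazard of letting a sector count run past
 * the node size.  NODE_SECTORS and the unsigned types are stand-ins, not
 * the real struct btree fields.
 */
#include <stdio.h>

#define NODE_SECTORS 512u		/* stand-in for btree_sectors(c) */

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned written = 480, sectors = 100;	/* bset claims too many sectors */

	unsigned unclamped = written + sectors;				/* 580 */
	unsigned clamped   = min_u(written + sectors, NODE_SECTORS);	/* 512 */

	printf("space left, unclamped: %u\n", NODE_SECTORS - unclamped); /* wraps to 4294967228 */
	printf("space left, clamped:   %u\n", NODE_SECTORS - clamped);	 /* 0 */
	return 0;
}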
@@ -99,7 +99,7 @@ static inline bool ptr_better(struct bch_fs *c,
 
 		/* Pick at random, biased in favor of the faster device: */
 
-		return bch2_rand_range(l1 + l2) > l1;
+		return bch2_get_random_u64_below(l1 + l2) > l1;
 	}
 
 	if (bch2_force_reconstruct_read)
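The changed line is a weighted coin flip: draw a value below l1 + l2 and compare it against l1, so the pointer whose device has the lower latency wins in proportion to the latencies. A standalone simulation of that bias, using plain rand() and invented latencies rather than anything from bcachefs:

/*
 * Sketch of the biased pick: draw r uniformly in [0, l1 + l2) and prefer
 * device 1 when r > l1, so device 1 wins with probability ~l2 / (l1 + l2).
 * Plain rand() and made-up latencies; not bcachefs code.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long rand_below(unsigned long ceil)
{
	return (unsigned long)rand() % ceil;	/* slightly biased, fine for a demo */
}

int main(void)
{
	unsigned long l1 = 100, l2 = 400;	/* device 1 has 4x lower latency */
	unsigned long wins = 0, trials = 1000000;

	for (unsigned long i = 0; i < trials; i++)
		wins += rand_below(l1 + l2) > l1;

	/* expect ~0.8: the faster device serves ~l2/(l1+l2) of the reads */
	printf("device 1 picked %.3f of the time\n", (double)wins / trials);
	return 0;
}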
@@ -1198,6 +1198,7 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
 		opts->_name##_from_inode = true;		\
 	} else {						\
 		opts->_name = c->opts._name;			\
+		opts->_name##_from_inode = false;		\
 	}
 	BCH_INODE_OPTS()
 #undef x
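This hunk lands inside an X-macro: every option listed by BCH_INODE_OPTS() gets the same if/else body, and the fix makes the else branch initialize the _from_inode flag as well. A toy version of the pattern, with invented option names and structs standing in for the real bcachefs types:

/*
 * Toy version of the X-macro pattern used by bch2_inode_opts_get(): the real
 * BCH_INODE_OPTS() list and struct layouts differ; the option names and
 * structs below are invented for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define MY_OPTS()		\
	x(compression)		\
	x(background_target)

struct my_inode   { unsigned bi_compression, bi_background_target; };
struct my_fs_opts { unsigned compression, background_target; };
struct my_io_opts {
#define x(_name) unsigned _name; bool _name##_from_inode;
	MY_OPTS()
#undef x
};

/*
 * Per option: prefer the inode's value (stored +1 so 0 means "unset"),
 * otherwise fall back to the filesystem default, and record which one won.
 */
static void opts_get(struct my_io_opts *opts, const struct my_fs_opts *fs,
		     const struct my_inode *inode)
{
#define x(_name)						\
	if (inode->bi_##_name) {				\
		opts->_name = inode->bi_##_name - 1;		\
		opts->_name##_from_inode = true;		\
	} else {						\
		opts->_name = fs->_name;			\
		opts->_name##_from_inode = false;		\
	}
	MY_OPTS()
#undef x
}

int main(void)
{
	struct my_fs_opts fs = { .compression = 1, .background_target = 2 };
	struct my_inode inode = { .bi_compression = 4 };	/* only compression overridden */
	struct my_io_opts opts;

	opts_get(&opts, &fs, &inode);
	printf("compression=%u from_inode=%d\n",
	       opts.compression, opts.compression_from_inode);
	printf("background_target=%u from_inode=%d\n",
	       opts.background_target, opts.background_target_from_inode);
	return 0;
}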
@@ -59,7 +59,7 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)
 	}
 	rcu_read_unlock();
 
-	return bch2_rand_range(nr * CONGESTED_MAX) < total;
+	return get_random_u32_below(nr * CONGESTED_MAX) < total;
 }
 
 #else
@@ -951,12 +951,6 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		goto retry_pick;
 	}
 
-	/*
-	 * Unlock the iterator while the btree node's lock is still in
-	 * cache, before doing the IO:
-	 */
-	bch2_trans_unlock(trans);
-
 	if (flags & BCH_READ_NODECODE) {
 		/*
 		 * can happen if we retry, and the extent we were going to read
@@ -1113,6 +1107,15 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		trace_and_count(c, read_split, &orig->bio);
 	}
 
+	/*
+	 * Unlock the iterator while the btree node's lock is still in
+	 * cache, before doing the IO:
+	 */
+	if (!(flags & BCH_READ_IN_RETRY))
+		bch2_trans_unlock(trans);
+	else
+		bch2_trans_unlock_long(trans);
+
 	if (!rbio->pick.idx) {
 		if (unlikely(!rbio->have_ioref)) {
 			struct printbuf buf = PRINTBUF;
@@ -1160,6 +1163,8 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	if (likely(!(flags & BCH_READ_IN_RETRY))) {
 		return 0;
 	} else {
+		bch2_trans_unlock(trans);
+
 		int ret;
 
 		rbio->context = RBIO_CONTEXT_UNBOUND;
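Taken together, the three hunks above ensure the btree transaction is unlocked before the read is submitted, in the normal path and in the synchronous retry path alike. A generic demonstration of why holding a lock across blocking IO hurts, using a plain pthread mutex rather than the bcachefs transaction API:

/*
 * Not the bcachefs API: a generic demonstration that whoever holds a shared
 * lock across a slow "IO" stalls every other locker for the IO's duration.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;

static void *reader_holding_lock(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&trans_lock);
	sleep(1);			/* stand-in for a slow device read */
	pthread_mutex_unlock(&trans_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader_holding_lock, NULL);
	usleep(100 * 1000);		/* let the "IO" start while the lock is held */

	/*
	 * This caller only wants the lock briefly, yet it waits out the whole
	 * IO.  Unlocking before the IO, as the patch does for the read path,
	 * keeps lock hold times independent of device latency.
	 */
	pthread_mutex_lock(&trans_lock);
	puts("got the lock only after the IO finished");
	pthread_mutex_unlock(&trans_lock);

	pthread_join(t, NULL);
	return 0;
}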
@@ -1811,7 +1811,11 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 		goto err_late;
 
 	up_write(&c->state_lock);
-	return 0;
+out:
+	printbuf_exit(&label);
+	printbuf_exit(&errbuf);
+	bch_err_fn(c, ret);
+	return ret;
 
 err_unlock:
 	mutex_unlock(&c->sb_lock);
@@ -1820,10 +1824,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 	if (ca)
 		bch2_dev_free(ca);
 	bch2_free_super(&sb);
-	printbuf_exit(&label);
-	printbuf_exit(&errbuf);
-	bch_err_fn(c, ret);
-	return ret;
+	goto out;
 err_late:
 	up_write(&c->state_lock);
 	ca = NULL;
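The two hunks above convert bch2_dev_add() to a single cleanup exit: the success path used to return 0 directly and leak label and errbuf, while the error path duplicated the teardown; now both converge on out:. The same idiom in miniature, with plain malloc/free standing in for the printbufs:

/*
 * The leak and its fix in miniature (not the real bch2_dev_add() code):
 * every path, success included, must converge on one cleanup label so the
 * buffers allocated up front are always released.
 */
#include <stdio.h>
#include <stdlib.h>

static int dev_add(int fail)
{
	int ret = 0;
	char *label  = malloc(64);	/* stand-ins for the printbufs */
	char *errbuf = malloc(64);

	if (!label || !errbuf) {
		ret = -1;
		goto out;
	}

	if (fail) {
		ret = -1;
		goto out;		/* error path: same cleanup */
	}

	/*
	 * Success: before the fix this path returned 0 directly, skipping
	 * the frees below and leaking label/errbuf.
	 */
out:
	free(label);
	free(errbuf);
	return ret;
}

int main(void)
{
	printf("%d %d\n", dev_add(0), dev_add(1));
	return 0;
}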
@@ -653,19 +653,24 @@ int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
 	return 0;
 }
 
-size_t bch2_rand_range(size_t max)
+u64 bch2_get_random_u64_below(u64 ceil)
 {
-	size_t rand;
+	if (ceil <= U32_MAX)
+		return __get_random_u32_below(ceil);
 
-	if (!max)
-		return 0;
+	/* this is the same (clever) algorithm as in __get_random_u32_below() */
+	u64 rand = get_random_u64();
+	u64 mult = ceil * rand;
 
-	do {
-		rand = get_random_long();
-		rand &= roundup_pow_of_two(max) - 1;
-	} while (rand >= max);
+	if (unlikely(mult < ceil)) {
+		u64 bound = -ceil % ceil;
+		while (unlikely(mult < bound)) {
+			rand = get_random_u64();
+			mult = ceil * rand;
+		}
+	}
 
-	return rand;
+	return mul_u64_u64_shr(ceil, rand, 64);
 }
 
 void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
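The replacement implements the multiply-shift bounded-random technique the comment refers to: multiply the bound by a random word, keep the high half as the result, and reject the rare draws that would make some results over-represented when the word range is not a multiple of the bound. A 32-bit userspace sketch of the same algorithm, with rand() standing in for the kernel RNG (demo only):

/*
 * Userspace demo of the multiply-shift bounded-random technique; rand() is
 * a weak stand-in for the kernel RNG.  Not for real use.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t random_u32(void)
{
	/* rand() only guarantees 15 random bits, so stitch three calls together */
	return ((uint32_t)(rand() & 0x7fff) << 17) |
	       ((uint32_t)(rand() & 0x7fff) << 2)  |
	       ((uint32_t)rand() & 0x3);
}

static uint32_t random_u32_below(uint32_t ceil)
{
	uint64_t mult = (uint64_t)ceil * random_u32();

	/*
	 * mult >> 32 is always in [0, ceil), but when 2^32 isn't a multiple
	 * of ceil some results would be hit one extra time.  The low 32 bits
	 * say whether this draw landed in that over-represented region:
	 * redraw while (u32)mult < 2^32 % ceil (written as -ceil % ceil).
	 */
	if ((uint32_t)mult < ceil) {
		uint32_t bound = -ceil % ceil;

		while ((uint32_t)mult < bound)
			mult = (uint64_t)ceil * random_u32();
	}
	return mult >> 32;
}

int main(void)
{
	unsigned counts[6] = { 0 };

	for (int i = 0; i < 600000; i++)
		counts[random_u32_below(6)]++;

	for (int i = 0; i < 6; i++)
		printf("%d: %u\n", i, counts[i]);	/* each bucket ~100000 */
	return 0;
}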
@@ -401,7 +401,7 @@ do { \
 	_ret;							\
 })
 
-size_t bch2_rand_range(size_t);
+u64 bch2_get_random_u64_below(u64);
 
 void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
 void memcpy_from_bio(void *, struct bio *, struct bvec_iter);