blk-crypto: use mempool_alloc_bulk for encrypted bio page allocation
Calling mempool_alloc in a loop is not safe unless the maximum allocation
size times the maximum number of threads using it is less than the minimum
pool size.  Use the new mempool_alloc_bulk helper to allocate all missing
elements in one pass to remove this deadlock risk.

This also means that non-pool allocations now use alloc_pages_bulk, which
can be significantly faster than a loop over individual page allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3d939695e6
parent 2f655dcb2d
committed by Jens Axboe
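The allocation side of the change condenses to the pattern below. This is a
sketch, not the patch itself: the helper name is made up, and the
mempool_alloc_bulk() argument order (pool, pointer array, total count, slots
already populated) and the zeroed-array requirement of alloc_pages_bulk() are
taken from the hunks that follow. The memalloc_noio scope is what lets the
GFP_KERNEL bulk attempt be used safely on the I/O path.

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/string.h>

/* Hypothetical helper illustrating the bulk-then-backfill pattern. */
static void sketch_alloc_bounce_pages(mempool_t *pool, struct page **pages,
                unsigned int nr)
{
        unsigned int memflags = memalloc_noio_save();
        unsigned int nr_allocated;

        /* alloc_pages_bulk() skips non-NULL slots, so start from a clean array */
        memset(pages, 0, sizeof(struct page *) * nr);
        nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr, pages);

        /*
         * One bulk mempool call backfills every slot the page allocator left
         * empty.  No thread ever holds pool elements while waiting for more
         * of them, which removes the deadlock risk of per-page mempool_alloc.
         */
        if (nr_allocated < nr)
                mempool_alloc_bulk(pool, (void **)pages, nr, nr_allocated);

        memalloc_noio_restore(memflags);
}

The old code instead called mempool_alloc(blk_crypto_bounce_page_pool,
GFP_NOIO) once per segment inside the bio-building loop, which is the pattern
the commit message calls out as unsafe.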
@@ -22,7 +22,7 @@
 #include "blk-cgroup.h"
 #include "blk-crypto-internal.h"
 
-static unsigned int num_prealloc_bounce_pg = 32;
+static unsigned int num_prealloc_bounce_pg = BIO_MAX_VECS;
 module_param(num_prealloc_bounce_pg, uint, 0);
 MODULE_PARM_DESC(num_prealloc_bounce_pg,
                  "Number of preallocated bounce pages for the blk-crypto crypto API fallback");
@@ -144,11 +144,21 @@ static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
         struct bio *src_bio = enc_bio->bi_private;
-        int i;
+        struct page **pages = (struct page **)enc_bio->bi_io_vec;
+        struct bio_vec *bv;
+        unsigned int i;
 
-        for (i = 0; i < enc_bio->bi_vcnt; i++)
-                mempool_free(enc_bio->bi_io_vec[i].bv_page,
-                                blk_crypto_bounce_page_pool);
+        /*
+         * Use the same trick as the alloc side to avoid the need for an extra
+         * pages array.
+         */
+        bio_for_each_bvec_all(bv, enc_bio, i)
+                pages[i] = bv->bv_page;
+
+        i = mempool_free_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+                        enc_bio->bi_vcnt);
+        if (i < enc_bio->bi_vcnt)
+                release_pages(pages + i, enc_bio->bi_vcnt - i);
 
         if (enc_bio->bi_status)
                 cmpxchg(&src_bio->bi_status, 0, enc_bio->bi_status);
@@ -157,9 +167,14 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
         bio_endio(src_bio);
 }
 
+#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
+
 static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
-                unsigned int nr_segs)
+                unsigned int nr_segs, struct page ***pages_ret)
 {
+        unsigned int memflags = memalloc_noio_save();
+        unsigned int nr_allocated;
+        struct page **pages;
         struct bio *bio;
 
         bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
@@ -173,6 +188,30 @@ static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
         bio->bi_write_stream = bio_src->bi_write_stream;
         bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
         bio_clone_blkg_association(bio, bio_src);
+
+        /*
+         * Move page array up in the allocated memory for the bio vecs as far as
+         * possible so that we can start filling biovecs from the beginning
+         * without overwriting the temporary page array.
+         */
+        static_assert(PAGE_PTRS_PER_BVEC > 1);
+        pages = (struct page **)bio->bi_io_vec;
+        pages += nr_segs * (PAGE_PTRS_PER_BVEC - 1);
+
+        /*
+         * Try a bulk allocation first.  This could leave random pages in the
+         * array unallocated, but we'll fix that up later in mempool_alloc_bulk.
+         *
+         * Note: alloc_pages_bulk needs the array to be zeroed, as it assumes
+         * any non-zero slot already contains a valid allocation.
+         */
+        memset(pages, 0, sizeof(struct page *) * nr_segs);
+        nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr_segs, pages);
+        if (nr_allocated < nr_segs)
+                mempool_alloc_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+                                nr_segs, nr_allocated);
+        memalloc_noio_restore(memflags);
+        *pages_ret = pages;
         return bio;
 }
 
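The static_assert(PAGE_PTRS_PER_BVEC > 1) in the hunk above is what makes the
overlay safe: with at least two page-pointer slots per bio_vec, the temporary
page array parked in the upper part of the bi_io_vec allocation is always
consumed before the growing bio_vec array reaches its slots. A small userspace
model of that index arithmetic, assuming a typical 64-bit layout (16-byte
bio_vec, 8-byte page pointer, so PAGE_PTRS_PER_BVEC == 2):

#include <assert.h>
#include <stddef.h>

#define PAGE_PTRS_PER_BVEC 2  /* sizeof(struct bio_vec) / sizeof(struct page *) */

int main(void)
{
        for (size_t nr_segs = 1; nr_segs <= 256; nr_segs++) {
                for (size_t i = 0; i < nr_segs; i++) {
                        /* pages[i] was parked in page-pointer slot nr_segs + i */
                        size_t page_slot = nr_segs * (PAGE_PTRS_PER_BVEC - 1) + i;
                        /* writing bio_vec i covers slots 2*i and 2*i + 1 */
                        size_t last_clobbered = PAGE_PTRS_PER_BVEC * i + 1;

                        /*
                         * pages[i] is read before bio_vec i is written, so it
                         * only matters that the slots holding pages[i + 1]
                         * onwards stay intact.
                         */
                        assert(last_clobbered < page_slot + 1);
                }
        }
        return 0;
}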
@@ -209,6 +248,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
         struct scatterlist src, dst;
         union blk_crypto_iv iv;
         unsigned int nr_enc_pages, enc_idx;
+        struct page **enc_pages;
         struct bio *enc_bio;
         unsigned int i;
 
@@ -231,15 +271,13 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
          */
 new_bio:
         nr_enc_pages = min(bio_segments(src_bio), BIO_MAX_VECS);
-        enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages);
+        enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_enc_pages, &enc_pages);
         enc_idx = 0;
         for (;;) {
                 struct bio_vec src_bv =
                         bio_iter_iovec(src_bio, src_bio->bi_iter);
-                struct page *enc_page;
+                struct page *enc_page = enc_pages[enc_idx];
 
-                enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
-                                         GFP_NOIO);
                 __bio_add_page(enc_bio, enc_page, src_bv.bv_len,
                                src_bv.bv_offset);
 
@@ -258,10 +296,8 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
                  */
                 for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
                         blk_crypto_dun_to_iv(curr_dun, &iv);
-                        if (crypto_skcipher_encrypt(ciph_req)) {
-                                bio_io_error(enc_bio);
-                                return;
-                        }
+                        if (crypto_skcipher_encrypt(ciph_req))
+                                goto out_free_enc_bio;
                         bio_crypt_dun_increment(curr_dun, 1);
                         src.offset += data_unit_size;
                         dst.offset += data_unit_size;
@@ -287,6 +323,18 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
         }
 
         submit_bio(enc_bio);
+        return;
+
+out_free_enc_bio:
+        /*
+         * Add the remaining pages to the bio so that the normal completion path
+         * in blk_crypto_fallback_encrypt_endio frees them.  The exact data
+         * layout does not matter for that, so don't bother iterating the source
+         * bio.
+         */
+        for (; enc_idx < nr_enc_pages; enc_idx++)
+                __bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
+        bio_io_error(enc_bio);
 }
 
 /*