blk-crypto: handle the fallback above the block layer

Add a blk_crypto_submit_bio helper that submits the bio directly when
it is not encrypted or when the underlying device provides inline
encryption, and that otherwise handles the encryption fallback before
going down into the low-level driver.  This reduces the risk from bio
reordering and keeps memory allocation as high up in the stack as
possible.

Note that if the submitter knows that inline encryption is supported
by the underlying driver, it can still use plain submit_bio.
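
As a sketch, the resulting calling convention looks roughly like this
(example_submit() and the native_crypto_verified flag are made up for
illustration; the other interfaces are the ones this patch touches):

  #include <linux/bio.h>
  #include <linux/blk-crypto.h>

  static void example_submit(struct bio *bio, bool native_crypto_verified)
  {
          /*
           * A submitter that has no crypt context, or that knows the
           * underlying driver supports inline encryption, can keep
           * using plain submit_bio().
           */
          if (!bio_has_crypt_ctx(bio) || native_crypto_verified) {
                  submit_bio(bio);
                  return;
          }

          /* Otherwise the new helper handles the fallback as needed. */
          blk_crypto_submit_bio(bio);
  }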

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

@@ -206,6 +206,12 @@ it to a bio, given the blk_crypto_key and the data unit number that will be used
 for en/decryption. Users don't need to worry about freeing the bio_crypt_ctx
 later, as that happens automatically when the bio is freed or reset.
 
+To submit a bio that uses inline encryption, users must call
+``blk_crypto_submit_bio()`` instead of the usual ``submit_bio()``. This will
+submit the bio to the underlying driver if it supports inline crypto, or else
+call the blk-crypto fallback routines before submitting normal bios to the
+underlying drivers.
+
 Finally, when done using inline encryption with a blk_crypto_key on a
 block_device, users must call ``blk_crypto_evict_key()``. This ensures that
 the key is evicted from all keyslots it may be programmed into and unlinked from
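
Putting the documented steps together, a caller would now look roughly
like this (a sketch only; example_encrypted_io() and its arguments are
hypothetical, key setup and error handling are elided):

  #include <linux/bio.h>
  #include <linux/blk-crypto.h>

  static void example_encrypted_io(struct block_device *bdev,
                  const struct blk_crypto_key *key,
                  const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                  struct bio *bio)
  {
          /* Attach the context; it is freed when the bio is freed or reset. */
          bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);

          /* Use this instead of plain submit_bio(). */
          blk_crypto_submit_bio(bio);

          /* ... later, once no further I/O will use @key on this device ... */
          blk_crypto_evict_key(bdev, key);
  }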


@@ -628,9 +628,6 @@ static void __submit_bio(struct bio *bio)
         /* If plug is not used, add new plug here to cache nsecs time. */
         struct blk_plug plug;
 
-        if (unlikely(!blk_crypto_bio_prep(bio)))
-                return;
-
         blk_start_plug(&plug);
         if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
@@ -794,6 +791,13 @@ void submit_bio_noacct(struct bio *bio)
         if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
                 goto not_supported;
 
+        if (bio_has_crypt_ctx(bio)) {
+                if (WARN_ON_ONCE(!bio_has_data(bio)))
+                        goto end_io;
+                if (!blk_crypto_supported(bio))
+                        goto not_supported;
+        }
+
         if (should_fail_bio(bio))
                 goto end_io;
         bio_check_ro(bio);


@@ -86,6 +86,12 @@ bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
 int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
                 void __user *argp);
 
+static inline bool blk_crypto_supported(struct bio *bio)
+{
+        return blk_crypto_config_supported_natively(bio->bi_bdev,
+                        &bio->bi_crypt_context->bc_key->crypto_cfg);
+}
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 static inline int blk_crypto_sysfs_register(struct gendisk *disk)
@@ -139,6 +145,11 @@ static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
         return -ENOTTY;
 }
 
+static inline bool blk_crypto_supported(struct bio *bio)
+{
+        return false;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
@@ -165,14 +176,6 @@ static inline void bio_crypt_do_front_merge(struct request *rq,
 #endif
 }
 
-bool __blk_crypto_bio_prep(struct bio *bio);
-static inline bool blk_crypto_bio_prep(struct bio *bio)
-{
-        if (bio_has_crypt_ctx(bio))
-                return __blk_crypto_bio_prep(bio);
-        return true;
-}
-
 blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
 static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
 {


@@ -242,25 +242,13 @@ void __blk_crypto_free_request(struct request *rq)
         rq->crypt_ctx = NULL;
 }
 
-/**
- * __blk_crypto_bio_prep - Prepare bio for inline encryption
- * @bio: bio to prepare
+/*
+ * Process a bio with a crypto context. Returns true if the caller should
+ * submit the passed in bio, false if the bio is consumed.
  *
- * If the bio crypt context provided for the bio is supported by the underlying
- * device's inline encryption hardware, do nothing.
- *
- * Otherwise, try to perform en/decryption for this bio by falling back to the
- * kernel crypto API. For encryption this means submitting newly allocated
- * bios for the encrypted payload while keeping back the source bio until they
- * complete, while for reads the decryption happens in-place by a hooked in
- * completion handler.
- *
- * Caller must ensure bio has bio_crypt_ctx.
- *
- * Return: true if @bio should be submitted to the driver by the caller, else
- * false. Sets bio->bi_status, calls bio_endio and returns false on error.
+ * See the kerneldoc comment for blk_crypto_submit_bio for further details.
  */
-bool __blk_crypto_bio_prep(struct bio *bio)
+bool __blk_crypto_submit_bio(struct bio *bio)
 {
         const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
         struct block_device *bdev = bio->bi_bdev;
@@ -288,6 +276,7 @@ bool __blk_crypto_bio_prep(struct bio *bio)
         return true;
 }
+EXPORT_SYMBOL_GPL(__blk_crypto_submit_bio);
 
 int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                 gfp_t gfp_mask)
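
Note what "false if the bio is consumed" implies for callers: for an
encrypted write the fallback may hold the source bio back and submit
freshly allocated bios for the ciphertext, so completion must be
observed through the bio's end_io hook rather than by touching the bio
again. A sketch (the example_* functions are hypothetical):

  #include <linux/bio.h>
  #include <linux/blk-crypto.h>

  /*
   * Hypothetical completion handler; it runs whether the payload went
   * out through inline hardware or through fallback-allocated bios.
   */
  static void example_end_io(struct bio *bio)
  {
          bio_put(bio);
  }

  static void example_write(struct bio *bio)
  {
          bio->bi_end_io = example_end_io;
          blk_crypto_submit_bio(bio);
          /*
           * @bio may already have been consumed by the fallback, so it
           * must not be dereferenced past this point.
           */
  }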


@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/capability.h>
 #include <linux/blkdev.h>
+#include <linux/blk-crypto.h>
 #include <linux/file.h>
 #include <linux/quotaops.h>
 #include <linux/highmem.h>
@@ -2821,7 +2822,7 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
                 wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
         }
 
-        submit_bio(bio);
+        blk_crypto_submit_bio(bio);
 }
 
 void submit_bh(blk_opf_t opf, struct buffer_head *bh)


@@ -105,7 +105,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
                 }
 
                 atomic_inc(&done.pending);
-                submit_bio(bio);
+                blk_crypto_submit_bio(bio);
         }
 
         fscrypt_zeroout_range_done(&done);


@@ -7,6 +7,7 @@
  * Written by Theodore Ts'o, 2010.
  */
 
+#include <linux/blk-crypto.h>
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/highuid.h>
@@ -401,7 +402,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
         if (bio) {
                 if (io->io_wbc->sync_mode == WB_SYNC_ALL)
                         io->io_bio->bi_opf |= REQ_SYNC;
-                submit_bio(io->io_bio);
+                blk_crypto_submit_bio(io->io_bio);
         }
         io->io_bio = NULL;
 }


@@ -36,6 +36,7 @@
 #include <linux/bio.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/blk-crypto.h>
 #include <linux/blkdev.h>
 #include <linux/highmem.h>
 #include <linux/prefetch.h>
@@ -345,7 +346,7 @@ int ext4_mpage_readpages(struct inode *inode,
                 if (bio && (last_block_in_bio != first_block - 1 ||
                             !fscrypt_mergeable_bio(bio, inode, next_block))) {
                 submit_and_realloc:
-                        submit_bio(bio);
+                        blk_crypto_submit_bio(bio);
                         bio = NULL;
                 }
                 if (bio == NULL) {
@@ -371,14 +372,14 @@ int ext4_mpage_readpages(struct inode *inode,
                 if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                      (relative_block == map.m_len)) ||
                     (first_hole != blocks_per_folio)) {
-                        submit_bio(bio);
+                        blk_crypto_submit_bio(bio);
                         bio = NULL;
                 } else
                         last_block_in_bio = first_block + blocks_per_folio - 1;
                 continue;
         confused:
                 if (bio) {
-                        submit_bio(bio);
+                        blk_crypto_submit_bio(bio);
                         bio = NULL;
                 }
                 if (!folio_test_uptodate(folio))
@@ -389,7 +390,7 @@ int ext4_mpage_readpages(struct inode *inode,
                 ; /* A label shall be followed by a statement until C23 */
         }
         if (bio)
-                submit_bio(bio);
+                blk_crypto_submit_bio(bio);
         return 0;
 }


@@ -513,7 +513,7 @@ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
         trace_f2fs_submit_read_bio(sbi->sb, type, bio);
         iostat_update_submit_ctx(bio, type);
-        submit_bio(bio);
+        blk_crypto_submit_bio(bio);
 }
 
 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
@@ -522,7 +522,7 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
         WARN_ON_ONCE(is_read_io(bio_op(bio)));
         trace_f2fs_submit_write_bio(sbi->sb, type, bio);
         iostat_update_submit_ctx(bio, type);
-        submit_bio(bio);
+        blk_crypto_submit_bio(bio);
 }
 
 static void __submit_merged_bio(struct f2fs_bio_info *io)


@@ -5,6 +5,7 @@
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  */
+#include <linux/blk-crypto.h>
 #include <linux/fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/stat.h>
@@ -5046,7 +5047,7 @@ static void f2fs_dio_write_submit_io(const struct iomap_iter *iter,
         enum temp_type temp = f2fs_get_segment_temp(sbi, type);
 
         bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp);
-        submit_bio(bio);
+        blk_crypto_submit_bio(bio);
 }
 
 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {


@@ -3,6 +3,7 @@
  * Copyright (C) 2010 Red Hat, Inc.
  * Copyright (c) 2016-2025 Christoph Hellwig.
  */
+#include <linux/blk-crypto.h>
 #include <linux/fscrypt.h>
 #include <linux/pagemap.h>
 #include <linux/iomap.h>
@@ -74,7 +75,7 @@ static void iomap_dio_submit_bio(const struct iomap_iter *iter,
                 dio->dops->submit_io(iter, bio, pos);
         } else {
                 WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
-                submit_bio(bio);
+                blk_crypto_submit_bio(bio);
         }
 }


@@ -181,6 +181,28 @@ static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
+bool __blk_crypto_submit_bio(struct bio *bio);
+
+/**
+ * blk_crypto_submit_bio - Submit a bio that may have a crypto context
+ * @bio: bio to submit
+ *
+ * If @bio has no crypto context, or the crypt context attached to @bio is
+ * supported by the underlying device's inline encryption hardware, just submit
+ * @bio.
+ *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. For encryption this means submitting newly allocated
+ * bios for the encrypted payload while keeping back the source bio until they
+ * complete, while for reads the decryption happens in-place by a hooked in
+ * completion handler.
+ */
+static inline void blk_crypto_submit_bio(struct bio *bio)
+{
+        if (!bio_has_crypt_ctx(bio) || __blk_crypto_submit_bio(bio))
+                submit_bio(bio);
+}
+
 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
 
 /**
  * bio_crypt_clone - clone bio encryption context