author    Christoph Hellwig <hch@lst.de>    2026-01-09 07:07:48 +0100
committer Jens Axboe <axboe@kernel.dk>      2026-01-11 12:55:41 -0700
commit    66e5a11d2ed6d58006d5cd8276de28751daaa230 (patch)
tree      aed43f96d0af5f27eae56a2ecac33e21c5238e62
parent    3d939695e68218d420be2b5dbb2fa39ccb7e97ed (diff)
blk-crypto: optimize data unit alignment checking
Avoid the relatively high overhead of constructing and walking per-page segment bio_vecs for data unit alignment checking by merging the checks into existing loops.

For hardware-supported crypto, perform the check in bio_split_io_at, which already contains a similar alignment check applied to all I/O. This means bio-based drivers that do not call bio_split_to_limits, should they ever grow blk-crypto support, will need to implement the check themselves, just like all other queue limits checks.

For blk-crypto-fallback, do the check in the encryption/decryption loops. This means alignment errors for decryption will only be detected after the I/O has completed, but that seems like a worthwhile trade-off.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-crypto-fallback.c  15
-rw-r--r--  block/blk-crypto.c           22
-rw-r--r--  block/blk-merge.c             9
3 files changed, 21 insertions(+), 25 deletions(-)
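
A quick illustration of the alignment test the patch moves around: blk-crypto requires data_unit_size to be a power of two, so OR-ing a segment's length and offset lets a single IS_ALIGNED() test cover both fields at once. A minimal standalone sketch of that pattern, assuming only the power-of-two guarantee (the macro is re-created here for illustration outside the kernel):

#include <stdbool.h>
#include <stdio.h>

/* Power-of-two alignment test, matching the kernel's IS_ALIGNED(). */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/*
 * OR-ing len and offset preserves every low bit set in either value,
 * so the combined value is aligned to data_unit_size iff both inputs
 * are individually aligned.
 */
static bool segment_aligned(unsigned int offset, unsigned int len,
			    unsigned int data_unit_size)
{
	return IS_ALIGNED(len | offset, data_unit_size);
}

int main(void)
{
	printf("%d\n", segment_aligned(0, 8192, 4096));   /* 1: both aligned */
	printf("%d\n", segment_aligned(512, 4096, 4096)); /* 0: bad offset */
	return 0;
}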
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 6be971859542..a331b061dbf4 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -278,6 +278,12 @@ new_bio:
bio_iter_iovec(src_bio, src_bio->bi_iter);
struct page *enc_page = enc_pages[enc_idx];
+ if (!IS_ALIGNED(src_bv.bv_len | src_bv.bv_offset,
+ data_unit_size)) {
+ enc_bio->bi_status = BLK_STS_INVAL;
+ goto out_free_enc_bio;
+ }
+
__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
src_bv.bv_offset);
@@ -296,8 +302,10 @@ new_bio:
*/
for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
- if (crypto_skcipher_encrypt(ciph_req))
+ if (crypto_skcipher_encrypt(ciph_req)) {
+ enc_bio->bi_status = BLK_STS_IOERR;
goto out_free_enc_bio;
+ }
bio_crypt_dun_increment(curr_dun, 1);
src.offset += data_unit_size;
dst.offset += data_unit_size;
@@ -334,7 +342,7 @@ out_free_enc_bio:
*/
for (; enc_idx < nr_enc_pages; enc_idx++)
__bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
- bio_io_error(enc_bio);
+ bio_endio(enc_bio);
}
/*
@@ -387,6 +395,9 @@ static blk_status_t __blk_crypto_fallback_decrypt_bio(struct bio *bio,
__bio_for_each_segment(bv, bio, iter, iter) {
struct page *page = bv.bv_page;
+ if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
+ return BLK_STS_INVAL;
+
sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
/* Decrypt each data unit in the segment */
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 69e869d1c9bd..0b2535d8dbcc 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -219,22 +219,6 @@ bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
-/* Check that all I/O segments are data unit aligned. */
-static bool bio_crypt_check_alignment(struct bio *bio)
-{
- const unsigned int data_unit_size =
- bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
- struct bvec_iter iter;
- struct bio_vec bv;
-
- bio_for_each_segment(bv, bio, iter) {
- if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
- return false;
- }
-
- return true;
-}
-
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
return blk_crypto_get_keyslot(rq->q->crypto_profile,
@@ -287,12 +271,6 @@ bool __blk_crypto_bio_prep(struct bio *bio)
return false;
}
- if (!bio_crypt_check_alignment(bio)) {
- bio->bi_status = BLK_STS_INVAL;
- bio_endio(bio);
- return false;
- }
-
/*
 * If the device does not natively support the encryption context, try
 * to use the fallback if available.
 */
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d3115d7469df..b82c6d304658 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -324,12 +324,19 @@ static inline unsigned int bvec_seg_gap(struct bio_vec *bvprv,
int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
unsigned *segs, unsigned max_bytes, unsigned len_align_mask)
{
+ struct bio_crypt_ctx *bc = bio_crypt_ctx(bio);
struct bio_vec bv, bvprv, *bvprvp = NULL;
unsigned nsegs = 0, bytes = 0, gaps = 0;
struct bvec_iter iter;
+ unsigned start_align_mask = lim->dma_alignment;
+
+ if (bc) {
+ start_align_mask |= (bc->bc_key->crypto_cfg.data_unit_size - 1);
+ len_align_mask |= (bc->bc_key->crypto_cfg.data_unit_size - 1);
+ }
bio_for_each_bvec(bv, bio, iter) {
- if (bv.bv_offset & lim->dma_alignment ||
+ if (bv.bv_offset & start_align_mask ||
bv.bv_len & len_align_mask)
return -EINVAL;
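
The blk-merge.c hunk above works because both alignment requirements reduce to power-of-two masks, which can be OR-ed together once and then applied with a single AND per bvec, so the hot loop grows no extra branch. A small standalone sketch of the mask-merging idea (hypothetical names; note that dma_alignment is already a mask such as 511, while the crypto data unit size is a size such as 4096):

#include <stdio.h>

/*
 * Combine a mask-style requirement (dma_alignment) with a size-style
 * requirement (data_unit_size) into one mask. An offset satisfies
 * both requirements iff (offset & combined) == 0.
 */
static unsigned int combine_align_masks(unsigned int dma_alignment,
					unsigned int data_unit_size)
{
	return dma_alignment | (data_unit_size - 1);
}

int main(void)
{
	unsigned int mask = combine_align_masks(511, 4096);

	printf("combined mask: 0x%x\n", mask);                     /* 0xfff */
	printf("offset 4096: %s\n", (4096 & mask) ? "bad" : "ok"); /* ok  */
	printf("offset 512:  %s\n", (512 & mask) ? "bad" : "ok");  /* bad */
	return 0;
}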