Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-crypt.c | 597
1 files changed, 94 insertions, 503 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3f06df59fd82..3db18fb2d764 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,14 +18,10 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
-#include <linux/atomic.h>
+#include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/algapi.h>
 
 #include <linux/device-mapper.h>
 
@@ -66,7 +62,6 @@ struct dm_crypt_request {
 	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
-	sector_t iv_sector;
 };
 
 struct crypt_config;
 
@@ -77,13 +72,11 @@ struct crypt_iv_operations {
 	void (*dtr)(struct crypt_config *cc);
 	int (*init)(struct crypt_config *cc);
 	int (*wipe)(struct crypt_config *cc);
-	int (*generator)(struct crypt_config *cc, u8 *iv,
-			 struct dm_crypt_request *dmreq);
-	int (*post)(struct crypt_config *cc, u8 *iv,
-		    struct dm_crypt_request *dmreq);
+	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
 };
 
 struct iv_essiv_private {
+	struct crypto_cipher *tfm;
 	struct crypto_hash *hash_tfm;
 	u8 *salt;
 };
@@ -92,32 +85,11 @@ struct iv_benbi_private {
 	int shift;
 };
 
-#define LMK_SEED_SIZE 64 /* hash + 0 */
-struct iv_lmk_private {
-	struct crypto_shash *hash_tfm;
-	u8 *seed;
-};
-
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
-
-/*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-	/* ESSIV: struct crypto_cipher *essiv_tfm */
-	void *iv_private;
-	struct crypto_ablkcipher *tfms[0];
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
- */
 struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
@@ -141,19 +113,11 @@ struct crypt_config {
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
-		struct iv_lmk_private lmk;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
 	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-	unsigned tfms_count;
-
-	/*
 	 * Layout of each crypto request:
 	 *
 	 *   struct ablkcipher_request
@@ -167,10 +131,11 @@ struct crypt_config {
 	 * correctly aligned.
 	 */
 	unsigned int dmreq_start;
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
 
 	unsigned long flags;
 	unsigned int key_size;
-	unsigned int key_parts;
 	u8 key[0];
 };
 
@@ -181,20 +146,6 @@ static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
-static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
-
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
-/*
- * Use this to access cipher attributes that are the same for each CPU.
- */
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
-{
-	return __this_cpu_ptr(cc->cpu)->tfms[0];
-}
 
 /*
  * Different IV generation algorithms:
@@ -215,38 +166,23 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
  * null: the initial vector is always zero. Provides compatibility with
  *       obsolete loop_fish2 devices. Do not use for new devices.
  *
- * lmk:  Compatible implementation of the block chaining mode used
- *       by the Loop-AES block device encryption system
- *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
- *       It operates on full 512 byte sectors and uses CBC
- *       with an IV derived from the sector number, the data and
- *       optionally extra IV seed.
- *       This means that after decryption the first block
- *       of sector must be tweaked according to decrypted data.
- *       Loop-AES can use three encryption schemes:
- *         version 1: is plain aes-cbc mode
- *         version 2: uses 64 multikey scheme with lmk IV generator
- *         version 3: the same as version 2 with additional IV seed
- *                    (it uses 65 keys, last key is used as IV seed)
- *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
 
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
-			      struct dm_crypt_request *dmreq)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
-	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
+	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
 
 	return 0;
 }
 
 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
-				struct dm_crypt_request *dmreq)
+				sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
-	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
+	*(u64 *)iv = cpu_to_le64(sector);
 
 	return 0;
 }
@@ -257,8 +193,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct crypto_cipher *essiv_tfm;
-	int err, cpu;
+	int err;
 
 	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = essiv->hash_tfm;
@@ -268,16 +203,8 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	if (err)
 		return err;
 
-	for_each_possible_cpu(cpu) {
-		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
-
-		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
+	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
 				    crypto_hash_digestsize(essiv->hash_tfm));
-		if (err)
-			return err;
-	}
-
-	return 0;
 }
 
 /* Wipe salt and reset key derived from volume key */
@@ -285,76 +212,24 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
-	struct crypto_cipher *essiv_tfm;
-	int cpu, r, err = 0;
 
 	memset(essiv->salt, 0, salt_size);
 
-	for_each_possible_cpu(cpu) {
-		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
-		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
-		if (r)
-			err = r;
-	}
-
-	return err;
-}
-
-/* Set up per cpu cipher state */
-static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
-					     struct dm_target *ti,
-					     u8 *salt, unsigned saltsize)
-{
-	struct crypto_cipher *essiv_tfm;
-	int err;
-
-	/* Setup the essiv_tfm with the given salt */
-	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(essiv_tfm)) {
-		ti->error = "Error allocating crypto tfm for ESSIV";
-		return essiv_tfm;
-	}
-
-	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
-		ti->error = "Block size of ESSIV cipher does "
-			    "not match IV size of block cipher";
-		crypto_free_cipher(essiv_tfm);
-		return ERR_PTR(-EINVAL);
-	}
-
-	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
-	if (err) {
-		ti->error = "Failed to set key for ESSIV cipher";
-		crypto_free_cipher(essiv_tfm);
-		return ERR_PTR(err);
-	}
-
-	return essiv_tfm;
+	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-	int cpu;
-	struct crypt_cpu *cpu_cc;
-	struct crypto_cipher *essiv_tfm;
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
+	crypto_free_cipher(essiv->tfm);
+	essiv->tfm = NULL;
+
 	crypto_free_hash(essiv->hash_tfm);
 	essiv->hash_tfm = NULL;
 
 	kzfree(essiv->salt);
 	essiv->salt = NULL;
-
-	for_each_possible_cpu(cpu) {
-		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-		essiv_tfm = cpu_cc->iv_private;
-
-		if (essiv_tfm)
-			crypto_free_cipher(essiv_tfm);
-
-		cpu_cc->iv_private = NULL;
-	}
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -363,7 +238,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	struct crypto_cipher *essiv_tfm = NULL;
 	struct crypto_hash *hash_tfm = NULL;
 	u8 *salt = NULL;
-	int err, cpu;
+	int err;
 
 	if (!opts) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -385,44 +260,48 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 		goto bad;
 	}
 
+	/* Allocate essiv_tfm */
+	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(essiv_tfm)) {
+		ti->error = "Error allocating crypto tfm for ESSIV";
+		err = PTR_ERR(essiv_tfm);
+		goto bad;
+	}
+	if (crypto_cipher_blocksize(essiv_tfm) !=
+	    crypto_ablkcipher_ivsize(cc->tfm)) {
+		ti->error = "Block size of ESSIV cipher does "
+			    "not match IV size of block cipher";
+		err = -EINVAL;
+		goto bad;
+	}
+
 	cc->iv_gen_private.essiv.salt = salt;
+	cc->iv_gen_private.essiv.tfm = essiv_tfm;
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
-	for_each_possible_cpu(cpu) {
-		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-					crypto_hash_digestsize(hash_tfm));
-		if (IS_ERR(essiv_tfm)) {
-			crypt_iv_essiv_dtr(cc);
-			return PTR_ERR(essiv_tfm);
-		}
-		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
-	}
-
 	return 0;
 
 bad:
+	if (essiv_tfm && !IS_ERR(essiv_tfm))
+		crypto_free_cipher(essiv_tfm);
 	if (hash_tfm && !IS_ERR(hash_tfm))
 		crypto_free_hash(hash_tfm);
 	kfree(salt);
 	return err;
 }
 
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
-			      struct dm_crypt_request *dmreq)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
-	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
-
 	memset(iv, 0, cc->iv_size);
-	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
-	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
-
+	*(u64 *)iv = cpu_to_le64(sector);
+	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
 	return 0;
 }
 
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
 	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
@@ -447,177 +326,25 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc)
 {
 }
 
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
-			      struct dm_crypt_request *dmreq)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	__be64 val;
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
+	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
 }
 
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
-			     struct dm_crypt_request *dmreq)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
 
 	return 0;
 }
 
-static void crypt_iv_lmk_dtr(struct crypt_config *cc)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-
-	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
-		crypto_free_shash(lmk->hash_tfm);
-	lmk->hash_tfm = NULL;
-
-	kzfree(lmk->seed);
-	lmk->seed = NULL;
-}
-
-static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
-			    const char *opts)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-
-	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
-	if (IS_ERR(lmk->hash_tfm)) {
-		ti->error = "Error initializing LMK hash";
-		return PTR_ERR(lmk->hash_tfm);
-	}
-
-	/* No seed in LMK version 2 */
-	if (cc->key_parts == cc->tfms_count) {
-		lmk->seed = NULL;
-		return 0;
-	}
-
-	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
-	if (!lmk->seed) {
-		crypt_iv_lmk_dtr(cc);
-		ti->error = "Error kmallocing seed storage in LMK";
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static int crypt_iv_lmk_init(struct crypt_config *cc)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-	int subkey_size = cc->key_size / cc->key_parts;
-
-	/* LMK seed is on the position of LMK_KEYS + 1 key */
-	if (lmk->seed)
-		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
-		       crypto_shash_digestsize(lmk->hash_tfm));
-
-	return 0;
-}
-
-static int crypt_iv_lmk_wipe(struct crypt_config *cc)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-
-	if (lmk->seed)
-		memset(lmk->seed, 0, LMK_SEED_SIZE);
-
-	return 0;
-}
-
-static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
-			    struct dm_crypt_request *dmreq,
-			    u8 *data)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-	struct {
-		struct shash_desc desc;
-		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
-	} sdesc;
-	struct md5_state md5state;
-	u32 buf[4];
-	int i, r;
-
-	sdesc.desc.tfm = lmk->hash_tfm;
-	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	r = crypto_shash_init(&sdesc.desc);
-	if (r)
-		return r;
-
-	if (lmk->seed) {
-		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
-		if (r)
-			return r;
-	}
-
-	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
-	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
-	if (r)
-		return r;
-
-	/* Sector is cropped to 56 bits here */
-	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
-	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
-	buf[2] = cpu_to_le32(4024);
-	buf[3] = 0;
-	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
-	if (r)
-		return r;
-
-	/* No MD5 padding here */
-	r = crypto_shash_export(&sdesc.desc, &md5state);
-	if (r)
-		return r;
-
-	for (i = 0; i < MD5_HASH_WORDS; i++)
-		__cpu_to_le32s(&md5state.hash[i]);
-	memcpy(iv, &md5state.hash, cc->iv_size);
-
-	return 0;
-}
-
-static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
-			    struct dm_crypt_request *dmreq)
-{
-	u8 *src;
-	int r = 0;
-
-	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
-		src = kmap_atomic(sg_page(&dmreq->sg_in));
-		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
-		kunmap_atomic(src);
-	} else
-		memset(iv, 0, cc->iv_size);
-
-	return r;
-}
-
-static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
-			     struct dm_crypt_request *dmreq)
-{
-	u8 *dst;
-	int r;
-
-	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
-		return 0;
-
-	dst = kmap_atomic(sg_page(&dmreq->sg_out));
-	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
-
-	/* Tweak the first block of plaintext sector */
-	if (!r)
-		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
-
-	kunmap_atomic(dst);
-	return r;
-}
-
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -644,15 +371,6 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
 	.generator = crypt_iv_null_gen
 };
 
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
-	.ctr	   = crypt_iv_lmk_ctr,
-	.dtr	   = crypt_iv_lmk_dtr,
-	.init	   = crypt_iv_lmk_init,
-	.wipe	   = crypt_iv_lmk_wipe,
-	.generator = crypt_iv_lmk_gen,
-	.post	   = crypt_iv_lmk_post
-};
-
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -680,13 +398,6 @@ static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
 	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
-static u8 *iv_of_dmreq(struct crypt_config *cc,
-		       struct dm_crypt_request *dmreq)
-{
-	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
-		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
-}
-
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -698,9 +409,9 @@ static int crypt_convert_block(struct crypt_config *cc,
 	int r = 0;
 
 	dmreq = dmreq_of_req(cc, req);
-	iv = iv_of_dmreq(cc, dmreq);
+	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
+			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
 
-	dmreq->iv_sector = ctx->sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -723,7 +434,7 @@ static int crypt_convert_block(struct crypt_config *cc,
 	}
 
 	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
+		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
 		if (r < 0)
 			return r;
 	}
@@ -736,28 +447,21 @@ static int crypt_convert_block(struct crypt_config *cc,
 	else
 		r = crypto_ablkcipher_decrypt(req);
 
-	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
-		r = cc->iv_gen_ops->post(cc, iv, dmreq);
-
 	return r;
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error);
-
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
-	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
-
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-
-	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
-	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	if (!cc->req)
+		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	ablkcipher_request_set_tfm(cc->req, cc->tfm);
+	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+					CRYPTO_TFM_REQ_MAY_SLEEP,
+					kcryptd_async_done,
+					dmreq_of_req(cc, cc->req));
 }
 
 /*
@@ -766,7 +470,6 @@ static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->pending, 1);
 
@@ -778,7 +481,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, cc->req);
 
 		switch (r) {
 		/* async */
@@ -787,7 +490,7 @@ static int crypt_convert(struct crypt_config *cc,
 			INIT_COMPLETION(ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			cc->req = NULL;
 			ctx->sector++;
 			continue;
 
@@ -945,9 +648,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
  * They must be separated as otherwise the final stages could be
  * starved by new requests which can block in the first stages due
  * to memory allocation.
- *
- * The work is done per CPU global for all dm-crypt instances.
- * They should not depend on each other and do not block.
  */
 static void crypt_endio(struct bio *clone, int error)
 {
@@ -988,22 +688,25 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+static void kcryptd_io_read(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
+	crypt_inc_pending(io);
+
 	/*
 	 * The block layer might modify the bvec array, so always
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
-	if (!clone)
-		return 1;
-
-	crypt_inc_pending(io);
+	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
+	if (unlikely(!clone)) {
+		io->error = -ENOMEM;
+		crypt_dec_pending(io);
+		return;
+	}
 
 	clone_init(io, clone);
 	clone->bi_idx = 0;
@@ -1014,7 +717,6 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
-	return 0;
 }
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -1027,12 +729,9 @@ static void kcryptd_io(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (bio_data_dir(io->base_bio) == READ) {
-		crypt_inc_pending(io);
-		if (kcryptd_io_read(io, GFP_NOIO))
-			io->error = -ENOMEM;
-		crypt_dec_pending(io);
-	} else
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_io_read(io);
+	else
 		kcryptd_io_write(io);
 }
 
@@ -1200,12 +899,6 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
-	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
-		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
-
-	if (error < 0)
-		io->error = -EIO;
-
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
@@ -1276,93 +969,34 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 	}
 }
 
-static void crypt_free_tfms(struct crypt_config *cc, int cpu)
-{
-	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-	unsigned i;
-
-	for (i = 0; i < cc->tfms_count; i++)
-		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
-			crypto_free_ablkcipher(cpu_cc->tfms[i]);
-			cpu_cc->tfms[i] = NULL;
-		}
-}
-
-static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
-{
-	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-	unsigned i;
-	int err;
-
-	for (i = 0; i < cc->tfms_count; i++) {
-		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
-		if (IS_ERR(cpu_cc->tfms[i])) {
-			err = PTR_ERR(cpu_cc->tfms[i]);
-			crypt_free_tfms(cc, cpu);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int crypt_setkey_allcpus(struct crypt_config *cc)
-{
-	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
-	int cpu, err = 0, i, r;
-
-	for_each_possible_cpu(cpu) {
-		for (i = 0; i < cc->tfms_count; i++) {
-			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
-						     cc->key + (i * subkey_size), subkey_size);
-			if (r)
-				err = r;
-		}
-	}
-
-	return err;
-}
-
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
-	int r = -EINVAL;
-	int key_string_len = strlen(key);
-
 	/* The key size may not be changed. */
-	if (cc->key_size != (key_string_len >> 1))
-		goto out;
+	if (cc->key_size != (strlen(key) >> 1))
+		return -EINVAL;
 
 	/* Hyphen (which gives a key_size of zero) means there is no key. */
 	if (!cc->key_size && strcmp(key, "-"))
-		goto out;
+		return -EINVAL;
 
 	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
-		goto out;
+		return -EINVAL;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-	r = crypt_setkey_allcpus(cc);
-
-out:
-	/* Hex key string not needed after here, so wipe it. */
-	memset(key, '0', key_string_len);
-
-	return r;
+	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
-
-	return crypt_setkey_allcpus(cc);
+	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
 }
 
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
@@ -1374,14 +1008,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-			crypt_free_tfms(cc, cpu);
-		}
-
 	if (cc->bs)
 		bioset_free(cc->bs);
@@ -1395,12 +1021,12 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 
+	if (cc->tfm && !IS_ERR(cc->tfm))
+		crypto_free_ablkcipher(cc->tfm);
+
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
@@ -1412,10 +1038,9 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 			    char *cipher_in, char *key)
 {
 	struct crypt_config *cc = ti->private;
-	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
+	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
 	char *cipher_api = NULL;
-	int cpu, ret = -EINVAL;
-	char dummy;
+	int ret = -EINVAL;
 
 	/* Convert to crypto api definition? */
 	if (strchr(cipher_in, '(')) {
@@ -1429,20 +1054,10 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	/*
 	 * Legacy dm-crypt cipher specification
-	 * cipher[:keycount]-mode-iv:ivopts
+	 * cipher-mode-iv:ivopts
 	 */
 	tmp = cipher_in;
-	keycount = strsep(&tmp, "-");
-	cipher = strsep(&keycount, ":");
-
-	if (!keycount)
-		cc->tfms_count = 1;
-	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
-		 !is_power_of_2(cc->tfms_count)) {
-		ti->error = "Bad cipher key count specification";
-		return -EINVAL;
-	}
-	cc->key_parts = cc->tfms_count;
+	cipher = strsep(&tmp, "-");
 
 	cc->cipher = kstrdup(cipher, GFP_KERNEL);
 	if (!cc->cipher)
		goto bad_mem;
@@ -1455,14 +1070,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
-				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
 	 * only the cipher name is supplied, use cbc-plain.
@@ -1489,12 +1096,11 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	}
 
 	/* Allocate cipher */
-	for_each_possible_cpu(cpu) {
-		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
-		if (ret < 0) {
-			ti->error = "Error allocating crypto tfm";
-			goto bad;
-		}
+	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
+	if (IS_ERR(cc->tfm)) {
+		ret = PTR_ERR(cc->tfm);
+		ti->error = "Error allocating crypto tfm";
+		goto bad;
 	}
 
 	/* Initialize and set key */
@@ -1505,7 +1111,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	}
 
 	/* Initialize IV */
-	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
@@ -1528,15 +1134,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
-	else if (strcmp(ivmode, "lmk") == 0) {
-		cc->iv_gen_ops = &crypt_iv_lmk_ops;
-		/* Version 2 and 3 is recognised according
-		 * to length of provided multi-key string.
-		 * If present (version 3), last key is used as IV seed.
-		 */
-		if (cc->key_size % cc->key_parts)
-			cc->key_parts++;
-	} else {
+	else {
 		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
 		goto bad;
@@ -1615,9 +1213,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
+	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
 			   ~(crypto_tfm_ctx_alignment() - 1);
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1626,6 +1224,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
+	cc->req = NULL;
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
@@ -1682,20 +1281,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	ret = -ENOMEM;
-	cc->io_queue = alloc_workqueue("kcryptd_io",
-				       WQ_NON_REENTRANT|
-				       WQ_MEM_RECLAIM,
-				       1);
+	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
-	cc->crypt_queue = alloc_workqueue("kcryptd",
-					  WQ_NON_REENTRANT|
-					  WQ_CPU_INTENSIVE|
-					  WQ_MEM_RECLAIM,
-					  1);
+	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
@@ -1732,10 +1324,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
-	if (bio_data_dir(io->base_bio) == READ) {
-		if (kcryptd_io_read(io, GFP_NOWAIT))
-			kcryptd_queue_io(io);
-	} else
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_queue_io(io);
+	else
 		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
@@ -1869,7 +1460,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 11, 0},
+	.version = {1, 8, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
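For readers skimming the patch: after this change every IV generator receives a bare sector_t instead of a struct dm_crypt_request, so the "plain" and "plain64" modes reduce to packing the sector number little-endian into a zeroed IV buffer, exactly as crypt_iv_plain_gen() and crypt_iv_plain64_gen() do above. Below is a minimal user-space C sketch of just that packing, under stated assumptions: the helpers put_le32/put_le64 stand in for the kernel's cpu_to_le32/cpu_to_le64, and the 16-byte IV size is illustrative, not kernel API.

/*
 * Hypothetical user-space sketch of dm-crypt's sector-based IVs
 * (not kernel code): "plain" packs the low 32 bits of the sector
 * little-endian, "plain64" the full 64 bits; the rest is zeroed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store a value little-endian, as the kernel's cpu_to_le32() would. */
static void put_le32(uint8_t *p, uint32_t v)
{
	for (int i = 0; i < 4; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

/* Store a value little-endian, as the kernel's cpu_to_le64() would. */
static void put_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

/* "plain": IV = LE32(sector & 0xffffffff), zero-padded to iv_size. */
static void iv_plain(uint8_t *iv, unsigned iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);
	put_le32(iv, (uint32_t)(sector & 0xffffffff));
}

/* "plain64": IV = LE64(sector), zero-padded to iv_size. */
static void iv_plain64(uint8_t *iv, unsigned iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);
	put_le64(iv, sector);
}

int main(void)
{
	uint8_t iv[16]; /* 16-byte IV chosen for illustration (AES block) */

	iv_plain(iv, sizeof(iv), 0x123456789ULL); /* truncated to 32 bits */
	for (unsigned i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");

	iv_plain64(iv, sizeof(iv), 0x123456789ULL); /* full 64-bit sector */
	for (unsigned i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}

Note how "plain" masks the sector with 0xffffffff: IVs repeat once a device exceeds 2^32 sectors, which is why the full 64-bit variants (plain64, or the essiv generator that additionally encrypts the packed sector with a salt-derived key) exist.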