Diffstat (limited to 'arch/arm64/crypto/aes-neonbs-glue.c')
-rw-r--r--	arch/arm64/crypto/aes-neonbs-glue.c | 194
1 file changed, 96 insertions(+), 98 deletions(-)
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index c4a623e86593..cb87c8fc66b3 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -85,9 +85,8 @@ static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
ctx->rounds = 6 + key_len / 4;
- kernel_neon_begin();
- aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
- kernel_neon_end();
+ scoped_ksimd()
+ aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
return 0;
}
@@ -110,10 +109,9 @@ static int __ecb_crypt(struct skcipher_request *req,
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
- kernel_neon_begin();
- fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
- ctx->rounds, blocks);
- kernel_neon_end();
+ scoped_ksimd()
+ fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
+ ctx->rounds, blocks);
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
@@ -146,9 +144,8 @@ static int aesbs_cbc_ctr_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));
- kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
- kernel_neon_end();
+ scoped_ksimd()
+ aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
memzero_explicit(&rk, sizeof(rk));
return 0;
@@ -167,11 +164,11 @@ static int cbc_encrypt(struct skcipher_request *req)
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
/* fall back to the non-bitsliced NEON implementation */
- kernel_neon_begin();
- neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->enc, ctx->key.rounds, blocks,
- walk.iv);
- kernel_neon_end();
+ scoped_ksimd()
+ neon_aes_cbc_encrypt(walk.dst.virt.addr,
+ walk.src.virt.addr,
+ ctx->enc, ctx->key.rounds, blocks,
+ walk.iv);
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
@@ -193,11 +190,10 @@ static int cbc_decrypt(struct skcipher_request *req)
blocks = round_down(blocks,
walk.stride / AES_BLOCK_SIZE);
- kernel_neon_begin();
- aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- ctx->key.rk, ctx->key.rounds, blocks,
- walk.iv);
- kernel_neon_end();
+ scoped_ksimd()
+ aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key.rk, ctx->key.rounds, blocks,
+ walk.iv);
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
}
@@ -220,30 +216,32 @@ static int ctr_encrypt(struct skcipher_request *req)
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- kernel_neon_begin();
- if (blocks >= 8) {
- aesbs_ctr_encrypt(dst, src, ctx->key.rk, ctx->key.rounds,
- blocks, walk.iv);
- dst += blocks * AES_BLOCK_SIZE;
- src += blocks * AES_BLOCK_SIZE;
- }
- if (nbytes && walk.nbytes == walk.total) {
- u8 buf[AES_BLOCK_SIZE];
- u8 *d = dst;
-
- if (unlikely(nbytes < AES_BLOCK_SIZE))
- src = dst = memcpy(buf + sizeof(buf) - nbytes,
- src, nbytes);
-
- neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
- nbytes, walk.iv);
+ scoped_ksimd() {
+ if (blocks >= 8) {
+ aesbs_ctr_encrypt(dst, src, ctx->key.rk,
+ ctx->key.rounds, blocks,
+ walk.iv);
+ dst += blocks * AES_BLOCK_SIZE;
+ src += blocks * AES_BLOCK_SIZE;
+ }
+ if (nbytes && walk.nbytes == walk.total) {
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *d = dst;
+
+ if (unlikely(nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(buf + sizeof(buf) -
+ nbytes, src, nbytes);
+
+ neon_aes_ctr_encrypt(dst, src, ctx->enc,
+ ctx->key.rounds, nbytes,
+ walk.iv);
- if (unlikely(nbytes < AES_BLOCK_SIZE))
- memcpy(d, dst, nbytes);
+ if (unlikely(nbytes < AES_BLOCK_SIZE))
+ memcpy(d, dst, nbytes);
- nbytes = 0;
+ nbytes = 0;
+ }
}
- kernel_neon_end();
err = skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -314,69 +312,69 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
- out = walk.dst.virt.addr;
- in = walk.src.virt.addr;
- nbytes = walk.nbytes;
-
- kernel_neon_begin();
- if (blocks >= 8) {
- if (first == 1)
- neon_aes_ecb_encrypt(walk.iv, walk.iv,
- ctx->twkey,
- ctx->key.rounds, 1);
- first = 2;
-
- fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
- walk.iv);
-
- out += blocks * AES_BLOCK_SIZE;
- in += blocks * AES_BLOCK_SIZE;
- nbytes -= blocks * AES_BLOCK_SIZE;
+ scoped_ksimd() {
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
+ out = walk.dst.virt.addr;
+ in = walk.src.virt.addr;
+ nbytes = walk.nbytes;
+
+ if (blocks >= 8) {
+ if (first == 1)
+ neon_aes_ecb_encrypt(walk.iv, walk.iv,
+ ctx->twkey,
+ ctx->key.rounds, 1);
+ first = 2;
+
+ fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
+ walk.iv);
+
+ out += blocks * AES_BLOCK_SIZE;
+ in += blocks * AES_BLOCK_SIZE;
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ }
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ if (encrypt)
+ neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
+ ctx->key.rounds, nbytes,
+ ctx->twkey, walk.iv, first);
+ else
+ neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
+ ctx->key.rounds, nbytes,
+ ctx->twkey, walk.iv, first);
+ nbytes = first = 0;
+ }
+ err = skcipher_walk_done(&walk, nbytes);
}
- if (walk.nbytes == walk.total && nbytes > 0) {
- if (encrypt)
- neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
- ctx->key.rounds, nbytes,
- ctx->twkey, walk.iv, first);
- else
- neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
- ctx->key.rounds, nbytes,
- ctx->twkey, walk.iv, first);
- nbytes = first = 0;
- }
- kernel_neon_end();
- err = skcipher_walk_done(&walk, nbytes);
- }
- if (err || likely(!tail))
- return err;
+ if (err || likely(!tail))
+ return err;
- /* handle ciphertext stealing */
- dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+ /* handle ciphertext stealing */
+ dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
- skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
- req->iv);
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
- err = skcipher_walk_virt(&walk, req, false);
- if (err)
- return err;
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
+
+ out = walk.dst.virt.addr;
+ in = walk.src.virt.addr;
+ nbytes = walk.nbytes;
- out = walk.dst.virt.addr;
- in = walk.src.virt.addr;
- nbytes = walk.nbytes;
-
- kernel_neon_begin();
- if (encrypt)
- neon_aes_xts_encrypt(out, in, ctx->cts.key_enc, ctx->key.rounds,
- nbytes, ctx->twkey, walk.iv, first);
- else
- neon_aes_xts_decrypt(out, in, ctx->cts.key_dec, ctx->key.rounds,
- nbytes, ctx->twkey, walk.iv, first);
- kernel_neon_end();
+ if (encrypt)
+ neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
+ ctx->key.rounds, nbytes, ctx->twkey,
+ walk.iv, first);
+ else
+ neon_aes_xts_decrypt(out, in, ctx->cts.key_dec,
+ ctx->key.rounds, nbytes, ctx->twkey,
+ walk.iv, first);
+ }
return skcipher_walk_done(&walk, 0);
}
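
Pattern note (illustrative, not part of the commit): every hunk above applies the same mechanical conversion. A kernel_neon_begin()/kernel_neon_end() pair that brackets a single call becomes a scoped_ksimd() statement, and a pair that brackets several statements becomes a scoped_ksimd() { ... } block, so the SIMD-enabled region is bounded by a C scope instead of an explicit end call. A minimal before/after sketch of the shape, where foo_neon_transform() is a hypothetical NEON helper and the header providing scoped_ksimd() is assumed to be included:

/* Before: explicit begin/end bracketing around the NEON call. */
static void transform_old(u8 *dst, const u8 *src, int blocks)
{
        kernel_neon_begin();
        foo_neon_transform(dst, src, blocks);   /* hypothetical helper */
        kernel_neon_end();
}

/* After: the SIMD section is tied to a scope and ends when it is left. */
static void transform_new(u8 *dst, const u8 *src, int blocks)
{
        /* Single-statement form, as in aesbs_setkey() above. */
        scoped_ksimd()
                foo_neon_transform(dst, src, blocks);

        /* Block form, as in ctr_encrypt() and __xts_crypt() above. */
        scoped_ksimd() {
                foo_neon_transform(dst, src, blocks);
                foo_neon_transform(dst, src, blocks);
        }
}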