 arch/arm64/crypto/aes-glue.c           | 75
 arch/arm64/crypto/aes-neonbs-glue.c    | 44
 arch/arm64/crypto/ghash-ce-glue.c      |  2
 arch/arm64/crypto/sm4-ce-glue.c        | 42
 arch/riscv/crypto/Kconfig              | 12
 lib/crypto/Kconfig                     |  9
 lib/crypto/Makefile                    |  1
 lib/crypto/blake2b.c                   | 44
 lib/crypto/blake2s.c                   | 38
 lib/crypto/riscv/chacha-riscv64-zvkb.S |  5
 10 files changed, 130 insertions(+), 142 deletions(-)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index b087b900d279..c51d4487e9e9 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -549,38 +549,37 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req)
 		tail = 0;
 	}
 
-	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
-		int nbytes = walk.nbytes;
+	scoped_ksimd() {
+		for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+			int nbytes = walk.nbytes;
 
-		if (walk.nbytes < walk.total)
-			nbytes &= ~(AES_BLOCK_SIZE - 1);
+			if (walk.nbytes < walk.total)
+				nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-		scoped_ksimd()
 			aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 					ctx->key1.key_enc, rounds, nbytes,
 					ctx->key2.key_enc, walk.iv, first);
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-	}
+			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		}
 
-	if (err || likely(!tail))
-		return err;
+		if (err || likely(!tail))
+			return err;
 
-	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
-	if (req->dst != req->src)
-		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+		if (req->dst != req->src)
+			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 
-	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
-				   req->iv);
+		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+					   req->iv);
 
-	err = skcipher_walk_virt(&walk, &subreq, false);
-	if (err)
-		return err;
+		err = skcipher_walk_virt(&walk, &subreq, false);
+		if (err)
+			return err;
 
-	scoped_ksimd()
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				ctx->key1.key_enc, rounds, walk.nbytes,
 				ctx->key2.key_enc, walk.iv, first);
-
+	}
 	return skcipher_walk_done(&walk, 0);
 }
 
@@ -619,39 +618,37 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
 		tail = 0;
 	}
 
-	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
-		int nbytes = walk.nbytes;
+	scoped_ksimd() {
+		for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+			int nbytes = walk.nbytes;
 
-		if (walk.nbytes < walk.total)
-			nbytes &= ~(AES_BLOCK_SIZE - 1);
+			if (walk.nbytes < walk.total)
+				nbytes &= ~(AES_BLOCK_SIZE - 1);
 
-		scoped_ksimd()
 			aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 					ctx->key1.key_dec, rounds, nbytes,
 					ctx->key2.key_enc, walk.iv, first);
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-	}
+			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		}
 
-	if (err || likely(!tail))
-		return err;
-
-	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
-	if (req->dst != req->src)
-		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+		if (err || likely(!tail))
+			return err;
 
-	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
-				   req->iv);
+		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+		if (req->dst != req->src)
+			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 
-	err = skcipher_walk_virt(&walk, &subreq, false);
-	if (err)
-		return err;
+		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+					   req->iv);
+		err = skcipher_walk_virt(&walk, &subreq, false);
+		if (err)
+			return err;
 
-	scoped_ksimd()
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				ctx->key1.key_dec, rounds, walk.nbytes,
 				ctx->key2.key_enc, walk.iv, first);
-
+	}
 	return skcipher_walk_done(&walk, 0);
 }
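The aes-glue.c hunks above hoist the SIMD scope out of the per-chunk loop: the NEON unit is claimed once around the whole skcipher walk instead of once per processed chunk, and the same restructuring is applied to the bit-sliced NEON and SM4 drivers below. A minimal user-space sketch of the scoping pattern follows; it only assumes that scoped_ksimd() behaves like a begin/end pair wrapped around the attached block, and simd_begin()/simd_end() are hypothetical stand-ins rather than kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's SIMD begin/end calls. */
static void simd_begin(void) { puts("enter SIMD section"); }
static void simd_end(void)   { puts("leave SIMD section"); }

/*
 * A one-shot for loop turns the begin/end pair into a scope: the
 * attached statement or block runs exactly once, bracketed by the two
 * calls.  This mirrors the shape of scoped_ksimd(); it is not the
 * kernel's actual definition.
 */
#define scoped_simd() \
	for (int once = (simd_begin(), 1); once; once = (simd_end(), 0))

int main(void)
{
	/* Old shape: enter and leave the SIMD section on every chunk. */
	for (int i = 0; i < 3; i++)
		scoped_simd()
			printf("  process chunk %d\n", i);

	/* New shape: one SIMD section around the whole loop. */
	scoped_simd() {
		for (int i = 0; i < 3; i++)
			printf("  process chunk %d\n", i);
	}
	return 0;
}

Note that the kernel code returns from inside the new blocks, so the real macro has to release the SIMD unit on any scope exit (for example via a cleanup attribute); the plain one-shot loop above does not handle early returns.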
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index d496effb0a5b..cb87c8fc66b3 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -312,13 +312,13 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 	if (err)
 		return err;
 
-	while (walk.nbytes >= AES_BLOCK_SIZE) {
-		int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
-		out = walk.dst.virt.addr;
-		in = walk.src.virt.addr;
-		nbytes = walk.nbytes;
+	scoped_ksimd() {
+		while (walk.nbytes >= AES_BLOCK_SIZE) {
+			int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7;
+			out = walk.dst.virt.addr;
+			in = walk.src.virt.addr;
+			nbytes = walk.nbytes;
 
-		scoped_ksimd() {
 			if (blocks >= 8) {
 				if (first == 1)
 					neon_aes_ecb_encrypt(walk.iv, walk.iv,
@@ -344,30 +344,28 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 						     ctx->twkey, walk.iv, first);
 				nbytes = first = 0;
 			}
+			err = skcipher_walk_done(&walk, nbytes);
 		}
-		err = skcipher_walk_done(&walk, nbytes);
-	}
 
-	if (err || likely(!tail))
-		return err;
+		if (err || likely(!tail))
+			return err;
 
-	/* handle ciphertext stealing */
-	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
-	if (req->dst != req->src)
-		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+		/* handle ciphertext stealing */
+		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+		if (req->dst != req->src)
+			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
 
-	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
-				   req->iv);
+		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+					   req->iv);
 
-	err = skcipher_walk_virt(&walk, req, false);
-	if (err)
-		return err;
+		err = skcipher_walk_virt(&walk, req, false);
+		if (err)
+			return err;
 
-	out = walk.dst.virt.addr;
-	in = walk.src.virt.addr;
-	nbytes = walk.nbytes;
+		out = walk.dst.virt.addr;
+		in = walk.src.virt.addr;
+		nbytes = walk.nbytes;
 
-	scoped_ksimd() {
 		if (encrypt)
 			neon_aes_xts_encrypt(out, in, ctx->cts.key_enc,
 					     ctx->key.rounds, nbytes, ctx->twkey,
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 7951557a285a..ef249d06c92c 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -133,7 +133,7 @@ static int ghash_finup(struct shash_desc *desc, const u8 *src,
 		u8 buf[GHASH_BLOCK_SIZE] = {};
 
 		memcpy(buf, src, len);
-		ghash_do_simd_update(1, ctx->digest, src, key, NULL,
+		ghash_do_simd_update(1, ctx->digest, buf, key, NULL,
 				     pmull_ghash_update_p8);
 		memzero_explicit(buf, sizeof(buf));
 	}
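The ghash-ce-glue.c hunk is a one-line bug fix in ghash_finup(): the partial final block is copied into a zero-padded buffer, but the update was still handed the original src pointer, so the padded copy was never used. A small sketch of the intended tail handling, with process_block() and the 16-byte block size as illustrative stand-ins for the GHASH update and GHASH_BLOCK_SIZE:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Illustrative stand-in for the real block update (GHASH in the patch). */
static void process_block(const uint8_t block[BLOCK_SIZE])
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		printf("%02x", block[i]);
	putchar('\n');
}

/*
 * Handle a final block shorter than BLOCK_SIZE: copy it into a zeroed
 * buffer and pass the buffer, not the original pointer.  Passing src
 * here would consume whatever follows the caller's data, which is the
 * mistake fixed in ghash_finup().
 */
static void finup_tail(const uint8_t *src, size_t len)
{
	uint8_t buf[BLOCK_SIZE] = { 0 };

	memcpy(buf, src, len);
	process_block(buf);            /* was process_block(src) */
	memset(buf, 0, sizeof(buf));   /* scrub the copy */
}

int main(void)
{
	static const uint8_t msg[5] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };

	finup_tail(msg, sizeof(msg));
	return 0;
}

The kernel uses memzero_explicit() rather than a plain memset() for the final scrub because the compiler is allowed to drop a memset() of a buffer that is about to go out of scope.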
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 5569cece5a0b..0eeabfa9ef25 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -346,11 +346,11 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
 		tail = 0;
 	}
 
-	while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
-		if (nbytes < walk.total)
-			nbytes &= ~(SM4_BLOCK_SIZE - 1);
+	scoped_ksimd() {
+		while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
+			if (nbytes < walk.total)
+				nbytes &= ~(SM4_BLOCK_SIZE - 1);
 
-		scoped_ksimd() {
 			if (encrypt)
 				sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
 					       walk.src.virt.addr, walk.iv, nbytes,
@@ -359,32 +359,30 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
 				sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
 					       walk.src.virt.addr, walk.iv, nbytes,
 					       rkey2_enc);
-		}
 
-		rkey2_enc = NULL;
+			rkey2_enc = NULL;
 
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-		if (err)
-			return err;
-	}
+			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+			if (err)
+				return err;
+		}
 
-	if (likely(tail == 0))
-		return 0;
+		if (likely(tail == 0))
+			return 0;
 
-	/* handle ciphertext stealing */
+		/* handle ciphertext stealing */
 
-	dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
-	if (req->dst != req->src)
-		dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
+		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
+		if (req->dst != req->src)
+			dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
 
-	skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail,
-				   req->iv);
+		skcipher_request_set_crypt(&subreq, src, dst,
+					   SM4_BLOCK_SIZE + tail, req->iv);
 
-	err = skcipher_walk_virt(&walk, &subreq, false);
-	if (err)
-		return err;
+		err = skcipher_walk_virt(&walk, &subreq, false);
+		if (err)
+			return err;
 
-	scoped_ksimd() {
 		if (encrypt)
 			sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
 				       walk.src.virt.addr, walk.iv, walk.nbytes,
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index a75d6325607b..14c5acb935e9 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -4,7 +4,8 @@ menu "Accelerated Cryptographic Algorithms for CPU (riscv)"
 
 config CRYPTO_AES_RISCV64
 	tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS"
-	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	select CRYPTO_ALGAPI
 	select CRYPTO_LIB_AES
 	select CRYPTO_SKCIPHER
@@ -20,7 +21,8 @@ config CRYPTO_AES_RISCV64
 
 config CRYPTO_GHASH_RISCV64
 	tristate "Hash functions: GHASH"
-	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	select CRYPTO_GCM
 	help
 	  GCM GHASH function (NIST SP 800-38D)
@@ -30,7 +32,8 @@ config CRYPTO_GHASH_RISCV64
 
 config CRYPTO_SM3_RISCV64
 	tristate "Hash functions: SM3 (ShangMi 3)"
-	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	select CRYPTO_HASH
 	select CRYPTO_LIB_SM3
 	help
@@ -42,7 +45,8 @@ config CRYPTO_SM3_RISCV64
 
 config CRYPTO_SM4_RISCV64
 	tristate "Ciphers: SM4 (ShangMi 4)"
-	depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		   RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	select CRYPTO_ALGAPI
 	select CRYPTO_SM4
 	help
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index a3647352bff6..6871a41e5069 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -61,7 +61,8 @@ config CRYPTO_LIB_CHACHA_ARCH
 	default y if ARM64 && KERNEL_MODE_NEON
 	default y if MIPS && CPU_MIPS32_R2
 	default y if PPC64 && CPU_LITTLE_ENDIAN && VSX
-	default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	default y if S390
 	default y if X86_64
 
@@ -184,7 +185,8 @@ config CRYPTO_LIB_SHA256_ARCH
 	default y if ARM64
 	default y if MIPS && CPU_CAVIUM_OCTEON
 	default y if PPC && SPE
-	default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	default y if S390
 	default y if SPARC64
 	default y if X86_64
@@ -202,7 +204,8 @@ config CRYPTO_LIB_SHA512_ARCH
 	default y if ARM && !CPU_V7M
 	default y if ARM64
 	default y if MIPS && CPU_CAVIUM_OCTEON
-	default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
+	default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
+		     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
 	default y if S390
 	default y if SPARC64
 	default y if X86_64
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index b5346cebbb55..330ab65b29c4 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
 
 obj-$(CONFIG_CRYPTO_LIB_BLAKE2B) += libblake2b.o
 libblake2b-y := blake2b.o
-CFLAGS_blake2b.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930
 ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2B_ARCH),y)
 CFLAGS_blake2b.o += -I$(src)/$(SRCARCH)
 libblake2b-$(CONFIG_ARM) += arm/blake2b-neon-core.o
diff --git a/lib/crypto/blake2b.c b/lib/crypto/blake2b.c
index 09c6d65d8a6e..581b7f8486fa 100644
--- a/lib/crypto/blake2b.c
+++ b/lib/crypto/blake2b.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/unroll.h>
 #include <linux/types.h>
 
 static const u8 blake2b_sigma[12][16] = {
@@ -73,31 +74,26 @@ blake2b_compress_generic(struct blake2b_ctx *ctx,
 	b = ror64(b ^ c, 63); \
 } while (0)
 
-#define ROUND(r) do { \
-	G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
-	G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
-	G(r, 2, v[2], v[ 6], v[10], v[14]); \
-	G(r, 3, v[3], v[ 7], v[11], v[15]); \
-	G(r, 4, v[0], v[ 5], v[10], v[15]); \
-	G(r, 5, v[1], v[ 6], v[11], v[12]); \
-	G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
-	G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
-	ROUND(0);
-	ROUND(1);
-	ROUND(2);
-	ROUND(3);
-	ROUND(4);
-	ROUND(5);
-	ROUND(6);
-	ROUND(7);
-	ROUND(8);
-	ROUND(9);
-	ROUND(10);
-	ROUND(11);
-
+#ifdef CONFIG_64BIT
+	/*
+	 * Unroll the rounds loop to enable constant-folding of the
+	 * blake2b_sigma values.  Seems worthwhile on 64-bit kernels.
+	 * Not worthwhile on 32-bit kernels because the code size is
+	 * already so large there due to BLAKE2b using 64-bit words.
+	 */
+	unrolled_full
+#endif
+	for (int r = 0; r < 12; r++) {
+		G(r, 0, v[0], v[4], v[8], v[12]);
+		G(r, 1, v[1], v[5], v[9], v[13]);
+		G(r, 2, v[2], v[6], v[10], v[14]);
+		G(r, 3, v[3], v[7], v[11], v[15]);
+		G(r, 4, v[0], v[5], v[10], v[15]);
+		G(r, 5, v[1], v[6], v[11], v[12]);
+		G(r, 6, v[2], v[7], v[8], v[13]);
+		G(r, 7, v[3], v[4], v[9], v[14]);
+	}
 #undef G
-#undef ROUND
 
 	for (i = 0; i < 8; ++i)
 		ctx->h[i] ^= v[i] ^ v[i + 8];
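The blake2b.c hunk (and the matching blake2s.c hunk below) replaces the hand-unrolled ROUND() invocations with a rounds loop marked unrolled_full, relying on full unrolling to make the round index a compile-time constant so the sigma table lookups can fold away. A stand-alone sketch of that effect, using a plain GCC-style unroll pragma in place of the kernel annotation and a made-up 4x4 permutation table:

#include <stdio.h>

/* Made-up permutation table standing in for blake2b_sigma/blake2s_sigma. */
static const unsigned char sigma[4][4] = {
	{ 0, 1, 2, 3 },
	{ 3, 0, 1, 2 },
	{ 2, 3, 0, 1 },
	{ 1, 2, 3, 0 },
};

static unsigned int compress(const unsigned int m[4])
{
	unsigned int h = 0;

	/*
	 * Fully unroll the rounds loop.  Each unrolled copy has a
	 * constant r, so sigma[r][i] becomes a constant index and the
	 * table load can disappear at compile time, which is the effect
	 * the kernel patch asks for with unrolled_full.
	 */
#pragma GCC unroll 4
	for (int r = 0; r < 4; r++)
		for (int i = 0; i < 4; i++)
			h = (h ^ m[sigma[r][i]]) * 0x9e3779b1u;

	return h;
}

int main(void)
{
	static const unsigned int msg[4] = { 1, 2, 3, 4 };

	printf("%08x\n", compress(msg));
	return 0;
}

Whether the loads actually fold is up to the optimizer, but once the loop is fully unrolled every sigma[r][i] index is a constant, which is all the constant-folding needs.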
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 6182c21ed943..71578a084742 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/unroll.h>
 #include <linux/types.h>
 
 static const u8 blake2s_sigma[10][16] = {
@@ -71,29 +72,22 @@ blake2s_compress_generic(struct blake2s_ctx *ctx,
 	b = ror32(b ^ c, 7); \
 } while (0)
 
-#define ROUND(r) do { \
-	G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
-	G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
-	G(r, 2, v[2], v[ 6], v[10], v[14]); \
-	G(r, 3, v[3], v[ 7], v[11], v[15]); \
-	G(r, 4, v[0], v[ 5], v[10], v[15]); \
-	G(r, 5, v[1], v[ 6], v[11], v[12]); \
-	G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
-	G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
-	ROUND(0);
-	ROUND(1);
-	ROUND(2);
-	ROUND(3);
-	ROUND(4);
-	ROUND(5);
-	ROUND(6);
-	ROUND(7);
-	ROUND(8);
-	ROUND(9);
-
+	/*
+	 * Unroll the rounds loop to enable constant-folding of the
+	 * blake2s_sigma values.
+	 */
+	unrolled_full
+	for (int r = 0; r < 10; r++) {
+		G(r, 0, v[0], v[4], v[8], v[12]);
+		G(r, 1, v[1], v[5], v[9], v[13]);
+		G(r, 2, v[2], v[6], v[10], v[14]);
+		G(r, 3, v[3], v[7], v[11], v[15]);
+		G(r, 4, v[0], v[5], v[10], v[15]);
+		G(r, 5, v[1], v[6], v[11], v[12]);
+		G(r, 6, v[2], v[7], v[8], v[13]);
+		G(r, 7, v[3], v[4], v[9], v[14]);
+	}
 #undef G
-#undef ROUND
 
 	for (i = 0; i < 8; ++i)
 		ctx->h[i] ^= v[i] ^ v[i + 8];
diff --git a/lib/crypto/riscv/chacha-riscv64-zvkb.S b/lib/crypto/riscv/chacha-riscv64-zvkb.S
index b777d0b4e379..3d183ec818f5 100644
--- a/lib/crypto/riscv/chacha-riscv64-zvkb.S
+++ b/lib/crypto/riscv/chacha-riscv64-zvkb.S
@@ -60,7 +60,8 @@
 #define VL		t2
 #define STRIDE		t3
 #define ROUND_CTR	t4
-#define KEY0		s0
+#define KEY0		t5
+// Avoid s0/fp to allow for unwinding
 #define KEY1		s1
 #define KEY2		s2
 #define KEY3		s3
@@ -143,7 +144,6 @@
 // The updated 32-bit counter is written back to state->x[12] before returning.
 SYM_FUNC_START(chacha_zvkb)
 	addi		sp, sp, -96
-	sd		s0, 0(sp)
 	sd		s1, 8(sp)
 	sd		s2, 16(sp)
 	sd		s3, 24(sp)
@@ -280,7 +280,6 @@ SYM_FUNC_START(chacha_zvkb)
 	bnez		NBLOCKS, .Lblock_loop
 
 	sw		COUNTER, 48(STATEP)
-	ld		s0, 0(sp)
 	ld		s1, 8(sp)
 	ld		s2, 16(sp)
 	ld		s3, 24(sp)
