author    Linus Torvalds <torvalds@linux-foundation.org>  2025-12-12 22:08:09 +1200
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-12-12 22:08:09 +1200
commit    187d0801404f415f22c0b31531982c7ea97fa341 (patch)
tree      6df127ce340741832b653fba9a0bd6c948a79acd /lib/crypto/blake2s.c
parent    35ebee7e720944a66befb5899c72ce1e01dfa44e (diff)
parent    f6a458746f905adb7d70e50e8b9383dc9e3fd75f (diff)
Merge tag 'libcrypto-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux
Pull crypto library fixes from Eric Biggers:
 "Fixes for some recent regressions as well as some longstanding issues:

   - Fix incorrect output from the arm64 NEON implementation of GHASH

   - Merge the ksimd scopes in the arm64 XTS code to reduce stack usage

   - Roll up the BLAKE2b round loop on 32-bit kernels to greatly reduce
     code size and stack usage

   - Add missing RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS dependency

   - Fix chacha-riscv64-zvkb.S to not use frame pointer for data"

* tag 'libcrypto-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux:
  crypto: arm64/ghash - Fix incorrect output from ghash-neon
  crypto/arm64: sm4/xts - Merge ksimd scopes to reduce stack bloat
  crypto/arm64: aes/xts - Use single ksimd scope to reduce stack bloat
  lib/crypto: blake2s: Replace manual unrolling with unrolled_full
  lib/crypto: blake2b: Roll up BLAKE2b round loop on 32-bit
  lib/crypto: riscv: Depend on RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
  lib/crypto: riscv/chacha: Avoid s0/fp register
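Background note on the unrolled_full annotation that the blake2s patch below switches to: it comes from include/linux/unroll.h and expands to a compiler loop pragma placed directly before the loop. The following is a simplified, self-contained sketch of the idea, not the kernel header itself (the real header keys off CONFIG_CC_IS_CLANG and the GCC version rather than __clang__, and sum8() is a toy user added purely for illustration):

/*
 * Sketch of the pragma helpers along the lines of include/linux/unroll.h;
 * the guards are simplified for illustration.  Placing "unrolled_full"
 * directly before a for loop asks the compiler to fully unroll it.
 */
#ifdef __clang__
#define __pick_unrolled(x, y)	_Pragma(#x)	/* clang loop pragmas */
#else
#define __pick_unrolled(x, y)	_Pragma(#y)	/* GCC unroll pragmas */
#endif
#define unrolled_full	__pick_unrolled(clang loop unroll(full), GCC unroll 65534)

/* Toy user (not from the kernel): the trip count is a compile-time
 * constant, so the eight iterations become straight-line code and every
 * v[i] index folds to a constant. */
static unsigned int sum8(const unsigned int v[8])
{
	unsigned int s = 0;

	unrolled_full
	for (int i = 0; i < 8; i++)
		s += v[i];
	return s;
}

The point of expressing the unrolling this way, rather than writing the iterations out by hand, is that the source stays a plain loop while the compiler still sees constant loop indices it can fold.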
Diffstat (limited to 'lib/crypto/blake2s.c')
-rw-r--r--  lib/crypto/blake2s.c  38
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 6182c21ed943..71578a084742 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/unroll.h>
#include <linux/types.h>

static const u8 blake2s_sigma[10][16] = {
@@ -71,29 +72,22 @@ blake2s_compress_generic(struct blake2s_ctx *ctx,
b = ror32(b ^ c, 7); \
} while (0)

-#define ROUND(r) do { \
- G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
- G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
- G(r, 2, v[2], v[ 6], v[10], v[14]); \
- G(r, 3, v[3], v[ 7], v[11], v[15]); \
- G(r, 4, v[0], v[ 5], v[10], v[15]); \
- G(r, 5, v[1], v[ 6], v[11], v[12]); \
- G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
- G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
-} while (0)
- ROUND(0);
- ROUND(1);
- ROUND(2);
- ROUND(3);
- ROUND(4);
- ROUND(5);
- ROUND(6);
- ROUND(7);
- ROUND(8);
- ROUND(9);
-
+ /*
+ * Unroll the rounds loop to enable constant-folding of the
+ * blake2s_sigma values.
+ */
+ unrolled_full
+ for (int r = 0; r < 10; r++) {
+ G(r, 0, v[0], v[4], v[8], v[12]);
+ G(r, 1, v[1], v[5], v[9], v[13]);
+ G(r, 2, v[2], v[6], v[10], v[14]);
+ G(r, 3, v[3], v[7], v[11], v[15]);
+ G(r, 4, v[0], v[5], v[10], v[15]);
+ G(r, 5, v[1], v[6], v[11], v[12]);
+ G(r, 6, v[2], v[7], v[8], v[13]);
+ G(r, 7, v[3], v[4], v[9], v[14]);
+ }
#undef G
-#undef ROUND

for (i = 0; i < 8; ++i)
ctx->h[i] ^= v[i] ^ v[i + 8];
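
Only the tail of the G() quarter-round macro is visible as context in the hunk above. For reference, in the pre-existing code it reads roughly as follows (a sketch of the surrounding file, not part of this diff; m[] holds the 16 message words and ror32() is the kernel's 32-bit rotate):

#define G(r, i, a, b, c, d) do { \
	a += b + m[blake2s_sigma[r][2 * i + 0]]; \
	d = ror32(d ^ a, 16); \
	c += d; \
	b = ror32(b ^ c, 12); \
	a += b + m[blake2s_sigma[r][2 * i + 1]]; \
	d = ror32(d ^ a, 8); \
	c += d; \
	b = ror32(b ^ c, 7); \
} while (0)

With the rounds loop fully unrolled, r and i are compile-time constants in every copy of G(), so each m[blake2s_sigma[r][2 * i + ...]] load folds to a fixed message-word index with no runtime table lookup, which is exactly what the old hand-written ROUND(0)..ROUND(9) sequence achieved. For example, since blake2s_sigma[0] is the identity permutation { 0, 1, ..., 15 }, the first G() of round 0 mixes m[0] and m[1] directly.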