summary refs log tree commit diff
path: root/lib/crypto
diff options
context:
space:
mode:
authorEric Biggers <ebiggers@kernel.org>2026-03-31 17:05:47 -0700
committerEric Biggers <ebiggers@kernel.org>2026-04-01 13:02:10 -0700
commit6d575f11c70b0ceff7db47813ebb7aec09e8d01f (patch)
treec98090ee30af879414a6b841bb341d6aa9cdb4f4 /lib/crypto
parent7116418f6b00faf43e56f0e052b968b04fc75989 (diff)
lib/crypto: arm64/sha3: Remove obsolete chunking logic
Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the SHA-3 code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-9-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Diffstat (limited to 'lib/crypto')
-rw-r--r--lib/crypto/arm64/sha3-ce-core.S8
-rw-r--r--lib/crypto/arm64/sha3.h15
2 files changed, 7 insertions, 16 deletions
diff --git a/lib/crypto/arm64/sha3-ce-core.S b/lib/crypto/arm64/sha3-ce-core.S
index ace90b506490..b8ab01987ae0 100644
--- a/lib/crypto/arm64/sha3-ce-core.S
+++ b/lib/crypto/arm64/sha3-ce-core.S
@@ -37,8 +37,8 @@
.endm
/*
- * size_t sha3_ce_transform(struct sha3_state *state, const u8 *data,
- * size_t nblocks, size_t block_size)
+ * void sha3_ce_transform(struct sha3_state *state, const u8 *data,
+ * size_t nblocks, size_t block_size)
*
* block_size is assumed to be one of 72 (SHA3-512), 104 (SHA3-384), 136
* (SHA3-256 and SHAKE256), 144 (SHA3-224), or 168 (SHAKE128).
@@ -185,18 +185,16 @@ SYM_FUNC_START(sha3_ce_transform)
eor v0.16b, v0.16b, v31.16b
cbnz w8, 3b
- cond_yield 4f, x8, x9
cbnz x2, 0b
/* save state */
-4: st1 { v0.1d- v3.1d}, [x0], #32
+ st1 { v0.1d- v3.1d}, [x0], #32
st1 { v4.1d- v7.1d}, [x0], #32
st1 { v8.1d-v11.1d}, [x0], #32
st1 {v12.1d-v15.1d}, [x0], #32
st1 {v16.1d-v19.1d}, [x0], #32
st1 {v20.1d-v23.1d}, [x0], #32
st1 {v24.1d}, [x0]
- mov x0, x2
ret
SYM_FUNC_END(sha3_ce_transform)
diff --git a/lib/crypto/arm64/sha3.h b/lib/crypto/arm64/sha3.h
index b602f1b3b282..eaaba3224acc 100644
--- a/lib/crypto/arm64/sha3.h
+++ b/lib/crypto/arm64/sha3.h
@@ -12,22 +12,15 @@
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha3);
-asmlinkage size_t sha3_ce_transform(struct sha3_state *state, const u8 *data,
- size_t nblocks, size_t block_size);
+asmlinkage void sha3_ce_transform(struct sha3_state *state, const u8 *data,
+ size_t nblocks, size_t block_size);
static void sha3_absorb_blocks(struct sha3_state *state, const u8 *data,
size_t nblocks, size_t block_size)
{
if (static_branch_likely(&have_sha3) && likely(may_use_simd())) {
- do {
- size_t rem;
-
- scoped_ksimd()
- rem = sha3_ce_transform(state, data, nblocks,
- block_size);
- data += (nblocks - rem) * block_size;
- nblocks = rem;
- } while (nblocks);
+ scoped_ksimd()
+ sha3_ce_transform(state, data, nblocks, block_size);
} else {
sha3_absorb_blocks_generic(state, data, nblocks, block_size);
}