author    Eric Biggers <ebiggers@kernel.org>  2026-01-12 11:20:13 -0800
committer Eric Biggers <ebiggers@kernel.org>  2026-01-15 14:08:55 -0800
commit    0cab15611e839142f4fd3c8a366acd1f7334b30b
tree      8e2fd195499e121bf42f726b5e3b9482f8f5a9e6
parent    a4e573db06a4e8c519ec4c42f8e1249a0853367a
lib/crypto: s390/aes: Migrate optimized code into library
Implement aes_preparekey_arch(), aes_encrypt_arch(), and
aes_decrypt_arch() using the CPACF AES instructions. Then, remove the
superseded "aes-s390" crypto_cipher. The result is that both the AES
library and crypto_cipher APIs use the CPACF AES instructions, whereas
previously only crypto_cipher did (and it wasn't enabled by default,
which this commit fixes as well).

Note that this preserves the optimization where the AES key is stored
in raw form rather than expanded form. CPACF just takes the raw key.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Holger Dengler <dengler@linux.ibm.com>
Reviewed-by: Holger Dengler <dengler@linux.ibm.com>
Link: https://lore.kernel.org/r/20260112192035.10427-16-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
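For context, a minimal caller-side sketch of the library path this commit
accelerates. It assumes the long-standing lib/crypto AES API from
<crypto/aes.h> (aes_expandkey(), aes_encrypt(), aes_decrypt() operating on
struct crypto_aes_ctx); this series reworks the key structures (struct
aes_enckey et al.), so the exact types on top of it may differ:

/*
 * Hedged sketch, not part of this commit: a single-block round trip
 * through the AES library. On s390 with this series applied, the
 * encrypt/decrypt calls below hit the CPACF KM instruction.
 */
#include <crypto/aes.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>

static int __init aes_lib_roundtrip(void)
{
	static const u8 key[AES_KEYSIZE_128] = { [0 ... 15] = 0x42 };
	const u8 pt[AES_BLOCK_SIZE] = "single AES blk!";	/* 15 chars + NUL */
	u8 ct[AES_BLOCK_SIZE], out[AES_BLOCK_SIZE];
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_expandkey(&ctx, key, sizeof(key));
	if (err)
		return err;
	aes_encrypt(&ctx, ct, pt);	/* CPACF KM-AES-128 on s390 */
	aes_decrypt(&ctx, out, ct);
	return memcmp(out, pt, AES_BLOCK_SIZE) ? -EINVAL : 0;
}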
Diffstat (limited to 'lib')
 lib/crypto/Kconfig    |   1 +
 lib/crypto/s390/aes.h | 106 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 107 insertions(+), 0 deletions(-)
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 2690b5ffc5ca..56a9b4f53b0e 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -19,6 +19,7 @@ config CRYPTO_LIB_AES_ARCH
 	default y if PPC && (SPE || (PPC64 && VSX))
 	default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
 		     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
+	default y if S390
 
 config CRYPTO_LIB_AESCFB
 	tristate
diff --git a/lib/crypto/s390/aes.h b/lib/crypto/s390/aes.h
new file mode 100644
index 000000000000..5466f6ecbce7
--- /dev/null
+++ b/lib/crypto/s390/aes.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AES optimized using the CP Assist for Cryptographic Functions (CPACF)
+ *
+ * Copyright 2026 Google LLC
+ */
+#include <asm/cpacf.h>
+#include <linux/cpufeature.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes128);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes192);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes256);
+
+/*
+ * When the CPU supports CPACF AES for the requested key length, we need only
+ * save a copy of the raw AES key, as that's what the CPACF instructions need.
+ *
+ * When unsupported, fall back to the generic key expansion and en/decryption.
+ */
+static void aes_preparekey_arch(union aes_enckey_arch *k,
+				union aes_invkey_arch *inv_k,
+				const u8 *in_key, int key_len, int nrounds)
+{
+	if (key_len == AES_KEYSIZE_128) {
+		if (static_branch_likely(&have_cpacf_aes128)) {
+			memcpy(k->raw_key, in_key, AES_KEYSIZE_128);
+			return;
+		}
+	} else if (key_len == AES_KEYSIZE_192) {
+		if (static_branch_likely(&have_cpacf_aes192)) {
+			memcpy(k->raw_key, in_key, AES_KEYSIZE_192);
+			return;
+		}
+	} else {
+		if (static_branch_likely(&have_cpacf_aes256)) {
+			memcpy(k->raw_key, in_key, AES_KEYSIZE_256);
+			return;
+		}
+	}
+	aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
+			      in_key, key_len);
+}
+
+static inline bool aes_crypt_s390(const struct aes_enckey *key,
+				  u8 out[AES_BLOCK_SIZE],
+				  const u8 in[AES_BLOCK_SIZE], int decrypt)
+{
+	if (key->len == AES_KEYSIZE_128) {
+		if (static_branch_likely(&have_cpacf_aes128)) {
+			cpacf_km(CPACF_KM_AES_128 | decrypt,
+				 (void *)key->k.raw_key, out, in,
+				 AES_BLOCK_SIZE);
+			return true;
+		}
+	} else if (key->len == AES_KEYSIZE_192) {
+		if (static_branch_likely(&have_cpacf_aes192)) {
+			cpacf_km(CPACF_KM_AES_192 | decrypt,
+				 (void *)key->k.raw_key, out, in,
+				 AES_BLOCK_SIZE);
+			return true;
+		}
+	} else {
+		if (static_branch_likely(&have_cpacf_aes256)) {
+			cpacf_km(CPACF_KM_AES_256 | decrypt,
+				 (void *)key->k.raw_key, out, in,
+				 AES_BLOCK_SIZE);
+			return true;
+		}
+	}
+	return false;
+}
+
+static void aes_encrypt_arch(const struct aes_enckey *key,
+			     u8 out[AES_BLOCK_SIZE],
+			     const u8 in[AES_BLOCK_SIZE])
+{
+	if (likely(aes_crypt_s390(key, out, in, 0)))
+		return;
+	aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
+}
+
+static void aes_decrypt_arch(const struct aes_key *key,
+			     u8 out[AES_BLOCK_SIZE],
+			     const u8 in[AES_BLOCK_SIZE])
+{
+	if (likely(aes_crypt_s390((const struct aes_enckey *)key, out, in,
+				  CPACF_DECRYPT)))
+		return;
+	aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds, out, in);
+}
+
+#define aes_mod_init_arch aes_mod_init_arch
+static void aes_mod_init_arch(void)
+{
+	if (cpu_have_feature(S390_CPU_FEATURE_MSA)) {
+		cpacf_mask_t km_functions;
+
+		cpacf_query(CPACF_KM, &km_functions);
+		if (cpacf_test_func(&km_functions, CPACF_KM_AES_128))
+			static_branch_enable(&have_cpacf_aes128);
+		if (cpacf_test_func(&km_functions, CPACF_KM_AES_192))
+			static_branch_enable(&have_cpacf_aes192);
+		if (cpacf_test_func(&km_functions, CPACF_KM_AES_256))
+			static_branch_enable(&have_cpacf_aes256);
+	}
+}
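The commit message notes that the crypto_cipher API is now backed by the same
code, so single-block "aes" users also get CPACF. A hedged sketch of
exercising that path; note that crypto_cipher is an internal-only API on
modern kernels, so this needs <crypto/internal/cipher.h> and, from a module,
MODULE_IMPORT_NS("CRYPTO_INTERNAL"):

/*
 * Hedged sketch, not from this commit: one AES-256 block through the
 * crypto_cipher API, which this series re-routes onto the AES library
 * (and hence CPACF on s390).
 */
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <linux/err.h>
#include <linux/init.h>

static int __init aes_cipher_demo(void)
{
	static const u8 key[AES_KEYSIZE_256];	/* all-zero demo key */
	u8 in[AES_BLOCK_SIZE] = {}, out[AES_BLOCK_SIZE];
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);
	crypto_free_cipher(tfm);
	return err;
}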