-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom-qce.yaml  1
-rw-r--r--  MAINTAINERS  2
-rw-r--r--  arch/arm/crypto/Kconfig  9
-rw-r--r--  arch/arm/crypto/aes-ce-glue.c  104
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c  116
-rw-r--r--  arch/arm/crypto/chacha-glue.c  243
-rw-r--r--  arch/arm/crypto/chacha-neon-core.S  2
-rw-r--r--  arch/arm64/crypto/Kconfig  7
-rw-r--r--  arch/arm64/crypto/chacha-neon-core.S  2
-rw-r--r--  arch/arm64/crypto/chacha-neon-glue.c  146
-rw-r--r--  arch/mips/crypto/Kconfig  6
-rw-r--r--  arch/mips/crypto/chacha-glue.c  131
-rw-r--r--  arch/powerpc/crypto/Kconfig  8
-rw-r--r--  arch/powerpc/crypto/chacha-p10-glue.c  147
-rw-r--r--  arch/riscv/crypto/Kconfig  11
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-glue.c  112
-rw-r--r--  arch/riscv/crypto/chacha-riscv64-zvkb.S  71
-rw-r--r--  arch/s390/crypto/Kconfig  7
-rw-r--r--  arch/s390/crypto/chacha-glue.c  99
-rw-r--r--  arch/x86/Kconfig.assembler  9
-rw-r--r--  arch/x86/crypto/Kconfig  25
-rw-r--r--  arch/x86/crypto/Makefile  9
-rw-r--r--  arch/x86/crypto/aegis128-aesni-glue.c  13
-rw-r--r--  arch/x86/crypto/aes-ctr-avx-x86_64.S  47
-rw-r--r--  arch/x86/crypto/aes-xts-avx-x86_64.S  206
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  174
-rw-r--r--  arch/x86/crypto/aria_aesni_avx2_glue.c  22
-rw-r--r--  arch/x86/crypto/aria_aesni_avx_glue.c  20
-rw-r--r--  arch/x86/crypto/aria_gfni_avx512_glue.c  22
-rw-r--r--  arch/x86/crypto/blake2s-core.S  4
-rw-r--r--  arch/x86/crypto/blake2s-glue.c  6
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx2_glue.c  21
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx_glue.c  21
-rw-r--r--  arch/x86/crypto/cast5_avx_glue.c  21
-rw-r--r--  arch/x86/crypto/cast6_avx_glue.c  20
-rw-r--r--  arch/x86/crypto/chacha_glue.c  168
-rw-r--r--  arch/x86/crypto/poly1305-x86_64-cryptogams.pl  8
-rw-r--r--  arch/x86/crypto/poly1305_glue.c  6
-rw-r--r--  arch/x86/crypto/serpent_avx2_glue.c  21
-rw-r--r--  arch/x86/crypto/serpent_avx_glue.c  21
-rw-r--r--  arch/x86/crypto/serpent_sse2_glue.c  21
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c  10
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c  10
-rw-r--r--  arch/x86/crypto/sm4_aesni_avx2_glue.c  31
-rw-r--r--  arch/x86/crypto/sm4_aesni_avx_glue.c  31
-rw-r--r--  arch/x86/crypto/twofish_avx_glue.c  21
-rw-r--r--  arch/x86/kvm/svm/sev.c  12
-rw-r--r--  crypto/Makefile  3
-rw-r--r--  crypto/acompress.c  231
-rw-r--r--  crypto/algapi.c  48
-rw-r--r--  crypto/api.c  10
-rw-r--r--  crypto/chacha.c  260
-rw-r--r--  crypto/chacha_generic.c  139
-rw-r--r--  crypto/deflate.c  363
-rw-r--r--  crypto/internal.h  3
-rw-r--r--  crypto/scompress.c  219
-rw-r--r--  drivers/crypto/Kconfig  7
-rw-r--r--  drivers/crypto/Makefile  4
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c  174
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c  4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c  2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h  10
-rw-r--r--  drivers/crypto/cavium/Makefile  3
-rw-r--r--  drivers/crypto/cavium/zip/Makefile  12
-rw-r--r--  drivers/crypto/cavium/zip/common.h  222
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c  261
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.h  68
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.c  200
-rw-r--r--  drivers/crypto/cavium/zip/zip_deflate.h  62
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.c  202
-rw-r--r--  drivers/crypto/cavium/zip/zip_device.h  108
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.c  223
-rw-r--r--  drivers/crypto/cavium/zip/zip_inflate.h  62
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c  603
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.h  120
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.c  114
-rw-r--r--  drivers/crypto/cavium/zip/zip_mem.h  78
-rw-r--r--  drivers/crypto/cavium/zip/zip_regs.h  1347
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  251
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-hash.c  20
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c  45
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c  1
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_drv.c  10
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c  5
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c  14
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c  1
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c  41
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c  1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c  1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_drv.c  41
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c  1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h  16
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_hal.c  10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c  28
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c  1
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c  41
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c  1
-rw-r--r--  drivers/crypto/omap-aes.c  14
-rw-r--r--  drivers/crypto/omap-sham.c  14
-rw-r--r--  drivers/crypto/sa2ul.c  63
-rw-r--r--  include/crypto/acompress.h  6
-rw-r--r--  include/crypto/algapi.h  5
-rw-r--r--  include/crypto/chacha.h  9
-rw-r--r--  include/crypto/ctr.h  47
-rw-r--r--  include/crypto/hash.h  18
-rw-r--r--  include/crypto/internal/acompress.h  78
-rw-r--r--  include/crypto/internal/chacha.h  43
-rw-r--r--  include/crypto/internal/scompress.h  17
-rw-r--r--  include/linux/psp-sev.h  3
109 files changed, 1867 insertions, 6365 deletions
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
index 3f35122f7873..e009cb712fb8 100644
--- a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml
@@ -45,6 +45,7 @@ properties:
- items:
- enum:
+ - qcom,qcs615-qce
- qcom,qcs8300-qce
- qcom,sa8775p-qce
- qcom,sc7280-qce
diff --git a/MAINTAINERS b/MAINTAINERS
index 96b827049501..a2604c13f11b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14267,8 +14267,6 @@ F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
MARVELL CRYPTO DRIVER
-M: Boris Brezillon <bbrezillon@kernel.org>
-M: Arnaud Ebalard <arno@natisbad.org>
M: Srujana Challa <schalla@marvell.com>
L: linux-crypto@vger.kernel.org
S: Maintained
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 23e4ea067ddb..332f2430beec 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -172,7 +172,6 @@ config CRYPTO_AES_ARM_BS
select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -200,7 +199,6 @@ config CRYPTO_AES_ARM_CE
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -216,15 +214,8 @@ config CRYPTO_AES_ARM_CE
config CRYPTO_CHACHA20_NEON
tristate
- select CRYPTO_SKCIPHER
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: arm using:
- - NEON (Advanced SIMD) extensions
endmenu
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 1cf61f51e766..00591895d540 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -10,8 +10,6 @@
#include <asm/simd.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
@@ -418,29 +416,6 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
-static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
- struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned long flags;
-
- /*
- * Temporarily disable interrupts to avoid races where
- * cachelines are evicted when the CPU is interrupted
- * to do something else.
- */
- local_irq_save(flags);
- aes_encrypt(ctx, dst, src);
- local_irq_restore(flags);
-}
-
-static int ctr_encrypt_sync(struct skcipher_request *req)
-{
- if (!crypto_simd_usable())
- return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
-
- return ctr_encrypt(req);
-}
-
static int xts_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -586,10 +561,9 @@ static int xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
- .base.cra_name = "__ecb(aes)",
- .base.cra_driver_name = "__ecb-aes-ce",
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -600,10 +574,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(aes)",
- .base.cra_driver_name = "__cbc-aes-ce",
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -615,10 +588,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
- .base.cra_name = "__cts(cbc(aes))",
- .base.cra_driver_name = "__cts-cbc-aes-ce",
+ .base.cra_name = "cts(cbc(aes))",
+ .base.cra_driver_name = "cts-cbc-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -631,10 +603,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cts_cbc_encrypt,
.decrypt = cts_cbc_decrypt,
}, {
- .base.cra_name = "__ctr(aes)",
- .base.cra_driver_name = "__ctr-aes-ce",
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -647,25 +618,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-ce-sync",
- .base.cra_priority = 300 - 1,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- .setkey = ce_aes_setkey,
- .encrypt = ctr_encrypt_sync,
- .decrypt = ctr_encrypt_sync,
-}, {
- .base.cra_name = "__xts(aes)",
- .base.cra_driver_name = "__xts-aes-ce",
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
.base.cra_module = THIS_MODULE,
@@ -679,51 +634,14 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = xts_decrypt,
} };
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
static void aes_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
- simd_skcipher_free(aes_simd_algs[i]);
-
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
- int err;
- int i;
-
- err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
- continue;
-
- algname = aes_algs[i].base.cra_name + 2;
- drvname = aes_algs[i].base.cra_driver_name + 2;
- basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aes_simd_algs[i] = simd;
- }
-
- return 0;
-
-unregister_simds:
- aes_exit();
- return err;
+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
module_cpu_feature_match(AES, aes_init);
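With the CRYPTO_ALG_INTERNAL flag and the simd_skcipher_create_compat() wrapping gone, the CE algorithms are registered under their plain names, so crypto API users instantiate them directly. A minimal sketch of a kernel-side caller (the function name here is hypothetical, not from this patch):

    #include <crypto/skcipher.h>

    static int try_ctr_aes_ce(void)
    {
        /* Resolves to "ctr-aes-ce" where it is the highest-priority provider. */
        struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);

        if (IS_ERR(tfm))
            return PTR_ERR(tfm);
        /* ...crypto_skcipher_setkey(), skcipher_request_alloc(), etc... */
        crypto_free_skcipher(tfm);
        return 0;
    }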
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index f6be80b5938b..95418df97fb4 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,8 +8,6 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
@@ -59,11 +57,6 @@ struct aesbs_xts_ctx {
struct crypto_aes_ctx tweak_key;
};
-struct aesbs_ctr_ctx {
- struct aesbs_ctx key; /* must be first member */
- struct crypto_aes_ctx fallback;
-};
-
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -200,25 +193,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = aes_expandkey(&ctx->fallback, in_key, key_len);
- if (err)
- return err;
-
- ctx->key.rounds = 6 + key_len / 4;
-
- kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
- kernel_neon_end();
-
- return 0;
-}
-
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -254,21 +228,6 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
-static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
- struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
-}
-
-static int ctr_encrypt_sync(struct skcipher_request *req)
-{
- if (!crypto_simd_usable())
- return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
-
- return ctr_encrypt(req);
-}
-
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -374,13 +333,12 @@ static int xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
- .base.cra_name = "__ecb(aes)",
- .base.cra_driver_name = "__ecb-aes-neonbs",
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -389,13 +347,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(aes)",
- .base.cra_driver_name = "__cbc-aes-neonbs",
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -405,13 +362,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
- .base.cra_name = "__ctr(aes)",
- .base.cra_driver_name = "__ctr-aes-neonbs",
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -422,29 +378,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-neonbs-sync",
- .base.cra_priority = 250 - 1,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- .walksize = 8 * AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_ctr_setkey_sync,
- .encrypt = ctr_encrypt_sync,
- .decrypt = ctr_encrypt_sync,
-}, {
- .base.cra_name = "__xts(aes)",
- .base.cra_driver_name = "__xts-aes-neonbs",
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -455,54 +394,17 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = xts_decrypt,
} };
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
static void aes_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
- if (aes_simd_algs[i])
- simd_skcipher_free(aes_simd_algs[i]);
-
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
- int err;
- int i;
-
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
- err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
- continue;
-
- algname = aes_algs[i].base.cra_name + 2;
- drvname = aes_algs[i].base.cra_driver_name + 2;
- basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aes_simd_algs[i] = simd;
- }
- return 0;
-
-unregister_simds:
- aes_exit();
- return err;
+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
late_initcall(aes_init);
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index 50e635512046..3a5c75c95d43 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -1,16 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
+ * ChaCha and HChaCha functions (ARM optimized)
*
* Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
* Copyright (C) 2015 Martin Willi
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
+#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -100,206 +97,16 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
}
EXPORT_SYMBOL(chacha_crypt_arch);
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv,
- bool neon)
+bool chacha_is_arch_optimized(void)
{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
- chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, state, ctx->nrounds);
- state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
- } else {
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- kernel_neon_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int do_chacha(struct skcipher_request *req, bool neon)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv, neon);
-}
-
-static int chacha_arm(struct skcipher_request *req)
-{
- return do_chacha(req, false);
-}
-
-static int chacha_neon(struct skcipher_request *req)
-{
- return do_chacha(req, neon_usable());
-}
-
-static int do_xchacha(struct skcipher_request *req, bool neon)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
- hchacha_block_arm(state, subctx.key, ctx->nrounds);
- } else {
- kernel_neon_begin();
- hchacha_block_neon(state, subctx.key, ctx->nrounds);
- kernel_neon_end();
- }
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_stream_xor(req, &subctx, real_iv, neon);
-}
-
-static int xchacha_arm(struct skcipher_request *req)
-{
- return do_xchacha(req, false);
+ /* We always can use at least the ARM scalar implementation. */
+ return true;
}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
-static int xchacha_neon(struct skcipher_request *req)
+static int __init chacha_arm_mod_init(void)
{
- return do_xchacha(req, neon_usable());
-}
-
-static struct skcipher_alg arm_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_arm,
- .decrypt = chacha_arm,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_arm,
- .decrypt = xchacha_arm,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_arm,
- .decrypt = xchacha_arm,
- },
-};
-
-static struct skcipher_alg neon_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_neon,
- .decrypt = chacha_neon,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- int err = 0;
-
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (err)
- return err;
- }
-
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
- int i;
-
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A7:
case ARM_CPU_PART_CORTEX_A5:
@@ -308,45 +115,15 @@ static int __init chacha_simd_mod_init(void)
* the NEON implementation but do incredibly with the
* scalar one and use less power.
*/
- for (i = 0; i < ARRAY_SIZE(neon_algs); i++)
- neon_algs[i].base.cra_priority = 0;
break;
default:
static_branch_enable(&use_neon);
}
-
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
- if (err)
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- }
- }
- return err;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
- crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
}
+ return 0;
}
+arch_initcall(chacha_arm_mod_init);
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)");
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM optimized)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-arm");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-arm");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-arm");
-#ifdef CONFIG_KERNEL_MODE_NEON
-MODULE_ALIAS_CRYPTO("chacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha12-neon");
-#endif
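The removed skcipher algorithms are superseded by the ChaCha library interface declared in <crypto/chacha.h>, backed on ARM by the retained chacha_crypt_arch() above. A minimal sketch of a library caller, assuming the usual helper names (chacha_init(), chacha_crypt()); the wrapper function itself is illustrative only:

    #include <crypto/chacha.h>

    static void chacha20_xor_inplace(const u32 key[8], const u8 iv[16],
                                     u8 *buf, unsigned int len)
    {
        u32 state[CHACHA_STATE_WORDS];    /* 16 x u32 state matrix */

        chacha_init(state, key, iv);
        /* Dispatches to chacha_crypt_arch() when an arch backend is built in. */
        chacha_crypt(state, buf, buf, len, 20);
    }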
diff --git a/arch/arm/crypto/chacha-neon-core.S b/arch/arm/crypto/chacha-neon-core.S
index 13d12f672656..ddd62b6294a5 100644
--- a/arch/arm/crypto/chacha-neon-core.S
+++ b/arch/arm/crypto/chacha-neon-core.S
@@ -1,5 +1,5 @@
/*
- * ChaCha/XChaCha NEON helper functions
+ * ChaCha/HChaCha NEON helper functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 3418c8d3c78d..ce655da0fbee 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -189,16 +189,9 @@ config CRYPTO_AES_ARM64_NEON_BLK
config CRYPTO_CHACHA20_NEON
tristate
depends on KERNEL_MODE_NEON
- select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: arm64 using:
- - NEON (Advanced SIMD) extensions
config CRYPTO_AES_ARM64_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XCTR/XTS modes (bit-sliced NEON)"
diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index b70ac76f2610..80079586ecc7 100644
--- a/arch/arm64/crypto/chacha-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -1,5 +1,5 @@
/*
- * ChaCha/XChaCha NEON helper functions
+ * ChaCha/HChaCha NEON helper functions
*
* Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index 229876acfc58..a0c336b28402 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -1,6 +1,5 @@
/*
- * ARM NEON and scalar accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
+ * ChaCha and HChaCha functions (ARM64 optimized)
*
* Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
@@ -19,10 +18,8 @@
* (at your option) any later version.
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
+#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -95,143 +92,20 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
}
EXPORT_SYMBOL(chacha_crypt_arch);
-static int chacha_neon_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
+bool chacha_is_arch_optimized(void)
{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = rounddown(nbytes, walk.stride);
-
- if (!static_branch_likely(&have_neon) ||
- !crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- kernel_neon_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_neon_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_arch(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_neon_stream_xor(req, &subctx, real_iv);
+ return static_key_enabled(&have_neon);
}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_neon,
- .decrypt = chacha_neon,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 5 * CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }
-};
+EXPORT_SYMBOL(chacha_is_arch_optimized);
static int __init chacha_simd_mod_init(void)
{
- if (!cpu_have_named_feature(ASIMD))
- return 0;
-
- static_branch_enable(&have_neon);
-
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
+ if (cpu_have_named_feature(ASIMD))
+ static_branch_enable(&have_neon);
+ return 0;
}
+arch_initcall(chacha_simd_mod_init);
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && cpu_have_named_feature(ASIMD))
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM64 optimized)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-neon");
diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig
index 545fc0e12422..0189686de3a1 100644
--- a/arch/mips/crypto/Kconfig
+++ b/arch/mips/crypto/Kconfig
@@ -56,13 +56,7 @@ config CRYPTO_SHA512_OCTEON
config CRYPTO_CHACHA_MIPS
tristate
depends on CPU_MIPS32_R2
- select CRYPTO_SKCIPHER
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: MIPS32r2
endmenu
diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c
index f6fc2e1079a1..334ecb29fb8f 100644
--- a/arch/mips/crypto/chacha-glue.c
+++ b/arch/mips/crypto/chacha-glue.c
@@ -1,15 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * MIPS accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
+ * ChaCha and HChaCha functions (MIPS optimized)
*
* Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*/
-#include <asm/byteorder.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
+#include <crypto/chacha.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -20,127 +16,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
EXPORT_SYMBOL(hchacha_block_arch);
-static int chacha_mips_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
+bool chacha_is_arch_optimized(void)
{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, ctx->nrounds);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_mips(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_mips_stream_xor(req, ctx, req->iv);
+ return true;
}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
-static int xchacha_mips(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- hchacha_block(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_mips_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_mips,
- .decrypt = chacha_mips,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_mips,
- .decrypt = xchacha_mips,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-mips",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_mips,
- .decrypt = xchacha_mips,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (MIPS optimized)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-mips");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-mips");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-mips");
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 370db8192ce6..47dccdd49637 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -95,17 +95,9 @@ config CRYPTO_AES_GCM_P10
config CRYPTO_CHACHA20_P10
tristate
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
- select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: PowerPC64
- - Power10 or later
- - Little-endian
config CRYPTO_POLY1305_P10
tristate "Hash functions: Poly1305 (P10 or later)"
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
index d8796decc1fb..9982929573ad 100644
--- a/arch/powerpc/crypto/chacha-p10-glue.c
+++ b/arch/powerpc/crypto/chacha-p10-glue.c
@@ -1,15 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
+ * ChaCha stream cipher (P10 accelerated)
*
* Copyright 2023- IBM Corp. All rights reserved.
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
+#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
@@ -78,144 +75,20 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
}
EXPORT_SYMBOL(chacha_crypt_arch);
-static int chacha_p10_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
+bool chacha_is_arch_optimized(void)
{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
- if (err)
- return err;
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = rounddown(nbytes, walk.stride);
-
- if (!crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- vsx_begin();
- chacha_p10_do_8x(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- vsx_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int chacha_p10(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_p10_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_p10(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_arch(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_p10_stream_xor(req, &subctx, real_iv);
+ return static_key_enabled(&have_p10);
}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_p10,
- .decrypt = chacha_p10,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_p10,
- .decrypt = xchacha_p10,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-p10",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_p10,
- .decrypt = xchacha_p10,
- }
-};
+EXPORT_SYMBOL(chacha_is_arch_optimized);
static int __init chacha_p10_init(void)
{
- if (!cpu_has_feature(CPU_FTR_ARCH_31))
- return 0;
-
- static_branch_enable(&have_p10);
-
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ static_branch_enable(&have_p10);
+ return 0;
}
+arch_initcall(chacha_p10_init);
-static void __exit chacha_p10_exit(void)
-{
- if (!static_branch_likely(&have_p10))
- return;
-
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_p10_init);
-module_exit(chacha_p10_exit);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
+MODULE_DESCRIPTION("ChaCha stream cipher (P10 accelerated)");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-p10");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-p10");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-p10");
diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig
index c67095a3d669..6392e1e11bc9 100644
--- a/arch/riscv/crypto/Kconfig
+++ b/arch/riscv/crypto/Kconfig
@@ -19,14 +19,11 @@ config CRYPTO_AES_RISCV64
- Zvkg vector crypto extension (XTS)
config CRYPTO_CHACHA_RISCV64
- tristate "Ciphers: ChaCha"
+ tristate
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
- select CRYPTO_SKCIPHER
- help
- Length-preserving ciphers: ChaCha20 stream cipher algorithm
-
- Architecture: riscv64 using:
- - Zvkb vector crypto extension
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC
+ default CRYPTO_LIB_CHACHA_INTERNAL
config CRYPTO_GHASH_RISCV64
tristate "Hash functions: GHASH"
diff --git a/arch/riscv/crypto/chacha-riscv64-glue.c b/arch/riscv/crypto/chacha-riscv64-glue.c
index 10b46f36375a..ccaab0dea383 100644
--- a/arch/riscv/crypto/chacha-riscv64-glue.c
+++ b/arch/riscv/crypto/chacha-riscv64-glue.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * ChaCha20 using the RISC-V vector crypto extensions
+ * ChaCha stream cipher (RISC-V optimized)
*
* Copyright (C) 2023 SiFive, Inc.
* Author: Jerry Shih <jerry.shih@sifive.com>
@@ -8,94 +8,62 @@
#include <asm/simd.h>
#include <asm/vector.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <linux/linkage.h>
#include <linux/module.h>
-asmlinkage void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out,
- size_t len, const u32 iv[4]);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_zvkb);
-static int riscv64_chacha20_crypt(struct skcipher_request *req)
+asmlinkage void chacha_zvkb(u32 state[16], const u8 *in, u8 *out,
+ size_t nblocks, int nrounds);
+
+void hchacha_block_arch(const u32 *state, u32 *out, int nrounds)
{
- u32 iv[CHACHA_IV_SIZE / sizeof(u32)];
- u8 block_buffer[CHACHA_BLOCK_SIZE];
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- unsigned int tail_bytes;
- int err;
+ hchacha_block_generic(state, out, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
- iv[0] = get_unaligned_le32(req->iv);
- iv[1] = get_unaligned_le32(req->iv + 4);
- iv[2] = get_unaligned_le32(req->iv + 8);
- iv[3] = get_unaligned_le32(req->iv + 12);
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+ int nrounds)
+{
+ u8 block_buffer[CHACHA_BLOCK_SIZE];
+ unsigned int full_blocks = bytes / CHACHA_BLOCK_SIZE;
+ unsigned int tail_bytes = bytes % CHACHA_BLOCK_SIZE;
- err = skcipher_walk_virt(&walk, req, false);
- while (walk.nbytes) {
- nbytes = walk.nbytes & ~(CHACHA_BLOCK_SIZE - 1);
- tail_bytes = walk.nbytes & (CHACHA_BLOCK_SIZE - 1);
- kernel_vector_begin();
- if (nbytes) {
- chacha20_zvkb(ctx->key, walk.src.virt.addr,
- walk.dst.virt.addr, nbytes, iv);
- iv[0] += nbytes / CHACHA_BLOCK_SIZE;
- }
- if (walk.nbytes == walk.total && tail_bytes > 0) {
- memcpy(block_buffer, walk.src.virt.addr + nbytes,
- tail_bytes);
- chacha20_zvkb(ctx->key, block_buffer, block_buffer,
- CHACHA_BLOCK_SIZE, iv);
- memcpy(walk.dst.virt.addr + nbytes, block_buffer,
- tail_bytes);
- tail_bytes = 0;
- }
- kernel_vector_end();
+ if (!static_branch_likely(&use_zvkb) || !crypto_simd_usable())
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
- err = skcipher_walk_done(&walk, tail_bytes);
+ kernel_vector_begin();
+ if (full_blocks) {
+ chacha_zvkb(state, src, dst, full_blocks, nrounds);
+ src += full_blocks * CHACHA_BLOCK_SIZE;
+ dst += full_blocks * CHACHA_BLOCK_SIZE;
}
-
- return err;
+ if (tail_bytes) {
+ memcpy(block_buffer, src, tail_bytes);
+ chacha_zvkb(state, block_buffer, block_buffer, 1, nrounds);
+ memcpy(dst, block_buffer, tail_bytes);
+ }
+ kernel_vector_end();
}
+EXPORT_SYMBOL(chacha_crypt_arch);
-static struct skcipher_alg riscv64_chacha_alg = {
- .setkey = chacha20_setkey,
- .encrypt = riscv64_chacha20_crypt,
- .decrypt = riscv64_chacha20_crypt,
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .base = {
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct chacha_ctx),
- .cra_priority = 300,
- .cra_name = "chacha20",
- .cra_driver_name = "chacha20-riscv64-zvkb",
- .cra_module = THIS_MODULE,
- },
-};
+bool chacha_is_arch_optimized(void)
+{
+ return static_key_enabled(&use_zvkb);
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
static int __init riscv64_chacha_mod_init(void)
{
if (riscv_isa_extension_available(NULL, ZVKB) &&
riscv_vector_vlen() >= 128)
- return crypto_register_skcipher(&riscv64_chacha_alg);
-
- return -ENODEV;
+ static_branch_enable(&use_zvkb);
+ return 0;
}
+arch_initcall(riscv64_chacha_mod_init);
-static void __exit riscv64_chacha_mod_exit(void)
-{
- crypto_unregister_skcipher(&riscv64_chacha_alg);
-}
-
-module_init(riscv64_chacha_mod_init);
-module_exit(riscv64_chacha_mod_exit);
-
-MODULE_DESCRIPTION("ChaCha20 (RISC-V accelerated)");
+MODULE_DESCRIPTION("ChaCha stream cipher (RISC-V optimized)");
MODULE_AUTHOR("Jerry Shih <jerry.shih@sifive.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/riscv/crypto/chacha-riscv64-zvkb.S b/arch/riscv/crypto/chacha-riscv64-zvkb.S
index bf057737ac69..ab4423b3880e 100644
--- a/arch/riscv/crypto/chacha-riscv64-zvkb.S
+++ b/arch/riscv/crypto/chacha-riscv64-zvkb.S
@@ -46,11 +46,11 @@
.text
.option arch, +zvkb
-#define KEYP a0
+#define STATEP a0
#define INP a1
#define OUTP a2
-#define LEN a3
-#define IVP a4
+#define NBLOCKS a3
+#define NROUNDS a4
#define CONSTS0 a5
#define CONSTS1 a6
@@ -59,7 +59,7 @@
#define TMP t1
#define VL t2
#define STRIDE t3
-#define NROUNDS t4
+#define ROUND_CTR t4
#define KEY0 s0
#define KEY1 s1
#define KEY2 s2
@@ -132,14 +132,16 @@
vror.vi \b3, \b3, 32 - 7
.endm
-// void chacha20_zvkb(const u32 key[8], const u8 *in, u8 *out, size_t len,
-// const u32 iv[4]);
+// void chacha_zvkb(u32 state[16], const u8 *in, u8 *out, size_t nblocks,
+// int nrounds);
//
-// |len| must be nonzero and a multiple of 64 (CHACHA_BLOCK_SIZE).
-// The counter is treated as 32-bit, following the RFC7539 convention.
-SYM_FUNC_START(chacha20_zvkb)
- srli LEN, LEN, 6 // Bytes to blocks
-
+// |nblocks| is the number of 64-byte blocks to process, and must be nonzero.
+//
+// |state| gives the ChaCha state matrix, including the 32-bit counter in
+// state[12] following the RFC7539 convention; note that this differs from the
+// original Salsa20 paper which uses a 64-bit counter in state[12..13]. The
+// updated 32-bit counter is written back to state[12] before returning.
+SYM_FUNC_START(chacha_zvkb)
addi sp, sp, -96
sd s0, 0(sp)
sd s1, 8(sp)
@@ -157,26 +159,26 @@ SYM_FUNC_START(chacha20_zvkb)
li STRIDE, 64
// Set up the initial state matrix in scalar registers.
- li CONSTS0, 0x61707865 // "expa" little endian
- li CONSTS1, 0x3320646e // "nd 3" little endian
- li CONSTS2, 0x79622d32 // "2-by" little endian
- li CONSTS3, 0x6b206574 // "te k" little endian
- lw KEY0, 0(KEYP)
- lw KEY1, 4(KEYP)
- lw KEY2, 8(KEYP)
- lw KEY3, 12(KEYP)
- lw KEY4, 16(KEYP)
- lw KEY5, 20(KEYP)
- lw KEY6, 24(KEYP)
- lw KEY7, 28(KEYP)
- lw COUNTER, 0(IVP)
- lw NONCE0, 4(IVP)
- lw NONCE1, 8(IVP)
- lw NONCE2, 12(IVP)
+ lw CONSTS0, 0(STATEP)
+ lw CONSTS1, 4(STATEP)
+ lw CONSTS2, 8(STATEP)
+ lw CONSTS3, 12(STATEP)
+ lw KEY0, 16(STATEP)
+ lw KEY1, 20(STATEP)
+ lw KEY2, 24(STATEP)
+ lw KEY3, 28(STATEP)
+ lw KEY4, 32(STATEP)
+ lw KEY5, 36(STATEP)
+ lw KEY6, 40(STATEP)
+ lw KEY7, 44(STATEP)
+ lw COUNTER, 48(STATEP)
+ lw NONCE0, 52(STATEP)
+ lw NONCE1, 56(STATEP)
+ lw NONCE2, 60(STATEP)
.Lblock_loop:
// Set vl to the number of blocks to process in this iteration.
- vsetvli VL, LEN, e32, m1, ta, ma
+ vsetvli VL, NBLOCKS, e32, m1, ta, ma
// Set up the initial state matrix for the next VL blocks in v0-v15.
// v{i} holds the i'th 32-bit word of the state matrix for all blocks.
@@ -203,16 +205,16 @@ SYM_FUNC_START(chacha20_zvkb)
// v{16+i} holds the i'th 32-bit word for all blocks.
vlsseg8e32.v v16, (INP), STRIDE
- li NROUNDS, 20
+ mv ROUND_CTR, NROUNDS
.Lnext_doubleround:
- addi NROUNDS, NROUNDS, -2
+ addi ROUND_CTR, ROUND_CTR, -2
// column round
chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \
v2, v6, v10, v14, v3, v7, v11, v15
// diagonal round
chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \
v2, v7, v8, v13, v3, v4, v9, v14
- bnez NROUNDS, .Lnext_doubleround
+ bnez ROUND_CTR, .Lnext_doubleround
// Load the second half of the input data for each block into v24-v31.
// v{24+i} holds the {8+i}'th 32-bit word for all blocks.
@@ -271,12 +273,13 @@ SYM_FUNC_START(chacha20_zvkb)
// Update the counter, the remaining number of blocks, and the input and
// output pointers according to the number of blocks processed (VL).
add COUNTER, COUNTER, VL
- sub LEN, LEN, VL
+ sub NBLOCKS, NBLOCKS, VL
slli TMP, VL, 6
add OUTP, OUTP, TMP
add INP, INP, TMP
- bnez LEN, .Lblock_loop
+ bnez NBLOCKS, .Lblock_loop
+ sw COUNTER, 48(STATEP)
ld s0, 0(sp)
ld s1, 8(sp)
ld s2, 16(sp)
@@ -291,4 +294,4 @@ SYM_FUNC_START(chacha20_zvkb)
ld s11, 88(sp)
addi sp, sp, 96
ret
-SYM_FUNC_END(chacha20_zvkb)
+SYM_FUNC_END(chacha_zvkb)
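The lw/sw offsets above follow the standard ChaCha state layout, which the C glue now fills in before the call. As a reference sketch (the field descriptions are annotations, not names from the source):

    /*
     * u32 state[16] as consumed by chacha_zvkb():
     *
     *   state[0..3]    "expand 32-byte k" constants  (byte offsets  0..15)
     *   state[4..11]   256-bit key                   (byte offsets 16..47)
     *   state[12]      32-bit block counter          (byte offset  48;
     *                                                 updated value stored back)
     *   state[13..15]  96-bit nonce                  (byte offsets 52..63)
     */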
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index 8c4db8b64fa2..055b08f259ab 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -110,16 +110,9 @@ config CRYPTO_DES_S390
config CRYPTO_CHACHA_S390
tristate
depends on S390
- select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)
-
- Architecture: s390
-
- It is available as of z13.
config CRYPTO_HMAC_S390
tristate "Keyed-hash message authentication code: HMAC"
diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c
index 920e9f0941e7..b3ffaa555385 100644
--- a/arch/s390/crypto/chacha-glue.c
+++ b/arch/s390/crypto/chacha-glue.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * s390 ChaCha stream cipher.
+ * ChaCha stream cipher (s390 optimized)
*
* Copyright IBM Corp. 2021
*/
@@ -8,9 +8,7 @@
#define KMSG_COMPONENT "chacha_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
+#include <crypto/chacha.h>
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,50 +16,6 @@
#include <asm/fpu.h>
#include "chacha-s390.h"
-static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
- unsigned int nbytes, const u32 *key,
- u32 *counter)
-{
- DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
-
- kernel_fpu_begin(&vxstate, KERNEL_VXR);
- chacha20_vx(dst, src, nbytes, key, counter);
- kernel_fpu_end(&vxstate, KERNEL_VXR);
-
- *counter += round_up(nbytes, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
-}
-
-static int chacha20_s390(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 state[CHACHA_STATE_WORDS] __aligned(16);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int rc;
-
- rc = skcipher_walk_virt(&walk, req, false);
- chacha_init(state, ctx->key, req->iv);
-
- while (walk.nbytes > 0) {
- nbytes = walk.nbytes;
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (nbytes <= CHACHA_BLOCK_SIZE) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- chacha20_crypt_s390(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- &state[4], &state[12]);
- }
- rc = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
- return rc;
-}
-
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
/* TODO: implement hchacha_block_arch() in assembly */
@@ -76,49 +30,26 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
* it cannot handle a block of data or less, but otherwise
* it can handle data of arbitrary size
*/
- if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx())
+ if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !cpu_has_vx()) {
chacha_crypt_generic(state, dst, src, bytes, nrounds);
- else
- chacha20_crypt_s390(state, dst, src, bytes,
- &state[4], &state[12]);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
+ } else {
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
-static struct skcipher_alg chacha_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-s390",
- .base.cra_priority = 900,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
+ kernel_fpu_begin(&vxstate, KERNEL_VXR);
+ chacha20_vx(dst, src, bytes, &state[4], &state[12]);
+ kernel_fpu_end(&vxstate, KERNEL_VXR);
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha20_s390,
- .decrypt = chacha20_s390,
+ state[12] += round_up(bytes, CHACHA_BLOCK_SIZE) /
+ CHACHA_BLOCK_SIZE;
}
-};
-
-static int __init chacha_mod_init(void)
-{
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs)) : 0;
}
+EXPORT_SYMBOL(chacha_crypt_arch);
-static void __exit chacha_mod_fini(void)
+bool chacha_is_arch_optimized(void)
{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
- crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
+ return cpu_has_vx();
}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
-module_cpu_feature_match(S390_CPU_FEATURE_VXRS, chacha_mod_init);
-module_exit(chacha_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha20 stream cipher");
+MODULE_DESCRIPTION("ChaCha stream cipher (s390 optimized)");
MODULE_LICENSE("GPL v2");
-
-MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
index 6d20a6ce0507..4d06fd3c8dfe 100644
--- a/arch/x86/Kconfig.assembler
+++ b/arch/x86/Kconfig.assembler
@@ -6,15 +6,6 @@ config AS_AVX512
help
Supported by binutils >= 2.25 and LLVM integrated assembler
-config AS_SHA1_NI
- def_bool $(as-instr,sha1msg1 %xmm0$(comma)%xmm1)
- help
- Supported by binutils >= 2.24 and LLVM integrated assembler
-
-config AS_SHA256_NI
- def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1)
- help
- Supported by binutils >= 2.24 and LLVM integrated assembler
config AS_TPAUSE
def_bool $(as-instr,tpause %ecx)
help
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 3d948f10c94c..0cec75926380 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -23,7 +23,6 @@ config CRYPTO_AES_NI_INTEL
select CRYPTO_LIB_GF128MUL
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
help
Block cipher: AES cipher algorithms
AEAD cipher: AES with GCM
@@ -64,7 +63,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_CAMELLIA_X86_64
- select CRYPTO_SIMD
imply CRYPTO_XTS
help
Length-preserving ciphers: Camellia with ECB and CBC modes
@@ -90,7 +88,6 @@ config CRYPTO_CAST5_AVX_X86_64
select CRYPTO_SKCIPHER
select CRYPTO_CAST5
select CRYPTO_CAST_COMMON
- select CRYPTO_SIMD
imply CRYPTO_CTR
help
Length-preserving ciphers: CAST5 (CAST-128) cipher algorithm
@@ -107,7 +104,6 @@ config CRYPTO_CAST6_AVX_X86_64
select CRYPTO_SKCIPHER
select CRYPTO_CAST6
select CRYPTO_CAST_COMMON
- select CRYPTO_SIMD
imply CRYPTO_XTS
imply CRYPTO_CTR
help
@@ -138,7 +134,6 @@ config CRYPTO_SERPENT_SSE2_X86_64
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SERPENT
- select CRYPTO_SIMD
imply CRYPTO_CTR
help
Length-preserving ciphers: Serpent cipher algorithm
@@ -154,7 +149,6 @@ config CRYPTO_SERPENT_SSE2_586
depends on X86 && !64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SERPENT
- select CRYPTO_SIMD
imply CRYPTO_CTR
help
Length-preserving ciphers: Serpent cipher algorithm
@@ -170,7 +164,6 @@ config CRYPTO_SERPENT_AVX_X86_64
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
select CRYPTO_SERPENT
- select CRYPTO_SIMD
imply CRYPTO_XTS
imply CRYPTO_CTR
help
@@ -199,7 +192,6 @@ config CRYPTO_SM4_AESNI_AVX_X86_64
tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_SM4
help
@@ -220,7 +212,6 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
tristate "Ciphers: SM4 with modes: ECB, CBC, CTR (AES-NI/AVX2)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_SM4
select CRYPTO_SM4_AESNI_AVX_X86_64
@@ -279,7 +270,6 @@ config CRYPTO_TWOFISH_AVX_X86_64
tristate "Ciphers: Twofish with modes: ECB, CBC (AVX)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_TWOFISH_X86_64_3WAY
@@ -297,7 +287,6 @@ config CRYPTO_ARIA_AESNI_AVX_X86_64
tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX/GFNI)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_ARIA
help
@@ -315,7 +304,6 @@ config CRYPTO_ARIA_AESNI_AVX2_X86_64
tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX2/GFNI)"
depends on X86 && 64BIT
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_ARIA
select CRYPTO_ARIA_AESNI_AVX_X86_64
@@ -332,9 +320,8 @@ config CRYPTO_ARIA_AESNI_AVX2_X86_64
config CRYPTO_ARIA_GFNI_AVX512_X86_64
tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)"
- depends on X86 && 64BIT && AS_AVX512 && AS_GFNI
+ depends on X86 && 64BIT && AS_GFNI
select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
select CRYPTO_ALGAPI
select CRYPTO_ARIA
select CRYPTO_ARIA_AESNI_AVX_X86_64
@@ -352,24 +339,14 @@ config CRYPTO_ARIA_GFNI_AVX512_X86_64
config CRYPTO_CHACHA20_X86_64
tristate
depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: x86_64 using:
- - SSSE3 (Supplemental SSE3)
- - AVX2 (Advanced Vector Extensions 2)
- - AVX-512VL (Advanced Vector Extensions-512VL)
config CRYPTO_AEGIS128_AESNI_SSE2
tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE4.1)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_SIMD
help
AEGIS-128 AEAD algorithm
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 5d19f41bde58..2f22b381f244 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -43,8 +43,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
-chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha_glue.o
-chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o
+chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha-avx512vl-x86_64.o chacha_glue.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
@@ -56,12 +55,10 @@ aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o
endif
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o
-sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o
+sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ni_asm.o sha1_ssse3_glue.o
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
-sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
-sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o
+sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ni_asm.o sha256_ssse3_glue.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 26786e15abac..f1b6d40154e3 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -8,7 +8,6 @@
*/
#include <crypto/internal/aead.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -233,21 +232,18 @@ static struct aead_alg crypto_aegis128_aesni_alg = {
.chunksize = AEGIS128_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
.cra_priority = 400,
- .cra_name = "__aegis128",
- .cra_driver_name = "__aegis128-aesni",
+ .cra_name = "aegis128",
+ .cra_driver_name = "aegis128-aesni",
.cra_module = THIS_MODULE,
}
};
-static struct simd_aead_alg *simd_alg;
-
static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM4_1) ||
@@ -255,13 +251,12 @@ static int __init crypto_aegis128_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
- &simd_alg);
+ return crypto_register_aead(&crypto_aegis128_aesni_alg);
}
static void __exit crypto_aegis128_aesni_module_exit(void)
{
- simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
+ crypto_unregister_aead(&crypto_aegis128_aesni_alg);
}
module_init(crypto_aegis128_aesni_module_init);
diff --git a/arch/x86/crypto/aes-ctr-avx-x86_64.S b/arch/x86/crypto/aes-ctr-avx-x86_64.S
index 1685d8b24b2c..bbbfd80f5a50 100644
--- a/arch/x86/crypto/aes-ctr-avx-x86_64.S
+++ b/arch/x86/crypto/aes-ctr-avx-x86_64.S
@@ -48,8 +48,7 @@
// using the following sets of CPU features:
// - AES-NI && AVX
// - VAES && AVX2
-// - VAES && (AVX10/256 || (AVX512BW && AVX512VL)) && BMI2
-// - VAES && (AVX10/512 || (AVX512BW && AVX512VL)) && BMI2
+// - VAES && AVX512BW && AVX512VL && BMI2
//
// See the function definitions at the bottom of the file for more information.
@@ -76,7 +75,6 @@
.text
// Move a vector between memory and a register.
-// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@@ -86,7 +84,6 @@
.endm
// Move a vector between registers.
-// The registers must be in the first 16 vector registers.
.macro _vmovdqa src, dst
.if VL < 64
vmovdqa \src, \dst
@@ -96,7 +93,7 @@
.endm
// Broadcast a 128-bit value from memory to all 128-bit lanes of a vector
-// register. The register operand must be in the first 16 vector registers.
+// register.
.macro _vbroadcast128 src, dst
.if VL == 16
vmovdqu \src, \dst
@@ -108,7 +105,6 @@
.endm
// XOR two vectors together.
-// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
.if VL < 64
vpxor \src1, \src2, \dst
@@ -199,8 +195,8 @@
// XOR each with the zero-th round key. Also update LE_CTR if !\final.
.macro _prepare_2_ctr_vecs is_xctr, i0, i1, final=0
.if \is_xctr
- .if USE_AVX10
- _vmovdqa LE_CTR, AESDATA\i0
+ .if USE_AVX512
+ vmovdqa64 LE_CTR, AESDATA\i0
vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i0
.else
vpxor XCTR_IV, LE_CTR, AESDATA\i0
@@ -208,7 +204,7 @@
.endif
vpaddq LE_CTR_INC1, LE_CTR, AESDATA\i1
- .if USE_AVX10
+ .if USE_AVX512
vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i1
.else
vpxor XCTR_IV, AESDATA\i1, AESDATA\i1
@@ -481,18 +477,12 @@
.Lxor_tail_partial_vec_0\@:
// XOR the remaining 1 <= LEN < VL bytes. It's easy if masked
// loads/stores are available; otherwise it's a bit harder...
-.if USE_AVX10
- .if VL <= 32
- mov $-1, %eax
- bzhi LEN, %eax, %eax
- kmovd %eax, %k1
- .else
+.if USE_AVX512
mov $-1, %rax
bzhi LEN64, %rax, %rax
kmovq %rax, %k1
- .endif
vmovdqu8 (SRC), AESDATA1{%k1}{z}
- _vpxor AESDATA1, AESDATA0, AESDATA0
+ vpxord AESDATA1, AESDATA0, AESDATA0
vmovdqu8 AESDATA0, (DST){%k1}
.else
.if VL == 32
@@ -554,7 +544,7 @@
// eliminates carries. |ctr| is the per-message block counter starting at 1.
.set VL, 16
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_ctr64_crypt_aesni_avx)
_aes_ctr_crypt 0
SYM_FUNC_END(aes_ctr64_crypt_aesni_avx)
@@ -564,7 +554,7 @@ SYM_FUNC_END(aes_xctr_crypt_aesni_avx)
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
.set VL, 32
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2)
_aes_ctr_crypt 0
SYM_FUNC_END(aes_ctr64_crypt_vaes_avx2)
@@ -572,21 +562,12 @@ SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx2)
_aes_ctr_crypt 1
SYM_FUNC_END(aes_xctr_crypt_vaes_avx2)
-.set VL, 32
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_256)
- _aes_ctr_crypt 0
-SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_256)
-SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_256)
- _aes_ctr_crypt 1
-SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_256)
-
.set VL, 64
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_512)
+.set USE_AVX512, 1
+SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx512)
_aes_ctr_crypt 0
-SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_512)
-SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_512)
+SYM_FUNC_END(aes_ctr64_crypt_vaes_avx512)
+SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512)
_aes_ctr_crypt 1
-SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_512)
+SYM_FUNC_END(aes_xctr_crypt_vaes_avx512)
#endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ
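The USE_AVX512 tail path above leans on BMI2's bzhi to build a byte mask and on AVX512BW masked moves to avoid touching memory past the partial block. A userspace intrinsics sketch of the same trick — assumed compile flags -mavx512bw -mbmi2, not taken from the kernel source:

	#include <immintrin.h>
	#include <stddef.h>
	#include <stdint.h>

	/* XOR a keystream vector into a 1..63-byte tail, mirroring the
	 * bzhi/kmovq/vmovdqu8 sequence in .Lxor_tail_partial_vec_0. */
	static void xor_tail_masked(uint8_t *dst, const uint8_t *src,
				    size_t len, __m512i keystream)
	{
		/* Set the low 'len' bits of the mask, as bzhi does. */
		__mmask64 k = _cvtu64_mask64(_bzhi_u64(~0ULL, (unsigned int)len));
		/* Zero-masked load: bytes past 'len' are never read. */
		__m512i data = _mm512_maskz_loadu_epi8(k, src);

		data = _mm512_xor_si512(data, keystream);
		/* Masked store: bytes past 'len' are never written. */
		_mm512_mask_storeu_epi8(dst, k, data);
	}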
diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S
index 93ba0ddbe009..db79cdf81588 100644
--- a/arch/x86/crypto/aes-xts-avx-x86_64.S
+++ b/arch/x86/crypto/aes-xts-avx-x86_64.S
@@ -52,32 +52,25 @@
* different code, it uses a macro to generate several implementations that
* share similar source code but are targeted at different CPUs, listed below:
*
- * AES-NI + AVX
+ * AES-NI && AVX
* - 128-bit vectors (1 AES block per vector)
* - VEX-coded instructions
* - xmm0-xmm15
* - This is for older CPUs that lack VAES but do have AVX.
*
- * VAES + VPCLMULQDQ + AVX2
+ * VAES && VPCLMULQDQ && AVX2
* - 256-bit vectors (2 AES blocks per vector)
* - VEX-coded instructions
* - ymm0-ymm15
- * - This is for CPUs that have VAES but lack AVX512 or AVX10,
- * e.g. Intel's Alder Lake and AMD's Zen 3.
+ * - This is for CPUs that have VAES but either lack AVX512 (e.g. Intel's
+ * Alder Lake and AMD's Zen 3) or downclock too eagerly when using zmm
+ * registers (e.g. Intel's Ice Lake).
*
- * VAES + VPCLMULQDQ + AVX10/256 + BMI2
- * - 256-bit vectors (2 AES blocks per vector)
+ * VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
+ * - 512-bit vectors (4 AES blocks per vector)
* - EVEX-coded instructions
- * - ymm0-ymm31
- * - This is for CPUs that have AVX512 but where using zmm registers causes
- * downclocking, and for CPUs that have AVX10/256 but not AVX10/512.
- * - By "AVX10/256" we really mean (AVX512BW + AVX512VL) || AVX10/256.
- * To avoid confusion with 512-bit, we just write AVX10/256.
- *
- * VAES + VPCLMULQDQ + AVX10/512 + BMI2
- * - Same as the previous one, but upgrades to 512-bit vectors
- * (4 AES blocks per vector) in zmm0-zmm31.
- * - This is for CPUs that have good AVX512 or AVX10/512 support.
+ * - zmm0-zmm31
+ * - This is for CPUs that have good AVX512 support.
*
* This file doesn't have an implementation for AES-NI alone (without AVX), as
* the lack of VEX would make all the assembly code different.
@@ -107,9 +100,20 @@
// exists when there's a carry out of the low 64 bits of the tweak.
.quad 0x87, 1
+ // These are the shift amounts that are needed when multiplying by [x^0,
+ // x^1, x^2, x^3] to compute the first vector of tweaks when VL=64.
+ //
+ // The right shifts by 64 are expected to zeroize the destination.
+ // 'vpsrlvq' is indeed defined to do that; i.e. it doesn't truncate the
+ // amount to 64 & 63 = 0 like the 'shr' scalar shift instruction would.
+.Lrshift_amounts:
+ .byte 64, 64, 63, 63, 62, 62, 61, 61
+.Llshift_amounts:
+ .byte 0, 0, 1, 1, 2, 2, 3, 3
+
// This table contains constants for vpshufb and vpblendvb, used to
// handle variable byte shifts and blending during ciphertext stealing
- // on CPUs that don't support AVX10-style masking.
+ // on CPUs that don't support AVX512-style masking.
.Lcts_permute_table:
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
.byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
@@ -138,7 +142,7 @@
.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
_define_Vi \i
.endr
-.if USE_AVX10
+.if USE_AVX512
.irp i, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
_define_Vi \i
.endr
@@ -193,7 +197,7 @@
// keys to the *end* of this register range. I.e., AES-128 uses
// KEY5-KEY14, AES-192 uses KEY3-KEY14, and AES-256 uses KEY1-KEY14.
// (All also use KEY0 for the XOR-only "round" at the beginning.)
-.if USE_AVX10
+.if USE_AVX512
.set KEY1_XMM, %xmm16
.set KEY1, V16
.set KEY2_XMM, %xmm17
@@ -227,7 +231,6 @@
.endm
// Move a vector between memory and a register.
-// The register operand must be in the first 16 vector registers.
.macro _vmovdqu src, dst
.if VL < 64
vmovdqu \src, \dst
@@ -238,9 +241,9 @@
// Broadcast a 128-bit value into a vector.
.macro _vbroadcast128 src, dst
-.if VL == 16 && !USE_AVX10
+.if VL == 16
vmovdqu \src, \dst
-.elseif VL == 32 && !USE_AVX10
+.elseif VL == 32
vbroadcasti128 \src, \dst
.else
vbroadcasti32x4 \src, \dst
@@ -248,7 +251,6 @@
.endm
// XOR two vectors together.
-// Any register operands must be in the first 16 vector registers.
.macro _vpxor src1, src2, dst
.if VL < 64
vpxor \src1, \src2, \dst
@@ -259,7 +261,7 @@
// XOR three vectors together.
.macro _xor3 src1, src2, src3_and_dst
-.if USE_AVX10
+.if USE_AVX512
// vpternlogd with immediate 0x96 is a three-argument XOR.
vpternlogd $0x96, \src1, \src2, \src3_and_dst
.else
@@ -274,7 +276,7 @@
vpshufd $0x13, \src, \tmp
vpaddq \src, \src, \dst
vpsrad $31, \tmp, \tmp
-.if USE_AVX10
+.if USE_AVX512
vpternlogd $0x78, GF_POLY_XMM, \tmp, \dst
.else
vpand GF_POLY_XMM, \tmp, \tmp
@@ -303,52 +305,75 @@
// Given the first XTS tweak at (TWEAK), compute the first set of tweaks and
// store them in the vector registers TWEAK0-TWEAK3. Clobbers V0-V5.
.macro _compute_first_set_of_tweaks
- vmovdqu (TWEAK), TWEAK0_XMM
- _vbroadcast128 .Lgf_poly(%rip), GF_POLY
.if VL == 16
- // With VL=16, multiplying by x serially is fastest.
+ vmovdqu (TWEAK), TWEAK0_XMM
+ vmovdqu .Lgf_poly(%rip), GF_POLY
_next_tweak TWEAK0, %xmm0, TWEAK1
_next_tweak TWEAK1, %xmm0, TWEAK2
_next_tweak TWEAK2, %xmm0, TWEAK3
-.else
-.if VL == 32
- // Compute the second block of TWEAK0.
+.elseif VL == 32
+ vmovdqu (TWEAK), TWEAK0_XMM
+ vbroadcasti128 .Lgf_poly(%rip), GF_POLY
+
+ // Compute the first vector of tweaks.
_next_tweak TWEAK0_XMM, %xmm0, %xmm1
vinserti128 $1, %xmm1, TWEAK0, TWEAK0
-.elseif VL == 64
- // Compute the remaining blocks of TWEAK0.
- _next_tweak TWEAK0_XMM, %xmm0, %xmm1
- _next_tweak %xmm1, %xmm0, %xmm2
- _next_tweak %xmm2, %xmm0, %xmm3
- vinserti32x4 $1, %xmm1, TWEAK0, TWEAK0
- vinserti32x4 $2, %xmm2, TWEAK0, TWEAK0
- vinserti32x4 $3, %xmm3, TWEAK0, TWEAK0
-.endif
- // Compute TWEAK[1-3] from TWEAK0.
- vpsrlq $64 - 1*VL/16, TWEAK0, V0
- vpsrlq $64 - 2*VL/16, TWEAK0, V2
- vpsrlq $64 - 3*VL/16, TWEAK0, V4
+
+ // Compute the next three vectors of tweaks:
+ // TWEAK1 = TWEAK0 * [x^2, x^2]
+ // TWEAK2 = TWEAK0 * [x^4, x^4]
+ // TWEAK3 = TWEAK0 * [x^6, x^6]
+ vpsrlq $64 - 2, TWEAK0, V0
+ vpsrlq $64 - 4, TWEAK0, V2
+ vpsrlq $64 - 6, TWEAK0, V4
vpclmulqdq $0x01, GF_POLY, V0, V1
vpclmulqdq $0x01, GF_POLY, V2, V3
vpclmulqdq $0x01, GF_POLY, V4, V5
vpslldq $8, V0, V0
vpslldq $8, V2, V2
vpslldq $8, V4, V4
- vpsllq $1*VL/16, TWEAK0, TWEAK1
- vpsllq $2*VL/16, TWEAK0, TWEAK2
- vpsllq $3*VL/16, TWEAK0, TWEAK3
-.if USE_AVX10
- vpternlogd $0x96, V0, V1, TWEAK1
- vpternlogd $0x96, V2, V3, TWEAK2
- vpternlogd $0x96, V4, V5, TWEAK3
-.else
+ vpsllq $2, TWEAK0, TWEAK1
+ vpsllq $4, TWEAK0, TWEAK2
+ vpsllq $6, TWEAK0, TWEAK3
vpxor V0, TWEAK1, TWEAK1
vpxor V2, TWEAK2, TWEAK2
vpxor V4, TWEAK3, TWEAK3
vpxor V1, TWEAK1, TWEAK1
vpxor V3, TWEAK2, TWEAK2
vpxor V5, TWEAK3, TWEAK3
-.endif
+.else
+ vbroadcasti32x4 (TWEAK), TWEAK0
+ vbroadcasti32x4 .Lgf_poly(%rip), GF_POLY
+
+ // Compute the first vector of tweaks:
+ // TWEAK0 = broadcast128(TWEAK) * [x^0, x^1, x^2, x^3]
+ vpmovzxbq .Lrshift_amounts(%rip), V4
+ vpsrlvq V4, TWEAK0, V0
+ vpclmulqdq $0x01, GF_POLY, V0, V1
+ vpmovzxbq .Llshift_amounts(%rip), V4
+ vpslldq $8, V0, V0
+ vpsllvq V4, TWEAK0, TWEAK0
+ vpternlogd $0x96, V0, V1, TWEAK0
+
+ // Compute the next three vectors of tweaks:
+ // TWEAK1 = TWEAK0 * [x^4, x^4, x^4, x^4]
+ // TWEAK2 = TWEAK0 * [x^8, x^8, x^8, x^8]
+ // TWEAK3 = TWEAK0 * [x^12, x^12, x^12, x^12]
+ // x^8 only needs byte-aligned shifts, so optimize accordingly.
+ vpsrlq $64 - 4, TWEAK0, V0
+ vpsrldq $(64 - 8) / 8, TWEAK0, V2
+ vpsrlq $64 - 12, TWEAK0, V4
+ vpclmulqdq $0x01, GF_POLY, V0, V1
+ vpclmulqdq $0x01, GF_POLY, V2, V3
+ vpclmulqdq $0x01, GF_POLY, V4, V5
+ vpslldq $8, V0, V0
+ vpslldq $8, V4, V4
+ vpsllq $4, TWEAK0, TWEAK1
+ vpslldq $8 / 8, TWEAK0, TWEAK2
+ vpsllq $12, TWEAK0, TWEAK3
+ vpternlogd $0x96, V0, V1, TWEAK1
+ vpxord V3, TWEAK2, TWEAK2
+ vpternlogd $0x96, V4, V5, TWEAK3
.endif
.endm
@@ -474,26 +499,26 @@
lea OFFS-16(KEY, KEYLEN64, 4), KEY
// If all 32 SIMD registers are available, cache all the round keys.
-.if USE_AVX10
+.if USE_AVX512
cmp $24, KEYLEN
jl .Laes128\@
je .Laes192\@
- _vbroadcast128 -6*16(KEY), KEY1
- _vbroadcast128 -5*16(KEY), KEY2
+ vbroadcasti32x4 -6*16(KEY), KEY1
+ vbroadcasti32x4 -5*16(KEY), KEY2
.Laes192\@:
- _vbroadcast128 -4*16(KEY), KEY3
- _vbroadcast128 -3*16(KEY), KEY4
+ vbroadcasti32x4 -4*16(KEY), KEY3
+ vbroadcasti32x4 -3*16(KEY), KEY4
.Laes128\@:
- _vbroadcast128 -2*16(KEY), KEY5
- _vbroadcast128 -1*16(KEY), KEY6
- _vbroadcast128 0*16(KEY), KEY7
- _vbroadcast128 1*16(KEY), KEY8
- _vbroadcast128 2*16(KEY), KEY9
- _vbroadcast128 3*16(KEY), KEY10
- _vbroadcast128 4*16(KEY), KEY11
- _vbroadcast128 5*16(KEY), KEY12
- _vbroadcast128 6*16(KEY), KEY13
- _vbroadcast128 7*16(KEY), KEY14
+ vbroadcasti32x4 -2*16(KEY), KEY5
+ vbroadcasti32x4 -1*16(KEY), KEY6
+ vbroadcasti32x4 0*16(KEY), KEY7
+ vbroadcasti32x4 1*16(KEY), KEY8
+ vbroadcasti32x4 2*16(KEY), KEY9
+ vbroadcasti32x4 3*16(KEY), KEY10
+ vbroadcasti32x4 4*16(KEY), KEY11
+ vbroadcasti32x4 5*16(KEY), KEY12
+ vbroadcasti32x4 6*16(KEY), KEY13
+ vbroadcasti32x4 7*16(KEY), KEY14
.endif
.endm
@@ -521,7 +546,7 @@
// using the same key for all block(s). The round key is loaded from the
// appropriate register or memory location for round \i. May clobber \tmp.
.macro _vaes_1x enc, i, xmm_suffix, data, tmp
-.if USE_AVX10
+.if USE_AVX512
_vaes \enc, KEY\i\xmm_suffix, \data
.else
.ifnb \xmm_suffix
@@ -538,7 +563,7 @@
// appropriate register or memory location for round \i. In addition, does two
// steps of the computation of the next set of tweaks. May clobber V4 and V5.
.macro _vaes_4x enc, i
-.if USE_AVX10
+.if USE_AVX512
_tweak_step (2*(\i-5))
_vaes \enc, KEY\i, V0
_vaes \enc, KEY\i, V1
@@ -574,7 +599,7 @@
.irp i, 5,6,7,8,9,10,11,12,13
_vaes_1x \enc, \i, \xmm_suffix, \data, tmp=\tmp
.endr
-.if USE_AVX10
+.if USE_AVX512
vpxord KEY14\xmm_suffix, \tweak, \tmp
.else
.ifnb \xmm_suffix
@@ -617,11 +642,11 @@
// This is the main loop, en/decrypting 4*VL bytes per iteration.
// XOR each source block with its tweak and the zero-th round key.
-.if USE_AVX10
- _vmovdqu 0*VL(SRC), V0
- _vmovdqu 1*VL(SRC), V1
- _vmovdqu 2*VL(SRC), V2
- _vmovdqu 3*VL(SRC), V3
+.if USE_AVX512
+ vmovdqu8 0*VL(SRC), V0
+ vmovdqu8 1*VL(SRC), V1
+ vmovdqu8 2*VL(SRC), V2
+ vmovdqu8 3*VL(SRC), V3
vpternlogd $0x96, TWEAK0, KEY0, V0
vpternlogd $0x96, TWEAK1, KEY0, V1
vpternlogd $0x96, TWEAK2, KEY0, V2
@@ -654,7 +679,7 @@
// Reduce latency by doing the XOR before the vaesenclast, utilizing the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a)
// (and likewise for vaesdeclast).
-.if USE_AVX10
+.if USE_AVX512
_tweak_step 18
_tweak_step 19
vpxord TWEAK0, KEY14, V4
@@ -762,7 +787,7 @@
_aes_crypt \enc, _XMM, TWEAK1_XMM, %xmm0, tmp=%xmm1
.endif
-.if USE_AVX10
+.if USE_AVX512
// Create a mask that has the first LEN bits set.
mov $-1, %r9d
bzhi LEN, %r9d, %r9d
@@ -811,7 +836,7 @@
// u8 iv[AES_BLOCK_SIZE]);
//
// Encrypt |iv| using the AES key |tweak_key| to get the first tweak. Assumes
-// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX10.
+// that the CPU supports AES-NI and AVX, but not necessarily VAES or AVX512.
SYM_TYPED_FUNC_START(aes_xts_encrypt_iv)
.set TWEAK_KEY, %rdi
.set IV, %rsi
@@ -853,7 +878,7 @@ SYM_FUNC_END(aes_xts_encrypt_iv)
// multiple of 16, then this function updates |tweak| to contain the next tweak.
.set VL, 16
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_xts_encrypt_aesni_avx)
_aes_xts_crypt 1
SYM_FUNC_END(aes_xts_encrypt_aesni_avx)
@@ -863,7 +888,7 @@ SYM_FUNC_END(aes_xts_decrypt_aesni_avx)
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
.set VL, 32
-.set USE_AVX10, 0
+.set USE_AVX512, 0
SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2)
_aes_xts_crypt 1
SYM_FUNC_END(aes_xts_encrypt_vaes_avx2)
@@ -871,21 +896,12 @@ SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx2)
_aes_xts_crypt 0
SYM_FUNC_END(aes_xts_decrypt_vaes_avx2)
-.set VL, 32
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_256)
- _aes_xts_crypt 1
-SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_256)
-SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_256)
- _aes_xts_crypt 0
-SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_256)
-
.set VL, 64
-.set USE_AVX10, 1
-SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx10_512)
+.set USE_AVX512, 1
+SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx512)
_aes_xts_crypt 1
-SYM_FUNC_END(aes_xts_encrypt_vaes_avx10_512)
-SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx10_512)
+SYM_FUNC_END(aes_xts_encrypt_vaes_avx512)
+SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512)
_aes_xts_crypt 0
-SYM_FUNC_END(aes_xts_decrypt_vaes_avx10_512)
+SYM_FUNC_END(aes_xts_decrypt_vaes_avx512)
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
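For reference, everything the tweak machinery above computes reduces to repeated multiplication by x in GF(2^128) with the reduction polynomial x^128 + x^7 + x^2 + x + 1, i.e. the 0x87 constant in .Lgf_poly. A scalar sketch of one step — the vector code computes several steps at once via shifts plus vpclmulqdq, but the per-128-bit-lane math is this:

	#include <stdint.h>

	/* 128-bit XTS tweak as two little-endian 64-bit halves. */
	struct xts_tweak { uint64_t lo, hi; };

	/* Multiply the tweak by x in GF(2^128): shift left one bit and,
	 * when a carry falls out of bit 127, fold it back in as 0x87. */
	static void next_tweak(struct xts_tweak *t)
	{
		uint64_t carry = t->hi >> 63;

		t->hi = (t->hi << 1) | (t->lo >> 63);
		t->lo = (t->lo << 1) ^ (carry * 0x87);
	}

Applying this twice yields the [x^2, x^2] multipliers used for TWEAK1 in the VL=32 path, and four applications yield the x^4 multipliers the VL=64 path computes for TWEAK1.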
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index bc655d794a95..061b1ced93c5 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -566,10 +566,9 @@ static struct crypto_alg aesni_cipher_alg = {
static struct skcipher_alg aesni_skciphers[] = {
{
.base = {
- .cra_name = "__ecb(aes)",
- .cra_driver_name = "__ecb-aes-aesni",
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -581,10 +580,9 @@ static struct skcipher_alg aesni_skciphers[] = {
.decrypt = ecb_decrypt,
}, {
.base = {
- .cra_name = "__cbc(aes)",
- .cra_driver_name = "__cbc-aes-aesni",
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -597,10 +595,9 @@ static struct skcipher_alg aesni_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__cts(cbc(aes))",
- .cra_driver_name = "__cts-cbc-aes-aesni",
+ .cra_name = "cts(cbc(aes))",
+ .cra_driver_name = "cts-cbc-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -615,10 +612,9 @@ static struct skcipher_alg aesni_skciphers[] = {
#ifdef CONFIG_X86_64
}, {
.base = {
- .cra_name = "__ctr(aes)",
- .cra_driver_name = "__ctr-aes-aesni",
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-aesni",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -633,10 +629,9 @@ static struct skcipher_alg aesni_skciphers[] = {
#endif
}, {
.base = {
- .cra_name = "__xts(aes)",
- .cra_driver_name = "__xts-aes-aesni",
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-aesni",
.cra_priority = 401,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = XTS_AES_CTX_SIZE,
.cra_module = THIS_MODULE,
@@ -651,9 +646,6 @@ static struct skcipher_alg aesni_skciphers[] = {
}
};
-static
-struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
-
#ifdef CONFIG_X86_64
asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key,
u8 iv[AES_BLOCK_SIZE]);
@@ -792,10 +784,9 @@ static int xctr_crypt_##suffix(struct skcipher_request *req) \
} \
\
static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
- .base.cra_name = "__xts(aes)", \
- .base.cra_driver_name = "__xts-aes-" driver_name_suffix, \
+ .base.cra_name = "xts(aes)", \
+ .base.cra_driver_name = "xts-aes-" driver_name_suffix, \
.base.cra_priority = priority, \
- .base.cra_flags = CRYPTO_ALG_INTERNAL, \
.base.cra_blocksize = AES_BLOCK_SIZE, \
.base.cra_ctxsize = XTS_AES_CTX_SIZE, \
.base.cra_module = THIS_MODULE, \
@@ -807,10 +798,9 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
.encrypt = xts_encrypt_##suffix, \
.decrypt = xts_decrypt_##suffix, \
}, { \
- .base.cra_name = "__ctr(aes)", \
- .base.cra_driver_name = "__ctr-aes-" driver_name_suffix, \
+ .base.cra_name = "ctr(aes)", \
+ .base.cra_driver_name = "ctr-aes-" driver_name_suffix, \
.base.cra_priority = priority, \
- .base.cra_flags = CRYPTO_ALG_INTERNAL, \
.base.cra_blocksize = 1, \
.base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \
.base.cra_module = THIS_MODULE, \
@@ -822,10 +812,9 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
.encrypt = ctr_crypt_##suffix, \
.decrypt = ctr_crypt_##suffix, \
}, { \
- .base.cra_name = "__xctr(aes)", \
- .base.cra_driver_name = "__xctr-aes-" driver_name_suffix, \
+ .base.cra_name = "xctr(aes)", \
+ .base.cra_driver_name = "xctr-aes-" driver_name_suffix, \
.base.cra_priority = priority, \
- .base.cra_flags = CRYPTO_ALG_INTERNAL, \
.base.cra_blocksize = 1, \
.base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \
.base.cra_module = THIS_MODULE, \
@@ -836,16 +825,12 @@ static struct skcipher_alg skcipher_algs_##suffix[] = {{ \
.setkey = aesni_skcipher_setkey, \
.encrypt = xctr_crypt_##suffix, \
.decrypt = xctr_crypt_##suffix, \
-}}; \
- \
-static struct simd_skcipher_alg * \
-simd_skcipher_algs_##suffix[ARRAY_SIZE(skcipher_algs_##suffix)]
+}}
DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500);
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600);
-DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_256, "vaes-avx10_256", 700);
-DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_512, "vaes-avx10_512", 800);
+DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
#endif
/* The common part of the x86_64 AES-GCM key struct */
@@ -1499,10 +1484,9 @@ static struct aead_alg aes_gcm_algs_##suffix[] = { { \
.chunksize = AES_BLOCK_SIZE, \
.maxauthsize = 16, \
.base = { \
- .cra_name = "__gcm(aes)", \
- .cra_driver_name = "__" generic_driver_name, \
+ .cra_name = "gcm(aes)", \
+ .cra_driver_name = generic_driver_name, \
.cra_priority = (priority), \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
.cra_blocksize = 1, \
.cra_ctxsize = (ctxsize), \
.cra_module = THIS_MODULE, \
@@ -1516,17 +1500,14 @@ static struct aead_alg aes_gcm_algs_##suffix[] = { { \
.chunksize = AES_BLOCK_SIZE, \
.maxauthsize = 16, \
.base = { \
- .cra_name = "__rfc4106(gcm(aes))", \
- .cra_driver_name = "__" rfc_driver_name, \
+ .cra_name = "rfc4106(gcm(aes))", \
+ .cra_driver_name = rfc_driver_name, \
.cra_priority = (priority), \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
.cra_blocksize = 1, \
.cra_ctxsize = (ctxsize), \
.cra_module = THIS_MODULE, \
}, \
-} }; \
- \
-static struct simd_aead_alg *aes_gcm_simdalgs_##suffix[2] \
+} }
/* aes_gcm_algs_aesni */
DEFINE_GCM_ALGS(aesni, /* no flags */ 0,
@@ -1556,14 +1537,12 @@ static int __init register_avx_algs(void)
if (!boot_cpu_has(X86_FEATURE_AVX))
return 0;
- err = simd_register_skciphers_compat(skcipher_algs_aesni_avx,
- ARRAY_SIZE(skcipher_algs_aesni_avx),
- simd_skcipher_algs_aesni_avx);
+ err = crypto_register_skciphers(skcipher_algs_aesni_avx,
+ ARRAY_SIZE(skcipher_algs_aesni_avx));
if (err)
return err;
- err = simd_register_aeads_compat(aes_gcm_algs_aesni_avx,
- ARRAY_SIZE(aes_gcm_algs_aesni_avx),
- aes_gcm_simdalgs_aesni_avx);
+ err = crypto_register_aeads(aes_gcm_algs_aesni_avx,
+ ARRAY_SIZE(aes_gcm_algs_aesni_avx));
if (err)
return err;
/*
@@ -1579,9 +1558,8 @@ static int __init register_avx_algs(void)
!boot_cpu_has(X86_FEATURE_PCLMULQDQ) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return 0;
- err = simd_register_skciphers_compat(skcipher_algs_vaes_avx2,
- ARRAY_SIZE(skcipher_algs_vaes_avx2),
- simd_skcipher_algs_vaes_avx2);
+ err = crypto_register_skciphers(skcipher_algs_vaes_avx2,
+ ARRAY_SIZE(skcipher_algs_vaes_avx2));
if (err)
return err;
@@ -1592,76 +1570,52 @@ static int __init register_avx_algs(void)
XFEATURE_MASK_AVX512, NULL))
return 0;
- err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_256,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_256),
- simd_skcipher_algs_vaes_avx10_256);
- if (err)
- return err;
- err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_256,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256),
- aes_gcm_simdalgs_vaes_avx10_256);
+ err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_256,
+ ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256));
if (err)
return err;
if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) {
int i;
- for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx10_512); i++)
- skcipher_algs_vaes_avx10_512[i].base.cra_priority = 1;
+ for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx512); i++)
+ skcipher_algs_vaes_avx512[i].base.cra_priority = 1;
for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512); i++)
aes_gcm_algs_vaes_avx10_512[i].base.cra_priority = 1;
}
- err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_512,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_512),
- simd_skcipher_algs_vaes_avx10_512);
+ err = crypto_register_skciphers(skcipher_algs_vaes_avx512,
+ ARRAY_SIZE(skcipher_algs_vaes_avx512));
if (err)
return err;
- err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_512,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512),
- aes_gcm_simdalgs_vaes_avx10_512);
+ err = crypto_register_aeads(aes_gcm_algs_vaes_avx10_512,
+ ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
if (err)
return err;
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
return 0;
}
+#define unregister_skciphers(A) \
+ if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \
+ crypto_unregister_skciphers((A), ARRAY_SIZE(A))
+#define unregister_aeads(A) \
+ if (refcount_read(&(A)[0].base.cra_refcnt) != 0) \
+ crypto_unregister_aeads((A), ARRAY_SIZE(A))
+
static void unregister_avx_algs(void)
{
- if (simd_skcipher_algs_aesni_avx[0])
- simd_unregister_skciphers(skcipher_algs_aesni_avx,
- ARRAY_SIZE(skcipher_algs_aesni_avx),
- simd_skcipher_algs_aesni_avx);
- if (aes_gcm_simdalgs_aesni_avx[0])
- simd_unregister_aeads(aes_gcm_algs_aesni_avx,
- ARRAY_SIZE(aes_gcm_algs_aesni_avx),
- aes_gcm_simdalgs_aesni_avx);
+ unregister_skciphers(skcipher_algs_aesni_avx);
+ unregister_aeads(aes_gcm_algs_aesni_avx);
#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
- if (simd_skcipher_algs_vaes_avx2[0])
- simd_unregister_skciphers(skcipher_algs_vaes_avx2,
- ARRAY_SIZE(skcipher_algs_vaes_avx2),
- simd_skcipher_algs_vaes_avx2);
- if (simd_skcipher_algs_vaes_avx10_256[0])
- simd_unregister_skciphers(skcipher_algs_vaes_avx10_256,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_256),
- simd_skcipher_algs_vaes_avx10_256);
- if (aes_gcm_simdalgs_vaes_avx10_256[0])
- simd_unregister_aeads(aes_gcm_algs_vaes_avx10_256,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256),
- aes_gcm_simdalgs_vaes_avx10_256);
- if (simd_skcipher_algs_vaes_avx10_512[0])
- simd_unregister_skciphers(skcipher_algs_vaes_avx10_512,
- ARRAY_SIZE(skcipher_algs_vaes_avx10_512),
- simd_skcipher_algs_vaes_avx10_512);
- if (aes_gcm_simdalgs_vaes_avx10_512[0])
- simd_unregister_aeads(aes_gcm_algs_vaes_avx10_512,
- ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512),
- aes_gcm_simdalgs_vaes_avx10_512);
+ unregister_skciphers(skcipher_algs_vaes_avx2);
+ unregister_skciphers(skcipher_algs_vaes_avx512);
+ unregister_aeads(aes_gcm_algs_vaes_avx10_256);
+ unregister_aeads(aes_gcm_algs_vaes_avx10_512);
#endif
}
#else /* CONFIG_X86_64 */
static struct aead_alg aes_gcm_algs_aesni[0];
-static struct simd_aead_alg *aes_gcm_simdalgs_aesni[0];
static int __init register_avx_algs(void)
{
@@ -1690,15 +1644,13 @@ static int __init aesni_init(void)
if (err)
return err;
- err = simd_register_skciphers_compat(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers),
- aesni_simd_skciphers);
+ err = crypto_register_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
if (err)
goto unregister_cipher;
- err = simd_register_aeads_compat(aes_gcm_algs_aesni,
- ARRAY_SIZE(aes_gcm_algs_aesni),
- aes_gcm_simdalgs_aesni);
+ err = crypto_register_aeads(aes_gcm_algs_aesni,
+ ARRAY_SIZE(aes_gcm_algs_aesni));
if (err)
goto unregister_skciphers;
@@ -1710,12 +1662,11 @@ static int __init aesni_init(void)
unregister_avx:
unregister_avx_algs();
- simd_unregister_aeads(aes_gcm_algs_aesni,
- ARRAY_SIZE(aes_gcm_algs_aesni),
- aes_gcm_simdalgs_aesni);
+ crypto_unregister_aeads(aes_gcm_algs_aesni,
+ ARRAY_SIZE(aes_gcm_algs_aesni));
unregister_skciphers:
- simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
- aesni_simd_skciphers);
+ crypto_unregister_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
unregister_cipher:
crypto_unregister_alg(&aesni_cipher_alg);
return err;
@@ -1723,11 +1674,10 @@ unregister_cipher:
static void __exit aesni_exit(void)
{
- simd_unregister_aeads(aes_gcm_algs_aesni,
- ARRAY_SIZE(aes_gcm_algs_aesni),
- aes_gcm_simdalgs_aesni);
- simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
- aesni_simd_skciphers);
+ crypto_unregister_aeads(aes_gcm_algs_aesni,
+ ARRAY_SIZE(aes_gcm_algs_aesni));
+ crypto_unregister_skciphers(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers));
crypto_unregister_alg(&aesni_cipher_alg);
unregister_avx_algs();
}
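The new unregister_skciphers()/unregister_aeads() macros work because register_avx_algs() can legitimately return before registering some of the arrays (missing CPU features), and a registered algorithm always holds a nonzero cra_refcnt. A sketch of the same guard for a single array — my_algs and my_driver_exit are illustrative names, not from this patch:

	/* Only unregister what init actually registered: the first
	 * entry's refcount doubles as a "was this array registered?"
	 * flag, replacing the old simd_*_algs bookkeeping pointers. */
	static void my_driver_exit(void)
	{
		if (refcount_read(&my_algs[0].base.cra_refcnt) != 0)
			crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
	}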
diff --git a/arch/x86/crypto/aria_aesni_avx2_glue.c b/arch/x86/crypto/aria_aesni_avx2_glue.c
index 87a11804fc77..b4bddcd58457 100644
--- a/arch/x86/crypto/aria_aesni_avx2_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx2_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -165,10 +164,9 @@ static int aria_avx2_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aria_algs[] = {
{
- .base.cra_name = "__ecb(aria)",
- .base.cra_driver_name = "__ecb-aria-avx2",
+ .base.cra_name = "ecb(aria)",
+ .base.cra_driver_name = "ecb-aria-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -178,11 +176,10 @@ static struct skcipher_alg aria_algs[] = {
.encrypt = aria_avx2_ecb_encrypt,
.decrypt = aria_avx2_ecb_decrypt,
}, {
- .base.cra_name = "__ctr(aria)",
- .base.cra_driver_name = "__ctr-aria-avx2",
+ .base.cra_name = "ctr(aria)",
+ .base.cra_driver_name = "ctr-aria-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
+ .base.cra_flags = CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -197,8 +194,6 @@ static struct skcipher_alg aria_algs[] = {
}
};
-static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
-
static int __init aria_avx2_init(void)
{
const char *feature_name;
@@ -233,15 +228,12 @@ static int __init aria_avx2_init(void)
aria_ops.aria_ctr_crypt_32way = aria_aesni_avx2_ctr_crypt_32way;
}
- return simd_register_skciphers_compat(aria_algs,
- ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
static void __exit aria_avx2_exit(void)
{
- simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
module_init(aria_avx2_init);
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index 4e1516b76669..ab9b38d05332 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -152,10 +151,9 @@ static int aria_avx_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aria_algs[] = {
{
- .base.cra_name = "__ecb(aria)",
- .base.cra_driver_name = "__ecb-aria-avx",
+ .base.cra_name = "ecb(aria)",
+ .base.cra_driver_name = "ecb-aria-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -165,10 +163,9 @@ static struct skcipher_alg aria_algs[] = {
.encrypt = aria_avx_ecb_encrypt,
.decrypt = aria_avx_ecb_decrypt,
}, {
- .base.cra_name = "__ctr(aria)",
- .base.cra_driver_name = "__ctr-aria-avx",
+ .base.cra_name = "ctr(aria)",
+ .base.cra_driver_name = "ctr-aria-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -184,8 +181,6 @@ static struct skcipher_alg aria_algs[] = {
}
};
-static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
-
static int __init aria_avx_init(void)
{
const char *feature_name;
@@ -213,15 +208,12 @@ static int __init aria_avx_init(void)
aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way;
}
- return simd_register_skciphers_compat(aria_algs,
- ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
static void __exit aria_avx_exit(void)
{
- simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
module_init(aria_avx_init);
diff --git a/arch/x86/crypto/aria_gfni_avx512_glue.c b/arch/x86/crypto/aria_gfni_avx512_glue.c
index f4a2208d2638..363cbf4399cc 100644
--- a/arch/x86/crypto/aria_gfni_avx512_glue.c
+++ b/arch/x86/crypto/aria_gfni_avx512_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/aria.h>
#include <linux/crypto.h>
#include <linux/err.h>
@@ -165,10 +164,9 @@ static int aria_avx512_init_tfm(struct crypto_skcipher *tfm)
static struct skcipher_alg aria_algs[] = {
{
- .base.cra_name = "__ecb(aria)",
- .base.cra_driver_name = "__ecb-aria-avx512",
+ .base.cra_name = "ecb(aria)",
+ .base.cra_driver_name = "ecb-aria-avx512",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = ARIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -178,11 +176,10 @@ static struct skcipher_alg aria_algs[] = {
.encrypt = aria_avx512_ecb_encrypt,
.decrypt = aria_avx512_ecb_decrypt,
}, {
- .base.cra_name = "__ctr(aria)",
- .base.cra_driver_name = "__ctr-aria-avx512",
+ .base.cra_name = "ctr(aria)",
+ .base.cra_driver_name = "ctr-aria-avx512",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
+ .base.cra_flags = CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aria_ctx),
.base.cra_module = THIS_MODULE,
@@ -197,8 +194,6 @@ static struct skcipher_alg aria_algs[] = {
}
};
-static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
-
static int __init aria_avx512_init(void)
{
const char *feature_name;
@@ -229,15 +224,12 @@ static int __init aria_avx512_init(void)
aria_ops.aria_decrypt_64way = aria_gfni_avx512_decrypt_64way;
aria_ops.aria_ctr_crypt_64way = aria_gfni_avx512_ctr_crypt_64way;
- return simd_register_skciphers_compat(aria_algs,
- ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ return crypto_register_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
static void __exit aria_avx512_exit(void)
{
- simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
- aria_simd_algs);
+ crypto_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs));
}
module_init(aria_avx512_init);
diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S
index b50b35ff1fdb..ac1c845445a4 100644
--- a/arch/x86/crypto/blake2s-core.S
+++ b/arch/x86/crypto/blake2s-core.S
@@ -29,7 +29,6 @@ SIGMA:
.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6
.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4
.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12
-#ifdef CONFIG_AS_AVX512
.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640
.align 64
SIGMA2:
@@ -43,7 +42,6 @@ SIGMA2:
.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10
.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14
.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9
-#endif /* CONFIG_AS_AVX512 */
.text
SYM_FUNC_START(blake2s_compress_ssse3)
@@ -174,7 +172,6 @@ SYM_FUNC_START(blake2s_compress_ssse3)
RET
SYM_FUNC_END(blake2s_compress_ssse3)
-#ifdef CONFIG_AS_AVX512
SYM_FUNC_START(blake2s_compress_avx512)
vmovdqu (%rdi),%xmm0
vmovdqu 0x10(%rdi),%xmm1
@@ -253,4 +250,3 @@ SYM_FUNC_START(blake2s_compress_avx512)
vzeroupper
RET
SYM_FUNC_END(blake2s_compress_avx512)
-#endif /* CONFIG_AS_AVX512 */
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index 0313f9673f56..00f84f29cc8c 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -41,8 +41,7 @@ void blake2s_compress(struct blake2s_state *state, const u8 *block,
SZ_4K / BLAKE2S_BLOCK_SIZE);
kernel_fpu_begin();
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- static_branch_likely(&blake2s_use_avx512))
+ if (static_branch_likely(&blake2s_use_avx512))
blake2s_compress_avx512(state, block, blocks, inc);
else
blake2s_compress_ssse3(state, block, blocks, inc);
@@ -59,8 +58,7 @@ static int __init blake2s_mod_init(void)
if (boot_cpu_has(X86_FEATURE_SSSE3))
static_branch_enable(&blake2s_use_ssse3);
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- boot_cpu_has(X86_FEATURE_AVX) &&
+ if (boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VL) &&
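The blake2s changes are the same pattern in miniature: the CONFIG_AS_AVX512 compile-time guard goes away, leaving only the boot-time static key. Schematically — a condensed restatement of the glue code above, not a complete file:

	static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512);

	/* The key is flipped once at boot from CPUID; the hot path then
	 * compiles down to a patched direct branch, not a load-and-test. */
	static void compress_one(struct blake2s_state *state, const u8 *block,
				 size_t nblocks, u32 inc)
	{
		kernel_fpu_begin();
		if (static_branch_likely(&blake2s_use_avx512))
			blake2s_compress_avx512(state, block, nblocks, inc);
		else
			blake2s_compress_ssse3(state, block, nblocks, inc);
		kernel_fpu_end();
	}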
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index e7e4d64e9577..2d2f4e16537c 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -69,10 +68,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg camellia_algs[] = {
{
- .base.cra_name = "__ecb(camellia)",
- .base.cra_driver_name = "__ecb-camellia-aesni-avx2",
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-aesni-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -82,10 +80,9 @@ static struct skcipher_alg camellia_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(camellia)",
- .base.cra_driver_name = "__cbc-camellia-aesni-avx2",
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-aesni-avx2",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -98,8 +95,6 @@ static struct skcipher_alg camellia_algs[] = {
},
};
-static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
-
static int __init camellia_aesni_init(void)
{
const char *feature_name;
@@ -118,15 +113,13 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(camellia_algs,
- ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ return crypto_register_skciphers(camellia_algs,
+ ARRAY_SIZE(camellia_algs));
}
static void __exit camellia_aesni_fini(void)
{
- simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ crypto_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs));
}
module_init(camellia_aesni_init);
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index c7ccf63e741e..a7d162388142 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -69,10 +68,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg camellia_algs[] = {
{
- .base.cra_name = "__ecb(camellia)",
- .base.cra_driver_name = "__ecb-camellia-aesni",
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-aesni",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -82,10 +80,9 @@ static struct skcipher_alg camellia_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(camellia)",
- .base.cra_driver_name = "__cbc-camellia-aesni",
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-aesni",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct camellia_ctx),
.base.cra_module = THIS_MODULE,
@@ -98,8 +95,6 @@ static struct skcipher_alg camellia_algs[] = {
}
};
-static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
-
static int __init camellia_aesni_init(void)
{
const char *feature_name;
@@ -117,15 +112,13 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(camellia_algs,
- ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ return crypto_register_skciphers(camellia_algs,
+ ARRAY_SIZE(camellia_algs));
}
static void __exit camellia_aesni_fini(void)
{
- simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
- camellia_simd_algs);
+ crypto_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs));
}
module_init(camellia_aesni_init);
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 3976a87f92ad..3aca04d43b34 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -8,7 +8,6 @@
#include <crypto/algapi.h>
#include <crypto/cast5.h>
-#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -64,10 +63,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg cast5_algs[] = {
{
- .base.cra_name = "__ecb(cast5)",
- .base.cra_driver_name = "__ecb-cast5-avx",
+ .base.cra_name = "ecb(cast5)",
+ .base.cra_driver_name = "ecb-cast5-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast5_ctx),
.base.cra_module = THIS_MODULE,
@@ -77,10 +75,9 @@ static struct skcipher_alg cast5_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(cast5)",
- .base.cra_driver_name = "__cbc-cast5-avx",
+ .base.cra_name = "cbc(cast5)",
+ .base.cra_driver_name = "cbc-cast5-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST5_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast5_ctx),
.base.cra_module = THIS_MODULE,
@@ -93,8 +90,6 @@ static struct skcipher_alg cast5_algs[] = {
}
};
-static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
-
static int __init cast5_init(void)
{
const char *feature_name;
@@ -105,15 +100,13 @@ static int __init cast5_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(cast5_algs,
- ARRAY_SIZE(cast5_algs),
- cast5_simd_algs);
+ return crypto_register_skciphers(cast5_algs,
+ ARRAY_SIZE(cast5_algs));
}
static void __exit cast5_exit(void)
{
- simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
- cast5_simd_algs);
+ crypto_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs));
}
module_init(cast5_init);
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 7e2aea372349..c4dd28c30303 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
-#include <crypto/internal/simd.h>
#include "ecb_cbc_helpers.h"
@@ -64,10 +63,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg cast6_algs[] = {
{
- .base.cra_name = "__ecb(cast6)",
- .base.cra_driver_name = "__ecb-cast6-avx",
+ .base.cra_name = "ecb(cast6)",
+ .base.cra_driver_name = "ecb-cast6-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST6_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast6_ctx),
.base.cra_module = THIS_MODULE,
@@ -77,10 +75,9 @@ static struct skcipher_alg cast6_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(cast6)",
- .base.cra_driver_name = "__cbc-cast6-avx",
+ .base.cra_name = "cbc(cast6)",
+ .base.cra_driver_name = "cbc-cast6-avx",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = CAST6_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cast6_ctx),
.base.cra_module = THIS_MODULE,
@@ -93,8 +90,6 @@ static struct skcipher_alg cast6_algs[] = {
},
};
-static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
-
static int __init cast6_init(void)
{
const char *feature_name;
@@ -105,15 +100,12 @@ static int __init cast6_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(cast6_algs,
- ARRAY_SIZE(cast6_algs),
- cast6_simd_algs);
+ return crypto_register_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs));
}
static void __exit cast6_exit(void)
{
- simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
- cast6_simd_algs);
+ crypto_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs));
}
module_init(cast6_init);
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 8bb74a272879..b7fd7a1f0e15 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -1,15 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
+ * ChaCha and HChaCha functions (x86_64 optimized)
*
* Copyright (C) 2015 Martin Willi
*/
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
+#include <crypto/chacha.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -35,7 +31,6 @@ asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl);
@@ -48,8 +43,7 @@ static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- static_branch_likely(&chacha_use_avx512vl)) {
+ if (static_branch_likely(&chacha_use_avx512vl)) {
while (bytes >= CHACHA_BLOCK_SIZE * 8) {
chacha_8block_xor_avx512vl(state, dst, src, bytes,
nrounds);
@@ -123,23 +117,15 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
- if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
- hchacha_block_generic(state, stream, nrounds);
- } else {
- kernel_fpu_begin();
- hchacha_block_ssse3(state, stream, nrounds);
- kernel_fpu_end();
- }
+ kernel_fpu_begin();
+ hchacha_block_ssse3(state, stream, nrounds);
+ kernel_fpu_end();
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
- if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
- bytes <= CHACHA_BLOCK_SIZE)
- return chacha_crypt_generic(state, dst, src, bytes, nrounds);
-
do {
unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
@@ -154,158 +140,30 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
}
EXPORT_SYMBOL(chacha_crypt_arch);
-static int chacha_simd_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- u32 state[CHACHA_STATE_WORDS] __aligned(8);
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (!static_branch_likely(&chacha_use_simd) ||
- !crypto_simd_usable()) {
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- } else {
- kernel_fpu_begin();
- chacha_dosimd(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes,
- ctx->nrounds);
- kernel_fpu_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int chacha_simd(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_simd_stream_xor(req, ctx, req->iv);
-}
-
-static int xchacha_simd(struct skcipher_request *req)
+bool chacha_is_arch_optimized(void)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 state[CHACHA_STATE_WORDS] __aligned(8);
- struct chacha_ctx subctx;
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
- kernel_fpu_begin();
- hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
- kernel_fpu_end();
- } else {
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
- }
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_simd_stream_xor(req, &subctx, real_iv);
+ return true;
}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_simd,
- .decrypt = chacha_simd,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_simd,
- .decrypt = xchacha_simd,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_simd,
- .decrypt = xchacha_simd,
- },
-};
+EXPORT_SYMBOL(chacha_is_arch_optimized);
static int __init chacha_simd_mod_init(void)
{
if (!boot_cpu_has(X86_FEATURE_SSSE3))
return 0;
- static_branch_enable(&chacha_use_simd);
-
if (boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
static_branch_enable(&chacha_use_avx2);
- if (IS_ENABLED(CONFIG_AS_AVX512) &&
- boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ if (boot_cpu_has(X86_FEATURE_AVX512VL) &&
boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
static_branch_enable(&chacha_use_avx512vl);
}
- return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
- crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
+ return 0;
}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3))
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
+arch_initcall(chacha_simd_mod_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-simd");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-simd");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-simd");
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (x86_64 optimized)");
diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
index b9abcd79c1f4..409ec6955733 100644
--- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl
@@ -2811,18 +2811,10 @@ if ($avx>2) {
# reason stack layout is kept identical to poly1305_blocks_avx2. If not
# for this tail, we wouldn't have to even allocate stack frame...
-if($kernel) {
- $code .= "#ifdef CONFIG_AS_AVX512\n";
-}
-
&declare_function("poly1305_blocks_avx512", 32, 4);
poly1305_blocks_avxN(1);
&end_function("poly1305_blocks_avx512");
-if ($kernel) {
- $code .= "#endif\n";
-}
-
if (!$kernel && $avx>3) {
########################################################################
# VPMADD52 version using 2^44 radix.
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 08ff4b489f7e..8b5593c46da7 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -107,7 +107,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
const size_t bytes = min_t(size_t, len, SZ_4K);
kernel_fpu_begin();
- if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
+ if (static_branch_likely(&poly1305_use_avx512))
poly1305_blocks_avx512(ctx, inp, bytes, padbit);
else if (static_branch_likely(&poly1305_use_avx2))
poly1305_blocks_avx2(ctx, inp, bytes, padbit);
@@ -265,8 +265,8 @@ static int __init poly1305_simd_mod_init(void)
if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
static_branch_enable(&poly1305_use_avx2);
- if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
+ if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
/* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
boot_cpu_data.x86_vfm != INTEL_SKYLAKE_X)
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 347e97f4b713..f5f2121b7956 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -10,7 +10,6 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-avx.h"
@@ -65,10 +64,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg serpent_algs[] = {
{
- .base.cra_name = "__ecb(serpent)",
- .base.cra_driver_name = "__ecb-serpent-avx2",
+ .base.cra_name = "ecb(serpent)",
+ .base.cra_driver_name = "ecb-serpent-avx2",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -78,10 +76,9 @@ static struct skcipher_alg serpent_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(serpent)",
- .base.cra_driver_name = "__cbc-serpent-avx2",
+ .base.cra_name = "cbc(serpent)",
+ .base.cra_driver_name = "cbc-serpent-avx2",
.base.cra_priority = 600,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -94,8 +91,6 @@ static struct skcipher_alg serpent_algs[] = {
},
};
-static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-
static int __init serpent_avx2_init(void)
{
const char *feature_name;
@@ -110,15 +105,13 @@ static int __init serpent_avx2_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(serpent_algs,
- ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ return crypto_register_skciphers(serpent_algs,
+ ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_avx2_fini(void)
{
- simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_avx2_init);
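
The same wrapper removal is repeated below for serpent-avx, serpent-sse2, sm4 and twofish. A hedged sketch of what changes for consumers (the expected lookup results are inferences, not taken from this series):

	/* Before: only the simd wrapper exposed a usable name; the inner
	 * "__ecb(serpent)" algorithm carried CRYPTO_ALG_INTERNAL and was
	 * hidden from normal lookups. After: the unprefixed name binds
	 * directly to the AVX2 implementation. */
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("ecb(serpent)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	pr_info("driver: %s\n", crypto_skcipher_driver_name(tfm));
	/* expected on AVX2-capable hardware: "ecb-serpent-avx2" */
	crypto_free_skcipher(tfm);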
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 6c248e1ea4ef..e640abc1cb8a 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -13,7 +13,6 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-avx.h"
@@ -71,10 +70,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg serpent_algs[] = {
{
- .base.cra_name = "__ecb(serpent)",
- .base.cra_driver_name = "__ecb-serpent-avx",
+ .base.cra_name = "ecb(serpent)",
+ .base.cra_driver_name = "ecb-serpent-avx",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -84,10 +82,9 @@ static struct skcipher_alg serpent_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(serpent)",
- .base.cra_driver_name = "__cbc-serpent-avx",
+ .base.cra_name = "cbc(serpent)",
+ .base.cra_driver_name = "cbc-serpent-avx",
.base.cra_priority = 500,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -100,8 +97,6 @@ static struct skcipher_alg serpent_algs[] = {
},
};
-static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-
static int __init serpent_init(void)
{
const char *feature_name;
@@ -112,15 +107,13 @@ static int __init serpent_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(serpent_algs,
- ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ return crypto_register_skciphers(serpent_algs,
+ ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_exit(void)
{
- simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_init);
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index d78f37e9b2cf..80ee17ec21b4 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
-#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include "serpent-sse2.h"
@@ -74,10 +73,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg serpent_algs[] = {
{
- .base.cra_name = "__ecb(serpent)",
- .base.cra_driver_name = "__ecb-serpent-sse2",
+ .base.cra_name = "ecb(serpent)",
+ .base.cra_driver_name = "ecb-serpent-sse2",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -87,10 +85,9 @@ static struct skcipher_alg serpent_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(serpent)",
- .base.cra_driver_name = "__cbc-serpent-sse2",
+ .base.cra_name = "cbc(serpent)",
+ .base.cra_driver_name = "cbc-serpent-sse2",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct serpent_ctx),
.base.cra_module = THIS_MODULE,
@@ -103,8 +100,6 @@ static struct skcipher_alg serpent_algs[] = {
},
};
-static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
-
static int __init serpent_sse2_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2)) {
@@ -112,15 +107,13 @@ static int __init serpent_sse2_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(serpent_algs,
- ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ return crypto_register_skciphers(serpent_algs,
+ ARRAY_SIZE(serpent_algs));
}
static void __exit serpent_sse2_exit(void)
{
- simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
- serpent_simd_algs);
+ crypto_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs));
}
module_init(serpent_sse2_init);
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index ab8bc54f254d..abb793cbad01 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -28,9 +28,7 @@
#include <asm/simd.h>
static const struct x86_cpu_id module_cpu_ids[] = {
-#ifdef CONFIG_AS_SHA1_NI
X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
-#endif
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
@@ -256,7 +254,6 @@ static void unregister_sha1_avx2(void)
crypto_unregister_shash(&sha1_avx2_alg);
}
-#ifdef CONFIG_AS_SHA1_NI
asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data,
int rounds);
@@ -306,11 +303,6 @@ static void unregister_sha1_ni(void)
crypto_unregister_shash(&sha1_ni_alg);
}
-#else
-static inline int register_sha1_ni(void) { return 0; }
-static inline void unregister_sha1_ni(void) { }
-#endif
-
static int __init sha1_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
@@ -360,6 +352,4 @@ MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-ssse3");
MODULE_ALIAS_CRYPTO("sha1-avx");
MODULE_ALIAS_CRYPTO("sha1-avx2");
-#ifdef CONFIG_AS_SHA1_NI
MODULE_ALIAS_CRYPTO("sha1-ni");
-#endif
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index e04a43d9f7d5..429a3cefbab4 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -45,9 +45,7 @@ asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
const u8 *data, int blocks);
static const struct x86_cpu_id module_cpu_ids[] = {
-#ifdef CONFIG_AS_SHA256_NI
X86_MATCH_FEATURE(X86_FEATURE_SHA_NI, NULL),
-#endif
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
@@ -329,7 +327,6 @@ static void unregister_sha256_avx2(void)
ARRAY_SIZE(sha256_avx2_algs));
}
-#ifdef CONFIG_AS_SHA256_NI
asmlinkage void sha256_ni_transform(struct sha256_state *digest,
const u8 *data, int rounds);
@@ -403,11 +400,6 @@ static void unregister_sha256_ni(void)
ARRAY_SIZE(sha256_ni_algs));
}
-#else
-static inline int register_sha256_ni(void) { return 0; }
-static inline void unregister_sha256_ni(void) { }
-#endif
-
static int __init sha256_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
@@ -461,7 +453,5 @@ MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha224-ssse3");
MODULE_ALIAS_CRYPTO("sha224-avx");
MODULE_ALIAS_CRYPTO("sha224-avx2");
-#ifdef CONFIG_AS_SHA256_NI
MODULE_ALIAS_CRYPTO("sha256-ni");
MODULE_ALIAS_CRYPTO("sha224-ni");
-#endif
diff --git a/arch/x86/crypto/sm4_aesni_avx2_glue.c b/arch/x86/crypto/sm4_aesni_avx2_glue.c
index 1148fd4cd57f..fec0ab7a63dd 100644
--- a/arch/x86/crypto/sm4_aesni_avx2_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx2_glue.c
@@ -8,11 +8,10 @@
* Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
+#include <asm/fpu/api.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
-#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-avx.h"
@@ -48,10 +47,9 @@ static int ctr_crypt(struct skcipher_request *req)
static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
{
.base = {
- .cra_name = "__ecb(sm4)",
- .cra_driver_name = "__ecb-sm4-aesni-avx2",
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "ecb-sm4-aesni-avx2",
.cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -64,10 +62,9 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
.decrypt = sm4_avx_ecb_decrypt,
}, {
.base = {
- .cra_name = "__cbc(sm4)",
- .cra_driver_name = "__cbc-sm4-aesni-avx2",
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "cbc-sm4-aesni-avx2",
.cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -81,10 +78,9 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__ctr(sm4)",
- .cra_driver_name = "__ctr-sm4-aesni-avx2",
+ .cra_name = "ctr(sm4)",
+ .cra_driver_name = "ctr-sm4-aesni-avx2",
.cra_priority = 500,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -100,9 +96,6 @@ static struct skcipher_alg sm4_aesni_avx2_skciphers[] = {
}
};
-static struct simd_skcipher_alg *
-simd_sm4_aesni_avx2_skciphers[ARRAY_SIZE(sm4_aesni_avx2_skciphers)];
-
static int __init sm4_init(void)
{
const char *feature_name;
@@ -121,16 +114,14 @@ static int __init sm4_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(sm4_aesni_avx2_skciphers,
- ARRAY_SIZE(sm4_aesni_avx2_skciphers),
- simd_sm4_aesni_avx2_skciphers);
+ return crypto_register_skciphers(sm4_aesni_avx2_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx2_skciphers));
}
static void __exit sm4_exit(void)
{
- simd_unregister_skciphers(sm4_aesni_avx2_skciphers,
- ARRAY_SIZE(sm4_aesni_avx2_skciphers),
- simd_sm4_aesni_avx2_skciphers);
+ crypto_unregister_skciphers(sm4_aesni_avx2_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx2_skciphers));
}
module_init(sm4_init);
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index 85b4ca78b47b..72867fc49ce8 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -8,11 +8,10 @@
* Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
+#include <asm/fpu/api.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
-#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
#include "sm4-avx.h"
@@ -263,10 +262,9 @@ static int ctr_crypt(struct skcipher_request *req)
static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
{
.base = {
- .cra_name = "__ecb(sm4)",
- .cra_driver_name = "__ecb-sm4-aesni-avx",
+ .cra_name = "ecb(sm4)",
+ .cra_driver_name = "ecb-sm4-aesni-avx",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -279,10 +277,9 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
.decrypt = sm4_avx_ecb_decrypt,
}, {
.base = {
- .cra_name = "__cbc(sm4)",
- .cra_driver_name = "__cbc-sm4-aesni-avx",
+ .cra_name = "cbc(sm4)",
+ .cra_driver_name = "cbc-sm4-aesni-avx",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -296,10 +293,9 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
.decrypt = cbc_decrypt,
}, {
.base = {
- .cra_name = "__ctr(sm4)",
- .cra_driver_name = "__ctr-sm4-aesni-avx",
+ .cra_name = "ctr(sm4)",
+ .cra_driver_name = "ctr-sm4-aesni-avx",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
@@ -315,9 +311,6 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
}
};
-static struct simd_skcipher_alg *
-simd_sm4_aesni_avx_skciphers[ARRAY_SIZE(sm4_aesni_avx_skciphers)];
-
static int __init sm4_init(void)
{
const char *feature_name;
@@ -335,16 +328,14 @@ static int __init sm4_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(sm4_aesni_avx_skciphers,
- ARRAY_SIZE(sm4_aesni_avx_skciphers),
- simd_sm4_aesni_avx_skciphers);
+ return crypto_register_skciphers(sm4_aesni_avx_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx_skciphers));
}
static void __exit sm4_exit(void)
{
- simd_unregister_skciphers(sm4_aesni_avx_skciphers,
- ARRAY_SIZE(sm4_aesni_avx_skciphers),
- simd_sm4_aesni_avx_skciphers);
+ crypto_unregister_skciphers(sm4_aesni_avx_skciphers,
+ ARRAY_SIZE(sm4_aesni_avx_skciphers));
}
module_init(sm4_init);
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 3eb3440b477a..9e20db013750 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -13,7 +13,6 @@
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <crypto/twofish.h>
#include "twofish.h"
@@ -74,10 +73,9 @@ static int cbc_decrypt(struct skcipher_request *req)
static struct skcipher_alg twofish_algs[] = {
{
- .base.cra_name = "__ecb(twofish)",
- .base.cra_driver_name = "__ecb-twofish-avx",
+ .base.cra_name = "ecb(twofish)",
+ .base.cra_driver_name = "ecb-twofish-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
@@ -87,10 +85,9 @@ static struct skcipher_alg twofish_algs[] = {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(twofish)",
- .base.cra_driver_name = "__cbc-twofish-avx",
+ .base.cra_name = "cbc(twofish)",
+ .base.cra_driver_name = "cbc-twofish-avx",
.base.cra_priority = 400,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = TF_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct twofish_ctx),
.base.cra_module = THIS_MODULE,
@@ -103,8 +100,6 @@ static struct skcipher_alg twofish_algs[] = {
},
};
-static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
-
static int __init twofish_init(void)
{
const char *feature_name;
@@ -114,15 +109,13 @@ static int __init twofish_init(void)
return -ENODEV;
}
- return simd_register_skciphers_compat(twofish_algs,
- ARRAY_SIZE(twofish_algs),
- twofish_simd_algs);
+ return crypto_register_skciphers(twofish_algs,
+ ARRAY_SIZE(twofish_algs));
}
static void __exit twofish_exit(void)
{
- simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
- twofish_simd_algs);
+ crypto_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs));
}
module_init(twofish_init);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0bc708ee2788..7be4e1647903 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2933,6 +2933,7 @@ void __init sev_set_cpu_caps(void)
void __init sev_hardware_setup(void)
{
unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
+ struct sev_platform_init_args init_args = {0};
bool sev_snp_supported = false;
bool sev_es_supported = false;
bool sev_supported = false;
@@ -3059,6 +3060,15 @@ out:
sev_supported_vmsa_features = 0;
if (sev_es_debug_swap_enabled)
sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+
+ if (!sev_enabled)
+ return;
+
+ /*
+ * Do both SNP and SEV initialization at KVM module load.
+ */
+ init_args.probe = true;
+ sev_platform_init(&init_args);
}
void sev_hardware_unsetup(void)
@@ -3074,6 +3084,8 @@ void sev_hardware_unsetup(void)
misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
+
+ sev_platform_shutdown();
}
int sev_cpu_init(struct svm_cpu_data *sd)
diff --git a/crypto/Makefile b/crypto/Makefile
index 0e6ab5ffd3f7..98510a2aa0b1 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -148,7 +148,8 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o
+CFLAGS_chacha.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
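
The -DARCH=$(ARCH) flag exists so chacha.c can derive per-architecture driver names at compile time. An illustration of the expansion (the explicit #define stands in for the command-line definition):

	#include <linux/stringify.h>

	#define ARCH x86	/* normally supplied as -DARCH=x86 */

	/* __stringify() expands its argument first, then stringizes: */
	static const char name[] = "chacha20-" __stringify(ARCH);
	/* name == "chacha20-x86" */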
diff --git a/crypto/acompress.c b/crypto/acompress.c
index f7a3fbe5447e..5807b87bb508 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,20 +8,36 @@
*/
#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
+#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_SRC_FOLIO = 1 << 2,
+ ACOMP_WALK_DST_LINEAR = 1 << 3,
+ ACOMP_WALK_DST_FOLIO = 1 << 4,
+};
+
static const struct crypto_type crypto_acomp_type;
static void acomp_reqchain_done(void *data, int err);
@@ -434,5 +450,220 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ if (!streams)
+ return;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
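
A usage sketch of the stream locking helper (example_streams and do_compress_one are placeholders; the deflate conversion later in this series follows the same shape):

	static int example_compress(struct acomp_req *req)
	{
		struct crypto_acomp_stream *s;
		int err;

		s = crypto_acomp_lock_stream_bh(&example_streams);
		/* s->ctx is this CPU's context or, while the worker is
		 * still allocating it, the first possible CPU's context. */
		err = do_compress_one(req, s->ctx);
		crypto_acomp_unlock_stream_bh(s);
		return err;
	}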
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ else if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)) {
+ src = &req->chain.ssg;
+ sg_init_table(src, 1);
+ sg_set_folio(src, req->sfolio, walk->slen, req->soff);
+ }
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+ else if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO)) {
+ dst = &req->chain.dsg;
+ sg_init_table(dst, 1);
+ sg_set_folio(dst, req->dfolio, walk->dlen, req->doff);
+ }
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
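The walk helpers above are meant to be driven in a nested loop, destination chunk outer and source chunk inner, as the deflate rewrite below does. A condensed sketch that consumes every chunk fully (a real user feeds the chunks to its engine and reports the actual amounts to the done helpers; this assumes acomp_walk_more_src(walk, n) reports whether source remains beyond n bytes, as its use in deflate suggests):

	static int example_walk(struct acomp_req *req)
	{
		struct acomp_walk walk;
		int ret;

		ret = acomp_walk_virt(&walk, req);
		if (ret)
			return ret;

		do {
			unsigned int dcur = acomp_walk_next_dst(&walk);
			unsigned int scur = acomp_walk_next_src(&walk);

			if (!dcur)
				return -ENOSPC;

			/* process scur bytes at walk.src.virt.addr into
			 * dcur bytes at walk.dst.virt.addr here */

			acomp_walk_done_src(&walk, scur);
			acomp_walk_done_dst(&walk, dcur);
		} while (acomp_walk_more_src(&walk, 0));

		return 0;
	}
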
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index ea9ed9580aa8..5b8a4c787387 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -66,17 +66,34 @@ static int crypto_check_alg(struct crypto_alg *alg)
static void crypto_free_instance(struct crypto_instance *inst)
{
- inst->alg.cra_type->free(inst);
+ struct crypto_alg *alg = &inst->alg;
+ const struct crypto_type *type;
+
+ type = alg->cra_type;
+ if (type->destroy)
+ type->destroy(alg);
+ type->free(inst);
}
static void crypto_destroy_instance_workfn(struct work_struct *w)
{
- struct crypto_instance *inst = container_of(w, struct crypto_instance,
+ struct crypto_template *tmpl = container_of(w, struct crypto_template,
free_work);
- struct crypto_template *tmpl = inst->tmpl;
+ struct crypto_instance *inst;
+ struct hlist_node *n;
+ HLIST_HEAD(list);
+
+ down_write(&crypto_alg_sem);
+ hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) {
+ if (refcount_read(&inst->alg.cra_refcnt) != -1)
+ continue;
+ hlist_del(&inst->list);
+ hlist_add_head(&inst->list, &list);
+ }
+ up_write(&crypto_alg_sem);
- crypto_free_instance(inst);
- crypto_tmpl_put(tmpl);
+ hlist_for_each_entry_safe(inst, n, &list, list)
+ crypto_free_instance(inst);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
@@ -84,9 +101,10 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
struct crypto_instance *inst = container_of(alg,
struct crypto_instance,
alg);
+ struct crypto_template *tmpl = inst->tmpl;
- INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
- schedule_work(&inst->free_work);
+ refcount_set(&alg->cra_refcnt, -1);
+ schedule_work(&tmpl->free_work);
}
/*
@@ -132,14 +150,16 @@ static void crypto_remove_instance(struct crypto_instance *inst,
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
- if (!tmpl || !crypto_tmpl_get(tmpl))
+ if (!tmpl)
return;
- list_move(&inst->alg.cra_list, list);
+ list_del_init(&inst->alg.cra_list);
hlist_del(&inst->list);
- inst->alg.cra_destroy = crypto_destroy_instance;
+ hlist_add_head(&inst->list, &tmpl->dead);
BUG_ON(!list_empty(&inst->alg.cra_users));
+
+ crypto_alg_put(&inst->alg);
}
/*
@@ -464,7 +484,8 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
return;
- crypto_alg_put(alg);
+ if (alg->cra_type && alg->cra_type->destroy)
+ alg->cra_type->destroy(alg);
crypto_remove_final(&list);
}
@@ -504,6 +525,8 @@ int crypto_register_template(struct crypto_template *tmpl)
struct crypto_template *q;
int err = -EEXIST;
+ INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn);
+
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
@@ -565,6 +588,8 @@ void crypto_unregister_template(struct crypto_template *tmpl)
crypto_free_instance(inst);
}
crypto_remove_final(&users);
+
+ flush_work(&tmpl->free_work);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
@@ -618,6 +643,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
+ inst->alg.cra_destroy = crypto_destroy_instance;
down_write(&crypto_alg_sem);
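
Taken together, the algapi hunks replace the per-instance free work with a tombstone scheme. The intended flow, reconstructed from the hunks above:

	crypto_remove_instance()
	    moves the instance onto tmpl->dead, drops the registration ref
	crypto_destroy_instance()	/* final reference dropped */
	    refcount_set(&alg->cra_refcnt, -1);	/* tombstone */
	    schedule_work(&tmpl->free_work);
	crypto_destroy_instance_workfn()
	    under crypto_alg_sem, collects tombstoned entries from
	    tmpl->dead onto a private list, then frees them outside the lock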
diff --git a/crypto/api.c b/crypto/api.c
index 3416e98128a0..2880aa04bb99 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -703,15 +703,5 @@ void crypto_req_done(void *data, int err)
}
EXPORT_SYMBOL_GPL(crypto_req_done);
-void crypto_destroy_alg(struct crypto_alg *alg)
-{
- if (alg->cra_type && alg->cra_type->destroy)
- alg->cra_type->destroy(alg);
-
- if (alg->cra_destroy)
- alg->cra_destroy(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_destroy_alg);
-
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha.c b/crypto/chacha.c
new file mode 100644
index 000000000000..5103bc0b2881
--- /dev/null
+++ b/crypto/chacha.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers
+ *
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2018 Google LLC
+ */
+
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/module.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static int chacha_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static int chacha20_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static int chacha12_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+static int chacha_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx, const u8 *iv,
+ bool arch)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init(state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
+
+ if (arch)
+ chacha_crypt(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ else
+ chacha_crypt_generic(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int crypto_chacha_crypt_generic(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, false);
+}
+
+static int crypto_chacha_crypt_arch(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, true);
+}
+
+static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(state, ctx->key, req->iv);
+ if (arch)
+ hchacha_block(state, subctx.key, ctx->nrounds);
+ else
+ hchacha_block_generic(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ /* Build the real IV */
+ memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
+ memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
+
+ /* Generate the stream and XOR it with the data */
+ return chacha_stream_xor(req, &subctx, real_iv, arch);
+}
+
+static int crypto_xchacha_crypt_generic(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, false);
+}
+
+static int crypto_xchacha_crypt_arch(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, true);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_generic,
+ .decrypt = crypto_chacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_arch,
+ .decrypt = crypto_chacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ }
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_chacha_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0);
+ if (!chacha_is_arch_optimized())
+ num_algs /= 2;
+
+ return crypto_register_skciphers(algs, num_algs);
+}
+
+static void __exit crypto_chacha_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, num_algs);
+}
+
+subsys_initcall(crypto_chacha_mod_init);
+module_exit(crypto_chacha_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-generic");
+MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-generic");
+MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH));
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
deleted file mode 100644
index 1fb9fbd302c6..000000000000
--- a/crypto/chacha_generic.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2015 Martin Willi
- * Copyright (C) 2018 Google LLC
- */
-
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/module.h>
-
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
-
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int crypto_chacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv);
-}
-
-static int crypto_xchacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- /* Compute the subkey given the original key and first 128 nonce bits */
- chacha_init(state, ctx->key, req->iv);
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- /* Build the real IV */
- memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
- memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
-
- /* Generate the stream and XOR it with the data */
- return chacha_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_chacha_crypt,
- .decrypt = crypto_chacha_crypt,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }
-};
-
-static int __init chacha_generic_mod_init(void)
-{
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_generic_mod_fini(void)
-{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-subsys_initcall(chacha_generic_mod_init);
-module_exit(chacha_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-generic");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 5c346c544093..bc76c343a0cf 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -6,246 +6,255 @@
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- *
- * The default winbits of 11 should suit most packets, and it may be something
- * to configure on a per-tfm basis in the future.
- *
- * Currently, compression history is not maintained between tfm calls, as
- * it is not needed for IPCOMP and keeps the code simpler. It can be
- * implemented if someone wants it.
+ * Copyright (c) 2023 Google, LLC. <ardb@kernel.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
-struct deflate_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
+struct deflate_stream {
+ struct z_stream_s stream;
+ u8 workspace[];
};
-static int deflate_comp_init(struct deflate_ctx *ctx)
-{
- int ret = 0;
- struct z_stream_s *stream = &ctx->comp_stream;
-
- stream->workspace = vzalloc(zlib_deflate_workspacesize(
- -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL));
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+static DEFINE_MUTEX(deflate_stream_lock);
-static int deflate_decomp_init(struct deflate_ctx *ctx)
+static void *deflate_alloc_stream(void)
{
- int ret = 0;
- struct z_stream_s *stream = &ctx->decomp_stream;
+ size_t size = max(zlib_inflate_workspacesize(),
+ zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
+ DEFLATE_DEF_MEMLEVEL));
+ struct deflate_stream *ctx;
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+ ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
-static void deflate_comp_exit(struct deflate_ctx *ctx)
-{
- zlib_deflateEnd(&ctx->comp_stream);
- vfree(ctx->comp_stream.workspace);
-}
+ ctx->stream.workspace = ctx->workspace;
-static void deflate_decomp_exit(struct deflate_ctx *ctx)
-{
- zlib_inflateEnd(&ctx->decomp_stream);
- vfree(ctx->decomp_stream.workspace);
+ return ctx;
}
-static int __deflate_init(void *ctx)
+static struct crypto_acomp_streams deflate_streams = {
+ .alloc_ctx = deflate_alloc_stream,
+ .free_ctx = kvfree,
+};
+
+static int deflate_compress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
+ struct z_stream_s *stream = &ds->stream;
+ struct acomp_walk walk;
int ret;
- ret = deflate_comp_init(ctx);
+ ret = acomp_walk_virt(&walk, req);
if (ret)
- goto out;
- ret = deflate_decomp_init(ctx);
- if (ret)
- deflate_comp_exit(ctx);
-out:
- return ret;
-}
+ return ret;
-static void *deflate_alloc_ctx(void)
-{
- struct deflate_ctx *ctx;
- int ret;
+ do {
+ unsigned int dcur;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur)
+ return -ENOSPC;
- ret = __deflate_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
- return ctx;
-}
+ do {
+ int flush = Z_FINISH;
+ unsigned int scur;
-static void __deflate_exit(void *ctx)
-{
- deflate_comp_exit(ctx);
- deflate_decomp_exit(ctx);
-}
+ stream->avail_in = 0;
+ stream->next_in = NULL;
-static void deflate_free_ctx(void *ctx)
-{
- __deflate_exit(ctx);
- kfree_sensitive(ctx);
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ if (acomp_walk_more_src(&walk, scur))
+ flush = Z_NO_FLUSH;
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ ret = zlib_deflate(stream, flush);
+
+ if (scur) {
+ scur -= stream->avail_in;
+ acomp_walk_done_src(&walk, scur);
+ }
+ } while (ret == Z_OK && stream->avail_out);
+
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_compress(struct acomp_req *req)
{
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->comp_stream;
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ struct acomp_req *r2;
+ int err;
+
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
+ err = deflate_compress_one(req, ds);
+ req->base.err = err;
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
+ list_for_each_entry(r2, &req->base.list, base.list) {
+ zlib_deflateReset(&ds->stream);
+ r2->base.err = deflate_compress_one(r2, ds);
}
- ret = 0;
- *dlen = stream->total_out;
+
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int deflate_decompress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
- return __deflate_compress(src, slen, dst, dlen, ctx);
+ struct z_stream_s *stream = &ds->stream;
+ bool out_of_space = false;
+ struct acomp_walk walk;
+ int ret;
+
+ ret = acomp_walk_virt(&walk, req);
+ if (ret)
+ return ret;
+
+ do {
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ out_of_space = true;
+ break;
+ }
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+ dcur -= stream->avail_out;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK && stream->avail_in);
+
+ if (scur)
+ acomp_walk_done_src(&walk, scur);
+
+ if (out_of_space)
+ return -ENOSPC;
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_decompress(struct acomp_req *req)
{
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ struct acomp_req *r2;
+ int err;
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->decomp_stream;
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an extra
- * byte when being used in the (undocumented) raw deflate mode.
- * (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- u8 zerostuff = 0;
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- }
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
+ err = deflate_decompress_one(req, ds);
+ req->base.err = err;
+
+ list_for_each_entry(r2, &req->base.list, base.list) {
+ zlib_inflateReset(&ds->stream);
+ r2->base.err = deflate_decompress_one(r2, ds);
}
- ret = 0;
- *dlen = stream->total_out;
+
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
+static int deflate_init(struct crypto_acomp *tfm)
{
- return __deflate_decompress(src, slen, dst, dlen, ctx);
+ int ret;
+
+ mutex_lock(&deflate_stream_lock);
+ ret = crypto_acomp_alloc_streams(&deflate_streams);
+ mutex_unlock(&deflate_stream_lock);
+
+ return ret;
}
-static struct scomp_alg scomp = {
- .alloc_ctx = deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
- .cra_module = THIS_MODULE,
- }
+static struct acomp_alg acomp = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .init = deflate_init,
+ .base.cra_name = "deflate",
+ .base.cra_driver_name = "deflate-generic",
+ .base.cra_flags = CRYPTO_ALG_REQ_CHAIN,
+ .base.cra_module = THIS_MODULE,
};
static int __init deflate_mod_init(void)
{
- return crypto_register_scomp(&scomp);
+ return crypto_register_acomp(&acomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&acomp);
+ crypto_acomp_free_streams(&deflate_streams);
}
subsys_initcall(deflate_mod_init);
@@ -254,5 +263,7 @@ module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>");
MODULE_ALIAS_CRYPTO("deflate");
MODULE_ALIAS_CRYPTO("deflate-generic");
diff --git a/crypto/internal.h b/crypto/internal.h
index 11567ea24fc3..2edefb546ad4 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -128,7 +128,6 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
-void crypto_destroy_alg(struct crypto_alg *alg);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -166,7 +165,7 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
static inline void crypto_alg_put(struct crypto_alg *alg)
{
if (refcount_dec_and_test(&alg->cra_refcnt))
- crypto_destroy_alg(alg);
+ alg->cra_destroy(alg);
}
static inline int crypto_tmpl_get(struct crypto_template *tmpl)
diff --git a/crypto/scompress.c b/crypto/scompress.c
index f67ce38d203d..4a2b3933aa95 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -7,9 +7,9 @@
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
+#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -20,20 +20,17 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
-#define SCOMP_SCRATCH_SIZE 65400
-
struct scomp_scratch {
spinlock_t lock;
union {
void *src;
unsigned long saddr;
};
- void *dst;
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
@@ -44,6 +41,10 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
+static cpumask_t scomp_scratch_want;
+static void scomp_scratch_workfn(struct work_struct *work);
+static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);
+
static int __maybe_unused crypto_scomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -74,81 +75,48 @@ static void crypto_scomp_free_scratches(void)
scratch = per_cpu_ptr(&scomp_scratch, i);
free_page(scratch->saddr);
- vfree(scratch->dst);
scratch->src = NULL;
- scratch->dst = NULL;
}
}
-static int crypto_scomp_alloc_scratches(void)
+static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
- struct scomp_scratch *scratch;
- int i;
-
- for_each_possible_cpu(i) {
- struct page *page;
- void *mem;
-
- scratch = per_cpu_ptr(&scomp_scratch, i);
+ int node = cpu_to_node(cpu);
+ struct page *page;
- page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
- if (!page)
- goto error;
- scratch->src = page_address(page);
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->dst = mem;
- }
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+ spin_lock_bh(&scratch->lock);
+ scratch->src = page_address(page);
+ spin_unlock_bh(&scratch->lock);
return 0;
-error:
- crypto_scomp_free_scratches();
- return -ENOMEM;
}
-static void scomp_free_streams(struct scomp_alg *alg)
+static void scomp_scratch_workfn(struct work_struct *work)
{
- struct crypto_acomp_stream __percpu *stream = alg->stream;
- int i;
-
- if (!stream)
- return;
+ int cpu;
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
+ for_each_cpu(cpu, &scomp_scratch_want) {
+ struct scomp_scratch *scratch;
- if (!ps->ctx)
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ if (scratch->src)
+ continue;
+ if (scomp_alloc_scratch(scratch, cpu))
break;
- alg->free_ctx(ps->ctx);
+ cpumask_clear_cpu(cpu, &scomp_scratch_want);
}
-
- free_percpu(stream);
}
-static int scomp_alloc_streams(struct scomp_alg *alg)
+static int crypto_scomp_alloc_scratches(void)
{
- struct crypto_acomp_stream __percpu *stream;
- int i;
-
- stream = alloc_percpu(struct crypto_acomp_stream);
- if (!stream)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
-
- ps->ctx = alg->alloc_ctx();
- if (IS_ERR(ps->ctx)) {
- scomp_free_streams(alg);
- return PTR_ERR(ps->ctx);
- }
-
- spin_lock_init(&ps->lock);
- }
+ unsigned int i = cpumask_first(cpu_possible_mask);
+ struct scomp_scratch *scratch;
- alg->stream = stream;
- return 0;
+ scratch = per_cpu_ptr(&scomp_scratch, i);
+ return scomp_alloc_scratch(scratch, i);
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
@@ -157,11 +125,9 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
int ret = 0;
mutex_lock(&scomp_lock);
- if (!alg->stream) {
- ret = scomp_alloc_streams(alg);
- if (ret)
- goto unlock;
- }
+ ret = crypto_acomp_alloc_streams(&alg->streams);
+ if (ret)
+ goto unlock;
if (!scomp_scratch_users) {
ret = crypto_scomp_alloc_scratches();
if (ret)
@@ -174,13 +140,40 @@ unlock:
return ret;
}
+static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
+{
+ int cpu = raw_smp_processor_id();
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ spin_lock(&scratch->lock);
+ if (likely(scratch->src))
+ return scratch;
+ spin_unlock(&scratch->lock);
+
+ cpumask_set_cpu(cpu, &scomp_scratch_want);
+ schedule_work(&scomp_scratch_work);
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
+ spin_lock(&scratch->lock);
+ return scratch;
+}
+
+static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
+ __releases(scratch)
+{
+ spin_unlock(&scratch->lock);
+}
+
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
- struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
+ bool src_isvirt = acomp_request_src_isvirt(req);
+ bool dst_isvirt = acomp_request_dst_isvirt(req);
struct crypto_scomp *scomp = *tfm_ctx;
struct crypto_acomp_stream *stream;
+ struct scomp_scratch *scratch;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
struct page *spage, *dpage;
@@ -197,10 +190,33 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
if (!req->dst || !dlen)
return -EINVAL;
- if (acomp_request_src_isvirt(req))
+ if (dst_isvirt)
+ dst = req->dvirt;
+ else {
+ if (acomp_request_dst_isfolio(req)) {
+ dpage = folio_page(req->dfolio, 0);
+ doff = req->doff;
+ } else if (dlen <= req->dst->length) {
+ dpage = sg_page(req->dst);
+ doff = req->dst->offset;
+ } else
+ return -ENOSYS;
+
+ dpage = nth_page(dpage, doff / PAGE_SIZE);
+ doff = offset_in_page(doff);
+
+ n = dlen / PAGE_SIZE;
+ n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
+ if (PageHighMem(dpage + n) &&
+ size_add(doff, dlen) > PAGE_SIZE)
+ return -ENOSYS;
+ dst = kmap_local_page(dpage) + doff;
+ }
+
+ if (src_isvirt)
src = req->svirt;
else {
- src = scratch->src;
+ src = NULL;
do {
if (acomp_request_src_isfolio(req)) {
spage = folio_page(req->sfolio, 0);
@@ -223,59 +239,37 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
} while (0);
}
- if (acomp_request_dst_isvirt(req))
- dst = req->dvirt;
- else {
- unsigned int max = SCOMP_SCRATCH_SIZE;
+ stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
- dst = scratch->dst;
- do {
- if (acomp_request_dst_isfolio(req)) {
- dpage = folio_page(req->dfolio, 0);
- doff = req->doff;
- } else if (dlen <= req->dst->length) {
- dpage = sg_page(req->dst);
- doff = req->dst->offset;
- } else
- break;
-
- dpage = nth_page(dpage, doff / PAGE_SIZE);
- doff = offset_in_page(doff);
-
- n = dlen / PAGE_SIZE;
- n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
- if (PageHighMem(dpage + n) &&
- size_add(doff, dlen) > PAGE_SIZE)
- break;
- dst = kmap_local_page(dpage) + doff;
- max = dlen;
- } while (0);
- dlen = min(dlen, max);
- }
-
- spin_lock_bh(&scratch->lock);
+ if (!src_isvirt && !src) {
+ const u8 *src;
- if (src == scratch->src)
+ scratch = scomp_lock_scratch();
+ src = scratch->src;
memcpy_from_sglist(scratch->src, req->src, 0, slen);
- stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
- spin_lock(&stream->lock);
- if (dir)
+ if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ scomp_unlock_scratch(scratch);
+ } else if (dir)
ret = crypto_scomp_compress(scomp, src, slen,
dst, &dlen, stream->ctx);
else
ret = crypto_scomp_decompress(scomp, src, slen,
dst, &dlen, stream->ctx);
- if (dst == scratch->dst)
- memcpy_to_sglist(req->dst, 0, dst, dlen);
-
- spin_unlock(&stream->lock);
- spin_unlock_bh(&scratch->lock);
+ crypto_acomp_unlock_stream_bh(stream);
req->dlen = dlen;
- if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
+ if (!src_isvirt && src)
+ kunmap_local(src);
+ if (!dst_isvirt) {
kunmap_local(dst);
dlen += doff;
for (;;) {
@@ -286,8 +280,6 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
dpage = nth_page(dpage, 1);
}
}
- if (!acomp_request_src_isvirt(req) && src != scratch->src)
- kunmap_local(src);
return ret;
}
@@ -322,6 +314,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
+ flush_work(&scomp_scratch_work);
mutex_lock(&scomp_lock);
if (!--scomp_scratch_users)
crypto_scomp_free_scratches();
@@ -355,7 +348,9 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
static void crypto_scomp_destroy(struct crypto_alg *alg)
{
- scomp_free_streams(__crypto_scomp_alg(alg));
+ struct scomp_alg *scomp = __crypto_scomp_alg(alg);
+
+ crypto_acomp_free_streams(&scomp->streams);
}
static const struct crypto_type crypto_scomp_type = {
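The scratch rework in this file trades the old up-front per-CPU src/dst buffers for a single on-demand page per CPU: crypto_scomp_alloc_scratches() now populates only the first possible CPU, and a CPU that finds its own page missing flags itself in scomp_scratch_want, schedules scomp_scratch_work to allocate in process context, and borrows the first CPU's page in the meantime. The same deferred-allocation idiom in a generic, self-contained form (demo_* names are illustrative; per-buffer locking is omitted for brevity):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(void *, demo_buf);
static cpumask_t demo_want;
static void demo_fill(struct work_struct *work);
static DECLARE_WORK(demo_work, demo_fill);

static void demo_fill(struct work_struct *work)
{
        int cpu;

        /* Process context: a sleeping allocation is allowed here. */
        for_each_cpu(cpu, &demo_want) {
                if (per_cpu(demo_buf, cpu))
                        continue;
                per_cpu(demo_buf, cpu) = (void *)__get_free_page(GFP_KERNEL);
                if (!per_cpu(demo_buf, cpu))
                        break;
                cpumask_clear_cpu(cpu, &demo_want);
        }
}

static void *demo_get(void)
{
        int cpu = raw_smp_processor_id();
        void *buf = per_cpu(demo_buf, cpu);

        if (likely(buf))
                return buf;

        /* Atomic caller: defer the allocation and borrow the first
         * CPU's buffer, which the init path is assumed to populate.
         */
        cpumask_set_cpu(cpu, &demo_want);
        schedule_work(&demo_work);
        return per_cpu(demo_buf, cpumask_first(cpu_possible_mask));
}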
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47082782008a..5686369779be 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -530,13 +530,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
source "drivers/crypto/intel/Kconfig"
-config CRYPTO_DEV_CAVIUM_ZIP
- tristate "Cavium ZIP driver"
- depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- help
- Select this option if you want to enable compression/decompression
- acceleration on Cavium's ARM based SoCs
-
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c97f0ebc55ec..22eadcc8f4a2 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
-obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
-obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -50,3 +47,4 @@ obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
+obj-y += cavium/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..ba13fb75c05d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -23,6 +23,18 @@
#include <linux/string.h>
#include "sun8i-ce.h"
+static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
+ algt->stat_fb++;
+ }
+}
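This helper centralizes the fallback-statistics bump and replaces #ifdef blocks with IS_ENABLED(), so the code stays visible to the compiler in every configuration yet compiles away when the debug option is off. The general shape, as a self-contained sketch (CONFIG_FOO_DEBUG and foo_stat are illustrative names):

#include <linux/kconfig.h>

static unsigned long foo_stat;

static void foo_stat_inc(void)
{
        /* Parsed and type-checked in all configs; dead-code-eliminated
         * when CONFIG_FOO_DEBUG=n, unlike an #ifdef'd block.
         */
        if (IS_ENABLED(CONFIG_FOO_DEBUG))
                foo_stat++;
}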
+
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
@@ -48,8 +60,10 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
@@ -78,7 +92,9 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -90,7 +106,9 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -102,7 +120,9 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -113,21 +133,13 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -139,10 +151,10 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -153,24 +165,14 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -181,24 +183,14 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -213,22 +205,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
/* we need to reserve one SG for padding one */
if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
sg = areq->src;
while (sg) {
if (sg->length % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
sg = sg_next(sg);
@@ -244,21 +244,11 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ce_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ce_hash_digest_fb(areq);
- }
-
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -343,9 +333,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf = NULL;
+ void *buf, *result;
int j, i, todo;
- void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
@@ -365,22 +354,22 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
if (!buf) {
err = -ENOMEM;
- goto theend;
+ goto err_out;
}
bf = (__le32 *)buf;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
if (!result) {
err = -ENOMEM;
- goto theend;
+ goto err_free_buf;
}
flow = rctx->flow;
chan = &ce->chanlist[flow];
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
+
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
cet = chan->tl;
@@ -398,7 +387,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto theend;
+ goto err_free_result;
}
len = areq->nbytes;
@@ -411,7 +400,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
@@ -419,7 +408,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
byte_count = areq->nbytes;
@@ -441,7 +430,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
if (!j) {
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
@@ -450,7 +439,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
if (ce->variant->hash_t_dlen_in_bits)
@@ -463,16 +452,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+
+err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+err_unmap_src:
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
- kfree(buf);
+err_free_result:
kfree(result);
+
+err_free_buf:
+ kfree(buf);
+
+err_out:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
+
return 0;
}
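The hash-run rework above replaces the driver's single catch-all theend label with ordered unwind labels, so each failure releases exactly what has been acquired so far, in reverse order, and the result memcpy now runs only when the job succeeded. The label ordering, as a generic self-contained sketch (do_work() is a placeholder, not driver API):

#include <linux/slab.h>

static int do_work(void *a, void *b)
{
        return 0;       /* stand-in for the real operation */
}

static int example(void)
{
        void *a, *b;
        int err;

        a = kzalloc(64, GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        b = kzalloc(64, GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto err_free_a;
        }

        err = do_work(a, b);
        if (err)
                goto err_free_b;

        /* Success and failure share the tail: free in reverse order. */
err_free_b:
        kfree(b);
err_free_a:
        kfree(a);
        return err;
}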
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index e0af611a95d8..289750f34ccf 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -72,7 +72,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
- __le32 iv[AES_IV_SIZE];
+ __le32 iv[AES_IV_SIZE / 4];
if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
@@ -429,7 +429,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- __le32 iv[16];
+ __le32 iv[4];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
unsigned int len = req->cryptlen;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index ec3ccfa60445..c77d06ddb1ec 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -676,7 +676,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
const unsigned int assoclen,
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 3adcc5e65694..9c56c7ac6e4c 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -147,6 +147,12 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000
+#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7)))
+#else
+#define BUILD_PD_ACCESS
+#endif
+
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
@@ -154,11 +160,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
const unsigned int assoclen,
- struct scatterlist *dst_tmp);
+ struct scatterlist *dst_tmp) BUILD_PD_ACCESS;
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
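The BUILD_PD_ACCESS annotation above tells the compiler that parameter 6 of crypto4xx_build_pd() is a read-only buffer whose size is given by parameter 7, letting GCC's buffer-overread diagnostics check callers; that is also why the iv parameter can be relaxed to const void *. GCC has supported the access attribute since release 10, though the guard above only enables it for GCC 12 or later. A minimal standalone illustration (RO_ACCESS and consume() are made-up names):

#if defined(__GNUC__) && !defined(__clang__)
#define RO_ACCESS(buf, len) __attribute__((access(read_only, buf, len)))
#else
#define RO_ACCESS(buf, len)
#endif

/* GCC may now warn when a caller passes a buffer shorter than len. */
void consume(const void *buf, unsigned long len) RO_ACCESS(1, 2);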
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
index 4679c06b611f..75227c587ed0 100644
--- a/drivers/crypto/cavium/Makefile
+++ b/drivers/crypto/cavium/Makefile
@@ -2,4 +2,5 @@
#
# Makefile for Cavium crypto device drivers
#
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
+obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
deleted file mode 100644
index 020d189d793d..000000000000
--- a/drivers/crypto/cavium/zip/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Cavium's ZIP Driver.
-#
-
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
-thunderx_zip-y := zip_main.o \
- zip_device.o \
- zip_crypto.o \
- zip_mem.o \
- zip_deflate.o \
- zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
deleted file mode 100644
index 54f6fb054119..000000000000
--- a/drivers/crypto/cavium/zip/common.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-/* Device specific zlib function definitions */
-#include "zip_device.h"
-
-/* ZIP device definitions */
-#include "zip_main.h"
-
-/* ZIP memory allocation/deallocation related definitions */
-#include "zip_mem.h"
-
-/* Device specific structure definitions */
-#include "zip_regs.h"
-
-#define ZIP_ERROR -1
-
-#define ZIP_FLUSH_FINISH 4
-
-#define RAW_FORMAT 0 /* for rawpipe */
-#define ZLIB_FORMAT 1 /* for zpipe */
-#define GZIP_FORMAT 2 /* for gzpipe */
-#define LZS_FORMAT 3 /* for lzspipe */
-
-/* Max number of ZIP devices supported */
-#define MAX_ZIP_DEVICES 2
-
-/* Configures the number of zip queues to be used */
-#define ZIP_NUM_QUEUES 2
-
-#define DYNAMIC_STOP_EXCESS 1024
-
-/* Maximum buffer sizes in direct mode */
-#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
-#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
-
-/**
- * struct zip_operation - common data structure for comp and decomp operations
- * @input: Next input byte is read from here
- * @output: Next output byte written here
- * @ctx_addr: Inflate context buffer address
- * @history: Pointer to the history buffer
- * @input_len: Number of bytes available at next_in
- * @input_total_len: Total number of input bytes read
- * @output_len: Remaining free space at next_out
- * @output_total_len: Total number of bytes output so far
- * @csum: Checksum value of the uncompressed data
- * @flush: Flush flag
- * @format: Format (depends on stream's wrap)
- * @speed: Speed depends on stream's level
- * @ccode: Compression code ( stream's strategy)
- * @lzs_flag: Flag for LZS support
- * @begin_file: Beginning of file indication for inflate
- * @history_len: Size of the history data
- * @end_file: Ending of the file indication for inflate
- * @compcode: Completion status of the ZIP invocation
- * @bytes_read: Input bytes read in current instruction
- * @bits_processed: Total bits processed for entire file
- * @sizeofptr: To distinguish between ILP32 and LP64
- * @sizeofzops: Optional just for padding
- *
- * This structure is used to maintain the required meta data for the
- * comp and decomp operations.
- */
-struct zip_operation {
- u8 *input;
- u8 *output;
- u64 ctx_addr;
- u64 history;
-
- u32 input_len;
- u32 input_total_len;
-
- u32 output_len;
- u32 output_total_len;
-
- u32 csum;
- u32 flush;
-
- u32 format;
- u32 speed;
- u32 ccode;
- u32 lzs_flag;
-
- u32 begin_file;
- u32 history_len;
-
- u32 end_file;
- u32 compcode;
- u32 bytes_read;
- u32 bits_processed;
-
- u32 sizeofptr;
- u32 sizeofzops;
-};
-
-static inline int zip_poll_result(union zip_zres_s *result)
-{
- int retries = 1000;
-
- while (!result->s.compcode) {
- if (!--retries) {
- pr_err("ZIP ERR: request timed out");
- return -ETIMEDOUT;
- }
- udelay(10);
- /*
- * Force re-reading of compcode which is updated
- * by the ZIP coprocessor.
- */
- rmb();
- }
- return 0;
-}
-
-/* error messages */
-#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#ifdef MSG_ENABLE
-/* Enable all messages */
-#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
-#else
-#define zip_msg(fmt, args...)
-#endif
-
-#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
-
-#ifdef DEBUG_LEVEL
-
-#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
- strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
-
-#if DEBUG_LEVEL >= 4
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 3
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 2
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG LEVEL >=4 */
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG_LEVEL */
-#else
-
-#define zip_dbg(fmt, args...)
-
-#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
deleted file mode 100644
index 02e87f2d50db..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "zip_crypto.h"
-
-static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
- int lzs_flag)
-{
- zip_ops->flush = ZIP_FLUSH_FINISH;
-
- /* equivalent to level 6 of opensource zlib */
- zip_ops->speed = 1;
-
- if (!lzs_flag) {
- zip_ops->ccode = 0; /* Auto Huffman */
- zip_ops->lzs_flag = 0;
- zip_ops->format = ZLIB_FORMAT;
- } else {
- zip_ops->ccode = 3; /* LZS Encoding */
- zip_ops->lzs_flag = 1;
- zip_ops->format = LZS_FORMAT;
- }
- zip_ops->begin_file = 1;
- zip_ops->history_len = 0;
- zip_ops->end_file = 1;
- zip_ops->compcode = 0;
- zip_ops->csum = 1; /* Adler checksum desired */
-}
-
-static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
-
- zip_static_init_zip_ops(comp_ctx, lzs_flag);
- zip_static_init_zip_ops(decomp_ctx, lzs_flag);
-
- comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!comp_ctx->input)
- return -ENOMEM;
-
- comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!comp_ctx->output)
- goto err_comp_input;
-
- decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!decomp_ctx->input)
- goto err_comp_output;
-
- decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!decomp_ctx->output)
- goto err_decomp_input;
-
- return 0;
-
-err_decomp_input:
- zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
-err_comp_output:
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
-err_comp_input:
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
- return -ENOMEM;
-}
-
-static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
-
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
- zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-}
-
-static int zip_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_comp;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
- memcpy(zip_ops->input, src, slen);
-
- ret = zip_deflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-static int zip_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_decomp;
- memcpy(zip_ops->input, src, slen);
-
- /* Workaround for a bug in zlib which sometimes needs an extra byte */
- if (zip_ops->ccode != 3) /* Not LZS Encoding */
- zip_ops->input[slen++] = 0;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
-
- ret = zip_inflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-/* SCOMP framework start */
-void *zip_alloc_scomp_ctx_deflate(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 0);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void *zip_alloc_scomp_ctx_lzs(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 1);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void zip_free_scomp_ctx(void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- zip_ctx_exit(zip_ctx);
- kfree_sensitive(zip_ctx);
-}
-
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* SCOMP framework end */
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
deleted file mode 100644
index 10899ece2d1f..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_CRYPTO_H__
-#define __ZIP_CRYPTO_H__
-
-#include <crypto/internal/scompress.h>
-#include "common.h"
-#include "zip_deflate.h"
-#include "zip_inflate.h"
-
-struct zip_kernel_ctx {
- struct zip_operation zip_comp;
- struct zip_operation zip_decomp;
-};
-
-void *zip_alloc_scomp_ctx_deflate(void);
-void *zip_alloc_scomp_ctx_lzs(void);
-void zip_free_scomp_ctx(void *zip_ctx);
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
deleted file mode 100644
index d7133f857d67..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/* Prepares the deflate zip command */
-static int prepare_zip_command(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD #0 */
- /* History gather */
- zip_cmd->s.hg = 0;
- /* compression enable = 1 for deflate */
- zip_cmd->s.ce = 1;
- /* sf (sync flush) */
- zip_cmd->s.sf = 1;
- /* ef (end of file) */
- if (zip_ops->flush == ZIP_FLUSH_FINISH) {
- zip_cmd->s.ef = 1;
- zip_cmd->s.sf = 0;
- }
-
- zip_cmd->s.cc = zip_ops->ccode;
- /* ss (compression speed/storage) */
- zip_cmd->s.ss = zip_ops->speed;
-
- /* IWORD #1 */
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
- zip_cmd->s.historylength = zip_ops->history_len;
- zip_cmd->s.dg = 0;
-
- /* IWORD # 6 and 7 - compression input/history pointer */
- zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
- zip_ops->history_len);
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
- /* maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
- return 0;
-}
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepares zip command based on the input parameters */
- prepare_zip_command(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
- /* Loads zip command into command queues and rings door bell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Stats update for compression requests submitted */
- atomic64_inc(&zip_dev->stats.comp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Stats update for compression requests completed */
- atomic64_inc(&zip_dev->stats.comp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip instruction not yet completed");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip instruction completed successfully");
- zip_update_cmd_bufs(zip_dev, queue);
- break;
-
- case ZIP_CMD_DTRUNC:
- zip_dbg("Output Truncate error");
- /* Returning ZIP_ERROR to avoid copy to user */
- return ZIP_ERROR;
-
- default:
- zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
- return ZIP_ERROR;
- }
-
- /* Update the CRC depending on the format */
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine, need to feed it again */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Unknown Format:%d\n", zip_ops->format);
- }
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.comp_out_bytes);
-
- /* Update output_len */
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
- zip_err("output_len (%d) < total bytes written(%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
-
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- return 0;
-}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
deleted file mode 100644
index 1d32e76edc4d..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEFLATE_H__
-#define __ZIP_DEFLATE_H__
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine by ringing the doorbell.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
deleted file mode 100644
index f174ec29ed69..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/**
- * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
- *
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- *
- * Return: Bytes consumed in the command queue buffer.
- */
-static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
-{
- return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
- sizeof(u64 *));
-}
-
-/**
- * zip_load_instr - Submits the instruction into the ZIP command queue
- * @instr: Pointer to the instruction to be submitted
- * @zip_dev: Pointer to ZIP device structure to which the instruction is to
- * be submitted
- *
- * This function copies the ZIP instruction to the command queue and rings the
- * doorbell to notify the engine of the instruction submission. The command
- * queue is maintained in a circular fashion. When there is space for exactly
- * one instruction in the queue, next chunk pointer of the queue is made to
- * point to the head of the queue, thus maintaining a circular queue.
- *
- * Return: Queue number to which the instruction was submitted
- */
-u32 zip_load_instr(union zip_inst_s *instr,
- struct zip_device *zip_dev)
-{
- union zip_quex_doorbell dbell;
- u32 queue = 0;
- u32 consumed = 0;
- u64 *ncb_ptr = NULL;
- union zip_nptr_s ncp;
-
- /*
- * Distribute the instructions between the enabled queues based on
- * the CPU id.
- */
- if (raw_smp_processor_id() % 2 == 0)
- queue = 0;
- else
- queue = 1;
-
- zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
-
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /*
- * Command Queue implementation
- * 1. If there is place for new instructions, push the cmd at sw_head.
- * 2. If there is place for exactly one instruction, push the new cmd
- * at the sw_head. Make sw_head point to the sw_tail to make it
- * circular. Write sw_head's physical address to the "Next-Chunk
- * Buffer Ptr" to make it cmd_hw_tail.
- * 3. Ring the door bell.
- */
- zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
- zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
-
- consumed = zip_cmd_queue_consumed(zip_dev, queue);
- /* Check if there is space to push just one cmd */
- if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
- zip_dbg("Cmd queue space available for single command");
-		/* Space for one cmd; push it and wrap to keep the queue circular */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
- ncb_ptr = zip_dev->iq[queue].sw_head;
-
- zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
- ncb_ptr, zip_dev->iq[queue].sw_head - 16);
-
- /* Using Circular command queue */
- zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
- /* Mark this buffer for free */
- zip_dev->iq[queue].free_flag = 1;
-
- /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
- ncp.u_reg64 = 0ull;
- ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
- *ncb_ptr = ncp.u_reg64;
- zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
- *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
-
- zip_dev->iq[queue].pend_cnt++;
-
- } else {
- zip_dbg("Enough space is available for commands");
- /* Push this cmd to cmd queue buffer */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- zip_dev->iq[queue].pend_cnt++;
- }
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
-
- zip_dbg(" Pushed the new cmd : pend_cnt : %d",
- zip_dev->iq[queue].pend_cnt);
-
- /* Ring the doorbell */
- dbell.u_reg64 = 0ull;
- dbell.s.dbell_cnt = 1;
- zip_reg_write(dbell.u_reg64,
- (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
-
- /* Unlock cmd buffer lock */
- spin_unlock(&zip_dev->iq[queue].lock);
-
- return queue;
-}
-
-/**
- * zip_update_cmd_bufs - Updates the queue statistics after posting the
- * instruction
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- */
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
-{
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /* Check if the previous buffer can be freed */
- if (zip_dev->iq[queue].free_flag == 1) {
- zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
- /* Reset the free flag */
- zip_dev->iq[queue].free_flag = 0;
-
- /* Point the hw_tail to start of the new chunk buffer */
- zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
- } else {
- zip_dbg("Free flag not set. increment hw tail");
- zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
- }
-
- zip_dev->iq[queue].done_cnt++;
- zip_dev->iq[queue].pend_cnt--;
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
- zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
-
- spin_unlock(&zip_dev->iq[queue].lock);
-}
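A minimal userspace model of the circular command-queue arithmetic deleted above, assuming a 128-byte instruction and the 8 bytes reserved for the next-chunk pointer; all names here are illustrative, not driver API. Note that zip_cmd_queue_consumed() scales the pointer difference by sizeof(u64 *), which matches sizeof(u64) only on 64-bit targets; the model uses sizeof(uint64_t) directly.

#include <stdint.h>
#include <stdio.h>

#define QBUF_SIZE  (8064 + 8)	/* mirrors ZIP_CMD_QBUF_SIZE */
#define INST_U64S  16		/* one instruction = 16 u64 words = 128 B */

int main(void)
{
	uint64_t buf[QBUF_SIZE / sizeof(uint64_t)];
	uint64_t *head = buf, *tail = buf;
	int wraps = 0;

	for (int cmd = 0; cmd < 200; cmd++) {
		/* Bytes consumed, as in zip_cmd_queue_consumed(). */
		uint32_t consumed =
			(uint32_t)((head - tail) * sizeof(uint64_t));

		if (consumed + 128 == QBUF_SIZE - 8) {
			/*
			 * Room for exactly one more command: the driver
			 * pushes it, stores the next-chunk pointer in the
			 * final 8 bytes, and wraps head back to tail.
			 */
			head = tail;
			wraps++;
		} else {
			head += INST_U64S;	/* plain push */
		}
	}
	printf("wrapped %d times after 200 commands\n", wraps);
	return 0;
}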
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
deleted file mode 100644
index 9e18b3b93d38..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEVICE_H__
-#define __ZIP_DEVICE_H__
-
-#include <linux/types.h>
-#include "zip_main.h"
-
-struct sg_info {
- /*
- * Pointer to the input data when scatter_gather == 0 and
- * pointer to the input gather list buffer when scatter_gather == 1
- */
- union zip_zptr_s *gather;
-
- /*
- * Pointer to the output data when scatter_gather == 0 and
- * pointer to the output scatter list buffer when scatter_gather == 1
- */
- union zip_zptr_s *scatter;
-
- /*
- * Holds size of the output buffer pointed by scatter list
- * when scatter_gather == 1
- */
- u64 scatter_buf_size;
-
- /* for gather data */
- u64 gather_enable;
-
- /* for scatter data */
- u64 scatter_enable;
-
- /* Number of gather list pointers for gather data */
- u32 gbuf_cnt;
-
- /* Number of scatter list pointers for scatter data */
- u32 sbuf_cnt;
-
- /* Buffers allocation state */
- u8 alloc_state;
-};
-
-/**
- * struct zip_state - Structure representing the required information related
- * to a command
- * @zip_cmd: Pointer to zip instruction structure
- * @result: Pointer to zip result structure
- * @ctx: Context pointer for inflate
- * @history: Decompression history pointer
- * @sginfo: Scatter-gather info structure
- */
-struct zip_state {
- union zip_inst_s zip_cmd;
- union zip_zres_s result;
- union zip_zptr_s *ctx;
- union zip_zptr_s *history;
- struct sg_info sginfo;
-};
-
-#define ZIP_CONTEXT_SIZE 2048
-#define ZIP_INFLATE_HISTORY_SIZE 32768
-#define ZIP_DEFLATE_HISTORY_SIZE 32768
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
deleted file mode 100644
index 7e0d73e2f89e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_inflate.h"
-
-static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD#0 */
-
- /* Decompression History Gather list - no gather list */
- zip_cmd->s.hg = 0;
- /* For decompression, CE must be 0x0. */
- zip_cmd->s.ce = 0;
- /* For decompression, SS must be 0x0. */
- zip_cmd->s.ss = 0;
- /* For decompression, SF should always be set. */
- zip_cmd->s.sf = 1;
-
- /* Begin File */
- if (zip_ops->begin_file == 0)
- zip_cmd->s.bf = 0;
- else
- zip_cmd->s.bf = 1;
-
- zip_cmd->s.ef = 1;
- /* 0: for Deflate decompression, 3: for LZS decompression */
- zip_cmd->s.cc = zip_ops->ccode;
-
- /* IWORD #1*/
-
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
-
- /*
- * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
- * History data is added to a decompression operation via IWORD3.
- */
- zip_cmd->s.historylength = 0;
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
-
- /* Maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- zip_dbg("Data Direct Input case ");
-
- /* IWORD # 6 and 7 - input pointer */
- zip_cmd->s.dg = 0;
- zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
-
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
-	/* Returning 0 for the time being. */
- return 0;
-}
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepare inflate zip command */
- prepare_inflate_zcmd(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
-
- /* Load inflate command to zip queue and ring the doorbell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Decompression requests submitted stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Decompression requests completed stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip Instruction not yet completed\n");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip Instruction completed successfully\n");
- break;
-
- case ZIP_CMD_DYNAMIC_STOP:
- zip_dbg(" Dynamic stop Initiated\n");
- break;
-
- default:
- zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
- atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
- zip_update_cmd_bufs(zip_dev, queue);
- return ZIP_ERROR;
- }
-
- zip_update_cmd_bufs(zip_dev, queue);
-
- if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
- (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
- result_ptr->s.ef = 1;
-
- zip_ops->csum = result_ptr->s.adler32;
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.decomp_out_bytes);
-
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- zip_err("output_len (%d) < total bytes written (%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- zip_ops->bytes_read = result_ptr->s.totalbytesread;
- zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
- zip_ops->end_file = result_ptr->s.ef;
- if (zip_ops->end_file) {
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Format error:%d\n", zip_ops->format);
- }
- }
-
- return 0;
-}
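A hedged sketch of the checksum selection at the tail of zip_inflate() above: RAW and ZLIB results carry an Adler-32 in the result structure, GZIP carries a CRC32, and LZS leaves the caller's checksum untouched. The enum and helper names below are stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

enum stream_format { RAW_FMT, ZLIB_FMT, GZIP_FMT, LZS_FMT };

struct result { uint32_t adler32; uint32_t crc32; };

/* Pick the checksum the caller should see, mirroring the switch above. */
static uint32_t pick_csum(enum stream_format fmt, const struct result *r,
			  uint32_t old_csum)
{
	switch (fmt) {
	case RAW_FMT:
	case ZLIB_FMT:
		return r->adler32;	/* engine reports Adler-32 */
	case GZIP_FMT:
		return r->crc32;	/* engine reports CRC32 */
	case LZS_FMT:
	default:
		return old_csum;	/* LZS: checksum unchanged */
	}
}

int main(void)
{
	struct result r = { .adler32 = 0x11111111, .crc32 = 0x22222222 };

	printf("gzip csum: 0x%x\n", pick_csum(GZIP_FMT, &r, 0));
	return 0;
}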
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
deleted file mode 100644
index 6b20f179978e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_INFLATE_H__
-#define __ZIP_INFLATE_H__
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
deleted file mode 100644
index abd58de4343d..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_crypto.h"
-
-#define DRV_NAME "ThunderX-ZIP"
-
-static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
-
-static const struct pci_device_id zip_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
- { 0, }
-};
-
-static void zip_debugfs_init(void);
-static void zip_debugfs_exit(void);
-static int zip_register_compression_device(void);
-static void zip_unregister_compression_device(void);
-
-void zip_reg_write(u64 val, u64 __iomem *addr)
-{
- writeq(val, addr);
-}
-
-u64 zip_reg_read(u64 __iomem *addr)
-{
- return readq(addr);
-}
-
-/*
- * Allocates a new ZIP device structure.
- * Returns the zip_device pointer, or NULL if memory allocation fails.
- */
-static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
-{
- struct zip_device *zip = NULL;
- int idx;
-
- for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
- if (!zip_dev[idx])
- break;
- }
-
- /* To ensure that the index is within the limit */
- if (idx < MAX_ZIP_DEVICES)
- zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
-
- if (!zip)
- return NULL;
-
- zip_dev[idx] = zip;
- zip->index = idx;
- return zip;
-}
-
-/**
- * zip_get_device - Get ZIP device based on node id of cpu
- *
- * @node: Node id of the current cpu
- * Return: Pointer to Zip device structure
- */
-struct zip_device *zip_get_device(int node)
-{
- if ((node < MAX_ZIP_DEVICES) && (node >= 0))
- return zip_dev[node];
-
- zip_err("ZIP device not found for node id %d\n", node);
- return NULL;
-}
-
-/**
- * zip_get_node_id - Get the node id of the current cpu
- *
- * Return: Node id of the current cpu
- */
-int zip_get_node_id(void)
-{
- return cpu_to_node(raw_smp_processor_id());
-}
-
-/* Initializes the ZIP h/w sub-system */
-static int zip_init_hw(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
- union zip_constants constants;
- union zip_que_ena que_ena;
- union zip_quex_map que_map;
- union zip_que_pri que_pri;
-
- union zip_quex_sbuf_addr que_sbuf_addr;
- union zip_quex_sbuf_ctl que_sbuf_ctl;
-
- int q = 0;
-
- /* Enable the ZIP Engine(Core) Clock */
- cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
- cmd_ctl.s.forceclk = 1;
- zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
-
- zip_msg("ZIP_CMD_CTL : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
-
- constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
- zip->depth = constants.s.depth;
- zip->onfsize = constants.s.onfsize;
- zip->ctxsize = constants.s.ctxsize;
-
- zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
- zip->depth, zip->onfsize, zip->ctxsize);
-
- /*
- * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
- * have the correct buffer pointer and size configured for each
- * instruction queue.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_sbuf_ctl.u_reg64 = 0ull;
- que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
- que_sbuf_ctl.s.inst_be = 0;
- que_sbuf_ctl.s.stream_id = 0;
- zip_reg_write(que_sbuf_ctl.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
-
- zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
- }
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
-
- spin_lock_init(&zip->iq[q].lock);
-
- if (zip_cmd_qbuf_alloc(zip, q)) {
- while (q != 0) {
- q--;
- zip_cmd_qbuf_free(zip, q);
- }
- return -ENOMEM;
- }
-
- /* Initialize tail ptr to head */
- zip->iq[q].sw_tail = zip->iq[q].sw_head;
- zip->iq[q].hw_tail = zip->iq[q].sw_head;
-
- /* Write the physical addr to register */
- que_sbuf_addr.u_reg64 = 0ull;
- que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
- ZIP_128B_ALIGN);
-
- zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
- (u64)que_sbuf_addr.s.ptr);
-
- zip_reg_write(que_sbuf_addr.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip->iq[q].sw_head, zip->iq[q].sw_tail,
- zip->iq[q].hw_tail);
- zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
- }
-
- /*
- * Queue-to-ZIP core mapping
- * If a queue is not mapped to a particular core, it is equivalent to
- * the ZIP core being disabled.
- */
- que_ena.u_reg64 = 0x0ull;
- /* Enabling queues based on ZIP_NUM_QUEUES */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_ena.s.ena |= (0x1 << q);
- zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
-
- zip_msg("QUE_ENA : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_map.u_reg64 = 0ull;
- /* Mapping each queue to two ZIP cores */
- que_map.s.zce = 0x3;
- zip_reg_write(que_map.u_reg64,
- (zip->reg_base + ZIP_QUEX_MAP(q)));
-
- zip_msg("QUE_MAP(%d) : 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
- }
-
- que_pri.u_reg64 = 0ull;
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
- zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
-
- zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
-
- return 0;
-}
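The ZIP_QUE_ENA programming above builds a one-bit-per-queue enable mask; a tiny standalone model of that loop, assuming the driver's two-queue configuration:

#include <stdio.h>

#define NUM_QUEUES 2

int main(void)
{
	unsigned long long ena = 0;

	/* Same shape as the ZIP_QUE_ENA programming loop above. */
	for (int q = 0; q < NUM_QUEUES; q++)
		ena |= 1ull << q;

	printf("queue enable mask: 0x%llx\n", ena);	/* prints 0x3 */
	return 0;
}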
-
-static void zip_reset(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
-
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
-}
-
-static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct device *dev = &pdev->dev;
- struct zip_device *zip = NULL;
- int err;
-
- zip = zip_alloc_device(pdev);
- if (!zip)
- return -ENOMEM;
-
- dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
- pdev->vendor, pdev->device, dev_to_node(dev));
-
- pci_set_drvdata(pdev, zip);
- zip->pdev = pdev;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device");
- goto err_free_device;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(dev, "PCI request regions failed 0x%x", err);
- goto err_disable_device;
- }
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
- goto err_release_regions;
- }
-
- /* MAP configuration registers */
- zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
- if (!zip->reg_base) {
- dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- /* Initialize ZIP Hardware */
- err = zip_init_hw(zip);
- if (err)
- goto err_release_regions;
-
- /* Register with the Kernel Crypto Interface */
- err = zip_register_compression_device();
- if (err < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_register;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return 0;
-
-err_register:
- zip_reset(zip);
-
-err_release_regions:
- if (zip->reg_base)
- iounmap(zip->reg_base);
- pci_release_regions(pdev);
-
-err_disable_device:
- pci_disable_device(pdev);
-
-err_free_device:
- pci_set_drvdata(pdev, NULL);
-
- /* Remove zip_dev from zip_device list, free the zip_device memory */
- zip_dev[zip->index] = NULL;
- devm_kfree(dev, zip);
-
- return err;
-}
-
-static void zip_remove(struct pci_dev *pdev)
-{
- struct zip_device *zip = pci_get_drvdata(pdev);
- int q = 0;
-
- if (!zip)
- return;
-
- zip_debugfs_exit();
-
- zip_unregister_compression_device();
-
- if (zip->reg_base) {
- zip_reset(zip);
- iounmap(zip->reg_base);
- }
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
- /*
- * Free Command Queue buffers. This free should be called for all
- * the enabled Queues.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- zip_cmd_qbuf_free(zip, q);
-
- pci_set_drvdata(pdev, NULL);
- /* remove zip device from zip device list */
- zip_dev[zip->index] = NULL;
-}
-
-/* PCI Sub-System Interface */
-static struct pci_driver zip_driver = {
- .name = DRV_NAME,
- .id_table = zip_id_table,
- .probe = zip_probe,
- .remove = zip_remove,
-};
-
-/* Kernel Crypto Subsystem Interface */
-
-static struct scomp_alg zip_scomp_deflate = {
- .alloc_ctx = zip_alloc_scomp_ctx_deflate,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static struct scomp_alg zip_scomp_lzs = {
- .alloc_ctx = zip_alloc_scomp_ctx_lzs,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static int zip_register_compression_device(void)
-{
- int ret;
-
- ret = crypto_register_scomp(&zip_scomp_deflate);
- if (ret < 0) {
- zip_err("Deflate scomp algorithm registration failed\n");
- return ret;
- }
-
- ret = crypto_register_scomp(&zip_scomp_lzs);
- if (ret < 0) {
- zip_err("LZS scomp algorithm registration failed\n");
- goto err_unregister_scomp_deflate;
- }
-
- return ret;
-
-err_unregister_scomp_deflate:
- crypto_unregister_scomp(&zip_scomp_deflate);
-
- return ret;
-}
-
-static void zip_unregister_compression_device(void)
-{
- crypto_unregister_scomp(&zip_scomp_deflate);
- crypto_unregister_scomp(&zip_scomp_lzs);
-}
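The registration helper above follows the usual register-then-unwind pattern: if the second registration fails, the first is undone before returning. A self-contained model of that shape, with placeholder registration functions standing in for crypto_register_scomp():

#include <stdio.h>

static int reg_a(void) { return 0; }
static int reg_b(void) { return -1; }	/* pretend this one fails */
static void unreg_a(void) { puts("unregister a"); }

/* Same unwind shape as zip_register_compression_device() above. */
static int register_all(void)
{
	int ret = reg_a();

	if (ret < 0)
		return ret;

	ret = reg_b();
	if (ret < 0)
		unreg_a();	/* undo the earlier registration */
	return ret;
}

int main(void)
{
	printf("register_all: %d\n", register_all());
	return 0;
}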
-
-/*
- * debugfs functions
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-/* Displays ZIP device statistics */
-static int zip_stats_show(struct seq_file *s, void *unused)
-{
- u64 val = 0ull;
- u64 avg_chunk = 0ull, avg_cr = 0ull;
- u32 q = 0;
-
- int index = 0;
- struct zip_device *zip;
- struct zip_stats *st;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- u64 pending = 0;
-
- if (zip_dev[index]) {
- zip = zip_dev[index];
- st = &zip->stats;
-
- /* Get all the pending requests */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- val = zip_reg_read((zip->reg_base +
- ZIP_DBG_QUEX_STA(q)));
- pending += val >> 32 & 0xffffff;
- }
-
- val = atomic64_read(&st->comp_req_complete);
- avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
-
- val = atomic64_read(&st->comp_out_bytes);
- avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
- seq_printf(s, " ZIP Device %d Stats\n"
- "-----------------------------------\n"
- "Comp Req Submitted : \t%lld\n"
- "Comp Req Completed : \t%lld\n"
- "Compress In Bytes : \t%lld\n"
- "Compressed Out Bytes : \t%lld\n"
- "Average Chunk size : \t%llu\n"
- "Average Compression ratio : \t%llu\n"
- "Decomp Req Submitted : \t%lld\n"
- "Decomp Req Completed : \t%lld\n"
- "Decompress In Bytes : \t%lld\n"
- "Decompressed Out Bytes : \t%lld\n"
- "Decompress Bad requests : \t%lld\n"
- "Pending Req : \t%lld\n"
- "---------------------------------\n",
- index,
- (u64)atomic64_read(&st->comp_req_submit),
- (u64)atomic64_read(&st->comp_req_complete),
- (u64)atomic64_read(&st->comp_in_bytes),
- (u64)atomic64_read(&st->comp_out_bytes),
- avg_chunk,
- avg_cr,
- (u64)atomic64_read(&st->decomp_req_submit),
- (u64)atomic64_read(&st->decomp_req_complete),
- (u64)atomic64_read(&st->decomp_in_bytes),
- (u64)atomic64_read(&st->decomp_out_bytes),
- (u64)atomic64_read(&st->decomp_bad_reqs),
- pending);
- }
- }
- return 0;
-}
-
-/* Clears stats data */
-static int zip_clear_show(struct seq_file *s, void *unused)
-{
- int index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- memset(&zip_dev[index]->stats, 0,
- sizeof(struct zip_stats));
- seq_printf(s, "Cleared stats for zip %d\n", index);
- }
- }
-
- return 0;
-}
-
-static struct zip_registers zipregs[64] = {
- {"ZIP_CMD_CTL ", 0x0000ull},
- {"ZIP_THROTTLE ", 0x0010ull},
- {"ZIP_CONSTANTS ", 0x00A0ull},
- {"ZIP_QUE0_MAP ", 0x1400ull},
- {"ZIP_QUE1_MAP ", 0x1408ull},
- {"ZIP_QUE_ENA ", 0x0500ull},
- {"ZIP_QUE_PRI ", 0x0508ull},
- {"ZIP_QUE0_DONE ", 0x2000ull},
- {"ZIP_QUE1_DONE ", 0x2008ull},
- {"ZIP_QUE0_DOORBELL ", 0x4000ull},
- {"ZIP_QUE1_DOORBELL ", 0x4008ull},
- {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
- {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
- {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
- {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
- { NULL, 0}
-};
-
-/* Prints registers' contents */
-static int zip_regs_show(struct seq_file *s, void *unused)
-{
- u64 val = 0;
- int i = 0, index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- seq_printf(s, "--------------------------------\n"
- " ZIP Device %d Registers\n"
- "--------------------------------\n",
- index);
-
- i = 0;
-
- while (zipregs[i].reg_name) {
- val = zip_reg_read((zip_dev[index]->reg_base +
- zipregs[i].reg_offset));
- seq_printf(s, "%s: 0x%016llx\n",
- zipregs[i].reg_name, val);
- i++;
- }
- }
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(zip_stats);
-DEFINE_SHOW_ATTRIBUTE(zip_clear);
-DEFINE_SHOW_ATTRIBUTE(zip_regs);
-
-/* Root directory for thunderx_zip debugfs entry */
-static struct dentry *zip_debugfs_root;
-
-static void zip_debugfs_init(void)
-{
- if (!debugfs_initialized())
- return;
-
- zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
-
- /* Creating files for entries inside thunderx_zip directory */
- debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
- &zip_stats_fops);
-
- debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
- &zip_clear_fops);
-
- debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
- &zip_regs_fops);
-
-}
-
-static void zip_debugfs_exit(void)
-{
- debugfs_remove_recursive(zip_debugfs_root);
-}
-
-#else
-static void __init zip_debugfs_init(void) { }
-static void __exit zip_debugfs_exit(void) { }
-#endif
-/* debugfs - end */
-
-module_pci_driver(zip_driver);
-
-MODULE_AUTHOR("Cavium Inc");
-MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, zip_id_table);
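The averages printed by zip_stats_show() are integer ratios guarded against division by zero; a small standalone model of that arithmetic, with made-up sample counters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t comp_in = 1000000, comp_out = 400000, completed = 250;

	/* Average chunk = input bytes per completed request. */
	uint64_t avg_chunk = completed ? comp_in / completed : 0;
	/* Average compression "ratio" = input bytes per output byte. */
	uint64_t avg_cr = comp_out ? comp_in / comp_out : 0;

	printf("avg chunk %llu B, avg ratio %llu\n",
	       (unsigned long long)avg_chunk, (unsigned long long)avg_cr);
	return 0;
}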
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
deleted file mode 100644
index e1e4fa92ce80..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MAIN_H__
-#define __ZIP_MAIN_H__
-
-#include "zip_device.h"
-#include "zip_regs.h"
-
-/* PCI device IDs */
-#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
-
-/* ZIP device BARs */
-#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
-
-/* Maximum available zip queues */
-#define ZIP_MAX_NUM_QUEUES 8
-
-#define ZIP_128B_ALIGN 7
-
-/* Command queue buffer size */
-#define ZIP_CMD_QBUF_SIZE (8064 + 8)
-
-struct zip_registers {
- char *reg_name;
- u64 reg_offset;
-};
-
-/* ZIP Compression - Decompression stats */
-struct zip_stats {
- atomic64_t comp_req_submit;
- atomic64_t comp_req_complete;
- atomic64_t decomp_req_submit;
- atomic64_t decomp_req_complete;
- atomic64_t comp_in_bytes;
- atomic64_t comp_out_bytes;
- atomic64_t decomp_in_bytes;
- atomic64_t decomp_out_bytes;
- atomic64_t decomp_bad_reqs;
-};
-
-/* ZIP Instruction Queue */
-struct zip_iq {
- u64 *sw_head;
- u64 *sw_tail;
- u64 *hw_tail;
- u64 done_cnt;
- u64 pend_cnt;
- u64 free_flag;
-
- /* ZIP IQ lock */
- spinlock_t lock;
-};
-
-/* ZIP Device */
-struct zip_device {
- u32 index;
- void __iomem *reg_base;
- struct pci_dev *pdev;
-
- /* Different ZIP Constants */
- u64 depth;
- u64 onfsize;
- u64 ctxsize;
-
- struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
- struct zip_stats stats;
-};
-
-/* Prototypes */
-struct zip_device *zip_get_device(int node_id);
-int zip_get_node_id(void);
-void zip_reg_write(u64 val, u64 __iomem *addr);
-u64 zip_reg_read(u64 __iomem *addr);
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
-u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
-
-#endif /* ZIP_MAIN_H */
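ZIP_QUEX_SBUF_ADDR is programmed with a physical address shifted right by ZIP_128B_ALIGN (7), which presumes 128-byte alignment of the queue buffer; a hedged model of that packing and the alignment check it relies on:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ZIP_128B_ALIGN 7

int main(void)
{
	uint64_t phys = 0x1f0000380ull & ~0x7full;	/* force 128 B alignment */

	assert((phys & ((1u << ZIP_128B_ALIGN) - 1)) == 0);

	/* Value written into the SBUF_ADDR "ptr" field. */
	uint64_t ptr_field = phys >> ZIP_128B_ALIGN;

	printf("phys 0x%llx -> ptr field 0x%llx\n",
	       (unsigned long long)phys, (unsigned long long)ptr_field);
	return 0;
}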
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
deleted file mode 100644
index b3e0843a9169..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include "common.h"
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-{
- zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(ZIP_CMD_QBUF_SIZE));
-
- if (!zip->iq[q].sw_head)
- return -ENOMEM;
-
- memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
-
- zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
- return 0;
-}
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-{
- zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
-
- free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
-}
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Return: Pointer to the allocated buffer
- */
-u8 *zip_data_buf_alloc(u64 size)
-{
- u8 *ptr;
-
- ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(size));
-
- if (!ptr)
- return NULL;
-
- memset(ptr, 0, size);
-
- zip_dbg("Data buffer allocation success\n");
- return ptr;
-}
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size)
-{
- zip_dbg("Freeing data buffer 0x%lx\n", ptr);
-
- free_pages((u64)ptr, get_order(size));
-}
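zip_cmd_qbuf_alloc() and zip_data_buf_alloc() size their allocations with get_order(); a userspace approximation of that page-order computation, assuming 4 KB pages (the kernel's get_order() remains the authoritative definition):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Smallest n such that (PAGE_SIZE << n) >= size, like get_order(). */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;
	unsigned long span = PAGE_SIZE;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* ZIP_CMD_QBUF_SIZE is 8072 bytes -> order 1 (two pages). */
	printf("order(%u) = %u\n", 8064 + 8, order_for(8064 + 8));
	return 0;
}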
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
deleted file mode 100644
index f8f2f08c4a5c..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MEM_H__
-#define __ZIP_MEM_H__
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q);
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Return: Pointer to the allocated buffer
- */
-u8 *zip_data_buf_alloc(u64 size);
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size);
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
deleted file mode 100644
index 874e0236c87e..000000000000
--- a/drivers/crypto/cavium/zip/zip_regs.h
+++ /dev/null
@@ -1,1347 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_REGS_H__
-#define __ZIP_REGS_H__
-
-/*
- * Configuration and status register (CSR) address and type definitions for
- * Cavium ZIP.
- */
-
-#include <linux/kern_levels.h>
-
-/* ZIP invocation result completion status codes */
-#define ZIP_CMD_NOTDONE 0x0
-
-/* Successful completion. */
-#define ZIP_CMD_SUCCESS 0x1
-
-/* Output truncated */
-#define ZIP_CMD_DTRUNC 0x2
-
-/* Dynamic Stop */
-#define ZIP_CMD_DYNAMIC_STOP 0x3
-
-/* Uncompress ran out of input data when IWORD0[EF] was set */
-#define ZIP_CMD_ITRUNC 0x4
-
-/* Uncompress found the reserved block type 3 */
-#define ZIP_CMD_RBLOCK 0x5
-
-/*
- * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
- */
-#define ZIP_CMD_NLEN 0x6
-
-/* Uncompress found a bad code in the main Huffman codes. */
-#define ZIP_CMD_BADCODE 0x7
-
-/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
-#define ZIP_CMD_BADCODE2 0x8
-
-/* Compress found a zero-length input. */
-#define ZIP_CMD_ZERO_LEN 0x9
-
-/* The compress or decompress encountered an internal parity error. */
-#define ZIP_CMD_PARITY 0xA
-
-/*
- * Uncompress found a string identifier that precedes the uncompressed data and
- * decompression history.
- */
-#define ZIP_CMD_FATAL 0xB
-
-/**
- * enum zip_int_vec_e - Enumerates the ZIP MSI-X interrupt vectors.
- */
-enum zip_int_vec_e {
- ZIP_INT_VEC_E_ECCE = 0x10,
- ZIP_INT_VEC_E_FIFE = 0x11,
- ZIP_INT_VEC_E_QUE0_DONE = 0x0,
- ZIP_INT_VEC_E_QUE0_ERR = 0x8,
- ZIP_INT_VEC_E_QUE1_DONE = 0x1,
- ZIP_INT_VEC_E_QUE1_ERR = 0x9,
- ZIP_INT_VEC_E_QUE2_DONE = 0x2,
- ZIP_INT_VEC_E_QUE2_ERR = 0xa,
- ZIP_INT_VEC_E_QUE3_DONE = 0x3,
- ZIP_INT_VEC_E_QUE3_ERR = 0xb,
- ZIP_INT_VEC_E_QUE4_DONE = 0x4,
- ZIP_INT_VEC_E_QUE4_ERR = 0xc,
- ZIP_INT_VEC_E_QUE5_DONE = 0x5,
- ZIP_INT_VEC_E_QUE5_ERR = 0xd,
- ZIP_INT_VEC_E_QUE6_DONE = 0x6,
- ZIP_INT_VEC_E_QUE6_ERR = 0xe,
- ZIP_INT_VEC_E_QUE7_DONE = 0x7,
- ZIP_INT_VEC_E_QUE7_ERR = 0xf,
- ZIP_INT_VEC_E_ENUM_LAST = 0x12,
-};
-
-/**
- * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_addr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-
-};
-
-/**
- * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_ctl_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_inst_s - ZIP Instruction Structure.
- * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
- * the structure).
- */
-union zip_inst_s {
- u64 u_reg64[16];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1;
- u64 reserved_56_62 : 7;
- u64 totaloutputlength : 24;
- u64 reserved_27_31 : 5;
- u64 exn : 3;
- u64 reserved_23_23 : 1;
- u64 exbits : 7;
- u64 reserved_12_15 : 4;
- u64 sf : 1;
- u64 ss : 2;
- u64 cc : 2;
- u64 ef : 1;
- u64 bf : 1;
- u64 ce : 1;
- u64 reserved_3_3 : 1;
- u64 ds : 1;
- u64 dg : 1;
- u64 hg : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 hg : 1;
- u64 dg : 1;
- u64 ds : 1;
- u64 reserved_3_3 : 1;
- u64 ce : 1;
- u64 bf : 1;
- u64 ef : 1;
- u64 cc : 2;
- u64 ss : 2;
- u64 sf : 1;
- u64 reserved_12_15 : 4;
- u64 exbits : 7;
- u64 reserved_23_23 : 1;
- u64 exn : 3;
- u64 reserved_27_31 : 5;
- u64 totaloutputlength : 24;
- u64 reserved_56_62 : 7;
- u64 doneint : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 historylength : 16;
- u64 reserved_96_111 : 16;
- u64 adlercrc32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adlercrc32 : 32;
- u64 reserved_96_111 : 16;
- u64 historylength : 16;
-#endif
- union zip_zptr_addr_s ctx_ptr_addr;
- union zip_zptr_ctl_s ctx_ptr_ctl;
- union zip_zptr_addr_s his_ptr_addr;
- union zip_zptr_ctl_s his_ptr_ctl;
- union zip_zptr_addr_s inp_ptr_addr;
- union zip_zptr_ctl_s inp_ptr_ctl;
- union zip_zptr_addr_s out_ptr_addr;
- union zip_zptr_ctl_s out_ptr_ctl;
- union zip_zptr_addr_s res_ptr_addr;
- union zip_zptr_ctl_s res_ptr_ctl;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_817_831 : 15;
- u64 wq_ptr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 wq_ptr : 49;
- u64 reserved_817_831 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_882_895 : 14;
- u64 tt : 2;
- u64 reserved_874_879 : 6;
- u64 grp : 10;
- u64 tag : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 tag : 32;
- u64 grp : 10;
- u64 reserved_874_879 : 6;
- u64 tt : 2;
- u64 reserved_882_895 : 14;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#endif
- } s;
-};
-
-/**
- * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
- * Structure
- *
- * ZIP_NPTR structure is used to chain all the zip instruction buffers
- * together. ZIP instruction buffers are managed (allocated and released) by
- * the software.
- */
-union zip_nptr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-/**
- * union zip_zptr_s - ZIP Generic Pointer Structure.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_s {
- u64 u_reg64[2];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_zres_s - ZIP Result Structure
- *
- * The ZIP coprocessor writes the result structure after it completes the
- * invocation. The result structure is exactly 24 bytes, and each invocation of
- * the ZIP coprocessor produces exactly one result structure.
- */
-union zip_zres_s {
- u64 u_reg64[3];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 crc32 : 32;
- u64 adler32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adler32 : 32;
- u64 crc32 : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbyteswritten : 32;
- u64 totalbytesread : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 totalbytesread : 32;
- u64 totalbyteswritten : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbitsprocessed : 32;
- u64 doneint : 1;
- u64 reserved_155_158 : 4;
- u64 exn : 3;
- u64 reserved_151_151 : 1;
- u64 exbits : 7;
- u64 reserved_137_143 : 7;
- u64 ef : 1;
-
- volatile u64 compcode : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-
- volatile u64 compcode : 8;
- u64 ef : 1;
- u64 reserved_137_143 : 7;
- u64 exbits : 7;
- u64 reserved_151_151 : 1;
- u64 exn : 3;
- u64 reserved_155_158 : 4;
- u64 doneint : 1;
- u64 totalbitsprocessed : 32;
-#endif
- } s;
-};
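The compcode field above is declared volatile because the coprocessor writes it asynchronously while software polls it (see zip_poll_result() used by zip_inflate() earlier). A hedged userspace sketch of such a bounded poll loop, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define CMD_NOTDONE 0x0
#define CMD_SUCCESS 0x1

/* Spin until the (asynchronously written) completion code changes. */
static int poll_compcode(volatile uint8_t *compcode, unsigned long budget)
{
	while (*compcode == CMD_NOTDONE) {
		if (!budget--)
			return -1;	/* timed out; caller reports NOTDONE */
	}
	return 0;
}

int main(void)
{
	volatile uint8_t cc = CMD_SUCCESS;	/* pretend the engine finished */

	printf("poll: %d\n", poll_compcode(&cc, 1000));
	return 0;
}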
-
-/**
- * union zip_cmd_ctl - Structure representing the register that controls
- * clock and reset.
- */
-union zip_cmd_ctl {
- u64 u_reg64;
- struct zip_cmd_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 forceclk : 1;
- u64 reset : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reset : 1;
- u64 forceclk : 1;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-#define ZIP_CMD_CTL 0x0ull
-
-/**
- * union zip_constants - Data structure representing the register that contains
- * all of the current implementation-related parameters of the zip core in this
- * chip.
- */
-union zip_constants {
- u64 u_reg64;
- struct zip_constants_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 nexec : 8;
- u64 reserved_49_55 : 7;
- u64 syncflush_capable : 1;
- u64 depth : 16;
- u64 onfsize : 12;
- u64 ctxsize : 12;
- u64 reserved_1_7 : 7;
- u64 disabled : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 disabled : 1;
- u64 reserved_1_7 : 7;
- u64 ctxsize : 12;
- u64 onfsize : 12;
- u64 depth : 16;
- u64 syncflush_capable : 1;
- u64 reserved_49_55 : 7;
- u64 nexec : 8;
-#endif
- } s;
-};
-
-#define ZIP_CONSTANTS 0x00A0ull
-
-/**
- * union zip_corex_bist_status - Represents registers which have the BIST
- * status of memories in zip cores.
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_corex_bist_status {
- u64 u_reg64;
- struct zip_corex_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_53_63 : 11;
- u64 bstatus : 53;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 53;
- u64 reserved_53_63 : 11;
-#endif
- } s;
-};
-
-static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
-{
- if (param1 <= 1)
- return 0x0520ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
- return 0;
-}
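A usage sketch for the two per-core BIST registers (zip_reg_read() and struct zip_device stand in for the driver's MMIO read helper and device state):

/* Report BIST failures for both zip cores; any set bit in
 * bstatus marks a failing internal memory.
 */
static void zip_check_core_bist(struct zip_device *zip)
{
	int core;

	for (core = 0; core < 2; core++) {
		union zip_corex_bist_status bist;

		bist.u_reg64 = zip_reg_read(zip, ZIP_COREX_BIST_STATUS(core));
		if (bist.s.bstatus)
			pr_err("zip core %d BIST failure mask %#llx\n",
			       core, (u64)bist.s.bstatus);
	}
}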
-
-/**
- * union zip_ctl_bist_status - Represents register that has the BIST status of
- * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
- * buffer, output data buffers).
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_ctl_bist_status {
- u64 u_reg64;
- struct zip_ctl_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_9_63 : 55;
- u64 bstatus : 9;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 9;
- u64 reserved_9_63 : 55;
-#endif
- } s;
-};
-
-#define ZIP_CTL_BIST_STATUS 0x0510ull
-
-/**
- * union zip_ctl_cfg - Represents the register that controls the behavior of
- * the ZIP DMA engines.
- *
- * It is recommended to keep default values for normal operation. Changing the
- * values of the fields may be useful for diagnostics.
- */
-union zip_ctl_cfg {
- u64 u_reg64;
- struct zip_ctl_cfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_52_63 : 12;
- u64 ildf : 4;
- u64 reserved_36_47 : 12;
- u64 drtf : 4;
- u64 reserved_27_31 : 5;
- u64 stcf : 3;
- u64 reserved_19_23 : 5;
- u64 ldf : 3;
- u64 reserved_2_15 : 14;
- u64 busy : 1;
- u64 reserved_0_0 : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_0_0 : 1;
- u64 busy : 1;
- u64 reserved_2_15 : 14;
- u64 ldf : 3;
- u64 reserved_19_23 : 5;
- u64 stcf : 3;
- u64 reserved_27_31 : 5;
- u64 drtf : 4;
- u64 reserved_36_47 : 12;
- u64 ildf : 4;
- u64 reserved_52_63 : 12;
-#endif
- } s;
-};
-
-#define ZIP_CTL_CFG 0x0560ull
-
-/**
- * union zip_dbg_corex_inst - Represents the registers that reflect the status
- * of the current instruction that the ZIP core is executing or has executed.
- *
- * These registers are only for debug use.
- */
-union zip_dbg_corex_inst {
- u64 u_reg64;
- struct zip_dbg_corex_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_35_62 : 28;
- u64 qid : 3;
- u64 iid : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iid : 32;
- u64 qid : 3;
- u64 reserved_35_62 : 28;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_INST(u64 param1)
-{
- if (param1 <= 1)
- return 0x0640ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_corex_sta - Represents registers that reflect the status of
- * the zip cores.
- *
- * They are for debug use only.
- */
-union zip_dbg_corex_sta {
- u64 u_reg64;
- struct zip_dbg_corex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_37_62 : 26;
- u64 ist : 5;
- u64 nie : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nie : 32;
- u64 ist : 5;
- u64 reserved_37_62 : 26;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_STA(u64 param1)
-{
- if (param1 <= 1)
- return 0x0680ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_quex_sta - Represents registers that reflect the status of
- * the zip instruction queues.
- *
- * They are for debug use only.
- */
-union zip_dbg_quex_sta {
- u64 u_reg64;
- struct zip_dbg_quex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_56_62 : 7;
- u64 rqwc : 24;
- u64 nii : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nii : 32;
- u64 rqwc : 24;
- u64 reserved_56_62 : 7;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
-{
- if (param1 <= 7)
- return 0x1800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_ecc_ctl - Represents the register that enables ECC for each
- * individual internal memory that requires ECC.
- *
- * For debug purpose, it can also flip one or two bits in the ECC data.
- */
-union zip_ecc_ctl {
- u64 u_reg64;
- struct zip_ecc_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_19_63 : 45;
- u64 vmem_cdis : 1;
- u64 vmem_fs : 2;
- u64 reserved_15_15 : 1;
- u64 idf1_cdis : 1;
- u64 idf1_fs : 2;
- u64 reserved_11_11 : 1;
- u64 idf0_cdis : 1;
- u64 idf0_fs : 2;
- u64 reserved_7_7 : 1;
- u64 gspf_cdis : 1;
- u64 gspf_fs : 2;
- u64 reserved_3_3 : 1;
- u64 iqf_cdis : 1;
- u64 iqf_fs : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iqf_fs : 2;
- u64 iqf_cdis : 1;
- u64 reserved_3_3 : 1;
- u64 gspf_fs : 2;
- u64 gspf_cdis : 1;
- u64 reserved_7_7 : 1;
- u64 idf0_fs : 2;
- u64 idf0_cdis : 1;
- u64 reserved_11_11 : 1;
- u64 idf1_fs : 2;
- u64 idf1_cdis : 1;
- u64 reserved_15_15 : 1;
- u64 vmem_fs : 2;
- u64 vmem_cdis : 1;
- u64 reserved_19_63 : 45;
-#endif
- } s;
-};
-
-#define ZIP_ECC_CTL 0x0568ull
-
-/* NCB - zip_ecce_ena_w1c */
-union zip_ecce_ena_w1c {
- u64 u_reg64;
- struct zip_ecce_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1C 0x0598ull
-
-/* NCB - zip_ecce_ena_w1s */
-union zip_ecce_ena_w1s {
- u64 u_reg64;
- struct zip_ecce_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1S 0x0590ull
-
-/**
- * union zip_ecce_int - Represents the register that contains the status of the
- * ECC interrupt sources.
- */
-union zip_ecce_int {
- u64 u_reg64;
- struct zip_ecce_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT 0x0580ull
-
-/* NCB - zip_ecce_int_w1s */
-union zip_ecce_int_w1s {
- u64 u_reg64;
- struct zip_ecce_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT_W1S 0x0588ull
-
-/* NCB - zip_fife_ena_w1c */
-union zip_fife_ena_w1c {
- u64 u_reg64;
- struct zip_fife_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1C 0x0090ull
-
-/* NCB - zip_fife_ena_w1s */
-union zip_fife_ena_w1s {
- u64 u_reg64;
- struct zip_fife_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1S 0x0088ull
-
-/* NCB - zip_fife_int */
-union zip_fife_int {
- u64 u_reg64;
- struct zip_fife_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT 0x0078ull
-
-/* NCB - zip_fife_int_w1s */
-union zip_fife_int_w1s {
- u64 u_reg64;
- struct zip_fife_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT_W1S 0x0080ull
-
-/**
- * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
- *
- * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_pbax {
- u64 u_reg64;
- struct zip_msix_pbax_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 pend : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pend : 64;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_PBAX(u64 param1)
-{
- if (param1 == 0)
- return 0x0000838000FF0000ull;
- pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_addr {
- u64 u_reg64;
- struct zip_msix_vecx_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 47;
- u64 reserved_1_1 : 1;
- u64 secvec : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 secvec : 1;
- u64 reserved_1_1 : 1;
- u64 addr : 47;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_ctl {
- u64 u_reg64;
- struct zip_msix_vecx_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_33_63 : 31;
- u64 mask : 1;
- u64 reserved_20_31 : 12;
- u64 data : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data : 20;
- u64 reserved_20_31 : 12;
- u64 mask : 1;
- u64 reserved_33_63 : 31;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done - Represents the registers that contain the per-queue
- * instruction done count.
- */
-union zip_quex_done {
- u64 u_reg64;
- struct zip_quex_done_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE(u64 param1)
-{
- if (param1 <= 7)
- return 0x2000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ack - Represents the registers a write to which
- * decrements the per-queue instruction done count.
- */
-union zip_quex_done_ack {
- u64 u_reg64;
- struct zip_quex_done_ack_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done_ack : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ack : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
-{
- if (param1 <= 7)
- return 0x2200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
- return 0;
-}
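Together with ZIP_QUE(0..7)_DONE this forms the DONEINT handshake; sketched below with assumed zip_reg_read()/zip_reg_write() MMIO helpers:

/* Read the done count, reap that many completions, then write the
 * same count to DONE_ACK so the hardware decrements its counter.
 */
static void zip_ack_done(struct zip_device *zip, int queue)
{
	union zip_quex_done done;
	union zip_quex_done_ack ack = { .u_reg64 = 0 };

	done.u_reg64 = zip_reg_read(zip, ZIP_QUEX_DONE(queue));
	/* ... process done.s.done completed instructions here ... */
	ack.s.done_ack = done.s.done;
	zip_reg_write(zip, ZIP_QUEX_DONE_ACK(queue), ack.u_reg64);
}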
-
-/**
- * union zip_quex_done_ena_w1c - Represents the register that, when written
- * with 1, disables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1c {
- u64 u_reg64;
- struct zip_quex_done_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x2600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1s - Represents the register that, when written
- * with 1, enables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1s {
- u64 u_reg64;
- struct zip_quex_done_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x2400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_wait - Represents the register that specifies the
- * per-queue interrupt coalescing settings.
- */
-union zip_quex_done_wait {
- u64 u_reg64;
- struct zip_quex_done_wait_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_48_63 : 16;
- u64 time_wait : 16;
- u64 reserved_20_31 : 12;
- u64 num_wait : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 num_wait : 20;
- u64 reserved_20_31 : 12;
- u64 time_wait : 16;
- u64 reserved_48_63 : 16;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
-{
- if (param1 <= 7)
- return 0x2800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
- return 0;
-}
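A coalescing sketch: num_wait batches completions, time_wait bounds the latency (the time unit is chip-specific; 0 is assumed here to disable the time-based trigger):

/* Raise DONEINT only after 16 completions; no timeout trigger. */
static void zip_set_coalescing(struct zip_device *zip, int queue)
{
	union zip_quex_done_wait wait = { .u_reg64 = 0 };

	wait.s.num_wait = 16;
	wait.s.time_wait = 0;
	zip_reg_write(zip, ZIP_QUEX_DONE_WAIT(queue), wait.u_reg64);
}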
-
-/**
- * union zip_quex_doorbell - Represents doorbell registers for the ZIP
- * instruction queues.
- */
-union zip_quex_doorbell {
- u64 u_reg64;
- struct zip_quex_doorbell_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 dbell_cnt : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dbell_cnt : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
-{
- if (param1 <= 7)
- return 0x4000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
- return 0;
-}
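Submission then reduces to writing the number of newly queued instructions; a sketch:

/* After copying n instructions into the command buffer, ring the
 * doorbell so the engine starts fetching them.
 */
static void zip_ring_doorbell(struct zip_device *zip, int queue, u32 n)
{
	union zip_quex_doorbell db = { .u_reg64 = 0 };

	db.s.dbell_cnt = n;
	zip_reg_write(zip, ZIP_QUEX_DOORBELL(queue), db.u_reg64);
}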
-
-union zip_quex_err_ena_w1c {
- u64 u_reg64;
- struct zip_quex_err_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x3600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-union zip_quex_err_ena_w1s {
- u64 u_reg64;
- struct zip_quex_err_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_err_int - Represents registers that contain the per-queue
- * error interrupts.
- */
-union zip_quex_err_int {
- u64 u_reg64;
- struct zip_quex_err_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
-{
- if (param1 <= 7)
- return 0x3000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
- return 0;
-}
-
-/* NCB - zip_que#_err_int_w1s */
-union zip_quex_err_int_w1s {
- u64 u_reg64;
- struct zip_quex_err_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_gcfg - Represents the registers that reflect the status of
- * the zip instruction queues; debug use only.
- */
-union zip_quex_gcfg {
- u64 u_reg64;
- struct zip_quex_gcfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_4_63 : 60;
- u64 iqb_ldwb : 1;
- u64 cbw_sty : 1;
- u64 l2ld_cmd : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 l2ld_cmd : 2;
- u64 cbw_sty : 1;
- u64 iqb_ldwb : 1;
- u64 reserved_4_63 : 60;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_GCFG(u64 param1)
-{
- if (param1 <= 7)
- return 0x1A00ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_map - Represents the registers that control how each
- * instruction queue maps to zip cores.
- */
-union zip_quex_map {
- u64 u_reg64;
- struct zip_quex_map_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 zce : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 zce : 2;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_MAP(u64 param1)
-{
- if (param1 <= 7)
- return 0x1400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_MAP: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_addr - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed after SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_CTL.
- */
-union zip_quex_sbuf_addr {
- u64 u_reg64;
- struct zip_quex_sbuf_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 ptr : 42;
- u64 off : 7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 off : 7;
- u64 ptr : 42;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
-{
- if (param1 <= 7)
- return 0x1000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed before SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_ADDR.
- */
-union zip_quex_sbuf_ctl {
- u64 u_reg64;
- struct zip_quex_sbuf_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_45_63 : 19;
- u64 size : 13;
- u64 inst_be : 1;
- u64 reserved_24_30 : 7;
- u64 stream_id : 8;
- u64 reserved_12_15 : 4;
- u64 aura : 12;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 aura : 12;
- u64 reserved_12_15 : 4;
- u64 stream_id : 8;
- u64 reserved_24_30 : 7;
- u64 inst_be : 1;
- u64 size : 13;
- u64 reserved_45_63 : 19;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
-{
- if (param1 <= 7)
- return 0x1200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
- return 0;
-}
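A setup sketch that honors the documented ordering (SBUF_CTL before SBUF_ADDR); the size unit and helper names are assumptions:

/* Program the command buffer for one queue: size first, then the
 * DMA address. Writing the raw address into u_reg64 fills the
 * off and ptr fields together.
 */
static void zip_setup_queue_sbuf(struct zip_device *zip, int queue,
				 dma_addr_t sbuf_dma, u16 size)
{
	union zip_quex_sbuf_ctl ctl = { .u_reg64 = 0 };
	union zip_quex_sbuf_addr addr = { .u_reg64 = 0 };

	ctl.s.size = size;
	zip_reg_write(zip, ZIP_QUEX_SBUF_CTL(queue), ctl.u_reg64);

	addr.u_reg64 = sbuf_dma;
	zip_reg_write(zip, ZIP_QUEX_SBUF_ADDR(queue), addr.u_reg64);
}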
-
-/**
- * union zip_que_ena - Represents the queue enable register.
- *
- * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
- */
-union zip_que_ena {
- u64 u_reg64;
- struct zip_que_ena_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 ena : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ena : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_ENA 0x0500ull
-
-/**
- * union zip_que_pri - Represents the register that defines the priority
- * between instruction queues.
- */
-union zip_que_pri {
- u64 u_reg64;
- struct zip_que_pri_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 pri : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pri : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_PRI 0x0508ull
-
-/**
- * union zip_throttle - Represents the register that controls the maximum
- * number of in-flight X2I data fetch transactions.
- *
- * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
- * accesses; it is not recommended for normal operation, but may be useful for
- * diagnostics.
- */
-union zip_throttle {
- u64 u_reg64;
- struct zip_throttle_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_6_63 : 58;
- u64 ld_infl : 6;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ld_infl : 6;
- u64 reserved_6_63 : 58;
-#endif
- } s;
-};
-
-#define ZIP_THROTTLE 0x0010ull
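A diagnostics sketch of the suspend-and-restore pattern the comment describes (the restore value is a placeholder, not the chip's documented reset default):

/* Quiesce NCB data fetches, run diagnostics, then restore a
 * working in-flight limit.
 */
static void zip_quiesce_ncb(struct zip_device *zip)
{
	union zip_throttle thr = { .u_reg64 = 0 };

	zip_reg_write(zip, ZIP_THROTTLE, thr.u_reg64);	/* suspend */
	/* ... diagnostics ... */
	thr.s.ld_infl = 0x20;
	zip_reg_write(zip, ZIP_THROTTLE, thr.u_reg64);
}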
-
-#endif /* _CSRS_ZIP__ */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 2e87ca0e292a..19fb51558a7d 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -109,6 +109,15 @@ static void *sev_init_ex_buffer;
*/
static struct sev_data_range_list *snp_range_list;
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
+
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg);
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -1112,7 +1121,7 @@ static int __sev_snp_init_locked(int *error)
if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
- return 0;
+ return -EOPNOTSUPP;
}
/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
@@ -1176,21 +1185,34 @@ static int __sev_snp_init_locked(int *error)
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(cmd, arg, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
+ cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
+ rc, *error);
return rc;
+ }
/* Prepare for first SNP guest launch after INIT. */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
+ rc, *error);
return rc;
+ }
sev->snp_initialized = true;
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
+
sev_es_tmr_size = SNP_TMR_SIZE;
- return rc;
+ return 0;
}
static void __sev_platform_init_handle_tmr(struct sev_device *sev)
@@ -1287,16 +1309,22 @@ static int __sev_platform_init_locked(int *error)
if (error)
*error = psp_ret;
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
+ sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
return rc;
+ }
sev->state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
+ *error, rc);
return rc;
+ }
dev_dbg(sev->dev, "SEV firmware initialized\n");
@@ -1319,19 +1347,9 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (sev->state == SEV_STATE_INIT)
return 0;
- /*
- * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
- * so perform SEV-SNP initialization at probe time.
- */
rc = __sev_snp_init_locked(&args->error);
- if (rc && rc != -ENODEV) {
- /*
- * Don't abort the probe if SNP INIT failed,
- * continue to initialize the legacy SEV firmware.
- */
- dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
- rc, args->error);
- }
+ if (rc && rc != -ENODEV)
+ return rc;
/* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
if (args->probe && !psp_init_on_probe)
@@ -1367,8 +1385,11 @@ static int __sev_platform_shutdown_locked(int *error)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
+ if (ret) {
+ dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
+ *error, ret);
return ret;
+ }
sev->state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
@@ -1389,6 +1410,37 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
+static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ struct sev_platform_init_args init_args = {0};
+ int rc;
+
+ rc = _sev_platform_init_locked(&init_args);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
+static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ int error, rc;
+
+ rc = __sev_snp_init_locked(&error);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
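These two helpers give the ioctl paths a common transient-init shape; an illustrative sketch of that pattern, mirroring the pek/pdh handlers changed below (not itself part of the patch):

/* Run one firmware command, moving the platform to INIT only if
 * needed and shutting it back down afterwards in that case.
 */
static int sev_do_cmd_transient(struct sev_device *sev,
				struct sev_issue_cmd *argp, int cmd)
{
	bool shutdown_required = false;
	int rc;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = sev_move_to_init_state(argp, &shutdown_required);
		if (rc)
			return rc;
	}

	rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
	if (shutdown_required)
		__sev_firmware_shutdown(sev, false);

	return rc;
}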
+
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
@@ -1441,24 +1493,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
int rc;
if (!writable)
return -EPERM;
if (sev->state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
+ rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
}
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
+
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
+ return rc;
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
+ bool shutdown_required = false;
struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
@@ -1490,7 +1549,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
cmd:
if (sev->state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
}
@@ -1511,6 +1570,9 @@ cmd:
}
e_free_blob:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(blob);
return ret;
}
@@ -1682,9 +1744,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
/* SHUTDOWN may require DF_FLUSH */
if (*error == SEV_RET_DFFLUSH_REQUIRED) {
- ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ int dfflush_error;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
if (ret) {
- dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
+ ret, dfflush_error);
return ret;
}
/* reissue the shutdown command */
@@ -1692,7 +1757,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
error);
}
if (ret) {
- dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
+ ret, *error);
return ret;
}
@@ -1718,6 +1784,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ /* Reset TMR size back to default */
+ sev_es_tmr_size = SEV_TMR_SIZE;
+
return ret;
}
@@ -1726,6 +1798,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
struct sev_data_pek_cert_import data;
+ bool shutdown_required = false;
void *pek_blob, *oca_blob;
int ret;
@@ -1756,7 +1829,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
/* If platform is not in INIT state then transition it to INIT */
if (sev->state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
}
@@ -1764,6 +1837,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
@@ -1880,32 +1956,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
+ bool shutdown_required = false;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
- if (!writable)
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- return ret;
- }
-
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
memset(&data, 0, sizeof(data));
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
!input.pdh_cert_len ||
!input.cert_chain_address)
goto cmd;
- input_pdh_cert_address = (void __user *)input.pdh_cert_address;
- input_cert_chain_address = (void __user *)input.cert_chain_address;
-
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
@@ -1931,6 +1998,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
data.cert_chain_len = input.cert_chain_len;
cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!writable) {
+ ret = -EPERM;
+ goto e_free_cert;
+ }
+ ret = sev_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto e_free_cert;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -1957,6 +2035,9 @@ cmd:
}
e_free_cert:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
@@ -1966,12 +2047,13 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
+ int ret, error;
void *data;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
status_page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -1980,6 +2062,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
/*
* Firmware expects status page to be in firmware-owned state, otherwise
* it will report firmware error code INVALID_PAGE_STATE (0x1A).
@@ -2008,6 +2096,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
__free_pages(status_page, 0);
return ret;
}
@@ -2016,21 +2107,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_data_snp_commit buf;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized)
- return -EINVAL;
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
buf.len = sizeof(buf);
- return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_config config;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2039,17 +2142,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable
if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
return -EFAULT;
- return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_vlek_load input;
+ bool shutdown_required = false;
+ int ret, error;
void *blob;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2068,8 +2183,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
input.vlek_wrapped_address = __psp_pa(blob);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+cleanup:
kfree(blob);
return ret;
@@ -2339,6 +2464,15 @@ static void sev_firmware_shutdown(struct sev_device *sev)
mutex_unlock(&sev_cmd_mutex);
}
+void sev_platform_shutdown(void)
+{
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ sev_firmware_shutdown(psp_master->sev_data);
+}
+EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -2373,10 +2507,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block snp_panic_notifier = {
- .notifier_call = snp_shutdown_on_panic,
-};
-
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -2390,9 +2520,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_platform_init_args args = {0};
u8 api_major, api_minor, build;
- int rc;
if (!sev)
return;
@@ -2415,18 +2543,6 @@ void sev_pci_init(void)
api_major, api_minor, build,
sev->api_major, sev->api_minor, sev->build);
- /* Initialize the platform */
- args.probe = true;
- rc = sev_platform_init(&args);
- if (rc)
- dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- args.error, rc);
-
- dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
- "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
-
- atomic_notifier_chain_register(&panic_notifier_list,
- &snp_panic_notifier);
return;
err:
@@ -2443,7 +2559,4 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
-
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
}
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
index df1b05ac5a57..ac13d90a2b7c 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -97,12 +97,20 @@ void eip93_hash_handle_result(struct crypto_async_request *async, int err)
static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
- u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
- u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
- u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
- u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
+ static const u32 sha256_init[] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+ };
+ static const u32 sha224_init[] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
+ };
+ static const u32 sha1_init[] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
+ };
+ static const u32 md5_init[] = {
+ MD5_H0, MD5_H1, MD5_H2, MD5_H3
+ };
/* Init HASH constant */
switch (hash) {
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 09d9589f2d68..aa63df16f20b 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -1020,8 +1020,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc);
+ dma_addr_t dst_addr, unsigned int *dlen);
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
enum idxd_complete_type comp_type,
@@ -1087,10 +1086,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
if (ctx->compress && compression_ctx->verify_compress) {
+ u32 *compression_crc = acomp_request_ctx(ctx->req);
dma_addr_t src_addr, dst_addr;
- u32 compression_crc;
- compression_crc = idxd_desc->iax_completion->crc;
+ *compression_crc = idxd_desc->iax_completion->crc;
ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
if (ret) {
@@ -1100,8 +1099,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
- ctx->req->slen, dst_addr, &ctx->req->dlen,
- compression_crc);
+ ctx->req->slen, dst_addr, &ctx->req->dlen);
if (ret) {
dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
err = -EIO;
@@ -1130,11 +1128,11 @@ out:
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1187,8 +1185,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: compression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1282,11 +1279,11 @@ out:
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1346,10 +1343,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
goto err;
}
- if (compression_crc != idxd_desc->iax_completion->crc) {
+ if (*compression_crc != idxd_desc->iax_completion->crc) {
ret = -EINVAL;
dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
- " comp=0x%x, decomp=0x%x\n", compression_crc,
+ " comp=0x%x, decomp=0x%x\n", *compression_crc,
idxd_desc->iax_completion->crc);
print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
8, 1, idxd_desc->iax_completion, 64, 0);
@@ -1369,8 +1366,7 @@ err:
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- bool disable_async)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1412,7 +1408,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src1_size = slen;
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1425,8 +1421,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: decompression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1446,7 +1441,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_decomp_calls();
update_wq_decomp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1474,7 +1469,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
*dlen = req->dlen;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
/* Update stats */
@@ -1496,7 +1491,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
dma_addr_t src_addr, dst_addr;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
- u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
@@ -1557,7 +1551,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc);
+ &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1569,7 +1563,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
}
ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, compression_crc);
+ dst_addr, &req->dlen);
if (ret)
dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
@@ -1655,7 +1649,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, false);
+ dst_addr, &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1694,6 +1688,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.init = iaa_comp_init_fixed,
.compress = iaa_comp_acompress,
.decompress = iaa_comp_adecompress,
+ .reqsize = sizeof(u32),
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 4feeef83f7a3..795f4598400b 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -93,7 +93,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = {
static struct adf_hw_device_class adf_420xx_class = {
.name = ADF_420XX_DEVICE_NAME,
.type = DEV_420XX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index 8084aa0f7f41..cfa00daeb4fb 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -14,7 +14,7 @@
#include "adf_420xx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -186,11 +186,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_420XX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 4eb6ef99efdd..7d4c366aa8b2 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -96,7 +96,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -422,13 +421,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
- case ADF_402XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
- case ADF_401XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 5537a9991e4e..c9be5dcddb27 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -14,9 +14,9 @@
#include "adf_4xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -188,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_4XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index e78f7bfd30b8..9425af26d34c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -22,7 +22,6 @@ static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c3xxx_class = {
.name = ADF_C3XXX_DEVICE_NAME,
.type = DEV_C3XXX,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index b825b35ab4bf..bceb5dd8b148 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C3XXX_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a512ca4efd3f..f73d9a4a9ab7 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -13,7 +13,6 @@
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 32ebe09477a8..1a2f36b603fb 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -22,7 +22,6 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 8a7bdec358d6..23ccb72b6ea2 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C62X_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62X_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 4aaaaf921734..29e53b41a895 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -13,7 +13,6 @@
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index dc21551153cb..1e301a20c244 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -25,14 +25,14 @@
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_402XX_PCI_DEVICE_ID 0x4944
-#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
-#define ADF_420XX_PCI_DEVICE_ID 0x4946
-#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
+#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
+#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
+#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
+#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
+#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
+#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
+#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
+#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index ef8a9cf74f0c..841c1d7d3ffe 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -694,16 +694,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX)
handle->chip_info->icp_rst_mask = 0x100155;
else
handle->chip_info->icp_rst_mask = 0x100015;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 7678a93c6853..620300e70238 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
@@ -731,10 +732,10 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C62X_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
@@ -1205,7 +1206,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
}
#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
-#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
@@ -1223,7 +1223,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
- SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+ SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32));
SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
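BITS_PER_TYPE(u32) expands to 32, so the shift still splits the 64-bit bus address into CSR-sized halves without a hand-rolled width constant; the recurring idiom, as a sketch:

/* Split a 64-bit DMA address into the hi/lo CSR pair. */
static void set_dram_addr(struct icp_qat_fw_loader_handle *handle,
			  u64 bus_addr, u32 hi_csr, u32 lo_csr)
{
	SET_CAP_CSR(handle, hi_csr, bus_addr >> BITS_PER_TYPE(u32));
	SET_CAP_CSR(handle, lo_csr, (u32)bus_addr);
}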
@@ -1438,7 +1438,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
bus_addr = img_desc.dram_bus_addr + simg_offset;
auth_desc = img_desc.dram_base_addr_v;
- auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_PER_TYPE(u32));
auth_desc->css_hdr_low = (unsigned int)bus_addr;
virt_addr = virt_base;
@@ -1448,7 +1448,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
- auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_PER_TYPE(u32));
auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
@@ -1470,7 +1470,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
- auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_PER_TYPE(u32));
auth_desc->signature_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
@@ -1484,7 +1484,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
ICP_QAT_CSS_SIGNATURE_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
- auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+ auth_desc->img_high = (unsigned int)(bus_addr >> BITS_PER_TYPE(u32));
auth_desc->img_low = (unsigned int)bus_addr;
auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr +
@@ -1507,12 +1507,12 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
- auth_desc->img_ae_init_data_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
+ auth_desc->img_ae_init_data_high =
+ (unsigned int)(bus_addr >> BITS_PER_TYPE(u32));
auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
- auth_desc->img_ae_insts_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
+ auth_desc->img_ae_insts_high =
+ (unsigned int)(bus_addr >> BITS_PER_TYPE(u32));
auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
virt_addr += sizeof(struct icp_qat_css_hdr);
virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
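For context: BITS_PER_TYPE(type), pulled in via the new <linux/bitops.h> include, is defined as sizeof(type) * BITS_PER_BYTE, so BITS_PER_TYPE(u32) evaluates to 32 and is a drop-in replacement for the local BITS_IN_DWORD constant being removed. A minimal sketch of the high/low split idiom used throughout this file (variable names hypothetical):

	/* Split a 64-bit DMA address into two 32-bit descriptor words. */
	u64 bus_addr = img_desc.dram_bus_addr + simg_offset;
	u32 hi = (u32)(bus_addr >> BITS_PER_TYPE(u32));	/* upper 32 bits */
	u32 lo = (u32)bus_addr;				/* lower 32 bits */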
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index e48bcf1818cd..bf9e8f34f451 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -24,7 +24,6 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
static struct adf_hw_device_class dh895xcc_class = {
.name = ADF_DH895XCC_DEVICE_NAME,
.type = DEV_DH895XCC,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 07e9d7e52861..b59e0cc49e52 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_DH895XCC_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index f4ee4c2e00da..bc59c1473eef 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -13,7 +13,6 @@
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
.type = DEV_DH895XCCVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 551dd32a8db0..1ecf5f6ac04e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1086,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_aes_attr_group = {
- .attrs = omap_aes_attrs,
-};
+ATTRIBUTE_GROUPS(omap_aes);
static int omap_aes_probe(struct platform_device *pdev)
{
@@ -1215,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_aead_algs;
- }
-
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
@@ -1277,8 +1268,6 @@ static void omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
}
#ifdef CONFIG_PM_SLEEP
@@ -1304,6 +1293,7 @@ static struct platform_driver omap_aes_driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
.of_match_table = omap_aes_of_match,
+ .dev_groups = omap_aes_groups,
},
};
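ATTRIBUTE_GROUPS(omap_aes), from <linux/sysfs.h>, generates both the attribute_group and the NULL-terminated omap_aes_groups[] array that the .dev_groups field expects; the driver core then creates and removes the sysfs files automatically around probe()/remove(), which is why the explicit sysfs_create_group()/sysfs_remove_group() calls and their error handling can be dropped. The macro expands to roughly:

	static const struct attribute_group omap_aes_group = {
		.attrs = omap_aes_attrs,
	};

	static const struct attribute_group *omap_aes_groups[] = {
		&omap_aes_group,
		NULL,
	};

The omap-sham conversion below follows the identical pattern.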
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7021481bf027..56f192cb976d 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_sham_attr_group = {
- .attrs = omap_sham_attrs,
-};
+ATTRIBUTE_GROUPS(omap_sham);
static int omap_sham_probe(struct platform_device *pdev)
{
@@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_algs;
- }
-
return 0;
err_algs:
@@ -2210,8 +2201,6 @@ static void omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
}
static struct platform_driver omap_sham_driver = {
@@ -2220,6 +2209,7 @@ static struct platform_driver omap_sham_driver = {
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
+ .dev_groups = omap_sham_groups,
},
};
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 091612b066f1..fdc0b2486069 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req)
(auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
struct ahash_request *subreq = &rctx->fallback_req;
- int ret = 0;
+ int ret;
ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- crypto_ahash_init(subreq);
-
- subreq->nbytes = auth_len;
- subreq->src = req->src;
- subreq->result = req->result;
-
- ret |= crypto_ahash_update(subreq);
-
- subreq->nbytes = 0;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
- ret |= crypto_ahash_final(subreq);
+ ret = crypto_ahash_digest(subreq);
return ret;
}
@@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return ret;
if (alg_base) {
- ctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->shash = crypto_alloc_shash(alg_base, 0, 0);
if (IS_ERR(ctx->shash)) {
dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
alg_base);
@@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
/* for fallback */
ctx->fallback.ahash =
- crypto_alloc_ahash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->fallback.ahash)) {
dev_err(ctx->dev_data->dev,
"Could not load fallback driver\n");
@@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0);
return crypto_ahash_init(&rctx->fallback_req);
}
static int sa_sha_update(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
static int sa_sha_final(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
static int sa_sha_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
static int sa_sha_export(struct ahash_request *req, void *out)
{
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = &rctx->fallback_req;
- ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_export(subreq, out);
}
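The sa2ul conversion replaces direct pokes at the fallback request's fields with the ahash request helpers (which also clear the completion callback pointers), and collapses the open-coded init/update/final sequence in sa_sha_run() into a single crypto_ahash_digest() call. The resulting one-shot idiom, as a sketch:

	/* One-shot digest on a pre-set-up fallback ahash request. */
	ahash_request_set_tfm(subreq, ctx->fallback.ahash);
	ahash_request_set_callback(subreq,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   NULL, NULL);	/* synchronous, no completion */
	ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
	return crypto_ahash_digest(subreq);	/* init + update + final */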
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index c497c73baf13..f0e01ff77d92 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -130,14 +130,8 @@ struct crypto_acomp {
struct crypto_tfm base;
};
-struct crypto_acomp_stream {
- spinlock_t lock;
- void *ctx;
-};
-
#define COMP_ALG_COMMON { \
struct crypto_alg base; \
- struct crypto_acomp_stream __percpu *stream; \
}
struct comp_alg_common COMP_ALG_COMMON;
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 6e07bbc04089..03b7eca8af9a 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -68,16 +68,17 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
- struct work_struct free_work;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_template {
struct list_head list;
struct hlist_head instances;
+ struct hlist_head dead;
struct module *module;
+ struct work_struct free_work;
+
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index f8cc073bba41..58129e18cc31 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -99,4 +99,13 @@ static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
chacha_crypt(state, dst, src, bytes, 20);
}
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)
+bool chacha_is_arch_optimized(void);
+#else
+static inline bool chacha_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+
#endif /* _CRYPTO_CHACHA_H */
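chacha_is_arch_optimized() lets generic code ask at run time whether the architecture supplies an accelerated ChaCha library implementation; the static inline stub keeps callers free of #ifdefs when CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA is off. A hedged sketch of a possible caller (illustrative only, not taken from this series):

	static int __init chacha_sample_init(void)
	{
		if (!chacha_is_arch_optimized())
			pr_info("chacha: using generic library code\n");
		return 0;
	}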
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index da1ee73e9ce9..c41685874f00 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -10,56 +10,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
-static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int blocksize = crypto_skcipher_chunksize(tfm);
- u8 buf[MAX_CIPHER_BLOCKSIZE];
- struct skcipher_walk walk;
- int err;
-
- /* avoid integer division due to variable blocksize parameter */
- if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- int nbytes = walk.nbytes;
- int tail = 0;
-
- if (nbytes < walk.total) {
- tail = walk.nbytes & (blocksize - 1);
- nbytes -= tail;
- }
-
- do {
- int bsize = min(nbytes, blocksize);
-
- fn(tfm, walk.iv, buf);
-
- crypto_xor_cpy(dst, src, buf, bsize);
- crypto_inc(walk.iv, blocksize);
-
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- } while (nbytes > 0);
-
- err = skcipher_walk_done(&walk, tail);
- }
- return err;
-}
-
#endif /* _CRYPTO_CTR_H */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2aa83ee0ec98..b987904afdba 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -834,7 +834,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -851,7 +851,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -871,7 +871,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
@@ -886,7 +886,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_descsize).
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
int crypto_shash_export(struct shash_desc *desc, void *out);
@@ -900,7 +900,7 @@ int crypto_shash_export(struct shash_desc *desc, void *out);
* the input buffer. That buffer should have been generated with the
 * crypto_shash_export function.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_shash_import(struct shash_desc *desc, const void *in);
@@ -913,7 +913,7 @@ int crypto_shash_import(struct shash_desc *desc, const void *in);
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
@@ -935,7 +935,7 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
@@ -952,7 +952,7 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -969,7 +969,7 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
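The Context: annotations above tighten the documented calling context for the whole shash API from any context to softirq or process context, i.e. these functions may no longer be called from hardirq context. A minimal sketch of a compliant process-context caller, assuming "sha256" is available (needs <crypto/hash.h> and <crypto/sha2.h>):

	struct crypto_shash *tfm;
	u8 out[SHA256_DIGEST_SIZE];
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_shash_tfm_digest(tfm, data, len, out);	/* one-shot */
	crypto_free_shash(tfm);
	return err;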
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index aaf59f3236fa..fbbff9a8a2d9 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,6 +11,11 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
#define ACOMP_REQUEST_ON_STACK(name, tfm) \
char __##name##_req[sizeof(struct acomp_req) + \
@@ -37,7 +42,6 @@
*
* @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
 * @calg: Common algorithm data structure shared with scomp
*/
struct acomp_alg {
@@ -54,6 +58,55 @@ struct acomp_alg {
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ union {
+ void (*free_ctx)(void *);
+ void (*cfree_ctx)(const void *);
+ };
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -158,4 +211,27 @@ static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
return crypto_tfm_req_chain(&tfm->base);
}
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
#endif
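The new crypto_acomp_streams API hands out per-cpu scratch contexts under a bh-disabling spinlock (note the __acquires/__releases sparse annotations), consistent with the softirq-or-process-context rule documented for shash above. A sketch of the intended usage, with my_compress() standing in for a real algorithm's worker (hypothetical):

	struct crypto_acomp_stream *stream;
	int err;

	stream = crypto_acomp_lock_stream_bh(&alg->streams);
	err = my_compress(stream->ctx, src, slen, dst, &dlen);
	crypto_acomp_unlock_stream_bh(stream);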
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f25aa2ea3b48..533d6c16a491 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,10 +9,7 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -26,12 +23,10 @@ struct crypto_scomp {
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
+ * @streams: Per-cpu memory for algorithm
 * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(void);
- void (*free_ctx)(void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -40,6 +35,14 @@ struct scomp_alg {
void *ctx);
union {
+ struct {
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *ctx);
+ };
+ struct crypto_acomp_streams streams;
+ };
+
+ union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
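The "These must come first because of struct scomp_alg" comment in crypto_acomp_streams is load-bearing here: scomp_alg overlays its legacy alloc_ctx/free_ctx pair with the embedded streams structure in an anonymous union, so the two layouts must alias field-for-field. As a sketch of that assumption, an unconverted scomp driver initializing the legacy members thereby also populates the corresponding streams fields (my_alloc_ctx etc. are hypothetical driver helpers):

	static struct scomp_alg my_alg = {
		.alloc_ctx	= my_alloc_ctx,	/* aliases streams.alloc_ctx */
		.free_ctx	= my_free_ctx,	/* aliases streams.free_ctx */
		.compress	= my_compress,
		.decompress	= my_decompress,
	};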
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index f3cad182d4ef..0b3a36bdaa90 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -954,6 +954,7 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret);
void *psp_copy_user_blob(u64 uaddr, u32 len);
void *snp_alloc_firmware_page(gfp_t mask);
void snp_free_firmware_page(void *addr);
+void sev_platform_shutdown(void);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
@@ -988,6 +989,8 @@ static inline void *snp_alloc_firmware_page(gfp_t mask)
static inline void snp_free_firmware_page(void *addr) { }
+static inline void sev_platform_shutdown(void) { }
+
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_SEV_H__ */