author	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-10 08:36:42 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-10 08:36:42 -0800
commit	08df88fa142f3ba298bf0f7840fa9187e2fb5956 (patch)
tree	a24e9cf0781e353b8c2e86cdb9b110ba90bc6a6f /drivers/crypto
parent	13d83ea9d81ddcb08b46377dcc9de6e5df1248d1 (diff)
parent	0ce90934c0a6baac053029ad28566536ae50d604 (diff)
Merge tag 'v7.0-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Fix race condition in hwrng core by using RCU

  Algorithms:
   - Allow authenc(sha224,rfc3686) in fips mode
   - Add test vectors for authenc(hmac(sha384),cbc(aes))
   - Add test vectors for authenc(hmac(sha224),cbc(aes))
   - Add test vectors for authenc(hmac(md5),cbc(des3_ede))
   - Add lz4 support in hisi_zip
   - Only allow clear key use during self-test in s390/{phmac,paes}

  Drivers:
   - Set rng quality to 900 in airoha
   - Add gcm(aes) support for AMD/Xilinx Versal device
   - Allow tfms to share device in hisilicon/trng"

* tag 'v7.0-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (100 commits)
  crypto: img-hash - Use unregister_ahashes in img_{un}register_algs
  crypto: testmgr - Add test vectors for authenc(hmac(md5),cbc(des3_ede))
  crypto: cesa - Simplify return statement in mv_cesa_dequeue_req_locked
  crypto: testmgr - Add test vectors for authenc(hmac(sha224),cbc(aes))
  crypto: testmgr - Add test vectors for authenc(hmac(sha384),cbc(aes))
  hwrng: core - use RCU and work_struct to fix race condition
  crypto: starfive - Fix memory leak in starfive_aes_aead_do_one_req()
  crypto: xilinx - Fix inconsistent indentation
  crypto: rng - Use unregister_rngs in register_rngs
  crypto: atmel - Use unregister_{aeads,ahashes,skciphers}
  hwrng: optee - simplify OP-TEE context match
  crypto: ccp - Add sysfs attribute for boot integrity
  dt-bindings: crypto: atmel,at91sam9g46-sha: add microchip,lan9691-sha
  dt-bindings: crypto: atmel,at91sam9g46-aes: add microchip,lan9691-aes
  dt-bindings: crypto: qcom,inline-crypto-engine: document the Milos ICE
  crypto: caam - fix netdev memory leak in dpaa2_caam_probe
  crypto: hisilicon/qm - increase wait time for mailbox
  crypto: hisilicon/qm - obtain the mailbox configuration at one time
  crypto: hisilicon/qm - remove unnecessary code in qm_mb_write()
  crypto: hisilicon/qm - move the barrier before writing to the mailbox register
  ...
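The hwrng race fix listed above relies on the standard RCU publish/read pattern. A minimal sketch of that pattern with hypothetical names (this is not the actual hwrng code, just the general shape it relies on):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct cfg { int quality; };
	static struct cfg __rcu *cur_cfg;

	/* Reader side: an object seen under rcu_read_lock() cannot be
	 * freed until the read-side critical section ends. */
	static int read_quality(void)
	{
		struct cfg *c;
		int q = 0;

		rcu_read_lock();
		c = rcu_dereference(cur_cfg);
		if (c)
			q = c->quality;
		rcu_read_unlock();
		return q;
	}

	/* Updater side: publish the new object, wait for all pre-existing
	 * readers to finish, then free the old one. */
	static int set_quality(int q)
	{
		struct cfg *new, *old;

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->quality = q;
		old = rcu_replace_pointer(cur_cfg, new, true);
		synchronize_rcu();
		kfree(old);
		return 0;
	}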
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h  4
-rw-r--r--  drivers/crypto/atmel-aes.c  17
-rw-r--r--  drivers/crypto/atmel-sha.c  27
-rw-r--r--  drivers/crypto/atmel-tdes.c  25
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c  27
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h  2
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_main.c  3
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c  2
-rw-r--r--  drivers/crypto/ccp/hsti.c  3
-rw-r--r--  drivers/crypto/ccp/psp-dev.h  2
-rw-r--r--  drivers/crypto/ccp/sev-dev-tsm.c  2
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  59
-rw-r--r--  drivers/crypto/hisilicon/Kconfig  2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h  5
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c  416
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c  2
-rw-r--r--  drivers/crypto/hisilicon/qm.c  383
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h  7
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  159
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c  21
-rw-r--r--  drivers/crypto/hisilicon/sgl.c  2
-rw-r--r--  drivers/crypto/hisilicon/trng/trng.c  121
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h  2
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c  202
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c  4
-rw-r--r--  drivers/crypto/img-hash.c  21
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-main.c  94
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c  36
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c  3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c  10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_asym_algs.c  12
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c  8
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c  2
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_main.c  3
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c  5
-rw-r--r--  drivers/crypto/nx/nx-common-powernv.c  7
-rw-r--r--  drivers/crypto/omap-aes.c  3
-rw-r--r--  drivers/crypto/omap-sham.c  5
-rw-r--r--  drivers/crypto/starfive/jh7110-aes.c  9
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.h  4
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c  29
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c  6
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h  2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c  5
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_skcipher_algs.c  2
-rw-r--r--  drivers/crypto/xilinx/zynqmp-aes-gcm.c  1007
46 files changed, 1840 insertions, 932 deletions
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index ae66eb45fb24..3fc86225edaf 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -248,9 +248,11 @@ struct sun8i_ss_hash_tfm_ctx {
struct sun8i_ss_hash_reqctx {
struct sginfo t_src[MAX_SG];
struct sginfo t_dst[MAX_SG];
- struct ahash_request fallback_req;
u32 method;
int flow;
+
+ /* Must be last as it ends in a flexible-array member. */
+ struct ahash_request fallback_req;
};
/*
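The reordering above works because struct ahash_request itself ends in a flexible-array member (its __ctx[] context area), and such a struct may only sit at the end of an enclosing aggregate: any member placed after it would be overlaid by the trailing data. A minimal, self-contained illustration with hypothetical names (struct-in-struct flexible arrays are a GNU/Clang extension the kernel relies on):

	#include <stdlib.h>

	struct inner {
		unsigned long n;
		unsigned char data[];	/* flexible array member */
	};

	struct outer {
		int flow;
		struct inner req;	/* must be the last member */
	};

	int main(void)
	{
		/* Storage for req.data comes from over-allocating the outer
		 * struct; a member placed after req would alias this region. */
		struct outer *o = malloc(sizeof(*o) + 16);

		if (!o)
			return 1;
		o->req.n = 16;
		o->req.data[15] = 0;	/* lands inside the extra 16 bytes */
		free(o);
		return 0;
	}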
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 3a2684208dda..bc0c40f10944 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2201,12 +2201,10 @@ static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
- int i;
-
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (dd->caps.has_authenc)
- for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
- crypto_unregister_aead(&aes_authenc_algs[i]);
+ crypto_unregister_aeads(aes_authenc_algs,
+ ARRAY_SIZE(aes_authenc_algs));
#endif
if (dd->caps.has_xts)
@@ -2215,8 +2213,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_skcipher(&aes_algs[i]);
+ crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
@@ -2229,7 +2226,7 @@ static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
- int err, i, j;
+ int err, i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
atmel_aes_crypto_alg_init(&aes_algs[i].base);
@@ -2272,8 +2269,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
- for (j = 0; j < i; j++)
- crypto_unregister_aead(&aes_authenc_algs[j]);
+ crypto_unregister_aeads(aes_authenc_algs, i);
crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
@@ -2281,8 +2277,7 @@ err_aes_xts_alg:
err_aes_gcm_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_skcipher(&aes_algs[j]);
+ crypto_unregister_skciphers(aes_algs, i);
return err;
}
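The atmel conversions above replace open-coded unregister loops with the crypto API's bulk helpers (crypto_unregister_skciphers(), crypto_unregister_aeads(), crypto_unregister_ahashes()), which simply iterate over an array; on the error paths the partial count i is passed so only the entries actually registered get unwound. A sketch of the shape this enables, with a hypothetical my_algs array:

	#include <crypto/internal/skcipher.h>

	static struct skcipher_alg my_algs[4];

	static int my_register(void)
	{
		int err, i;

		for (i = 0; i < ARRAY_SIZE(my_algs); i++) {
			err = crypto_register_skcipher(&my_algs[i]);
			if (err) {
				/* Unwind only the i entries registered so far. */
				crypto_unregister_skciphers(my_algs, i);
				return err;
			}
		}
		return 0;
	}

	static void my_unregister(void)
	{
		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
	}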
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 3d7573c7bd1c..b02a71061708 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2418,27 +2418,23 @@ EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
- int i;
-
if (dd->caps.has_hmac)
- for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
- crypto_unregister_ahash(&sha_hmac_algs[i]);
+ crypto_unregister_ahashes(sha_hmac_algs,
+ ARRAY_SIZE(sha_hmac_algs));
- for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
- crypto_unregister_ahash(&sha_1_256_algs[i]);
+ crypto_unregister_ahashes(sha_1_256_algs, ARRAY_SIZE(sha_1_256_algs));
if (dd->caps.has_sha224)
crypto_unregister_ahash(&sha_224_alg);
- if (dd->caps.has_sha_384_512) {
- for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
- crypto_unregister_ahash(&sha_384_512_algs[i]);
- }
+ if (dd->caps.has_sha_384_512)
+ crypto_unregister_ahashes(sha_384_512_algs,
+ ARRAY_SIZE(sha_384_512_algs));
}
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
- int err, i, j;
+ int err, i;
for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
atmel_sha_alg_init(&sha_1_256_algs[i]);
@@ -2480,18 +2476,15 @@ static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
/*i = ARRAY_SIZE(sha_hmac_algs);*/
err_sha_hmac_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_ahash(&sha_hmac_algs[j]);
+ crypto_unregister_ahashes(sha_hmac_algs, i);
i = ARRAY_SIZE(sha_384_512_algs);
err_sha_384_512_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_ahash(&sha_384_512_algs[j]);
+ crypto_unregister_ahashes(sha_384_512_algs, i);
crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_ahash(&sha_1_256_algs[j]);
+ crypto_unregister_ahashes(sha_1_256_algs, i);
return err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 3b2a92029b16..278c0df3c92f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -897,38 +897,25 @@ static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
return IRQ_NONE;
}
-static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
- crypto_unregister_skcipher(&tdes_algs[i]);
-}
-
static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
{
- int err, i, j;
+ int err, i;
for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
atmel_tdes_skcipher_alg_init(&tdes_algs[i]);
err = crypto_register_skcipher(&tdes_algs[i]);
- if (err)
- goto err_tdes_algs;
+ if (err) {
+ crypto_unregister_skciphers(tdes_algs, i);
+ return err;
+ }
}
return 0;
-
-err_tdes_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_skcipher(&tdes_algs[j]);
-
- return err;
}
static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
-
dd->caps.has_dma = 0;
/* keep only major version number */
@@ -1061,7 +1048,7 @@ static void atmel_tdes_remove(struct platform_device *pdev)
list_del(&tdes_dd->list);
spin_unlock(&atmel_tdes.lock);
- atmel_tdes_unregister_algs(tdes_dd);
+ crypto_unregister_skciphers(tdes_algs, ARRAY_SIZE(tdes_algs));
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 107ccb2ade42..c6117c23eb25 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -4814,7 +4814,8 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
- int err;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i, err;
if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
@@ -4822,6 +4823,12 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
dev_err(dev, "dpseci_reset() failed\n");
}
+ for_each_cpu(i, priv->clean_mask) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ free_netdev(ppriv->net_dev);
+ }
+ free_cpumask_var(priv->clean_mask);
+
dpaa2_dpseci_congestion_free(priv);
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
@@ -5007,16 +5014,15 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
- cpumask_var_t clean_mask;
int err, cpu;
u8 i;
err = -ENOMEM;
- if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
- goto err_cpumask;
-
priv = dev_get_drvdata(dev);
+ if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
+ goto err_cpumask;
+
priv->dev = dev;
priv->dpsec_id = ls_dev->obj_desc.id;
@@ -5118,7 +5124,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
err = -ENOMEM;
goto err_alloc_netdev;
}
- cpumask_set_cpu(cpu, clean_mask);
+ cpumask_set_cpu(cpu, priv->clean_mask);
ppriv->net_dev->dev = *dev;
netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
@@ -5126,18 +5132,16 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
DPAA2_CAAM_NAPI_WEIGHT);
}
- err = 0;
- goto free_cpumask;
+ return 0;
err_alloc_netdev:
- free_dpaa2_pcpu_netdev(priv, clean_mask);
+ free_dpaa2_pcpu_netdev(priv, priv->clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
-free_cpumask:
- free_cpumask_var(clean_mask);
+ free_cpumask_var(priv->clean_mask);
err_cpumask:
return err;
}
@@ -5182,7 +5186,6 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
ppriv = per_cpu_ptr(priv->ppriv, i);
napi_disable(&ppriv->napi);
netif_napi_del(&ppriv->napi);
- free_netdev(ppriv->net_dev);
}
return 0;
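The caam leak fix above records, in a cpumask kept in the driver's private data, exactly which CPUs received a dummy netdev during setup, so that teardown (rather than disable) frees those and only those. The allocation/iteration pattern, sketched with hypothetical names:

	#include <linux/cpumask.h>

	struct my_priv {
		cpumask_var_t clean_mask;
	};

	static int my_setup(struct my_priv *priv)
	{
		int cpu;

		if (!zalloc_cpumask_var(&priv->clean_mask, GFP_KERNEL))
			return -ENOMEM;

		for_each_online_cpu(cpu) {
			/* ... allocate the per-CPU resource for this cpu ... */
			cpumask_set_cpu(cpu, priv->clean_mask); /* record success */
		}
		return 0;
	}

	static void my_teardown(struct my_priv *priv)
	{
		int cpu;

		/* Walk only the CPUs my_setup() actually populated. */
		for_each_cpu(cpu, priv->clean_mask) {
			/* ... free the per-CPU resource for this cpu ... */
		}
		free_cpumask_var(priv->clean_mask);
	}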
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 61d1219a202f..8e65b4b28c7b 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -42,6 +42,7 @@
* @mc_io: pointer to MC portal's I/O object
* @domain: IOMMU domain
* @ppriv: per CPU pointers to private data
+ * @clean_mask: CPU mask of CPUs that have allocated netdevs
*/
struct dpaa2_caam_priv {
int dpsec_id;
@@ -65,6 +66,7 @@ struct dpaa2_caam_priv {
struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
struct dentry *dfs_root;
+ cpumask_var_t clean_mask;
};
/**
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index c246920e6f54..bccd680c7f7e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -180,7 +180,8 @@ static void free_command_queues(struct cpt_vf *cptvf,
hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
nextchunk) {
- dma_free_coherent(&pdev->dev, chunk->size,
+ dma_free_coherent(&pdev->dev,
+ chunk->size + CPT_NEXT_CHUNK_PTR_SIZE,
chunk->head,
chunk->dma_addr);
chunk->head = NULL;
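The cavium fix above restores the usual DMA-API invariant: dma_free_coherent() must be called with exactly the size that was passed to dma_alloc_coherent(). The queue chunks were allocated with room for a trailing next-chunk pointer, so the free must include it too. Sketched with a hypothetical EXTRA:

	#include <linux/dma-mapping.h>

	#define EXTRA	8	/* hypothetical trailing-pointer size */

	static void *chunk_alloc(struct device *dev, size_t size, dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, size + EXTRA, dma, GFP_KERNEL);
	}

	static void chunk_free(struct device *dev, size_t size, void *va,
			       dma_addr_t dma)
	{
		/* Must mirror the allocation size, not just `size`. */
		dma_free_coherent(dev, size + EXTRA, va, dma);
	}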
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d78865d9d5f0..d0412e584762 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -642,7 +642,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct ccp_data dst;
struct ccp_data aad;
struct ccp_op op;
- } *wa __cleanup(kfree) = kzalloc(sizeof *wa, GFP_KERNEL);
+ } *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
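The one-liner above fixes two things: the kernel's scope-based cleanup attribute is spelled __free() (from <linux/cleanup.h>), not __cleanup(), and sizeof(*wa) is the preferred style over sizeof *wa. With __free(kfree) the allocation is released automatically when the variable leaves scope, on every return path. A minimal sketch with a hypothetical work_area:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int demo(void)
	{
		struct work_area {
			u8 buf[64];
		} *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);

		if (!wa)
			return -ENOMEM;	/* kfree(NULL) is a no-op */

		/* ... use wa; no explicit kfree() needed on any path ... */
		return 0;
	}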
diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c
index c29c6a9c0f3f..4b44729a019e 100644
--- a/drivers/crypto/ccp/hsti.c
+++ b/drivers/crypto/ccp/hsti.c
@@ -30,6 +30,8 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
security_attribute_show(fused_part)
static DEVICE_ATTR_RO(fused_part);
+security_attribute_show(boot_integrity)
+static DEVICE_ATTR_RO(boot_integrity);
security_attribute_show(debug_lock_on)
static DEVICE_ATTR_RO(debug_lock_on);
security_attribute_show(tsme_status)
@@ -47,6 +49,7 @@ static DEVICE_ATTR_RO(rom_armor_enforced);
static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
+ &dev_attr_boot_integrity.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
&dev_attr_anti_rollback_status.attr,
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 268c83f298cb..4e370e76b6ca 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -36,7 +36,7 @@ union psp_cap_register {
rsvd1 :3,
security_reporting :1,
fused_part :1,
- rsvd2 :1,
+ boot_integrity :1,
debug_lock_on :1,
rsvd3 :2,
tsme_status :1,
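The psp-dev.h change just names a previously reserved bit; the union lets the same 32-bit capability register be read either as a raw value or through named single-bit fields. A sketch of the overlay pattern (fields abbreviated and hypothetical here; bitfield layout is ABI-dependent, which the PSP driver accepts for this register):

	#include <linux/types.h>

	union cap_reg {
		u32 val;
		struct {
			u32 fused_part		:1;
			u32 boot_integrity	:1;	/* was rsvd */
			u32 debug_lock_on	:1;
			u32 rsvd		:29;
		};
	};

	static bool has_boot_integrity(u32 raw)
	{
		union cap_reg cap = { .val = raw };

		return cap.boot_integrity;
	}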
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
index 40d02adaf3f6..3cdc38e84500 100644
--- a/drivers/crypto/ccp/sev-dev-tsm.c
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -228,7 +228,7 @@ static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
if (is_pci_tsm_pf0(pdev))
return tio_pf0_probe(pdev, sev);
- return 0;
+ return NULL;
}
static void dsm_remove(struct pci_tsm *tsm)
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 956ea609d0cc..1cdadddb744e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -127,13 +127,6 @@ static size_t sev_es_tmr_size = SEV_TMR_SIZE;
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;
-/*
- * SEV_DATA_RANGE_LIST:
- * Array containing range of pages that firmware transitions to HV-fixed
- * page state.
- */
-static struct sev_data_range_list *snp_range_list;
-
static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
static int snp_shutdown_on_panic(struct notifier_block *nb,
@@ -1361,6 +1354,7 @@ static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
{
+ struct sev_data_range_list *snp_range_list __free(kfree) = NULL;
struct psp_device *psp = psp_master;
struct sev_data_snp_init_ex data;
struct sev_device *sev;
@@ -2378,11 +2372,10 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
- bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
- int ret, error;
void *data;
+ int ret;
if (!argp->data)
return -EINVAL;
@@ -2393,31 +2386,35 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
- if (!sev->snp_initialized) {
- ret = snp_move_to_init_state(argp, &shutdown_required);
- if (ret)
- goto cleanup;
- }
-
/*
- * Firmware expects status page to be in firmware-owned state, otherwise
- * it will report firmware error code INVALID_PAGE_STATE (0x1A).
+ * SNP_PLATFORM_STATUS can be executed in any SNP state. But if executed
+ * when SNP has been initialized, the status page must be firmware-owned.
*/
- if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
- ret = -EFAULT;
- goto cleanup;
+ if (sev->snp_initialized) {
+ /*
+ * Firmware expects the status page to be in Firmware state,
+ * otherwise it will report an error INVALID_PAGE_STATE.
+ */
+ if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
}
buf.address = __psp_pa(data);
ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
- /*
- * Status page will be transitioned to Reclaim state upon success, or
- * left in Firmware state in failure. Use snp_reclaim_pages() to
- * transition either case back to Hypervisor-owned state.
- */
- if (snp_reclaim_pages(__pa(data), 1, true))
- return -EFAULT;
+ if (sev->snp_initialized) {
+ /*
+ * The status page will be in Reclaim state on success, or left
+ * in Firmware state on failure. Use snp_reclaim_pages() to
+ * transition either case back to Hypervisor-owned state.
+ */
+ if (snp_reclaim_pages(__pa(data), 1, true)) {
+ snp_leak_pages(__page_to_pfn(status_page), 1);
+ return -EFAULT;
+ }
+ }
if (ret)
goto cleanup;
@@ -2427,9 +2424,6 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
- if (shutdown_required)
- __sev_snp_shutdown_locked(&error, false);
-
__free_pages(status_page, 0);
return ret;
}
@@ -2780,11 +2774,6 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
sev_init_ex_buffer = NULL;
}
- if (snp_range_list) {
- kfree(snp_range_list);
- snp_range_list = NULL;
- }
-
__sev_snp_shutdown_locked(&error, panic);
}
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4835bdebdbb3..1e6d772f4bb6 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -57,6 +57,8 @@ config CRYPTO_DEV_HISI_ZIP
depends on UACCE || UACCE=n
depends on ACPI
select CRYPTO_DEV_HISI_QM
+ select CRYPTO_DEFLATE
+ select CRYPTO_LZ4
help
Support for HiSilicon ZIP Driver
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 0f3ddbadbcf9..021dbd9a1d48 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -94,9 +94,8 @@ struct hpre_sqe {
__le64 key;
__le64 in;
__le64 out;
- __le16 tag;
- __le16 resv2;
-#define _HPRE_SQE_ALIGN_EXT 7
+ __le64 tag;
+#define _HPRE_SQE_ALIGN_EXT 6
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
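Widening tag from __le16 to __le64 (and dropping one reserved word, hence _HPRE_SQE_ALIGN_EXT going from 7 to 6 so the SQE size is unchanged) lets the driver carry the request pointer itself through the hardware tag instead of an IDR index, as the hpre_crypto.c hunks below exploit. The round trip, sketched against the struct hpre_sqe above (assuming the device echoes the tag back unmodified):

	/* Submit: stash the request pointer in the 64-bit SQE tag. */
	static void my_fill_tag(struct hpre_sqe *sqe, void *req)
	{
		sqe->tag = cpu_to_le64((uintptr_t)req);
	}

	/* Complete: recover the pointer from the echoed SQE. */
	static void *my_read_tag(const struct hpre_sqe *sqe)
	{
		return (void *)(uintptr_t)le64_to_cpu(sqe->tag);
	}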
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 21ccf879f70c..839c1f677143 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -93,6 +93,7 @@ struct hpre_dh_ctx {
char *g; /* m */
dma_addr_t dma_g;
+ struct crypto_kpp *soft_tfm;
};
struct hpre_ecdh_ctx {
@@ -103,17 +104,15 @@ struct hpre_ecdh_ctx {
/* low address: x->y */
unsigned char *g;
dma_addr_t dma_g;
+ struct crypto_kpp *soft_tfm;
};
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
- struct hpre_asym_request **req_list;
struct hpre *hpre;
- spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
- struct idr req_idr;
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
@@ -123,6 +122,7 @@ struct hpre_ctx {
unsigned int curve_id;
/* for high performance core */
u8 enable_hpcore;
+ bool fallback;
};
struct hpre_asym_request {
@@ -136,7 +136,6 @@ struct hpre_asym_request {
struct kpp_request *ecdh;
} areq;
int err;
- int req_id;
hpre_cb cb;
struct timespec64 req_time;
};
@@ -151,79 +150,13 @@ static inline unsigned int hpre_align_pd(void)
return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}
-static int hpre_alloc_req_id(struct hpre_ctx *ctx)
+static void hpre_dfx_add_req_time(struct hpre_asym_request *hpre_req)
{
- unsigned long flags;
- int id;
-
- spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
-
- return id;
-}
-
-static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->req_lock, flags);
- idr_remove(&ctx->req_idr, req_id);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
-}
-
-static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
-{
- struct hpre_ctx *ctx;
- struct hpre_dfx *dfx;
- int id;
-
- ctx = hpre_req->ctx;
- id = hpre_alloc_req_id(ctx);
- if (unlikely(id < 0))
- return -EINVAL;
-
- ctx->req_list[id] = hpre_req;
- hpre_req->req_id = id;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- dfx = ctx->hpre->debug.dfx;
if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
ktime_get_ts64(&hpre_req->req_time);
-
- return id;
-}
-
-static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
-{
- struct hpre_ctx *ctx = hpre_req->ctx;
- int id = hpre_req->req_id;
-
- if (hpre_req->req_id >= 0) {
- hpre_req->req_id = HPRE_INVLD_REQ_ID;
- ctx->req_list[id] = NULL;
- hpre_free_req_id(ctx, id);
- }
-}
-
-static struct hisi_qp *hpre_get_qp_and_start(u8 type)
-{
- struct hisi_qp *qp;
- int ret;
-
- qp = hpre_create_qp(type);
- if (!qp) {
- pr_err("Can not create hpre qp!\n");
- return ERR_PTR(-ENODEV);
- }
-
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0) {
- hisi_qm_free_qps(&qp, 1);
- pci_err(qp->qm->pdev, "Can not start qp!\n");
- return ERR_PTR(-EINVAL);
- }
-
- return qp;
}
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
@@ -340,26 +273,19 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
- struct hpre_asym_request *req;
unsigned int err, done, alg;
- int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
#define HREE_HW_ERR_MASK GENMASK(10, 0)
#define HREE_SQE_DONE_MASK GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK GENMASK(4, 0)
- id = (int)le16_to_cpu(sqe->tag);
- req = ctx->req_list[id];
- hpre_rm_req_from_ctx(req);
- *kreq = req;
+ *kreq = (void *)le64_to_cpu(sqe->tag);
err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
HREE_HW_ERR_MASK;
-
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
-
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
@@ -370,36 +296,10 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
return -EINVAL;
}
-static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
-{
- struct hpre *hpre;
-
- if (!ctx || !qp || qlen < 0)
- return -EINVAL;
-
- spin_lock_init(&ctx->req_lock);
- ctx->qp = qp;
- ctx->dev = &qp->qm->pdev->dev;
-
- hpre = container_of(ctx->qp->qm, struct hpre, qm);
- ctx->hpre = hpre;
- ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
- if (!ctx->req_list)
- return -ENOMEM;
- ctx->key_sz = 0;
- ctx->crt_g2_mode = false;
- idr_init(&ctx->req_idr);
-
- return 0;
-}
-
static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
- if (is_clear_all) {
- idr_destroy(&ctx->req_idr);
- kfree(ctx->req_list);
+ if (is_clear_all)
hisi_qm_free_qps(&ctx->qp, 1);
- }
ctx->crt_g2_mode = false;
ctx->key_sz = 0;
@@ -467,49 +367,44 @@ static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
- struct hpre_ctx *ctx = qp->qp_ctx;
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *h_req;
struct hpre_sqe *sqe = resp;
- struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- if (unlikely(!req)) {
- atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
+ h_req = (struct hpre_asym_request *)le64_to_cpu(sqe->tag);
+ if (unlikely(!h_req)) {
+ pr_err("Failed to get request, and qp_id is %u\n", qp->qp_id);
return;
}
- req->cb(ctx, resp);
-}
-
-static void hpre_stop_qp_and_put(struct hisi_qp *qp)
-{
- hisi_qm_stop_qp(qp);
- hisi_qm_free_qps(&qp, 1);
+ h_req->cb(h_req->ctx, resp);
}
static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
- int ret;
+ struct hpre *hpre;
- qp = hpre_get_qp_and_start(type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
+ qp = hpre_create_qp(type);
+ if (!qp) {
+ ctx->qp = NULL;
+ return -ENODEV;
+ }
- qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
+ ctx->qp = qp;
+ ctx->dev = &qp->qm->pdev->dev;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
+ ctx->key_sz = 0;
+ ctx->crt_g2_mode = false;
- ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
- if (ret)
- hpre_stop_qp_and_put(qp);
-
- return ret;
+ return 0;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
- int req_id;
void *tmp;
if (is_rsa) {
@@ -549,11 +444,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
+ hpre_dfx_add_req_time(h_req);
+ msg->tag = cpu_to_le64((uintptr_t)h_req);
return 0;
}
@@ -566,9 +458,7 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
- spin_lock_bh(&ctx->req_lock);
ret = hisi_qp_send(ctx->qp, msg);
- spin_unlock_bh(&ctx->req_lock);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
@@ -619,12 +509,53 @@ static int hpre_dh_compute_value(struct kpp_request *req)
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
}
+static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req)
+{
+ struct kpp_request *fb_req = kpp_request_ctx(req);
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm);
+ kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data);
+ kpp_request_set_input(fb_req, req->src, req->src_len);
+ kpp_request_set_output(fb_req, req->dst, req->dst_len);
+
+ return fb_req;
+}
+
+static int hpre_dh_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct kpp_request *fb_req;
+
+ if (ctx->fallback) {
+ fb_req = hpre_dh_prepare_fb_req(req);
+ return crypto_kpp_generate_public_key(fb_req);
+ }
+
+ return hpre_dh_compute_value(req);
+}
+
+static int hpre_dh_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct kpp_request *fb_req;
+
+ if (ctx->fallback) {
+ fb_req = hpre_dh_prepare_fb_req(req);
+ return crypto_kpp_compute_shared_secret(fb_req);
+ }
+
+ return hpre_dh_compute_value(req);
+}
+
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1 768
@@ -651,13 +582,6 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
struct device *dev = ctx->dev;
unsigned int sz;
- if (params->p_size > HPRE_DH_MAX_P_SZ)
- return -EINVAL;
-
- if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
- return -EINVAL;
-
sz = ctx->key_sz = params->p_size;
ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
&ctx->dh.dma_xa_p, GFP_KERNEL);
@@ -690,8 +614,8 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
+ if (!ctx->qp)
+ return;
if (ctx->dh.g) {
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
@@ -718,6 +642,13 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
if (crypto_dh_decode_key(buf, len, &params) < 0)
return -EINVAL;
+ if (!ctx->qp)
+ goto set_soft_secret;
+
+ if (hpre_is_dh_params_length_valid(params.p_size <<
+ HPRE_BITS_2_BYTES_SHIFT))
+ goto set_soft_secret;
+
/* Free old secret if any */
hpre_dh_clear_ctx(ctx, false);
@@ -728,27 +659,55 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
params.key_size);
+ ctx->fallback = false;
return 0;
err_clear_ctx:
hpre_dh_clear_ctx(ctx, false);
return ret;
+set_soft_secret:
+ ctx->fallback = true;
+ return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len);
}
static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->dh.soft_tfm);
+
return ctx->key_sz;
}
static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ const char *alg = kpp_alg_name(tfm);
+ unsigned int reqsize;
+ int ret;
+
+ ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->dh.soft_tfm)) {
+ pr_err("Failed to alloc dh tfm!\n");
+ return PTR_ERR(ctx->dh.soft_tfm);
+ }
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+ crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm));
- return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
+ reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(),
+ sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm));
+ kpp_set_reqsize(tfm, reqsize);
+
+ ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
+ if (ret && ret != -ENODEV) {
+ crypto_free_kpp(ctx->dh.soft_tfm);
+ return ret;
+ } else if (ret == -ENODEV) {
+ ctx->fallback = true;
+ }
+
+ return 0;
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -756,6 +715,7 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
hpre_dh_clear_ctx(ctx, true);
+ crypto_free_kpp(ctx->dh.soft_tfm);
}
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
@@ -795,9 +755,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
struct hpre_sqe *msg = &hpre_req->req;
int ret;
- /* For 512 and 1536 bits key size, use soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ /* For unsupported key size and unavailable devices, use soft tfm instead */
+ if (ctx->fallback) {
akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
ret = crypto_akcipher_encrypt(req);
akcipher_request_set_tfm(req, tfm);
@@ -828,7 +787,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
@@ -843,9 +801,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
struct hpre_sqe *msg = &hpre_req->req;
int ret;
- /* For 512 and 1536 bits key size, use soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+ /* For unsupported key size and unavailable devices, use soft tfm instead */
+ if (ctx->fallback) {
akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
ret = crypto_akcipher_decrypt(req);
akcipher_request_set_tfm(req, tfm);
@@ -883,7 +840,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
@@ -899,8 +855,10 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
ctx->key_sz = vlen;
/* if invalid key size provided, we use software tfm */
- if (!hpre_rsa_key_size_is_support(ctx->key_sz))
+ if (!hpre_rsa_key_size_is_support(ctx->key_sz)) {
+ ctx->fallback = true;
return 0;
+ }
ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
&ctx->rsa.dma_pubkey,
@@ -1035,8 +993,8 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
unsigned int half_key_sz = ctx->key_sz >> 1;
struct device *dev = ctx->dev;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
+ if (!ctx->qp)
+ return;
if (ctx->rsa.pubkey) {
dma_free_coherent(dev, ctx->key_sz << 1,
@@ -1117,6 +1075,7 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
goto free;
}
+ ctx->fallback = false;
return 0;
free:
@@ -1134,6 +1093,9 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
if (ret)
return ret;
+ if (!ctx->qp)
+ return 0;
+
return hpre_rsa_setkey(ctx, key, keylen, false);
}
@@ -1147,6 +1109,9 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
if (ret)
return ret;
+ if (!ctx->qp)
+ return 0;
+
return hpre_rsa_setkey(ctx, key, keylen, true);
}
@@ -1154,9 +1119,8 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- /* For 512 and 1536 bits key size, use soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
+ /* For unsupported key size and unavailable devices, use soft tfm instead */
+ if (ctx->fallback)
return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
return ctx->key_sz;
@@ -1177,10 +1141,14 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
hpre_align_pd());
ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
- if (ret)
+ if (ret && ret != -ENODEV) {
crypto_free_akcipher(ctx->rsa.soft_tfm);
+ return ret;
+ } else if (ret == -ENODEV) {
+ ctx->fallback = true;
+ }
- return ret;
+ return 0;
}
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
@@ -1207,9 +1175,6 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
unsigned int sz = ctx->key_sz;
unsigned int shift = sz << 1;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
-
if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
@@ -1346,7 +1311,7 @@ static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
return 0;
}
-static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+static bool hpre_key_is_zero(const char *key, unsigned short key_sz)
{
int i;
@@ -1387,6 +1352,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
int ret;
+ if (ctx->fallback)
+ return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len);
+
if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
dev_err(dev, "failed to decode ecdh key!\n");
return -EINVAL;
@@ -1488,7 +1456,6 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
{
struct hpre_asym_request *h_req;
struct hpre_sqe *msg;
- int req_id;
void *tmp;
if (req->dst_len < ctx->key_sz << 1) {
@@ -1510,11 +1477,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
+ hpre_dfx_add_req_time(h_req);
+ msg->tag = cpu_to_le64((uintptr_t)h_req);
return 0;
}
@@ -1612,28 +1576,86 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
return -EINPROGRESS;
clear_all:
- hpre_rm_req_from_ctx(hpre_req);
hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
return ret;
}
+static int hpre_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ int ret;
+
+ if (ctx->fallback) {
+ kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
+ ret = crypto_kpp_generate_public_key(req);
+ kpp_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ return hpre_ecdh_compute_value(req);
+}
+
+static int hpre_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ int ret;
+
+ if (ctx->fallback) {
+ kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
+ ret = crypto_kpp_compute_shared_secret(req);
+ kpp_request_set_tfm(req, tfm);
+ return ret;
+ }
+
+ return hpre_ecdh_compute_value(req);
+}
+
static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->ecdh.soft_tfm);
+
/* max size is the pub_key_size, include x and y */
return ctx->key_sz << 1;
}
+static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ const char *alg = kpp_alg_name(tfm);
+ int ret;
+
+ ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+ if (!ret) {
+ kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+ return 0;
+ } else if (ret && ret != -ENODEV) {
+ return ret;
+ }
+
+ ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ecdh.soft_tfm)) {
+ pr_err("Failed to alloc %s tfm!\n", alg);
+ return PTR_ERR(ctx->ecdh.soft_tfm);
+ }
+
+ crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm));
+ ctx->fallback = true;
+
+ return 0;
+}
+
static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P192;
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+ return hpre_ecdh_init_tfm(tfm);
}
static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
@@ -1643,9 +1665,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->enable_hpcore = 1;
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+ return hpre_ecdh_init_tfm(tfm);
}
static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
@@ -1654,15 +1674,18 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
ctx->curve_id = ECC_CURVE_NIST_P384;
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+ return hpre_ecdh_init_tfm(tfm);
}
static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ if (ctx->fallback) {
+ crypto_free_kpp(ctx->ecdh.soft_tfm);
+ return;
+ }
+
hpre_ecc_clear_ctx(ctx, true);
}
@@ -1680,13 +1703,14 @@ static struct akcipher_alg rsa = {
.cra_name = "rsa",
.cra_driver_name = "hpre-rsa",
.cra_module = THIS_MODULE,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
static struct kpp_alg dh = {
.set_secret = hpre_dh_set_secret,
- .generate_public_key = hpre_dh_compute_value,
- .compute_shared_secret = hpre_dh_compute_value,
+ .generate_public_key = hpre_dh_generate_public_key,
+ .compute_shared_secret = hpre_dh_compute_shared_secret,
.max_size = hpre_dh_max_size,
.init = hpre_dh_init_tfm,
.exit = hpre_dh_exit_tfm,
@@ -1696,14 +1720,15 @@ static struct kpp_alg dh = {
.cra_name = "dh",
.cra_driver_name = "hpre-dh",
.cra_module = THIS_MODULE,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
};
static struct kpp_alg ecdh_curves[] = {
{
.set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
+ .generate_public_key = hpre_ecdh_generate_public_key,
+ .compute_shared_secret = hpre_ecdh_compute_shared_secret,
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p192_init_tfm,
.exit = hpre_ecdh_exit_tfm,
@@ -1713,11 +1738,12 @@ static struct kpp_alg ecdh_curves[] = {
.cra_name = "ecdh-nist-p192",
.cra_driver_name = "hpre-ecdh-nist-p192",
.cra_module = THIS_MODULE,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
}, {
.set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
+ .generate_public_key = hpre_ecdh_generate_public_key,
+ .compute_shared_secret = hpre_ecdh_compute_shared_secret,
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p256_init_tfm,
.exit = hpre_ecdh_exit_tfm,
@@ -1727,11 +1753,12 @@ static struct kpp_alg ecdh_curves[] = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "hpre-ecdh-nist-p256",
.cra_module = THIS_MODULE,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
}, {
.set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
+ .generate_public_key = hpre_ecdh_generate_public_key,
+ .compute_shared_secret = hpre_ecdh_compute_shared_secret,
.max_size = hpre_ecdh_max_size,
.init = hpre_ecdh_nist_p384_init_tfm,
.exit = hpre_ecdh_exit_tfm,
@@ -1741,6 +1768,7 @@ static struct kpp_alg ecdh_curves[] = {
.cra_name = "ecdh-nist-p384",
.cra_driver_name = "hpre-ecdh-nist-p384",
.cra_module = THIS_MODULE,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
},
}
};
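A note on the CRYPTO_ALG_NEED_FALLBACK plumbing added throughout this file: the flag is set in .cra_flags so the driver advertises that it may defer to software, and it is passed as the mask (with a zero type) when allocating the fallback, so the allocator only returns implementations whose NEED_FALLBACK bit is clear — i.e. pure software ones, preventing the driver from selecting itself. Sketched:

	/* type = 0, mask = CRYPTO_ALG_NEED_FALLBACK: match only algorithms
	 * that do not themselves require a fallback. */
	soft_tfm = crypto_alloc_kpp("dh", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(soft_tfm))
		return PTR_ERR(soft_tfm);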
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index b94fecd765ee..884d5d0afaf4 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -465,7 +465,7 @@ struct hisi_qp *hpre_create_qp(u8 type)
* type: 0 - RSA/DH. algorithm supported in V2,
* 1 - ECC algorithm in V3.
*/
- ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, &type, node, &qp);
if (!ret)
return qp;
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d47bf06a90f7..571d0d250242 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -31,6 +31,9 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_STATUS_MASK GENMASK(12, 9)
+#define QM_MB_BUSY_MASK BIT(13)
+#define QM_MB_MAX_WAIT_TIMEOUT USEC_PER_SEC
+#define QM_MB_MAX_STOP_TIMEOUT (5 * USEC_PER_SEC)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -188,8 +191,8 @@
#define QM_IFC_INT_DISABLE BIT(0)
#define QM_IFC_INT_STATUS_MASK BIT(0)
#define QM_IFC_INT_SET_MASK BIT(0)
-#define QM_WAIT_DST_ACK 10
-#define QM_MAX_PF_WAIT_COUNT 10
+#define QM_WAIT_DST_ACK 1000
+#define QM_MAX_PF_WAIT_COUNT 20
#define QM_MAX_VF_WAIT_COUNT 40
#define QM_VF_RESET_WAIT_US 20000
#define QM_VF_RESET_WAIT_CNT 3000
@@ -582,36 +585,44 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
mailbox->rsvd = 0;
}
-/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
-int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
+/*
+ * The mailbox is 128 bits and requires a single read/write operation.
+ * Since there is no general 128-bit IO memory access API in the current
+ * ARM64 architecture, this needs to be implemented in the driver.
+ */
+static struct qm_mailbox qm_mb_read(struct hisi_qm *qm)
{
- u32 val;
+ struct qm_mailbox mailbox = {0};
+
+#if IS_ENABLED(CONFIG_ARM64)
+ const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
+ unsigned long tmp0, tmp1;
+
+ asm volatile("ldp %0, %1, %3\n"
+ "stp %0, %1, %2\n"
+ : "=&r" (tmp0),
+ "=&r" (tmp1),
+ "+Q" (mailbox)
+ : "Q" (*((char __iomem *)fun_base))
+ : "memory");
+#endif
- return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
- val, !((val >> QM_MB_BUSY_SHIFT) &
- 0x1), POLL_PERIOD, POLL_TIMEOUT);
+ return mailbox;
}
-EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
- void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
-
-#if IS_ENABLED(CONFIG_ARM64)
- unsigned long tmp0 = 0, tmp1 = 0;
-#endif
-
- if (!IS_ENABLED(CONFIG_ARM64)) {
- memcpy_toio(fun_base, src, 16);
- dma_wmb();
- return;
- }
-
#if IS_ENABLED(CONFIG_ARM64)
+ void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
+ unsigned long tmp0, tmp1;
+ /*
+ * The dmb oshst instruction ensures that the data in the
+ * mailbox is written before it is sent to the hardware.
+ */
asm volatile("ldp %0, %1, %3\n"
- "stp %0, %1, %2\n"
"dmb oshst\n"
+ "stp %0, %1, %2\n"
: "=&r" (tmp0),
"=&r" (tmp1),
"+Q" (*((char __iomem *)fun_base))
@@ -620,35 +631,61 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
#endif
}
-static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
+ struct qm_mailbox mailbox = {0};
int ret;
- u32 val;
- if (unlikely(hisi_qm_wait_mb_ready(qm))) {
+ ret = read_poll_timeout(qm_mb_read, mailbox,
+ !(le16_to_cpu(mailbox.w0) & QM_MB_BUSY_MASK),
+ POLL_PERIOD, POLL_TIMEOUT,
+ true, qm);
+ if (ret)
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
- ret = -EBUSY;
- goto mb_busy;
- }
- qm_mb_write(qm, mailbox);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
+
+static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_timeout)
+{
+ struct device *dev = &qm->pdev->dev;
+ int ret;
- if (unlikely(hisi_qm_wait_mb_ready(qm))) {
- dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
- ret = -ETIMEDOUT;
- goto mb_busy;
+ ret = read_poll_timeout(qm_mb_read, *mailbox,
+ !(le16_to_cpu(mailbox->w0) & QM_MB_BUSY_MASK),
+ POLL_PERIOD, wait_timeout,
+ true, qm);
+ if (ret) {
+ dev_err(dev, "QM mailbox operation timeout!\n");
+ return ret;
}
- val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
- if (val & QM_MB_STATUS_MASK) {
- dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
- ret = -EIO;
- goto mb_busy;
+ if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) {
+ dev_err(dev, "QM mailbox operation failed!\n");
+ return -EIO;
}
return 0;
+}
+
+static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox, u32 wait_timeout)
+{
+ int ret;
+
+ ret = hisi_qm_wait_mb_ready(qm);
+ if (ret)
+ goto mb_err_cnt_increase;
+
+ qm_mb_write(qm, mailbox);
+
+ ret = qm_wait_mb_finish(qm, mailbox, wait_timeout);
+ if (ret)
+ goto mb_err_cnt_increase;
+
+ return 0;
-mb_busy:
+mb_err_cnt_increase:
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
return ret;
}
@@ -657,18 +694,49 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
bool op)
{
struct qm_mailbox mailbox;
+ u32 wait_timeout;
int ret;
+ if (cmd == QM_MB_CMD_STOP_QP || cmd == QM_MB_CMD_FLUSH_QM)
+ wait_timeout = QM_MB_MAX_STOP_TIMEOUT;
+ else
+ wait_timeout = QM_MB_MAX_WAIT_TIMEOUT;
+
+ /* No need to judge if master OOO is blocked. */
+ if (qm_check_dev_error(qm)) {
+ dev_err(&qm->pdev->dev, "QM mailbox operation failed since qm is stop!\n");
+ return -EIO;
+ }
+
qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm_mb_nolock(qm, &mailbox, wait_timeout);
mutex_unlock(&qm->mailbox_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
+int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
+{
+ struct qm_mailbox mailbox;
+ int ret;
+
+ qm_mb_pre_init(&mailbox, cmd, 0, queue, 1);
+ mutex_lock(&qm->mailbox_lock);
+ ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
+ mutex_unlock(&qm->mailbox_lock);
+ if (ret)
+ return ret;
+
+ *base = le32_to_cpu(mailbox.base_l) |
+ ((u64)le32_to_cpu(mailbox.base_h) << 32);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_mb_read);
+
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
@@ -715,7 +783,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
memcpy(tmp_xqc, xqc, size);
qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
if (!ret && op)
memcpy(xqc, tmp_xqc, size);
@@ -1385,12 +1453,10 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
u64 sqc_vft;
int ret;
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+ ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);
if (ret)
return ret;
- sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
- ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
*number = (QM_SQC_VFT_NUM_MASK_V2 &
(sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
@@ -1530,25 +1596,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
-static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
-{
- struct qm_mailbox mailbox;
- int ret;
-
- qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
- if (ret)
- goto err_unlock;
-
- *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
- ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
-
-err_unlock:
- mutex_unlock(&qm->mailbox_lock);
- return ret;
-}
-
static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
u32 val;
@@ -1864,7 +1911,7 @@ static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data
qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
mutex_lock(&qm->mailbox_lock);
- return qm_mb_nolock(qm, &mailbox);
+ return qm_mb_nolock(qm, &mailbox, QM_MB_MAX_WAIT_TIMEOUT);
}
static void qm_set_ifc_end_v3(struct hisi_qm *qm)
@@ -1877,7 +1924,7 @@ static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u3
u64 msg;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = hisi_qm_mb_read(qm, &msg, QM_MB_CMD_DST, fun_num);
if (ret)
return ret;
@@ -2002,7 +2049,38 @@ static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
*addr = 0;
}
-static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
+static struct hisi_qp *find_shareable_qp(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel)
+{
+ struct device *dev = &qm->pdev->dev;
+ struct hisi_qp *share_qp = NULL;
+ struct hisi_qp *qp;
+ u32 ref_count = ~0;
+ int i;
+
+ if (!is_in_kernel)
+ goto queues_busy;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = &qm->qp_array[i];
+ if (qp->is_in_kernel && qp->alg_type == alg_type && qp->ref_count < ref_count) {
+ ref_count = qp->ref_count;
+ share_qp = qp;
+ }
+ }
+
+ if (share_qp) {
+ share_qp->ref_count++;
+ return share_qp;
+ }
+
+queues_busy:
+ dev_info_ratelimited(dev, "All %u queues of QM are busy and no shareable queue\n",
+ qm->qp_num);
+ atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
+ return ERR_PTR(-EBUSY);
+}
+
+static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
@@ -2013,12 +2091,9 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
return ERR_PTR(-EPERM);
}
- if (qm->qp_in_used == qm->qp_num) {
- dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
- qm->qp_num);
- atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
- return ERR_PTR(-EBUSY);
- }
+ /* Try to find a shareable queue when all queues are busy */
+ if (qm->qp_in_used == qm->qp_num)
+ return find_shareable_qp(qm, alg_type, is_in_kernel);
qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
if (qp_id < 0) {
@@ -2034,10 +2109,10 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->event_cb = NULL;
qp->req_cb = NULL;
- qp->qp_id = qp_id;
qp->alg_type = alg_type;
- qp->is_in_kernel = true;
+ qp->is_in_kernel = is_in_kernel;
qm->qp_in_used++;
+ qp->ref_count = 1;
return qp;
}
@@ -2059,7 +2134,7 @@ static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
return ERR_PTR(ret);
down_write(&qm->qps_lock);
- qp = qm_create_qp_nolock(qm, alg_type);
+ qp = qm_create_qp_nolock(qm, alg_type, false);
up_write(&qm->qps_lock);
if (IS_ERR(qp))
@@ -2219,6 +2294,7 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
for (i = 0; i < qp_used; i++) {
pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
+ qm_cq_head_update(qp);
atomic_dec(&qp->qp_status.used);
}
}
@@ -2368,25 +2444,33 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
- u16 sq_tail = qp_status->sq_tail;
- u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
- void *sqe = qm_get_avail_sqe(qp);
+ u16 sq_tail, sq_tail_next;
+ void *sqe;
+ spin_lock_bh(&qp->qp_lock);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
atomic_read(&qp->qm->status.flags) == QM_STOP ||
qp->is_resetting)) {
+ spin_unlock_bh(&qp->qp_lock);
dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
- if (!sqe)
+ sqe = qm_get_avail_sqe(qp);
+ if (!sqe) {
+ spin_unlock_bh(&qp->qp_lock);
return -EBUSY;
+ }
+ sq_tail = qp_status->sq_tail;
+ sq_tail_next = (sq_tail + 1) % qp->sq_depth;
memcpy(sqe, msg, qp->qm->sqe_size);
+ qp->msg[sq_tail] = msg;
qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
atomic_inc(&qp->qp_status.used);
qp_status->sq_tail = sq_tail_next;
+ spin_unlock_bh(&qp->qp_lock);
return 0;
}
@@ -2449,7 +2533,6 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
qp->uacce_q = q;
qp->event_cb = qm_qp_event_notifier;
qp->pasid = arg;
- qp->is_in_kernel = false;
return 0;
}
@@ -2919,12 +3002,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_dma *qdma;
+ struct hisi_qp *qp;
int i;
for (i = num - 1; i >= 0; i--) {
- qdma = &qm->qp_array[i].qdma;
- dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+ qp = &qm->qp_array[i];
+ dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, qp->qdma.dma);
+ kfree(qp->msg);
kfree(qm->poll_data[i].qp_finish_id);
}
@@ -2946,10 +3030,14 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
return -ENOMEM;
qp = &qm->qp_array[id];
+ qp->msg = kmalloc_array(sq_depth, sizeof(void *), GFP_KERNEL);
+ if (!qp->msg)
+ goto err_free_qp_finish_id;
+
qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
GFP_KERNEL);
if (!qp->qdma.va)
- goto err_free_qp_finish_id;
+ goto err_free_qp_msg;
qp->sqe = qp->qdma.va;
qp->sqe_dma = qp->qdma.dma;
@@ -2961,8 +3049,14 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
qp->qm = qm;
qp->qp_id = id;
+ spin_lock_init(&qp->qp_lock);
+ spin_lock_init(&qp->backlog.lock);
+ INIT_LIST_HEAD(&qp->backlog.list);
+
return 0;
+err_free_qp_msg:
+ kfree(qp->msg);
err_free_qp_finish_id:
kfree(qm->poll_data[id].qp_finish_id);
return ret;
@@ -3533,6 +3627,17 @@ void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
+static void qm_release_qp_nolock(struct hisi_qp *qp)
+{
+ struct hisi_qm *qm = qp->qm;
+
+ if (--qp->ref_count)
+ return;
+
+ qm->qp_in_used--;
+ idr_remove(&qm->qp_idr, qp->qp_id);
+}
+
/**
* hisi_qm_free_qps() - free multiple queue pairs.
* @qps: The queue pairs need to be freed.
@@ -3545,11 +3650,34 @@ void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
if (!qps || qp_num <= 0)
return;
- for (i = qp_num - 1; i >= 0; i--)
- hisi_qm_release_qp(qps[i]);
+ down_write(&qps[0]->qm->qps_lock);
+
+ for (i = qp_num - 1; i >= 0; i--) {
+ if (qps[i]->ref_count == 1)
+ qm_stop_qp_nolock(qps[i]);
+
+ qm_release_qp_nolock(qps[i]);
+ }
+
+ up_write(&qps[0]->qm->qps_lock);
+ qm_pm_put_sync(qps[0]->qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
+static void qm_insert_sorted(struct list_head *head, struct hisi_qm_resource *res)
+{
+ struct hisi_qm_resource *tmp;
+ struct list_head *n = head;
+
+ list_for_each_entry(tmp, head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
+}
+
static void free_list(struct list_head *head)
{
struct hisi_qm_resource *res, *tmp;
@@ -3560,14 +3688,57 @@ static void free_list(struct list_head *head)
}
}
+static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp **qps, u8 *alg_type)
+{
+ int i, ret;
+
+ ret = qm_pm_get_sync(qm);
+ if (ret)
+ return ret;
+
+ down_write(&qm->qps_lock);
+ for (i = 0; i < qp_num; i++) {
+ qps[i] = qm_create_qp_nolock(qm, alg_type[i], true);
+ if (IS_ERR(qps[i])) {
+ ret = -ENODEV;
+ goto stop_and_free;
+ }
+
+ if (qps[i]->ref_count != 1)
+ continue;
+
+ ret = qm_start_qp_nolock(qps[i], 0);
+ if (ret) {
+ qm_release_qp_nolock(qps[i]);
+ goto stop_and_free;
+ }
+ }
+ up_write(&qm->qps_lock);
+
+ return 0;
+
+stop_and_free:
+ for (i--; i >= 0; i--) {
+ if (qps[i]->ref_count == 1)
+ qm_stop_qp_nolock(qps[i]);
+
+ qm_release_qp_nolock(qps[i]);
+ }
+ up_write(&qm->qps_lock);
+ qm_pm_put_sync(qm);
+
+ return ret;
+}
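
qm_get_and_start_qp() uses the usual acquire-N-or-unwind shape: on the first failure it walks back over everything already started, releasing in reverse order, before dropping the lock and the PM reference. The control flow in isolation (a sketch, all names invented):

#include <stdio.h>

#define N 4

static int start(int i)  { return i == 2 ? -1 : 0; } /* simulate failure on #2 */
static void stop(int i)  { printf("unwind %d\n", i); }

static int start_all(void)
{
	int i, ret;

	for (i = 0; i < N; i++) {
		ret = start(i);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Release in reverse order; the failed slot itself was never
	 * acquired, so start from i - 1. */
	for (i--; i >= 0; i--)
		stop(i);
	return ret;
}

int main(void)
{
	printf("start_all: %d\n", start_all());
	return 0;
}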
+
static int hisi_qm_sort_devices(int node, struct list_head *head,
struct hisi_qm_list *qm_list)
{
- struct hisi_qm_resource *res, *tmp;
+ struct hisi_qm_resource *res;
struct hisi_qm *qm;
- struct list_head *n;
struct device *dev;
int dev_node;
+ LIST_HEAD(non_full_list);
+ LIST_HEAD(full_list);
list_for_each_entry(qm, &qm_list->list, list) {
dev = &qm->pdev->dev;
@@ -3582,16 +3753,16 @@ static int hisi_qm_sort_devices(int node, struct list_head *head,
res->qm = qm;
res->distance = node_distance(dev_node, node);
- n = head;
- list_for_each_entry(tmp, head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
+
+ if (qm->qp_in_used == qm->qp_num)
+ qm_insert_sorted(&full_list, res);
+ else
+ qm_insert_sorted(&non_full_list, res);
}
+ list_splice_tail(&non_full_list, head);
+ list_splice_tail(&full_list, head);
+
return 0;
}
@@ -3608,12 +3779,11 @@ static int hisi_qm_sort_devices(int node, struct list_head *head,
* not meet the requirements will return error.
*/
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
- u8 alg_type, int node, struct hisi_qp **qps)
+ u8 *alg_type, int node, struct hisi_qp **qps)
{
struct hisi_qm_resource *tmp;
int ret = -ENODEV;
LIST_HEAD(head);
- int i;
if (!qps || !qm_list || qp_num <= 0)
return -EINVAL;
@@ -3625,24 +3795,15 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
}
list_for_each_entry(tmp, &head, list) {
- for (i = 0; i < qp_num; i++) {
- qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
- if (IS_ERR(qps[i])) {
- hisi_qm_free_qps(qps, i);
- break;
- }
- }
-
- if (i == qp_num) {
- ret = 0;
+ ret = qm_get_and_start_qp(tmp->qm, qp_num, qps, alg_type);
+ if (!ret)
break;
- }
}
mutex_unlock(&qm_list->lock);
if (ret)
- pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
- node, alg_type, qp_num);
+ pr_info("Failed to create qps, node[%d], qp[%d]!\n",
+ node, qp_num);
err:
free_list(&head);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 81d0beda93b2..0710977861f3 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -82,11 +82,6 @@ struct sec_aead_req {
__u8 out_mac_buf[SEC_MAX_MAC_LEN];
};
-struct sec_instance_backlog {
- struct list_head list;
- spinlock_t lock;
-};
-
/* SEC request of Crypto */
struct sec_req {
union {
@@ -112,7 +107,6 @@ struct sec_req {
bool use_pbuf;
struct list_head list;
- struct sec_instance_backlog *backlog;
struct sec_request_buf buf;
};
@@ -172,7 +166,6 @@ struct sec_qp_ctx {
spinlock_t id_lock;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
- struct sec_instance_backlog backlog;
u16 send_head;
};
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 31590d01139a..c462b58d3034 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -54,7 +54,6 @@
#define SEC_AUTH_CIPHER_V3 0x40
#define SEC_FLAG_OFFSET 7
#define SEC_FLAG_MASK 0x0780
-#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
@@ -148,7 +147,7 @@ static void sec_free_req_id(struct sec_req *req)
spin_unlock_bh(&qp_ctx->id_lock);
}
-static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
+static void pre_parse_finished_bd(struct bd_status *status, void *resp)
{
struct sec_sqe *bd = resp;
@@ -158,11 +157,9 @@ static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
status->tag = le16_to_cpu(bd->type2.tag);
status->err_type = bd->type2.error_type;
-
- return bd->type_cipher_auth & SEC_TYPE_MASK;
}
-static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
+static void pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
struct sec_sqe3 *bd3 = resp;
@@ -172,8 +169,6 @@ static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
status->tag = le64_to_cpu(bd3->tag);
status->err_type = bd3->error_type;
-
- return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}
static int sec_cb_status_check(struct sec_req *req,
@@ -244,7 +239,7 @@ static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp
struct sec_req *req, *tmp;
int ret;
- list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ list_for_each_entry_safe(req, tmp, &qp_ctx->qp->backlog.list, list) {
list_del(&req->list);
ctx->req_op->buf_unmap(ctx, req);
if (req->req_id >= 0)
@@ -265,11 +260,12 @@ static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp
static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
+ struct hisi_qp *qp = qp_ctx->qp;
struct sec_req *req, *tmp;
int ret;
- spin_lock_bh(&qp_ctx->backlog.lock);
- list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ spin_lock_bh(&qp->backlog.lock);
+ list_for_each_entry_safe(req, tmp, &qp->backlog.list, list) {
ret = qp_send_message(req);
switch (ret) {
case -EINPROGRESS:
@@ -287,42 +283,46 @@ static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
}
unlock:
- spin_unlock_bh(&qp_ctx->backlog.lock);
+ spin_unlock_bh(&qp->backlog.lock);
}
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
- struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
- struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
- u8 type_supported = qp_ctx->ctx->type_supported;
+ const struct sec_sqe *sqe = qp->msg[qp->qp_status.cq_head];
+ struct sec_req *req = container_of(sqe, struct sec_req, sec_sqe);
+ struct sec_ctx *ctx = req->ctx;
+ struct sec_dfx *dfx = &ctx->sec->debug.dfx;
struct bd_status status;
- struct sec_ctx *ctx;
- struct sec_req *req;
int err;
- u8 type;
- if (type_supported == SEC_BD_TYPE2) {
- type = pre_parse_finished_bd(&status, resp);
- req = qp_ctx->req_list[status.tag];
- } else {
- type = pre_parse_finished_bd3(&status, resp);
- req = (void *)(uintptr_t)status.tag;
- }
+ pre_parse_finished_bd(&status, resp);
- if (unlikely(type != type_supported)) {
- atomic64_inc(&dfx->err_bd_cnt);
- pr_err("err bd type [%u]\n", type);
- return;
- }
+ req->err_type = status.err_type;
+ err = sec_cb_status_check(req, &status);
+ if (err)
+ atomic64_inc(&dfx->done_flag_cnt);
- if (unlikely(!req)) {
- atomic64_inc(&dfx->invalid_req_cnt);
- atomic_inc(&qp->qp_status.used);
- return;
- }
+ atomic64_inc(&dfx->recv_cnt);
+ ctx->req_op->buf_unmap(ctx, req);
+ ctx->req_op->callback(ctx, req, err);
+}
+
+static void sec_req_cb3(struct hisi_qp *qp, void *resp)
+{
+ struct bd_status status;
+ struct sec_ctx *ctx;
+ struct sec_dfx *dfx;
+ struct sec_req *req;
+ int err;
+
+ pre_parse_finished_bd3(&status, resp);
+
+ req = (void *)(uintptr_t)status.tag;
req->err_type = status.err_type;
ctx = req->ctx;
+ dfx = &ctx->sec->debug.dfx;
+
err = sec_cb_status_check(req, &status);
if (err)
atomic64_inc(&dfx->done_flag_cnt);
@@ -330,7 +330,6 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
atomic64_inc(&dfx->recv_cnt);
ctx->req_op->buf_unmap(ctx, req);
-
ctx->req_op->callback(ctx, req, err);
}
@@ -348,8 +347,10 @@ static int sec_alg_send_message_retry(struct sec_req *req)
static int sec_alg_try_enqueue(struct sec_req *req)
{
+ struct hisi_qp *qp = req->qp_ctx->qp;
+
/* Check if any request is already backlogged */
- if (!list_empty(&req->backlog->list))
+ if (!list_empty(&qp->backlog.list))
return -EBUSY;
/* Try to enqueue to HW ring */
@@ -359,17 +360,18 @@ static int sec_alg_try_enqueue(struct sec_req *req)
static int sec_alg_send_message_maybacklog(struct sec_req *req)
{
+ struct hisi_qp *qp = req->qp_ctx->qp;
int ret;
ret = sec_alg_try_enqueue(req);
if (ret != -EBUSY)
return ret;
- spin_lock_bh(&req->backlog->lock);
+ spin_lock_bh(&qp->backlog.lock);
ret = sec_alg_try_enqueue(req);
if (ret == -EBUSY)
- list_add_tail(&req->list, &req->backlog->list);
- spin_unlock_bh(&req->backlog->lock);
+ list_add_tail(&req->list, &qp->backlog.list);
+ spin_unlock_bh(&qp->backlog.lock);
return ret;
}
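
sec_alg_send_message_maybacklog() is an optimistic-then-locked retry: try the hardware ring without the backlog lock, and only on -EBUSY take the lock and try once more before parking the request, so nothing is backlogged while the ring still has room and ordering against already-parked requests is preserved. A userspace sketch of that control flow (mutex for the spinlock, counters for the list; names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;
static int backlog_len;		/* stand-in for the backlog list */
static int ring_free = 1;	/* stand-in for a free hardware slot */

static int try_enqueue(void)
{
	/* Once anything is backlogged, new requests must queue behind
	 * it rather than jump the line. */
	if (backlog_len || !ring_free)
		return -2;		/* -EBUSY */
	ring_free = 0;
	return -1;			/* -EINPROGRESS: handed to hardware */
}

static int send_maybacklog(void)
{
	int ret = try_enqueue();	/* optimistic, lock-free */

	if (ret != -2)
		return ret;

	pthread_mutex_lock(&backlog_lock);
	ret = try_enqueue();		/* re-check under the lock */
	if (ret == -2)
		backlog_len++;		/* genuinely full: park it */
	pthread_mutex_unlock(&backlog_lock);
	return ret;
}

int main(void)
{
	printf("%d %d\n", send_maybacklog(), send_maybacklog());
	return 0;			/* prints -1 -2 */
}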
@@ -624,32 +626,25 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
qp = ctx->qps[qp_ctx_id];
- qp->req_type = 0;
- qp->qp_ctx = qp_ctx;
qp_ctx->qp = qp;
qp_ctx->ctx = ctx;
- qp->req_cb = sec_req_cb;
+ if (ctx->type_supported == SEC_BD_TYPE3)
+ qp->req_cb = sec_req_cb3;
+ else
+ qp->req_cb = sec_req_cb;
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
- spin_lock_init(&qp_ctx->backlog.lock);
spin_lock_init(&qp_ctx->id_lock);
- INIT_LIST_HEAD(&qp_ctx->backlog.list);
qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
goto err_destroy_idr;
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_resource_free;
-
return 0;
-err_resource_free:
- sec_free_qp_ctx_resource(ctx, qp_ctx);
err_destroy_idr:
idr_destroy(&qp_ctx->req_idr);
return ret;
@@ -658,7 +653,6 @@ err_destroy_idr:
static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- hisi_qm_stop_qp(qp_ctx->qp);
sec_free_qp_ctx_resource(ctx, qp_ctx);
idr_destroy(&qp_ctx->req_idr);
}
@@ -669,10 +663,8 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
int i, ret;
ctx->qps = sec_create_qps();
- if (!ctx->qps) {
- pr_err("Can not create sec qps!\n");
+ if (!ctx->qps)
return -ENODEV;
- }
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
@@ -708,6 +700,9 @@ static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
int i;
+ if (!ctx->qps)
+ return;
+
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
@@ -719,6 +714,9 @@ static int sec_cipher_init(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ if (!ctx->qps)
+ return 0;
+
c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
if (!c_ctx->c_key)
@@ -731,6 +729,9 @@ static void sec_cipher_uninit(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ if (!ctx->qps)
+ return;
+
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
@@ -752,6 +753,9 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ if (!ctx->qps)
+ return;
+
memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
@@ -789,7 +793,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
}
ret = sec_ctx_base_init(ctx);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
ret = sec_cipher_init(ctx);
@@ -898,6 +902,9 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
struct device *dev = ctx->dev;
int ret;
+ if (!ctx->qps)
+ goto set_soft_key;
+
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret) {
@@ -928,13 +935,14 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fbtfm) {
- ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
- if (ret) {
- dev_err(dev, "failed to set fallback skcipher key!\n");
- return ret;
- }
+
+set_soft_key:
+ ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "failed to set fallback skcipher key!\n");
+ return ret;
}
+
return 0;
}
@@ -1398,6 +1406,9 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
struct crypto_authenc_keys keys;
int ret;
+ if (!ctx->qps)
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
c_ctx->c_mode = c_mode;
@@ -1952,7 +1963,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
} while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
req->qp_ctx = qp_ctx;
- req->backlog = &qp_ctx->backlog;
return 0;
}
@@ -2055,6 +2065,9 @@ static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
if (ret)
return ret;
+ if (!ctx->qps)
+ return 0;
+
if (ctx->sec->qm.ver < QM_HW_V3) {
ctx->type_supported = SEC_BD_TYPE2;
ctx->req_op = &sec_skcipher_req_ops;
@@ -2063,7 +2076,7 @@ static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
ctx->req_op = &sec_skcipher_req_ops_v3;
}
- return ret;
+ return 0;
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
@@ -2131,7 +2144,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
int ret;
ret = sec_aead_init(tfm);
- if (ret) {
+ if (ret && ret != -ENODEV) {
pr_err("hisi_sec2: aead init error!\n");
return ret;
}
@@ -2173,7 +2186,7 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
int ret;
ret = sec_aead_init(tfm);
- if (ret) {
+ if (ret && ret != -ENODEV) {
dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
return ret;
}
@@ -2318,6 +2331,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
bool need_fallback = false;
int ret;
+ if (!ctx->qps)
+ goto soft_crypto;
+
if (!sk_req->cryptlen) {
if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
return -EINVAL;
@@ -2335,9 +2351,12 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
return -EINVAL;
if (unlikely(ctx->c_ctx.fallback || need_fallback))
- return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
+ goto soft_crypto;
return ctx->req_op->process(ctx, req);
+
+soft_crypto:
+ return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
@@ -2545,6 +2564,9 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
bool need_fallback = false;
int ret;
+ if (!ctx->qps)
+ goto soft_crypto;
+
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
@@ -2555,11 +2577,14 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
if (need_fallback)
- return sec_aead_soft_crypto(ctx, a_req, encrypt);
+ goto soft_crypto;
return -EINVAL;
}
return ctx->req_op->process(ctx, req);
+
+soft_crypto:
+ return sec_aead_soft_crypto(ctx, a_req, encrypt);
}
static int sec_aead_encrypt(struct aead_request *a_req)
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 5eb2d6820742..7dd125f5f511 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -417,18 +417,29 @@ struct hisi_qp **sec_create_qps(void)
int node = cpu_to_node(raw_smp_processor_id());
u32 ctx_num = ctx_q_num;
struct hisi_qp **qps;
+ u8 *type;
int ret;
qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
if (!qps)
return NULL;
- ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
- if (!ret)
- return qps;
+ /* The SEC alg types are all 0, so the kcalloc() zero-fill already initializes them */
+ type = kcalloc(ctx_num, sizeof(u8), GFP_KERNEL);
+ if (!type) {
+ kfree(qps);
+ return NULL;
+ }
- kfree(qps);
- return NULL;
+ ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, type, node, qps);
+ if (ret) {
+ kfree(type);
+ kfree(qps);
+ return NULL;
+ }
+
+ kfree(type);
+ return qps;
}
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 24c7b6ab285b..d41b34405c21 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -260,7 +260,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
return curr_hw_sgl;
err_unmap:
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sgl, sg_n, dir);
return ERR_PTR(ret);
}
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index ac74df4a9471..5ca0b90859a8 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -40,6 +40,7 @@
#define SEED_SHIFT_24 24
#define SEED_SHIFT_16 16
#define SEED_SHIFT_8 8
+#define SW_MAX_RANDOM_BYTES 65520
struct hisi_trng_list {
struct mutex lock;
@@ -53,8 +54,10 @@ struct hisi_trng {
struct list_head list;
struct hwrng rng;
u32 ver;
- bool is_used;
- struct mutex mutex;
+ u32 ctx_num;
+ /* Number of random bytes generated since the last seeding. */
+ u32 random_bytes;
+ struct mutex lock;
};
struct hisi_trng_ctx {
@@ -63,10 +66,14 @@ struct hisi_trng_ctx {
static atomic_t trng_active_devs;
static struct hisi_trng_list trng_devices;
+static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait);
-static void hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed)
+static int hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed)
{
u32 val, seed_reg, i;
+ int ret;
+
+ writel(0x0, trng->base + SW_DRBG_BLOCKS);
for (i = 0; i < SW_DRBG_SEED_SIZE;
i += SW_DRBG_SEED_SIZE / SW_DRBG_SEED_REGS_NUM) {
@@ -78,6 +85,20 @@ static void hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed)
seed_reg = (i >> SW_DRBG_NUM_SHIFT) % SW_DRBG_SEED_REGS_NUM;
writel(val, trng->base + SW_DRBG_SEED(seed_reg));
}
+
+ writel(SW_DRBG_BLOCKS_NUM | (0x1 << SW_DRBG_ENABLE_SHIFT),
+ trng->base + SW_DRBG_BLOCKS);
+ writel(0x1, trng->base + SW_DRBG_INIT);
+ ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
+ val, val & BIT(0), SLEEP_US, TIMEOUT_US);
+ if (ret) {
+ pr_err("failed to init trng(%d)\n", ret);
+ return -EIO;
+ }
+
+ trng->random_bytes = 0;
+
+ return 0;
}
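
hisi_trng_set_seed() now owns the whole reseed sequence, ending with a readl_relaxed_poll_timeout() wait on the init-done bit. A userspace analog of that poll helper, busy-waiting on a flag with a monotonic-clock deadline (a sketch; the register and all names are invented):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_status;	/* stands in for the STATUS register */

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Poll until (*reg & mask) is set or timeout_us elapses; the same
 * shape as readl_relaxed_poll_timeout(), minus the sleep. */
static int poll_bit(volatile uint32_t *reg, uint32_t mask, uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	for (;;) {
		if (*reg & mask)
			return 0;
		if (now_us() > deadline)
			return -1;	/* -ETIMEDOUT */
	}
}

int main(void)
{
	fake_status = 0x1;		/* hardware reports init done */
	printf("poll: %d\n", poll_bit(&fake_status, 0x1, 1000));
	return 0;
}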
static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
@@ -85,8 +106,7 @@ static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
{
struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
struct hisi_trng *trng = ctx->trng;
- u32 val = 0;
- int ret = 0;
+ int ret;
if (slen < SW_DRBG_SEED_SIZE) {
pr_err("slen(%u) is not matched with trng(%d)\n", slen,
@@ -94,43 +114,45 @@ static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed,
return -EINVAL;
}
- writel(0x0, trng->base + SW_DRBG_BLOCKS);
- hisi_trng_set_seed(trng, seed);
+ mutex_lock(&trng->lock);
+ ret = hisi_trng_set_seed(trng, seed);
+ mutex_unlock(&trng->lock);
- writel(SW_DRBG_BLOCKS_NUM | (0x1 << SW_DRBG_ENABLE_SHIFT),
- trng->base + SW_DRBG_BLOCKS);
- writel(0x1, trng->base + SW_DRBG_INIT);
+ return ret;
+}
- ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
- val, val & BIT(0), SLEEP_US, TIMEOUT_US);
- if (ret)
- pr_err("fail to init trng(%d)\n", ret);
+static int hisi_trng_reseed(struct hisi_trng *trng)
+{
+ u8 seed[SW_DRBG_SEED_SIZE];
+ int size;
- return ret;
+ if (!trng->random_bytes)
+ return 0;
+
+ size = hisi_trng_read(&trng->rng, seed, SW_DRBG_SEED_SIZE, false);
+ if (size != SW_DRBG_SEED_SIZE)
+ return -EIO;
+
+ return hisi_trng_set_seed(trng, seed);
}
-static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
- unsigned int slen, u8 *dstn, unsigned int dlen)
+static int hisi_trng_get_bytes(struct hisi_trng *trng, u8 *dstn, unsigned int dlen)
{
- struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
- struct hisi_trng *trng = ctx->trng;
u32 data[SW_DRBG_DATA_NUM];
u32 currsize = 0;
u32 val = 0;
int ret;
u32 i;
- if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) {
- pr_err("dlen(%u) exceeds limit(%d)!\n", dlen,
- SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES);
- return -EINVAL;
- }
+ ret = hisi_trng_reseed(trng);
+ if (ret)
+ return ret;
do {
ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS,
- val, val & BIT(1), SLEEP_US, TIMEOUT_US);
+ val, val & BIT(1), SLEEP_US, TIMEOUT_US);
if (ret) {
- pr_err("fail to generate random number(%d)!\n", ret);
+ pr_err("failed to generate random number(%d)!\n", ret);
break;
}
@@ -145,30 +167,57 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
currsize = dlen;
}
+ trng->random_bytes += SW_DRBG_BYTES;
writel(0x1, trng->base + SW_DRBG_GEN);
} while (currsize < dlen);
return ret;
}
+static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dstn, unsigned int dlen)
+{
+ struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct hisi_trng *trng = ctx->trng;
+ unsigned int currsize = 0;
+ unsigned int block_size;
+ int ret;
+
+ if (!dstn || !dlen) {
+ pr_err("output is error, dlen %u!\n", dlen);
+ return -EINVAL;
+ }
+
+ do {
+ block_size = min_t(unsigned int, dlen - currsize, SW_MAX_RANDOM_BYTES);
+ mutex_lock(&trng->lock);
+ ret = hisi_trng_get_bytes(trng, dstn + currsize, block_size);
+ mutex_unlock(&trng->lock);
+ if (ret)
+ return ret;
+ currsize += block_size;
+ } while (currsize < dlen);
+
+ return 0;
+}
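
hisi_trng_generate() can now serve requests larger than one hardware run by slicing dlen into SW_MAX_RANDOM_BYTES chunks and taking the device lock per chunk, so a huge request cannot monopolize the engine. The slicing loop on its own (a sketch; the get_bytes() comment marks where the locked hardware read would go):

#include <stdio.h>

#define MAX_CHUNK 65520u	/* mirrors SW_MAX_RANDOM_BYTES */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static int generate(unsigned int dlen)
{
	unsigned int currsize = 0, block;

	do {
		block = min_u(dlen - currsize, MAX_CHUNK);
		/* get_bytes(dst + currsize, block) under the device lock */
		printf("chunk of %u bytes at offset %u\n", block, currsize);
		currsize += block;
	} while (currsize < dlen);

	return 0;
}

int main(void)
{
	return generate(150000);	/* 65520 + 65520 + 18960 */
}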
+
static int hisi_trng_init(struct crypto_tfm *tfm)
{
struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm);
struct hisi_trng *trng;
- int ret = -EBUSY;
+ u32 ctx_num = ~0;
mutex_lock(&trng_devices.lock);
list_for_each_entry(trng, &trng_devices.list, list) {
- if (!trng->is_used) {
- trng->is_used = true;
+ if (trng->ctx_num < ctx_num) {
+ ctx_num = trng->ctx_num;
ctx->trng = trng;
- ret = 0;
- break;
}
}
+ ctx->trng->ctx_num++;
mutex_unlock(&trng_devices.lock);
- return ret;
+ return 0;
}
static void hisi_trng_exit(struct crypto_tfm *tfm)
@@ -176,7 +225,7 @@ static void hisi_trng_exit(struct crypto_tfm *tfm)
struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm);
mutex_lock(&trng_devices.lock);
- ctx->trng->is_used = false;
+ ctx->trng->ctx_num--;
mutex_unlock(&trng_devices.lock);
}
@@ -238,7 +287,7 @@ static int hisi_trng_del_from_list(struct hisi_trng *trng)
int ret = -EBUSY;
mutex_lock(&trng_devices.lock);
- if (!trng->is_used) {
+ if (!trng->ctx_num) {
list_del(&trng->list);
ret = 0;
}
@@ -262,7 +311,9 @@ static int hisi_trng_probe(struct platform_device *pdev)
if (IS_ERR(trng->base))
return PTR_ERR(trng->base);
- trng->is_used = false;
+ trng->ctx_num = 0;
+ trng->random_bytes = SW_MAX_RANDOM_BYTES;
+ mutex_init(&trng->lock);
trng->ver = readl(trng->base + HISI_TRNG_VERSION);
if (!trng_devices.is_init) {
INIT_LIST_HEAD(&trng_devices.list);
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 9fb2a9c01132..b83f228281ab 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -99,7 +99,7 @@ enum zip_cap_table_type {
ZIP_CORE5_BITMAP,
};
-int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index b97513981a3b..98a68e44ac34 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -17,13 +17,17 @@
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_DEFLATE 0x01
+#define HZIP_ALG_TYPE_LZ4 0x04
#define HZIP_BUF_TYPE_M GENMASK(11, 8)
#define HZIP_SGL 0x1
+#define HZIP_WIN_SIZE_M GENMASK(15, 12)
+#define HZIP_16K_WINSZ 0x2
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
#define HZIP_ALG_DEFLATE GENMASK(5, 4)
+#define HZIP_ALG_LZ4 BIT(8)
static DEFINE_MUTEX(zip_algs_lock);
static unsigned int zip_available_devs;
@@ -39,8 +43,10 @@ enum {
HZIP_CTX_Q_NUM
};
+#define GET_REQ_FROM_SQE(sqe) ((u64)(sqe)->dw26 | ((u64)(sqe)->dw27 << 32))
#define COMP_NAME_TO_TYPE(alg_name) \
- (!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
+ (!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : \
+ (!strcmp((alg_name), "lz4") ? HZIP_ALG_TYPE_LZ4 : 0))
struct hisi_zip_req {
struct acomp_req *req;
@@ -48,6 +54,7 @@ struct hisi_zip_req {
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
dma_addr_t dma_dst;
+ struct hisi_zip_qp_ctx *qp_ctx;
u16 req_id;
};
@@ -64,6 +71,7 @@ struct hisi_zip_qp_ctx {
struct hisi_acc_sgl_pool *sgl_pool;
struct hisi_zip *zip_dev;
struct hisi_zip_ctx *ctx;
+ u8 req_type;
};
struct hisi_zip_sqe_ops {
@@ -72,9 +80,9 @@ struct hisi_zip_sqe_ops {
void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
+ void (*fill_win_size)(struct hisi_zip_sqe *sqe, u8 win_size);
void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
- u32 (*get_tag)(struct hisi_zip_sqe *sqe);
u32 (*get_status)(struct hisi_zip_sqe *sqe);
u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};
@@ -82,6 +90,7 @@ struct hisi_zip_sqe_ops {
struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
const struct hisi_zip_sqe_ops *ops;
+ bool fallback;
};
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
@@ -108,6 +117,24 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
+static int hisi_zip_fallback_do_work(struct acomp_req *acomp_req, bool is_decompress)
+{
+ ACOMP_FBREQ_ON_STACK(fbreq, acomp_req);
+ int ret;
+
+ if (!is_decompress)
+ ret = crypto_acomp_compress(fbreq);
+ else
+ ret = crypto_acomp_decompress(fbreq);
+ if (ret) {
+ pr_err("failed to do fallback work, ret=%d\n", ret);
+ return ret;
+ }
+
+ acomp_req->dlen = fbreq->dlen;
+ return ret;
+}
+
static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
struct acomp_req *req)
{
@@ -131,6 +158,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
+ req_cache->qp_ctx = qp_ctx;
return req_cache;
}
@@ -179,9 +207,19 @@ static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
sqe->dw9 = val;
}
+static void hisi_zip_fill_win_size(struct hisi_zip_sqe *sqe, u8 win_size)
+{
+ u32 val;
+
+ val = sqe->dw9 & ~HZIP_WIN_SIZE_M;
+ val |= FIELD_PREP(HZIP_WIN_SIZE_M, win_size);
+ sqe->dw9 = val;
+}
+
static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
- sqe->dw26 = req->req_id;
+ sqe->dw26 = lower_32_bits((u64)req);
+ sqe->dw27 = upper_32_bits((u64)req);
}
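
fill_tag now carries the request pointer itself across the two 32-bit tag words dw26/dw27 instead of a table index, and the completion path reassembles it with GET_REQ_FROM_SQE(). The round trip in plain C (a sketch with invented stand-in types):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_sqe { uint32_t dw26, dw27; };
struct fake_req { int id; };

static void fill_tag(struct fake_sqe *sqe, struct fake_req *req)
{
	uint64_t v = (uint64_t)(uintptr_t)req;

	sqe->dw26 = (uint32_t)v;		/* lower_32_bits() */
	sqe->dw27 = (uint32_t)(v >> 32);	/* upper_32_bits() */
}

static struct fake_req *get_req(const struct fake_sqe *sqe)
{
	uint64_t v = (uint64_t)sqe->dw26 | (uint64_t)sqe->dw27 << 32;

	return (struct fake_req *)(uintptr_t)v;
}

int main(void)
{
	struct fake_req req = { .id = 42 };
	struct fake_sqe sqe;

	fill_tag(&sqe, &req);
	assert(get_req(&sqe) == &req);	/* pointer survives the split */
	printf("id = %d\n", get_req(&sqe)->id);
	return 0;
}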
static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
@@ -204,6 +242,7 @@ static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe
ops->fill_buf_size(sqe, req);
ops->fill_buf_type(sqe, HZIP_SGL);
ops->fill_req_type(sqe, req_type);
+ ops->fill_win_size(sqe, HZIP_16K_WINSZ);
ops->fill_tag(sqe, req);
ops->fill_sqe_type(sqe, ops->sqe_type);
}
@@ -213,7 +252,6 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
@@ -237,18 +275,16 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
&req->dma_dst, DMA_FROM_DEVICE);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
- dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
+ dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
ret);
goto err_unmap_input;
}
- hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
+ hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp_ctx->req_type, req);
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
- spin_lock_bh(&req_q->req_lock);
ret = hisi_qp_send(qp, &zip_sqe);
- spin_unlock_bh(&req_q->req_lock);
if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
@@ -265,11 +301,6 @@ err_unmap_input:
return ret;
}
-static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
-{
- return sqe->dw26;
-}
-
static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
return sqe->dw3 & HZIP_BD_STATUS_M;
@@ -282,14 +313,12 @@ static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
- struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct hisi_zip_sqe *sqe = data;
+ struct hisi_zip_req *req = (struct hisi_zip_req *)GET_REQ_FROM_SQE(sqe);
+ struct hisi_zip_qp_ctx *qp_ctx = req->qp_ctx;
const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct device *dev = &qp->qm->pdev->dev;
- struct hisi_zip_sqe *sqe = data;
- u32 tag = ops->get_tag(sqe);
- struct hisi_zip_req *req = req_q->q + tag;
struct acomp_req *acomp_req = req->req;
int err = 0;
u32 status;
@@ -319,10 +348,15 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
+ struct device *dev;
int ret;
+ if (ctx->fallback)
+ return hisi_zip_fallback_do_work(acomp_req, false);
+
+ dev = &qp_ctx->qp->qm->pdev->dev;
+
req = hisi_zip_create_req(qp_ctx, acomp_req);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -340,10 +374,15 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
+ struct device *dev;
int ret;
+ if (ctx->fallback)
+ return hisi_zip_fallback_do_work(acomp_req, true);
+
+ dev = &qp_ctx->qp->qm->pdev->dev;
+
req = hisi_zip_create_req(qp_ctx, acomp_req);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -358,31 +397,9 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return ret;
}
-static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
- int alg_type, int req_type)
-{
- struct device *dev = &qp->qm->pdev->dev;
- int ret;
-
- qp->req_type = req_type;
- qp->alg_type = alg_type;
- qp->qp_ctx = qp_ctx;
-
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0) {
- dev_err(dev, "failed to start qp (%d)!\n", ret);
- return ret;
- }
-
- qp_ctx->qp = qp;
-
- return 0;
-}
-
-static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
+static int hisi_zip_decompress(struct acomp_req *acomp_req)
{
- hisi_qm_stop_qp(qp_ctx->qp);
- hisi_qm_free_qps(&qp_ctx->qp, 1);
+ return hisi_zip_fallback_do_work(acomp_req, true);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
@@ -391,9 +408,9 @@ static const struct hisi_zip_sqe_ops hisi_zip_ops = {
.fill_buf_size = hisi_zip_fill_buf_size,
.fill_buf_type = hisi_zip_fill_buf_type,
.fill_req_type = hisi_zip_fill_req_type,
+ .fill_win_size = hisi_zip_fill_win_size,
.fill_tag = hisi_zip_fill_tag,
.fill_sqe_type = hisi_zip_fill_sqe_type,
- .get_tag = hisi_zip_get_tag,
.get_status = hisi_zip_get_status,
.get_dstlen = hisi_zip_get_dstlen,
};
@@ -402,10 +419,15 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip_qp_ctx *qp_ctx;
+ u8 alg_type[HZIP_CTX_Q_NUM];
struct hisi_zip *hisi_zip;
- int ret, i, j;
+ int ret, i;
+
+ /* alg_type = 0 for compress, 1 for decompress in hw sqe */
+ for (i = 0; i < HZIP_CTX_Q_NUM; i++)
+ alg_type[i] = i;
- ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node, alg_type);
if (ret) {
pr_err("failed to create zip qps (%d)!\n", ret);
return -ENODEV;
@@ -414,19 +436,11 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
- /* alg_type = 0 for compress, 1 for decompress in hw sqe */
qp_ctx = &hisi_zip_ctx->qp_ctx[i];
qp_ctx->ctx = hisi_zip_ctx;
- ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
- if (ret) {
- for (j = i - 1; j >= 0; j--)
- hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
-
- hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
- return ret;
- }
-
qp_ctx->zip_dev = hisi_zip;
+ qp_ctx->req_type = req_type;
+ qp_ctx->qp = qps[i];
}
hisi_zip_ctx->ops = &hisi_zip_ops;
@@ -436,10 +450,13 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
+ struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
int i;
for (i = 0; i < HZIP_CTX_Q_NUM; i++)
- hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
+ qps[i] = hisi_zip_ctx->qp_ctx[i].qp;
+
+ hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
}
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
@@ -549,7 +566,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
if (ret) {
pr_err("failed to init ctx (%d)!\n", ret);
- return ret;
+ goto switch_to_soft;
}
dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;
@@ -574,14 +591,18 @@ err_release_req_q:
hisi_zip_release_req_q(ctx);
err_ctx_exit:
hisi_zip_ctx_exit(ctx);
- return ret;
+switch_to_soft:
+ ctx->fallback = true;
+ return 0;
}
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- hisi_zip_set_acomp_cb(ctx, NULL);
+ if (ctx->fallback)
+ return;
+
hisi_zip_release_sgl_pool(ctx);
hisi_zip_release_req_q(ctx);
hisi_zip_ctx_exit(ctx);
@@ -595,7 +616,8 @@ static struct acomp_alg hisi_zip_acomp_deflate = {
.base = {
.cra_name = "deflate",
.cra_driver_name = "hisi-deflate-acomp",
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_module = THIS_MODULE,
.cra_priority = HZIP_ALG_PRIORITY,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
@@ -624,18 +646,69 @@ static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
crypto_unregister_acomp(&hisi_zip_acomp_deflate);
}
+static struct acomp_alg hisi_zip_acomp_lz4 = {
+ .init = hisi_zip_acomp_init,
+ .exit = hisi_zip_acomp_exit,
+ .compress = hisi_zip_acompress,
+ .decompress = hisi_zip_decompress,
+ .base = {
+ .cra_name = "lz4",
+ .cra_driver_name = "hisi-lz4-acomp",
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_module = THIS_MODULE,
+ .cra_priority = HZIP_ALG_PRIORITY,
+ .cra_ctxsize = sizeof(struct hisi_zip_ctx),
+ }
+};
+
+static int hisi_zip_register_lz4(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
+ return 0;
+
+ ret = crypto_register_acomp(&hisi_zip_acomp_lz4);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to LZ4 (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hisi_zip_unregister_lz4(struct hisi_qm *qm)
+{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_LZ4))
+ return;
+
+ crypto_unregister_acomp(&hisi_zip_acomp_lz4);
+}
+
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret = 0;
mutex_lock(&zip_algs_lock);
- if (zip_available_devs++)
+ if (zip_available_devs) {
+ zip_available_devs++;
goto unlock;
+ }
ret = hisi_zip_register_deflate(qm);
if (ret)
- zip_available_devs--;
+ goto unlock;
+
+ ret = hisi_zip_register_lz4(qm);
+ if (ret)
+ goto unreg_deflate;
+ zip_available_devs++;
+ mutex_unlock(&zip_algs_lock);
+
+ return 0;
+
+unreg_deflate:
+ hisi_zip_unregister_deflate(qm);
unlock:
mutex_unlock(&zip_algs_lock);
return ret;
@@ -648,6 +721,7 @@ void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
goto unlock;
hisi_zip_unregister_deflate(qm);
+ hisi_zip_unregister_lz4(qm);
unlock:
mutex_unlock(&zip_algs_lock);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 4fcbe6bada06..85b26ef17548 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -446,12 +446,12 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type)
{
if (node == NUMA_NO_NODE)
node = cpu_to_node(raw_smp_processor_id());
- return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
+ return hisi_qm_alloc_qps_node(&zip_devices, qp_num, alg_type, node, qps);
}
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index f22c12e36b56..7195c37dd102 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -870,25 +870,18 @@ static int img_register_algs(struct img_hash_dev *hdev)
for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
err = crypto_register_ahash(&img_algs[i]);
- if (err)
- goto err_reg;
+ if (err) {
+ crypto_unregister_ahashes(img_algs, i);
+ return err;
+ }
}
- return 0;
-err_reg:
- for (; i--; )
- crypto_unregister_ahash(&img_algs[i]);
-
- return err;
+ return 0;
}
-static int img_unregister_algs(struct img_hash_dev *hdev)
+static void img_unregister_algs(struct img_hash_dev *hdev)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(img_algs); i++)
- crypto_unregister_ahash(&img_algs[i]);
- return 0;
+ crypto_unregister_ahashes(img_algs, ARRAY_SIZE(img_algs));
}
static void img_hash_done_task(unsigned long data)
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c
index 0b38a567da0e..b7fd9795062d 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-main.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.c
@@ -77,11 +77,44 @@ inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask)
__raw_writel(mask, eip93->base + EIP93_REG_INT_CLR);
}
-static void eip93_unregister_algs(unsigned int i)
+static int eip93_algo_is_supported(u32 alg_flags, u32 supported_algo_flags)
+{
+ if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_TDES))
+ return 0;
+
+ if (IS_AES(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_AES))
+ return 0;
+
+ if (IS_HASH_MD5(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_MD5))
+ return 0;
+
+ if (IS_HASH_SHA1(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
+ return 0;
+
+ if (IS_HASH_SHA224(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
+ return 0;
+
+ if (IS_HASH_SHA256(alg_flags) &&
+ !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
+ return 0;
+
+ return 1;
+}
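
eip93_algo_is_supported() centralizes the per-algorithm capability checks so register and unregister walk the table with exactly the same filter and can never fall out of sync. The shape of such a predicate over feature bits, reduced to a mask test (a sketch; the eip93 helper spells the same test out feature by feature, and these flag names are invented):

#include <stdio.h>

#define CAP_AES  (1u << 0)
#define CAP_SHA1 (1u << 1)

struct algo { const char *name; unsigned int needs; };

/* Usable only if the engine advertises every capability bit the
 * algorithm needs. */
static int algo_is_supported(const struct algo *a, unsigned int caps)
{
	return (a->needs & caps) == a->needs;
}

int main(void)
{
	const struct algo algs[] = {
		{ "cbc-aes",       CAP_AES },
		{ "hmac-sha1-aes", CAP_AES | CAP_SHA1 },
	};
	unsigned int caps = CAP_AES;	/* an engine without SHA-1 */

	for (unsigned int i = 0; i < 2; i++)
		printf("%s: %s\n", algs[i].name,
		       algo_is_supported(&algs[i], caps) ? "register" : "skip");
	return 0;
}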
+
+static void eip93_unregister_algs(u32 supported_algo_flags, unsigned int i)
{
unsigned int j;
for (j = 0; j < i; j++) {
+ if (!eip93_algo_is_supported(eip93_algs[j]->flags,
+ supported_algo_flags))
+ continue;
+
switch (eip93_algs[j]->type) {
case EIP93_ALG_TYPE_SKCIPHER:
crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
@@ -90,7 +123,7 @@ static void eip93_unregister_algs(unsigned int i)
crypto_unregister_aead(&eip93_algs[j]->alg.aead);
break;
case EIP93_ALG_TYPE_HASH:
- crypto_unregister_ahash(&eip93_algs[i]->alg.ahash);
+ crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
break;
}
}
@@ -106,49 +139,27 @@ static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_fl
eip93_algs[i]->eip93 = eip93;
- if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
- !(supported_algo_flags & EIP93_PE_OPTION_TDES))
+ if (!eip93_algo_is_supported(alg_flags, supported_algo_flags))
continue;
- if (IS_AES(alg_flags)) {
- if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
- continue;
+ if (IS_AES(alg_flags) && !IS_HMAC(alg_flags)) {
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_128;
- if (!IS_HMAC(alg_flags)) {
- if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
- eip93_algs[i]->alg.skcipher.max_keysize =
- AES_KEYSIZE_128;
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_192;
- if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
- eip93_algs[i]->alg.skcipher.max_keysize =
- AES_KEYSIZE_192;
+ if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
+ eip93_algs[i]->alg.skcipher.max_keysize =
+ AES_KEYSIZE_256;
- if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
- eip93_algs[i]->alg.skcipher.max_keysize =
- AES_KEYSIZE_256;
-
- if (IS_RFC3686(alg_flags))
- eip93_algs[i]->alg.skcipher.max_keysize +=
- CTR_RFC3686_NONCE_SIZE;
- }
+ if (IS_RFC3686(alg_flags))
+ eip93_algs[i]->alg.skcipher.max_keysize +=
+ CTR_RFC3686_NONCE_SIZE;
}
- if (IS_HASH_MD5(alg_flags) &&
- !(supported_algo_flags & EIP93_PE_OPTION_MD5))
- continue;
-
- if (IS_HASH_SHA1(alg_flags) &&
- !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
- continue;
-
- if (IS_HASH_SHA224(alg_flags) &&
- !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
- continue;
-
- if (IS_HASH_SHA256(alg_flags) &&
- !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
- continue;
-
switch (eip93_algs[i]->type) {
case EIP93_ALG_TYPE_SKCIPHER:
ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
@@ -167,7 +178,7 @@ static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_fl
return 0;
fail:
- eip93_unregister_algs(i);
+ eip93_unregister_algs(supported_algo_flags, i);
return ret;
}
@@ -469,8 +480,11 @@ static int eip93_crypto_probe(struct platform_device *pdev)
static void eip93_crypto_remove(struct platform_device *pdev)
{
struct eip93_device *eip93 = platform_get_drvdata(pdev);
+ u32 algo_flags;
+
+ algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1);
- eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
+ eip93_unregister_algs(algo_flags, ARRAY_SIZE(eip93_algs));
eip93_cleanup(eip93);
}
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index d0058757b000..f79ea22e9abe 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
@@ -96,7 +97,7 @@ static bool iaa_verify_compress = true;
static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
{
- return sprintf(buf, "%d\n", iaa_verify_compress);
+ return sysfs_emit(buf, "%d\n", iaa_verify_compress);
}
static ssize_t verify_compress_store(struct device_driver *driver,
@@ -188,11 +189,11 @@ static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
int ret = 0;
if (!async_mode && !use_irq)
- ret = sprintf(buf, "%s\n", "sync");
+ ret = sysfs_emit(buf, "%s\n", "sync");
else if (async_mode && !use_irq)
- ret = sprintf(buf, "%s\n", "async");
+ ret = sysfs_emit(buf, "%s\n", "async");
else if (async_mode && use_irq)
- ret = sprintf(buf, "%s\n", "async_irq");
+ ret = sysfs_emit(buf, "%s\n", "async_irq");
return ret;
}
@@ -221,15 +222,13 @@ static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];
static int find_empty_iaa_compression_mode(void)
{
- int i = -EINVAL;
+ int i;
- for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
- if (iaa_compression_modes[i])
- continue;
- break;
- }
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++)
+ if (!iaa_compression_modes[i])
+ return i;
- return i;
+ return -EINVAL;
}
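
The rewritten find_empty_iaa_compression_mode() is the early-return form of a linear slot scan: return the first free index, or an error once the table is exhausted. Standalone (a sketch, table contents invented):

#include <stdio.h>

#define SLOTS_MAX 4

static const char *slots[SLOTS_MAX] = { "fixed", NULL, "dynamic", NULL };

/* First unused index, or -1 when every slot is taken; same shape as
 * the reworked helper. */
static int find_empty_slot(void)
{
	for (int i = 0; i < SLOTS_MAX; i++)
		if (!slots[i])
			return i;

	return -1;	/* -EINVAL in the driver */
}

int main(void)
{
	printf("first free slot: %d\n", find_empty_slot());	/* 1 */
	return 0;
}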
static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
@@ -544,13 +543,7 @@ static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
{
- int ret = 0;
-
- ret = init_device_compression_modes(iaa_device, iaa_wq->wq);
- if (ret)
- return ret;
-
- return ret;
+ return init_device_compression_modes(iaa_device, iaa_wq->wq);
}
static void del_iaa_device(struct iaa_device *iaa_device)
@@ -1704,12 +1697,10 @@ out:
return ret;
}
-static int iaa_unregister_compression_device(void)
+static void iaa_unregister_compression_device(void)
{
if (iaa_crypto_registered)
crypto_unregister_acomp(&iaa_acomp_fixed_deflate);
-
- return 0;
}
static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
@@ -1925,8 +1916,7 @@ err_aecs_init:
static void __exit iaa_crypto_cleanup_module(void)
{
- if (iaa_unregister_compression_device())
- pr_debug("IAA compression device unregister failed\n");
+ iaa_unregister_compression_device();
iaa_crypto_debugfs_cleanup();
driver_remove_file(&iaa_crypto_driver.drv,
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 53fa91d577ed..35105213d40c 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -3,6 +3,7 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -459,6 +460,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
index b9b5e744a3f1..af8dbc7517cf 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
@@ -148,6 +148,16 @@ static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
break;
+ default:
+ dev_err(&GET_DEV(vf_info->accel_dev),
+ "Invalid BlockMsg type 0x%.4x received from VF%u\n",
+ req.type, vf_info->vf_nr);
+ resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
+ resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR) |
+ FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK,
+ ADF_PF2VF_UNSPECIFIED_ERROR);
+ return resp;
}
/* Is this a request for CRC or data? */
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
index 85c682e248fb..e09b9edfce42 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
@@ -255,8 +255,8 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->areq.dh = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
- QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
/*
* If no source is provided use g as base
@@ -731,8 +731,8 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
- QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
qat_req->in.rsa.enc.e = ctx->dma_e;
qat_req->in.rsa.enc.n = ctx->dma_n;
@@ -867,8 +867,8 @@ static int qat_rsa_dec(struct akcipher_request *req)
qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
- QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
if (ctx->crt_mode) {
qat_req->in.rsa.dec_crt.p = ctx->dma_p;
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 301bdf239e7d..8afa3a87e38d 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -38,15 +38,9 @@ struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
struct crypto_async_request **backlog)
{
- struct crypto_async_request *req;
-
*backlog = crypto_get_backlog(&engine->queue);
- req = crypto_dequeue_request(&engine->queue);
-
- if (!req)
- return NULL;
- return req;
+ return crypto_dequeue_request(&engine->queue);
}
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 9f5601c0280b..417a48f41350 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -1326,7 +1326,7 @@ static ssize_t ucode_load_store(struct device *dev,
int del_grp_idx = -1;
int ucode_idx = 0;
- if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
+ if (count >= OTX_CPT_UCODE_NAME_LENGTH)
return -EINVAL;
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index 88a41d1ca5f6..6c0bfb3ea1c9 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -168,7 +168,8 @@ static void free_command_queues(struct otx_cptvf *cptvf,
chunk = list_first_entry(&cqinfo->queue[i].chead,
struct otx_cpt_cmd_chunk, nextchunk);
- dma_free_coherent(&pdev->dev, chunk->size,
+ dma_free_coherent(&pdev->dev,
+ chunk->size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
chunk->head,
chunk->dma_addr);
chunk->head = NULL;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 1c5c262af48d..f54f90588d86 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2020 Marvell. */
#include <linux/firmware.h>
+#include <linux/sysfs.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
@@ -507,7 +508,7 @@ static ssize_t sso_pf_func_ovrd_show(struct device *dev,
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
+ return sysfs_emit(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}
static ssize_t sso_pf_func_ovrd_store(struct device *dev,
@@ -533,7 +534,7 @@ static ssize_t kvf_limits_show(struct device *dev,
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", cptpf->kvf_limits);
+ return sysfs_emit(buf, "%d\n", cptpf->kvf_limits);
}
static ssize_t kvf_limits_store(struct device *dev,
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 0493041ea088..56aa1c29d782 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -908,7 +908,6 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
{
int chip_id, vasid, ret = 0;
int ct_842 = 0, ct_gzip = 0;
- struct device_node *dn;
chip_id = of_get_ibm_chip_id(pn);
if (chip_id < 0) {
@@ -922,7 +921,7 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
return -EINVAL;
}
- for_each_child_of_node(pn, dn) {
+ for_each_child_of_node_scoped(pn, dn) {
ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
"ibm,p9-nx-842", &ct_842);
@@ -930,10 +929,8 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
ret = find_nx_device_tree(dn, chip_id, vasid,
NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
- if (ret) {
- of_node_put(dn);
+ if (ret)
return ret;
- }
}
if (!ct_842 || !ct_gzip) {
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 3cc802622dd5..3eadaf7a64fa 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "omap-crypto.h"
@@ -1042,7 +1043,7 @@ static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
{
struct omap_aes_dev *dd = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
+ return sysfs_emit(buf, "%d\n", dd->engine->queue.max_qlen);
}
static ssize_t queue_len_store(struct device *dev,
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ff8aac02994a..1ffc240e016a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,6 +37,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#define MD5_DIGEST_SIZE 16
@@ -1973,7 +1974,7 @@ static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
{
struct omap_sham_dev *dd = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", dd->fallback_sz);
+ return sysfs_emit(buf, "%d\n", dd->fallback_sz);
}
static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
@@ -2003,7 +2004,7 @@ static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
{
struct omap_sham_dev *dd = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", dd->queue.max_qlen);
+ return sysfs_emit(buf, "%d\n", dd->queue.max_qlen);
}
static ssize_t queue_len_store(struct device *dev,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index f1edb4fbf364..c1dc1e43e117 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -669,8 +669,10 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
return -ENOMEM;
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
- rctx->adata, cryp->assoclen) != cryp->assoclen)
+ rctx->adata, cryp->assoclen) != cryp->assoclen) {
+ kfree(rctx->adata);
return -EINVAL;
+ }
}
if (cryp->total_in)
@@ -681,8 +683,11 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
ctx->rctx = rctx;
ret = starfive_aes_hw_init(ctx);
- if (ret)
+ if (ret) {
+ if (cryp->assoclen)
+ kfree(rctx->adata);
return ret;
+ }
if (!cryp->assoclen)
goto write_text;
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index 5ed4ba5da7f9..f85d6fb81ca8 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -216,13 +216,15 @@ struct starfive_cryp_request_ctx {
struct scatterlist *in_sg;
struct scatterlist *out_sg;
- struct ahash_request ahash_fbk_req;
size_t total;
unsigned int blksize;
unsigned int digsize;
unsigned long in_sg_len;
unsigned char *adata;
u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32));
+
+ /* Must be last as it ends in a flexible-array member. */
+ struct ahash_request ahash_fbk_req;
};
struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 5e82e8a1f71a..d206eddb67bf 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/minmax.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -1922,20 +1923,19 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32];
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out));
- cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min(cryp->hw_blocksize,
+ cryp->payload_out));
+ cryp->payload_out -= min(cryp->hw_blocksize, cryp->payload_out);
}
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
u32 block[AES_BLOCK_32] = {0};
- memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_in));
+ memcpy_from_scatterwalk(block, &cryp->in_walk, min(cryp->hw_blocksize,
+ cryp->payload_in));
writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32));
- cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
+ cryp->payload_in -= min(cryp->hw_blocksize, cryp->payload_in);
}
static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
@@ -1980,10 +1980,9 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out));
- cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min(cryp->hw_blocksize,
+ cryp->payload_out));
+ cryp->payload_out -= min(cryp->hw_blocksize, cryp->payload_out);
/* d) change mode back to AES GCM */
cfg &= ~CR_ALGO_MASK;
@@ -2078,9 +2077,9 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
*/
readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
- memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize,
- cryp->payload_out));
- cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
+ memcpy_to_scatterwalk(&cryp->out_walk, block, min(cryp->hw_blocksize,
+ cryp->payload_out));
+ cryp->payload_out -= min(cryp->hw_blocksize, cryp->payload_out);
/* d) Load again CRYP_CSGCMCCMxR */
for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
@@ -2158,7 +2157,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
u32 block[AES_BLOCK_32] = {0};
size_t written;
- written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
+ written = min(AES_BLOCK_SIZE, cryp->header_in);
memcpy_from_scatterwalk(block, &cryp->in_walk, written);
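
The min_t() to min() conversions above (enabled by the new <linux/minmax.h> include) are more than style: min_t() force-casts both operands to the named type and will silently truncate a value that does not fit, while plain min() fails to compile unless the operand types already agree. A minimal sketch, assuming both lengths are size_t as in the patched code:

	#include <linux/minmax.h>
	#include <linux/types.h>

	static size_t bytes_this_round(size_t hw_blocksize, size_t payload_out)
	{
		/*
		 * min() statically checks that the two operand types match,
		 * so no truncation can hide behind a cast the way it can
		 * with, say, min_t(u32, ...) applied to a size_t value.
		 */
		return min(hw_blocksize, payload_out);
	}
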
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index a4436728b0db..d60147a7594e 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1115,8 +1115,7 @@ static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
return -ENOMEM;
}
- if (state->bufcnt)
- memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
+ memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
min(new_len, rctx->total) - state->bufcnt, 0);
@@ -1300,8 +1299,7 @@ static int stm32_hash_prepare_request(struct ahash_request *req)
}
/* copy buffer in a temporary one that is used for sg alignment */
- if (state->bufcnt)
- memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
+ memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
if (ret)
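
Dropping the `if (state->bufcnt)` guards above is safe because memcpy() with a zero length copies nothing; both buffers involved are long-lived here, so the pointers are always valid. The unconditional call behaves identically and removes a branch:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Hypothetical helper: bufcnt may be 0, memcpy() is then a no-op. */
	static void stash_partial_block(u8 *dst, const u8 *src,
					unsigned int bufcnt)
	{
		memcpy(dst, src, bufcnt);
	}
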
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 19c934af3df6..e559bdadf4f9 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -135,7 +135,7 @@ static inline int virtio_crypto_get_current_node(void)
int cpu, node;
cpu = get_cpu();
- node = topology_physical_package_id(cpu);
+ node = cpu_to_node(cpu);
put_cpu();
return node;
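
topology_physical_package_id() returns the physical socket id, which need not correspond to a NUMA node id (and can be -1 on some platforms); cpu_to_node() queries the NUMA topology directly, which is what callers allocating per-node memory actually want. A minimal sketch of that use, with hypothetical names:

	#include <linux/slab.h>
	#include <linux/smp.h>
	#include <linux/topology.h>

	/* Allocate a buffer on the calling CPU's NUMA node. */
	static void *alloc_near_me(size_t size)
	{
		int cpu = get_cpu();		/* disable preemption: cpu id stays valid */
		int node = cpu_to_node(cpu);	/* NUMA node, not socket id */

		put_cpu();
		return kzalloc_node(size, GFP_KERNEL, node);
	}
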
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 3d241446099c..ccc6b5c1b24b 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -75,15 +75,20 @@ static void virtcrypto_done_task(unsigned long data)
struct data_queue *data_vq = (struct data_queue *)data;
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
+ unsigned long flags;
unsigned int len;
+ spin_lock_irqsave(&data_vq->lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+ spin_unlock_irqrestore(&data_vq->lock, flags);
if (vc_req->alg_cb)
vc_req->alg_cb(vc_req, len);
+ spin_lock_irqsave(&data_vq->lock, flags);
}
} while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&data_vq->lock, flags);
}
static void virtcrypto_dataq_callback(struct virtqueue *vq)
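
The hunk above closes a race by holding data_vq->lock while the tasklet pops buffers off the virtqueue, dropping it only around each alg_cb invocation, presumably because completion callbacks can re-enter the submission path and take the same lock. The drop-and-retake pattern, generalized in a sketch:

	#include <linux/spinlock.h>
	#include <linux/virtio.h>

	/*
	 * Drain a virtqueue under a lock shared with submitters, releasing
	 * the lock around each completion callback (which may resubmit).
	 */
	static void drain_vq_sketch(struct virtqueue *vq, spinlock_t *lock,
				    void (*complete)(void *buf, unsigned int len))
	{
		unsigned long flags;
		unsigned int len;
		void *buf;

		spin_lock_irqsave(lock, flags);
		do {
			virtqueue_disable_cb(vq);
			while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
				spin_unlock_irqrestore(lock, flags);
				complete(buf, len);	/* may take the lock */
				spin_lock_irqsave(lock, flags);
			}
		} while (!virtqueue_enable_cb(vq));
		spin_unlock_irqrestore(lock, flags);
	}
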
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index 1b3fb21a2a7d..11053d1786d4 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -541,8 +541,6 @@ int virtio_crypto_skcipher_crypt_req(
if (ret < 0)
return ret;
- virtqueue_kick(data_vq->vq);
-
return 0;
}
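
The deleted virtqueue_kick() looks redundant rather than wrong: assuming the enqueue helper already kicks the queue when it adds the buffers, kicking again here only issues a second doorbell write. The usual add-then-kick-once shape, sketched with hypothetical names:

	#include <linux/scatterlist.h>
	#include <linux/virtio.h>

	/* Enqueue one out/in scatterlist pair and notify the device once. */
	static int submit_once(struct virtqueue *vq, struct scatterlist *out,
			       struct scatterlist *in, void *token)
	{
		struct scatterlist *sgs[2] = { out, in };
		int ret;

		ret = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
		if (ret)
			return ret;

		virtqueue_kick(vq);
		return 0;
	}
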
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 6e72d9229410..2421bf30556d 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx ZynqMP AES Driver.
- * Copyright (c) 2020 Xilinx Inc.
+ * Copyright (C) 2020-2022 Xilinx Inc.
+ * Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
*/
#include <crypto/aes.h>
@@ -19,21 +20,21 @@
#include <linux/string.h>
#define ZYNQMP_DMA_BIT_MASK 32U
-
-#define ZYNQMP_AES_KEY_SIZE AES_KEYSIZE_256
-#define ZYNQMP_AES_AUTH_SIZE 16U
-#define ZYNQMP_KEY_SRC_SEL_KEY_LEN 1U
-#define ZYNQMP_AES_BLK_SIZE 1U
+#define VERSAL_DMA_BIT_MASK 64U
+#define XILINX_AES_AUTH_SIZE 16U
+#define XILINX_AES_BLK_SIZE 1U
#define ZYNQMP_AES_MIN_INPUT_BLK_SIZE 4U
#define ZYNQMP_AES_WORD_LEN 4U
-#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
-#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
-#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+#define VERSAL_AES_QWORD_LEN 16U
+#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
+#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
+#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+#define XILINX_KEY_MAGIC 0x3EA0
-enum zynqmp_aead_op {
- ZYNQMP_AES_DECRYPT = 0,
- ZYNQMP_AES_ENCRYPT
+enum xilinx_aead_op {
+ XILINX_AES_DECRYPT = 0,
+ XILINX_AES_ENCRYPT
};
enum zynqmp_aead_keysrc {
@@ -42,14 +43,24 @@ enum zynqmp_aead_keysrc {
ZYNQMP_AES_PUF_KEY
};
-struct zynqmp_aead_drv_ctx {
- union {
- struct aead_engine_alg aead;
- } alg;
+struct xilinx_aead_dev {
struct device *dev;
struct crypto_engine *engine;
+ struct xilinx_aead_alg *aead_algs;
+};
+
+struct xilinx_aead_alg {
+ struct xilinx_aead_dev *aead_dev;
+ struct aead_engine_alg aead;
+ int (*aes_aead_cipher)(struct aead_request *areq);
+ u8 dma_bit_mask;
};
+struct xilinx_hwkey_info {
+ u16 magic;
+ u16 type;
+} __packed;
+
struct zynqmp_aead_hw_req {
u64 src;
u64 iv;
@@ -60,177 +71,368 @@ struct zynqmp_aead_hw_req {
u64 keysrc;
};
-struct zynqmp_aead_tfm_ctx {
+struct xilinx_aead_tfm_ctx {
struct device *dev;
- u8 key[ZYNQMP_AES_KEY_SIZE];
- u8 *iv;
+ dma_addr_t key_dma_addr;
+ u8 *key;
u32 keylen;
u32 authsize;
- enum zynqmp_aead_keysrc keysrc;
+ u8 keysrc;
struct crypto_aead *fbk_cipher;
};
-struct zynqmp_aead_req_ctx {
- enum zynqmp_aead_op op;
+struct xilinx_aead_req_ctx {
+ enum xilinx_aead_op op;
+};
+
+static struct xilinx_aead_dev *aead_dev;
+
+enum versal_aead_keysrc {
+ VERSAL_AES_BBRAM_KEY = 0,
+ VERSAL_AES_BBRAM_RED_KEY,
+ VERSAL_AES_BH_KEY,
+ VERSAL_AES_BH_RED_KEY,
+ VERSAL_AES_EFUSE_KEY,
+ VERSAL_AES_EFUSE_RED_KEY,
+ VERSAL_AES_EFUSE_USER_KEY_0,
+ VERSAL_AES_EFUSE_USER_KEY_1,
+ VERSAL_AES_EFUSE_USER_RED_KEY_0,
+ VERSAL_AES_EFUSE_USER_RED_KEY_1,
+ VERSAL_AES_KUP_KEY,
+ VERSAL_AES_PUF_KEY,
+ VERSAL_AES_USER_KEY_0,
+ VERSAL_AES_USER_KEY_1,
+ VERSAL_AES_USER_KEY_2,
+ VERSAL_AES_USER_KEY_3,
+ VERSAL_AES_USER_KEY_4,
+ VERSAL_AES_USER_KEY_5,
+ VERSAL_AES_USER_KEY_6,
+ VERSAL_AES_USER_KEY_7,
+ VERSAL_AES_EXPANDED_KEYS,
+ VERSAL_AES_ALL_KEYS,
+};
+
+enum versal_aead_op {
+ VERSAL_AES_ENCRYPT = 0,
+ VERSAL_AES_DECRYPT
+};
+
+enum versal_aes_keysize {
+ HW_AES_KEY_SIZE_128 = 0,
+ HW_AES_KEY_SIZE_256 = 2,
+};
+
+struct versal_init_ops {
+ u64 iv;
+ u32 op;
+ u32 keysrc;
+ u32 size;
+};
+
+struct versal_in_params {
+ u64 in_data_addr;
+ u32 size;
+ u32 is_last;
};
static int zynqmp_aes_aead_cipher(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
- struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ dma_addr_t dma_addr_data, dma_addr_hw_req;
struct device *dev = tfm_ctx->dev;
struct zynqmp_aead_hw_req *hwreq;
- dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
int ret;
size_t dma_size;
+ void *dmabuf;
char *kbuf;
- int err;
-
- if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
- dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
- + GCM_AES_IV_SIZE;
- else
- dma_size = req->cryptlen + GCM_AES_IV_SIZE;
- kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr_data, GFP_KERNEL);
+ dma_size = req->cryptlen + XILINX_AES_AUTH_SIZE;
+ kbuf = kmalloc(dma_size, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
- hwreq = dma_alloc_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
- &dma_addr_hw_req, GFP_KERNEL);
- if (!hwreq) {
- dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ dmabuf = kmalloc(sizeof(*hwreq) + GCM_AES_IV_SIZE, GFP_KERNEL);
+ if (!dmabuf) {
+ kfree(kbuf);
return -ENOMEM;
}
-
+ hwreq = dmabuf;
data_size = req->cryptlen;
scatterwalk_map_and_copy(kbuf, req->src, 0, req->cryptlen, 0);
- memcpy(kbuf + data_size, req->iv, GCM_AES_IV_SIZE);
+ memcpy(dmabuf + sizeof(struct zynqmp_aead_hw_req), req->iv, GCM_AES_IV_SIZE);
+ dma_addr_data = dma_map_single(dev, kbuf, dma_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, dma_addr_data))) {
+ ret = -ENOMEM;
+ goto freemem;
+ }
hwreq->src = dma_addr_data;
hwreq->dst = dma_addr_data;
- hwreq->iv = hwreq->src + data_size;
hwreq->keysrc = tfm_ctx->keysrc;
hwreq->op = rq_ctx->op;
- if (hwreq->op == ZYNQMP_AES_ENCRYPT)
+ if (hwreq->op == XILINX_AES_ENCRYPT)
hwreq->size = data_size;
else
- hwreq->size = data_size - ZYNQMP_AES_AUTH_SIZE;
+ hwreq->size = data_size - XILINX_AES_AUTH_SIZE;
- if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY) {
- memcpy(kbuf + data_size + GCM_AES_IV_SIZE,
- tfm_ctx->key, ZYNQMP_AES_KEY_SIZE);
-
- hwreq->key = hwreq->src + data_size + GCM_AES_IV_SIZE;
- } else {
+ if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY)
+ hwreq->key = tfm_ctx->key_dma_addr;
+ else
hwreq->key = 0;
- }
+ dma_addr_hw_req = dma_map_single(dev, dmabuf, sizeof(struct zynqmp_aead_hw_req) +
+ GCM_AES_IV_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr_hw_req))) {
+ ret = -ENOMEM;
+ dma_unmap_single(dev, dma_addr_data, dma_size, DMA_BIDIRECTIONAL);
+ goto freemem;
+ }
+ hwreq->iv = dma_addr_hw_req + sizeof(struct zynqmp_aead_hw_req);
+ dma_sync_single_for_device(dev, dma_addr_hw_req, sizeof(struct zynqmp_aead_hw_req) +
+ GCM_AES_IV_SIZE, DMA_TO_DEVICE);
ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
-
+ dma_unmap_single(dev, dma_addr_hw_req, sizeof(struct zynqmp_aead_hw_req) + GCM_AES_IV_SIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_addr_data, dma_size, DMA_BIDIRECTIONAL);
if (ret) {
dev_err(dev, "ERROR: AES PM API failed\n");
- err = ret;
} else if (status) {
switch (status) {
case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
- dev_err(dev, "ERROR: Gcm Tag mismatch\n");
+ ret = -EBADMSG;
break;
case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
+ ret = -EINVAL;
dev_err(dev, "ERROR: Wrong KeySrc, enable secure mode\n");
break;
case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
+ ret = -EINVAL;
dev_err(dev, "ERROR: PUF is not registered\n");
break;
default:
- dev_err(dev, "ERROR: Unknown error\n");
+ ret = -EINVAL;
break;
}
- err = -status;
} else {
- if (hwreq->op == ZYNQMP_AES_ENCRYPT)
- data_size = data_size + ZYNQMP_AES_AUTH_SIZE;
+ if (hwreq->op == XILINX_AES_ENCRYPT)
+ data_size = data_size + crypto_aead_authsize(aead);
else
- data_size = data_size - ZYNQMP_AES_AUTH_SIZE;
+ data_size = data_size - XILINX_AES_AUTH_SIZE;
sg_copy_from_buffer(req->dst, sg_nents(req->dst),
kbuf, data_size);
- err = 0;
+ ret = 0;
}
- if (kbuf) {
- memzero_explicit(kbuf, dma_size);
- dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+freemem:
+ memzero_explicit(kbuf, dma_size);
+ kfree(kbuf);
+ memzero_explicit(dmabuf, sizeof(struct zynqmp_aead_hw_req) + GCM_AES_IV_SIZE);
+ kfree(dmabuf);
+
+ return ret;
+}
+
+static int versal_aes_aead_cipher(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ dma_addr_t dma_addr_data, dma_addr_hw_req, dma_addr_in;
+ u32 total_len = req->assoclen + req->cryptlen;
+ struct device *dev = tfm_ctx->dev;
+ struct versal_init_ops *hwreq;
+ struct versal_in_params *in;
+ u32 gcm_offset, out_len;
+ size_t dmabuf_size;
+ size_t kbuf_size;
+ void *dmabuf;
+ char *kbuf;
+ int ret;
+
+ kbuf_size = total_len + XILINX_AES_AUTH_SIZE;
+ kbuf = kmalloc(kbuf_size, GFP_KERNEL);
+ if (unlikely(!kbuf)) {
+ ret = -ENOMEM;
+ goto err;
}
- if (hwreq) {
- memzero_explicit(hwreq, sizeof(struct zynqmp_aead_hw_req));
- dma_free_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
- hwreq, dma_addr_hw_req);
+ dmabuf_size = sizeof(struct versal_init_ops) +
+ sizeof(struct versal_in_params) +
+ GCM_AES_IV_SIZE;
+ dmabuf = kmalloc(dmabuf_size, GFP_KERNEL);
+ if (unlikely(!dmabuf)) {
+ ret = -ENOMEM;
+ goto buf1_free;
}
- return err;
+
+ dma_addr_hw_req = dma_map_single(dev, dmabuf, dmabuf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, dma_addr_hw_req))) {
+ ret = -ENOMEM;
+ goto buf2_free;
+ }
+ scatterwalk_map_and_copy(kbuf, req->src, 0, total_len, 0);
+ dma_addr_data = dma_map_single(dev, kbuf, kbuf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, dma_addr_data))) {
+ dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+ ret = -ENOMEM;
+ goto buf2_free;
+ }
+ hwreq = dmabuf;
+ in = dmabuf + sizeof(struct versal_init_ops);
+ memcpy(dmabuf + sizeof(struct versal_init_ops) +
+ sizeof(struct versal_in_params), req->iv, GCM_AES_IV_SIZE);
+ hwreq->iv = dma_addr_hw_req + sizeof(struct versal_init_ops) +
+ sizeof(struct versal_in_params);
+ hwreq->keysrc = tfm_ctx->keysrc;
+ dma_addr_in = dma_addr_hw_req + sizeof(struct versal_init_ops);
+ if (rq_ctx->op == XILINX_AES_ENCRYPT) {
+ hwreq->op = VERSAL_AES_ENCRYPT;
+ out_len = total_len + crypto_aead_authsize(aead);
+ in->size = req->cryptlen;
+ } else {
+ hwreq->op = VERSAL_AES_DECRYPT;
+ out_len = total_len - XILINX_AES_AUTH_SIZE;
+ in->size = req->cryptlen - XILINX_AES_AUTH_SIZE;
+ }
+
+ if (tfm_ctx->keylen == AES_KEYSIZE_128)
+ hwreq->size = HW_AES_KEY_SIZE_128;
+ else
+ hwreq->size = HW_AES_KEY_SIZE_256;
+
+ /* Request aes key write for volatile user keys */
+ if (hwreq->keysrc >= VERSAL_AES_USER_KEY_0 && hwreq->keysrc <= VERSAL_AES_USER_KEY_7) {
+ ret = versal_pm_aes_key_write(hwreq->size, hwreq->keysrc,
+ tfm_ctx->key_dma_addr);
+ if (ret)
+ goto unmap;
+ }
+
+ in->in_data_addr = dma_addr_data + req->assoclen;
+ in->is_last = 1;
+ gcm_offset = req->assoclen + in->size;
+ dma_sync_single_for_device(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+ ret = versal_pm_aes_op_init(dma_addr_hw_req);
+ if (ret)
+ goto clearkey;
+
+ if (req->assoclen > 0) {
+ /* Currently GMAC is OFF by default */
+ ret = versal_pm_aes_update_aad(dma_addr_data, req->assoclen);
+ if (ret)
+ goto clearkey;
+ }
+ if (rq_ctx->op == XILINX_AES_ENCRYPT) {
+ ret = versal_pm_aes_enc_update(dma_addr_in,
+ dma_addr_data + req->assoclen);
+ if (ret)
+ goto clearkey;
+
+ ret = versal_pm_aes_enc_final(dma_addr_data + gcm_offset);
+ if (ret)
+ goto clearkey;
+ } else {
+ ret = versal_pm_aes_dec_update(dma_addr_in,
+ dma_addr_data + req->assoclen);
+ if (ret)
+ goto clearkey;
+
+ ret = versal_pm_aes_dec_final(dma_addr_data + gcm_offset);
+ if (ret) {
+ ret = -EBADMSG;
+ goto clearkey;
+ }
+ }
+ dma_unmap_single(dev, dma_addr_data, kbuf_size, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+ sg_copy_from_buffer(req->dst, sg_nents(req->dst),
+ kbuf, out_len);
+ dma_addr_data = 0;
+ dma_addr_hw_req = 0;
+
+clearkey:
+ if (hwreq->keysrc >= VERSAL_AES_USER_KEY_0 && hwreq->keysrc <= VERSAL_AES_USER_KEY_7)
+ versal_pm_aes_key_zero(hwreq->keysrc);
+unmap:
+ if (unlikely(dma_addr_data))
+ dma_unmap_single(dev, dma_addr_data, kbuf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_addr_hw_req))
+ dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+buf2_free:
+ memzero_explicit(dmabuf, dmabuf_size);
+ kfree(dmabuf);
+buf1_free:
+ memzero_explicit(kbuf, kbuf_size);
+ kfree(kbuf);
+err:
+ return ret;
}
-static int zynqmp_fallback_check(struct zynqmp_aead_tfm_ctx *tfm_ctx,
+static int zynqmp_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
struct aead_request *req)
{
- int need_fallback = 0;
- struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
- if (tfm_ctx->authsize != ZYNQMP_AES_AUTH_SIZE)
- need_fallback = 1;
+ if (tfm_ctx->authsize != XILINX_AES_AUTH_SIZE && rq_ctx->op == XILINX_AES_DECRYPT)
+ return 1;
- if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
- tfm_ctx->keylen != ZYNQMP_AES_KEY_SIZE) {
- need_fallback = 1;
- }
if (req->assoclen != 0 ||
- req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE) {
- need_fallback = 1;
- }
+ req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE)
+ return 1;
+ if (tfm_ctx->keylen == AES_KEYSIZE_128 ||
+ tfm_ctx->keylen == AES_KEYSIZE_192)
+ return 1;
+
if ((req->cryptlen % ZYNQMP_AES_WORD_LEN) != 0)
- need_fallback = 1;
+ return 1;
- if (rq_ctx->op == ZYNQMP_AES_DECRYPT &&
- req->cryptlen <= ZYNQMP_AES_AUTH_SIZE) {
- need_fallback = 1;
- }
- return need_fallback;
+ if (rq_ctx->op == XILINX_AES_DECRYPT &&
+ req->cryptlen <= XILINX_AES_AUTH_SIZE)
+ return 1;
+
+ return 0;
}
-static int zynqmp_handle_aes_req(struct crypto_engine *engine,
- void *req)
+static int versal_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
+ struct aead_request *req)
+{
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+ if (tfm_ctx->authsize != XILINX_AES_AUTH_SIZE && rq_ctx->op == XILINX_AES_DECRYPT)
+ return 1;
+
+ if (tfm_ctx->keylen == AES_KEYSIZE_192)
+ return 1;
+
+ if (req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE ||
+ req->cryptlen % ZYNQMP_AES_WORD_LEN ||
+ req->assoclen % VERSAL_AES_QWORD_LEN)
+ return 1;
+
+ if (rq_ctx->op == XILINX_AES_DECRYPT &&
+ req->cryptlen <= XILINX_AES_AUTH_SIZE)
+ return 1;
+
+ return 0;
+}
+
+static int xilinx_handle_aes_req(struct crypto_engine *engine, void *req)
{
struct aead_request *areq =
container_of(req, struct aead_request, base);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
- struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(areq);
- struct aead_request *subreq = aead_request_ctx(req);
- int need_fallback;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct xilinx_aead_alg *drv_ctx;
int err;
- need_fallback = zynqmp_fallback_check(tfm_ctx, areq);
-
- if (need_fallback) {
- aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
-
- aead_request_set_callback(subreq, areq->base.flags,
- NULL, NULL);
- aead_request_set_crypt(subreq, areq->src, areq->dst,
- areq->cryptlen, areq->iv);
- aead_request_set_ad(subreq, areq->assoclen);
- if (rq_ctx->op == ZYNQMP_AES_ENCRYPT)
- err = crypto_aead_encrypt(subreq);
- else
- err = crypto_aead_decrypt(subreq);
- } else {
- err = zynqmp_aes_aead_cipher(areq);
- }
-
+ drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+ err = drv_ctx->aes_aead_cipher(areq);
local_bh_disable();
crypto_finalize_aead_request(engine, areq, err);
local_bh_enable();
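
A large part of the rework above swaps dma_alloc_coherent() for plain kmalloc() plus streaming DMA: map with dma_map_single(), publish any CPU writes made after mapping with dma_sync_single_for_device(), and unmap before the CPU reads results back. The pattern condensed into a sketch with hypothetical names:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/* Hand one kmalloc'ed buffer to a device via a streaming mapping. */
	static int stream_to_device(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t dma;
		int ret;

		dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* make CPU writes done after mapping visible to the device */
		dma_sync_single_for_device(dev, dma, len, DMA_BIDIRECTIONAL);

		ret = 0;	/* stand-in for the firmware call consuming 'dma' */

		/* hand ownership back to the CPU before reading results */
		dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
		return ret;
	}
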
@@ -242,209 +444,584 @@ static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct zynqmp_aead_tfm_ctx *tfm_ctx =
- (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ int err;
+
+ if (keylen == AES_KEYSIZE_256) {
+ memcpy(tfm_ctx->key, key, keylen);
+ dma_sync_single_for_device(tfm_ctx->dev, tfm_ctx->key_dma_addr,
+ AES_KEYSIZE_256,
+ DMA_TO_DEVICE);
+ }
+
+ tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ err = crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+ if (err)
+ goto err;
+ tfm_ctx->keylen = keylen;
+ tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
+err:
+ return err;
+}
+
+static int zynqmp_paes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_hwkey_info hwkey;
+ unsigned char keysrc;
+ int err = -EINVAL;
+
+ if (keylen != sizeof(struct xilinx_hwkey_info))
+ return -EINVAL;
+ memcpy(&hwkey, key, sizeof(struct xilinx_hwkey_info));
+ if (hwkey.magic != XILINX_KEY_MAGIC)
+ return -EINVAL;
+ keysrc = hwkey.type;
+ if (keysrc == ZYNQMP_AES_DEV_KEY ||
+ keysrc == ZYNQMP_AES_PUF_KEY) {
+ tfm_ctx->keysrc = keysrc;
+ tfm_ctx->keylen = sizeof(struct xilinx_hwkey_info);
+ err = 0;
+ }
+
+ return err;
+}
+
+static int versal_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_hwkey_info hwkey;
unsigned char keysrc;
+ int err;
- if (keylen == ZYNQMP_KEY_SRC_SEL_KEY_LEN) {
- keysrc = *key;
- if (keysrc == ZYNQMP_AES_KUP_KEY ||
- keysrc == ZYNQMP_AES_DEV_KEY ||
- keysrc == ZYNQMP_AES_PUF_KEY) {
- tfm_ctx->keysrc = (enum zynqmp_aead_keysrc)keysrc;
- } else {
- tfm_ctx->keylen = keylen;
+ tfm_ctx->keysrc = VERSAL_AES_USER_KEY_0;
+ if (keylen == sizeof(struct xilinx_hwkey_info)) {
+ memcpy(&hwkey, key, sizeof(struct xilinx_hwkey_info));
+ if (hwkey.magic != XILINX_KEY_MAGIC)
+ return -EINVAL;
+
+ keysrc = hwkey.type;
+ if (keysrc >= VERSAL_AES_USER_KEY_1 &&
+ keysrc <= VERSAL_AES_USER_KEY_7) {
+ tfm_ctx->keysrc = keysrc;
+ tfm_ctx->keylen = sizeof(struct xilinx_hwkey_info);
+ return 0;
}
- } else {
+ return -EINVAL;
+ }
+
+ if (keylen == AES_KEYSIZE_256 || keylen == AES_KEYSIZE_128) {
tfm_ctx->keylen = keylen;
- if (keylen == ZYNQMP_AES_KEY_SIZE) {
- tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
- memcpy(tfm_ctx->key, key, keylen);
- }
+ memcpy(tfm_ctx->key, key, keylen);
+ dma_sync_single_for_device(tfm_ctx->dev, tfm_ctx->key_dma_addr,
+ AES_KEYSIZE_256,
+ DMA_TO_DEVICE);
}
tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+ if (!err)
+ tfm_ctx->keylen = keylen;
- return crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+ return err;
}
-static int zynqmp_aes_aead_setauthsize(struct crypto_aead *aead,
+static int versal_paes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_hwkey_info hwkey;
+ unsigned char keysrc;
+ int err = 0;
+
+ if (keylen != sizeof(struct xilinx_hwkey_info))
+ return -EINVAL;
+
+ memcpy(&hwkey, key, sizeof(struct xilinx_hwkey_info));
+ if (hwkey.magic != XILINX_KEY_MAGIC)
+ return -EINVAL;
+
+ keysrc = hwkey.type;
+
+ switch (keysrc) {
+ case VERSAL_AES_EFUSE_USER_KEY_0:
+ case VERSAL_AES_EFUSE_USER_KEY_1:
+ case VERSAL_AES_EFUSE_USER_RED_KEY_0:
+ case VERSAL_AES_EFUSE_USER_RED_KEY_1:
+ case VERSAL_AES_PUF_KEY:
+ tfm_ctx->keysrc = keysrc;
+ tfm_ctx->keylen = sizeof(struct xilinx_hwkey_info);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int xilinx_aes_aead_setauthsize(struct crypto_aead *aead,
unsigned int authsize)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct zynqmp_aead_tfm_ctx *tfm_ctx =
- (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
tfm_ctx->authsize = authsize;
- return crypto_aead_setauthsize(tfm_ctx->fbk_cipher, authsize);
+ return tfm_ctx->fbk_cipher ? crypto_aead_setauthsize(tfm_ctx->fbk_cipher, authsize) : 0;
+}
+
+static int xilinx_aes_fallback_crypt(struct aead_request *req, bool encrypt)
+{
+ struct aead_request *subreq = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+
+ aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
+ aead_request_set_callback(subreq, req->base.flags, NULL, NULL);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return encrypt ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
static int zynqmp_aes_aead_encrypt(struct aead_request *req)
{
- struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct aead_alg *alg = crypto_aead_alg(aead);
- struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct xilinx_aead_alg *drv_ctx;
+ int err;
+
+ drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
+ tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+ return -EINVAL;
- rq_ctx->op = ZYNQMP_AES_ENCRYPT;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
+ rq_ctx->op = XILINX_AES_ENCRYPT;
+ err = zynqmp_fallback_check(tfm_ctx, req);
+ if (err && tfm_ctx->keysrc != ZYNQMP_AES_KUP_KEY)
+ return -EOPNOTSUPP;
- return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+ if (err)
+ return xilinx_aes_fallback_crypt(req, true);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
+}
+
+static int versal_aes_aead_encrypt(struct aead_request *req)
+{
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct xilinx_aead_alg *drv_ctx;
+ int err;
+
+ drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+ rq_ctx->op = XILINX_AES_ENCRYPT;
+ if (tfm_ctx->keysrc >= VERSAL_AES_USER_KEY_0 &&
+ tfm_ctx->keysrc <= VERSAL_AES_USER_KEY_7 &&
+ tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+ return -EINVAL;
+ err = versal_fallback_check(tfm_ctx, req);
+ if (err && (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 ||
+ tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7))
+ return -EOPNOTSUPP;
+ if (err)
+ return xilinx_aes_fallback_crypt(req, true);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
}
static int zynqmp_aes_aead_decrypt(struct aead_request *req)
{
- struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct aead_alg *alg = crypto_aead_alg(aead);
- struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct xilinx_aead_alg *drv_ctx;
+ int err;
- rq_ctx->op = ZYNQMP_AES_DECRYPT;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
+ rq_ctx->op = XILINX_AES_DECRYPT;
+ drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+ if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY &&
+ tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+ return -EINVAL;
+ err = zynqmp_fallback_check(tfm_ctx, req);
+ if (err && tfm_ctx->keysrc != ZYNQMP_AES_KUP_KEY)
+ return -EOPNOTSUPP;
+ if (err)
+ return xilinx_aes_fallback_crypt(req, false);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
+}
- return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+static int xilinx_paes_aead_init(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_aead_alg *drv_alg;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+
+ drv_alg = container_of(alg, struct xilinx_aead_alg, aead.base);
+ tfm_ctx->dev = drv_alg->aead_dev->dev;
+ tfm_ctx->keylen = 0;
+ tfm_ctx->key = NULL;
+ tfm_ctx->fbk_cipher = NULL;
+ crypto_aead_set_reqsize(aead, sizeof(struct xilinx_aead_req_ctx));
+
+ return 0;
+}
+
+static int versal_aes_aead_decrypt(struct aead_request *req)
+{
+ struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct xilinx_aead_alg *drv_ctx;
+ int err;
+
+ drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+ rq_ctx->op = XILINX_AES_DECRYPT;
+ if (tfm_ctx->keysrc >= VERSAL_AES_USER_KEY_0 &&
+ tfm_ctx->keysrc <= VERSAL_AES_USER_KEY_7 &&
+ tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+ return -EINVAL;
+
+ err = versal_fallback_check(tfm_ctx, req);
+ if (err &&
+ (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 ||
+ tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7))
+ return -EOPNOTSUPP;
+ if (err)
+ return xilinx_aes_fallback_crypt(req, false);
+
+ return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
}
-static int zynqmp_aes_aead_init(struct crypto_aead *aead)
+static int xilinx_aes_aead_init(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct zynqmp_aead_tfm_ctx *tfm_ctx =
- (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
- struct zynqmp_aead_drv_ctx *drv_ctx;
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_aead_alg *drv_ctx;
struct aead_alg *alg = crypto_aead_alg(aead);
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
- tfm_ctx->dev = drv_ctx->dev;
+ drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+ tfm_ctx->dev = drv_ctx->aead_dev->dev;
+ tfm_ctx->keylen = 0;
- tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.base.cra_name,
+ tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->aead.base.base.cra_name,
0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tfm_ctx->fbk_cipher)) {
- pr_err("%s() Error: failed to allocate fallback for %s\n",
- __func__, drv_ctx->alg.aead.base.base.cra_name);
+ dev_err(tfm_ctx->dev, "failed to allocate fallback for %s\n",
+ drv_ctx->aead.base.base.cra_name);
return PTR_ERR(tfm_ctx->fbk_cipher);
}
-
+ tfm_ctx->key = kmalloc(AES_KEYSIZE_256, GFP_KERNEL);
+ if (!tfm_ctx->key) {
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ return -ENOMEM;
+ }
+ tfm_ctx->key_dma_addr = dma_map_single(tfm_ctx->dev, tfm_ctx->key,
+ AES_KEYSIZE_256,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tfm_ctx->dev, tfm_ctx->key_dma_addr))) {
+ kfree(tfm_ctx->key);
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ tfm_ctx->fbk_cipher = NULL;
+ return -ENOMEM;
+ }
crypto_aead_set_reqsize(aead,
- max(sizeof(struct zynqmp_aead_req_ctx),
+ max(sizeof(struct xilinx_aead_req_ctx),
sizeof(struct aead_request) +
crypto_aead_reqsize(tfm_ctx->fbk_cipher)));
return 0;
}
-static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
+static void xilinx_paes_aead_exit(struct crypto_aead *aead)
{
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
- struct zynqmp_aead_tfm_ctx *tfm_ctx =
- (struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ memzero_explicit(tfm_ctx, sizeof(struct xilinx_aead_tfm_ctx));
+}
+
+static void xilinx_aes_aead_exit(struct crypto_aead *aead)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single(tfm_ctx->dev, tfm_ctx->key_dma_addr, AES_KEYSIZE_256, DMA_TO_DEVICE);
+ kfree(tfm_ctx->key);
if (tfm_ctx->fbk_cipher) {
crypto_free_aead(tfm_ctx->fbk_cipher);
tfm_ctx->fbk_cipher = NULL;
}
- memzero_explicit(tfm_ctx, sizeof(struct zynqmp_aead_tfm_ctx));
+ memzero_explicit(tfm_ctx, sizeof(struct xilinx_aead_tfm_ctx));
}
-static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
- .alg.aead.base = {
- .setkey = zynqmp_aes_aead_setkey,
- .setauthsize = zynqmp_aes_aead_setauthsize,
- .encrypt = zynqmp_aes_aead_encrypt,
- .decrypt = zynqmp_aes_aead_decrypt,
- .init = zynqmp_aes_aead_init,
- .exit = zynqmp_aes_aead_exit,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = ZYNQMP_AES_AUTH_SIZE,
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "xilinx-zynqmp-aes-gcm",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = ZYNQMP_AES_BLK_SIZE,
- .cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
- .cra_module = THIS_MODULE,
- }
+static struct xilinx_aead_alg zynqmp_aes_algs[] = {
+ {
+ .aes_aead_cipher = zynqmp_aes_aead_cipher,
+ .aead.base = {
+ .setkey = zynqmp_aes_aead_setkey,
+ .setauthsize = xilinx_aes_aead_setauthsize,
+ .encrypt = zynqmp_aes_aead_encrypt,
+ .decrypt = zynqmp_aes_aead_decrypt,
+ .init = xilinx_aes_aead_init,
+ .exit = xilinx_aes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = XILINX_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "xilinx-zynqmp-aes-gcm",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = XILINX_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct xilinx_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .aead.op = {
+ .do_one_request = xilinx_handle_aes_req,
+ },
+ .dma_bit_mask = ZYNQMP_DMA_BIT_MASK,
},
- .alg.aead.op = {
- .do_one_request = zynqmp_handle_aes_req,
+ {
+ .aes_aead_cipher = zynqmp_aes_aead_cipher,
+ .aead.base = {
+ .setkey = zynqmp_paes_aead_setkey,
+ .setauthsize = xilinx_aes_aead_setauthsize,
+ .encrypt = zynqmp_aes_aead_encrypt,
+ .decrypt = zynqmp_aes_aead_decrypt,
+ .init = xilinx_paes_aead_init,
+ .exit = xilinx_paes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = XILINX_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(paes)",
+ .cra_driver_name = "xilinx-zynqmp-paes-gcm",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = XILINX_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct xilinx_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .aead.op = {
+ .do_one_request = xilinx_handle_aes_req,
+ },
+ .dma_bit_mask = ZYNQMP_DMA_BIT_MASK,
},
+ { /* sentinel */ }
+};
+
+static struct xilinx_aead_alg versal_aes_algs[] = {
+ {
+ .aes_aead_cipher = versal_aes_aead_cipher,
+ .aead.base = {
+ .setkey = versal_aes_aead_setkey,
+ .setauthsize = xilinx_aes_aead_setauthsize,
+ .encrypt = versal_aes_aead_encrypt,
+ .decrypt = versal_aes_aead_decrypt,
+ .init = xilinx_aes_aead_init,
+ .exit = xilinx_aes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = XILINX_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "versal-aes-gcm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = XILINX_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct xilinx_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .aead.op = {
+ .do_one_request = xilinx_handle_aes_req,
+ },
+ .dma_bit_mask = VERSAL_DMA_BIT_MASK,
+ },
+ {
+ .aes_aead_cipher = versal_aes_aead_cipher,
+ .aead.base = {
+ .setkey = versal_paes_aead_setkey,
+ .setauthsize = xilinx_aes_aead_setauthsize,
+ .encrypt = versal_aes_aead_encrypt,
+ .decrypt = versal_aes_aead_decrypt,
+ .init = xilinx_paes_aead_init,
+ .exit = xilinx_paes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = XILINX_AES_AUTH_SIZE,
+ .base = {
+ .cra_name = "gcm(paes)",
+ .cra_driver_name = "versal-paes-gcm",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = XILINX_AES_BLK_SIZE,
+ .cra_ctxsize = sizeof(struct xilinx_aead_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .aead.op = {
+ .do_one_request = xilinx_handle_aes_req,
+ },
+ .dma_bit_mask = VERSAL_DMA_BIT_MASK,
+ },
+ { /* sentinel */ }
};
-static int zynqmp_aes_aead_probe(struct platform_device *pdev)
+static struct xlnx_feature aes_feature_map[] = {
+ {
+ .family = PM_ZYNQMP_FAMILY_CODE,
+ .feature_id = PM_SECURE_AES,
+ .data = zynqmp_aes_algs,
+ },
+ {
+ .family = PM_VERSAL_FAMILY_CODE,
+ .feature_id = XSECURE_API_AES_OP_INIT,
+ .data = versal_aes_algs,
+ },
+ { /* sentinel */ }
+};
+
+static int xilinx_aes_aead_probe(struct platform_device *pdev)
{
+ struct xilinx_aead_alg *aead_algs;
struct device *dev = &pdev->dev;
int err;
+ int i;
+
+ /* Verify the hardware is present */
+ aead_algs = xlnx_get_crypto_dev_data(aes_feature_map);
+ if (IS_ERR(aead_algs)) {
+ dev_err(dev, "AES is not supported on the platform\n");
+ return PTR_ERR(aead_algs);
+ }
/* ZynqMP AES driver supports only one instance */
- if (!aes_drv_ctx.dev)
- aes_drv_ctx.dev = dev;
- else
+ if (aead_dev)
return -ENODEV;
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ aead_dev = devm_kzalloc(dev, sizeof(*aead_dev), GFP_KERNEL);
+ if (!aead_dev)
+ return -ENOMEM;
+ aead_dev->dev = dev;
+ aead_dev->aead_algs = aead_algs;
+ platform_set_drvdata(pdev, aead_dev);
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aead_algs[0].dma_bit_mask));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
return err;
}
- aes_drv_ctx.engine = crypto_engine_alloc_init(dev, 1);
- if (!aes_drv_ctx.engine) {
+ aead_dev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!aead_dev->engine) {
dev_err(dev, "Cannot alloc AES engine\n");
- err = -ENOMEM;
- goto err_engine;
+ return -ENOMEM;
}
- err = crypto_engine_start(aes_drv_ctx.engine);
+ err = crypto_engine_start(aead_dev->engine);
if (err) {
dev_err(dev, "Cannot start AES engine\n");
- goto err_engine;
+ goto err_engine_start;
}
- err = crypto_engine_register_aead(&aes_drv_ctx.alg.aead);
- if (err < 0) {
- dev_err(dev, "Failed to register AEAD alg.\n");
- goto err_aead;
+ for (i = 0; aead_dev->aead_algs[i].dma_bit_mask; i++) {
+ aead_dev->aead_algs[i].aead_dev = aead_dev;
+ err = crypto_engine_register_aead(&aead_dev->aead_algs[i].aead);
+ if (err < 0) {
+ dev_err(dev, "Failed to register AEAD alg %d.\n", i);
+ goto err_alg_register;
+ }
}
- return 0;
-err_aead:
- crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
+ return 0;
-err_engine:
- if (aes_drv_ctx.engine)
- crypto_engine_exit(aes_drv_ctx.engine);
+err_alg_register:
+ while (i > 0)
+ crypto_engine_unregister_aead(&aead_dev->aead_algs[--i].aead);
+err_engine_start:
+ crypto_engine_exit(aead_dev->engine);
return err;
}
-static void zynqmp_aes_aead_remove(struct platform_device *pdev)
+static void xilinx_aes_aead_remove(struct platform_device *pdev)
{
- crypto_engine_exit(aes_drv_ctx.engine);
- crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
-}
+ aead_dev = platform_get_drvdata(pdev);
+ crypto_engine_exit(aead_dev->engine);
+ for (int i = 0; aead_dev->aead_algs[i].dma_bit_mask; i++)
+ crypto_engine_unregister_aead(&aead_dev->aead_algs[i].aead);
-static const struct of_device_id zynqmp_aes_dt_ids[] = {
- { .compatible = "xlnx,zynqmp-aes" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
+ aead_dev = NULL;
+}
-static struct platform_driver zynqmp_aes_driver = {
- .probe = zynqmp_aes_aead_probe,
- .remove = zynqmp_aes_aead_remove,
+static struct platform_driver xilinx_aes_driver = {
+ .probe = xilinx_aes_aead_probe,
+ .remove = xilinx_aes_aead_remove,
.driver = {
.name = "zynqmp-aes",
- .of_match_table = zynqmp_aes_dt_ids,
},
};
-module_platform_driver(zynqmp_aes_driver);
-MODULE_DESCRIPTION("Xilinx ZynqMP AES Driver");
+static struct platform_device *platform_dev;
+
+static int __init aes_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&xilinx_aes_driver);
+ if (ret)
+ return ret;
+
+ platform_dev = platform_device_register_simple(xilinx_aes_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(platform_dev)) {
+ ret = PTR_ERR(platform_dev);
+ platform_driver_unregister(&xilinx_aes_driver);
+ }
+
+ return ret;
+}
+
+static void __exit aes_driver_exit(void)
+{
+ platform_device_unregister(platform_dev);
+ platform_driver_unregister(&xilinx_aes_driver);
+}
+
+module_init(aes_driver_init);
+module_exit(aes_driver_exit);
+MODULE_DESCRIPTION("Xilinx ZynqMP and Versal AES-GCM hardware acceleration support");
MODULE_LICENSE("GPL");
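
Both the ZynqMP and the new Versal algorithms register through the kernel crypto API ("gcm(aes)" with a software fallback, "gcm(paes)" for hardware-resident keys), so they are reachable from user space via AF_ALG once CONFIG_CRYPTO_USER_API_AEAD is enabled. A minimal user-space sketch that sets up a gcm(aes) transform; whichever implementation has the highest priority is selected (the Versal entries register at 300), and the sendmsg()/read() data plumbing is elided:

	/* cc -o gcm_demo gcm_demo.c */
	#include <linux/if_alg.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "aead",
			.salg_name   = "gcm(aes)",
		};
		unsigned char key[32] = { 0 };	/* demo key: all zeroes */
		int tfm, op;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)))
			return 1;
		if (setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key)))
			return 1;
		/* GCM tag length: optval is unused, optlen carries the size */
		if (setsockopt(tfm, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16))
			return 1;

		op = accept(tfm, NULL, 0);
		/* ... sendmsg() with ALG_SET_OP/ALG_SET_IV cmsgs, then read() ... */
		close(op);
		close(tfm);
		return 0;
	}
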