author     Jason Liu <r64343@freescale.com>    2013-09-07 13:09:59 +0800
committer  Jason Liu <r64343@freescale.com>    2013-09-09 10:48:51 +0800
commit     9d896050184571bc4c24225fe864110be692232d (patch)
tree       92b65148083debb8c7432214ac598231d360a811
parent     6a7c50e9e62c33ef488b74a2c91420d0fba4c9c4 (diff)
ENGR00278672-1 crypto/caam: add the crypto/caam driver support
This patch adds the crypto/caam driver support.

Signed-off-by: Kudrick Jeffery <B37172@freescale.com>
-rw-r--r--  drivers/crypto/caam/Kconfig        50
-rw-r--r--  drivers/crypto/caam/Makefile        3
-rw-r--r--  drivers/crypto/caam/caamalg.c     276
-rw-r--r--  drivers/crypto/caam/caamhash.c    387
-rw-r--r--  drivers/crypto/caam/caamrng.c      73
-rw-r--r--  drivers/crypto/caam/compat.h        6
-rw-r--r--  drivers/crypto/caam/ctrl.c        198
-rw-r--r--  drivers/crypto/caam/desc.h         67
-rw-r--r--  drivers/crypto/caam/intern.h       13
-rw-r--r--  drivers/crypto/caam/jr.c           48
-rw-r--r--  drivers/crypto/caam/jr.h            3
-rw-r--r--  drivers/crypto/caam/key_gen.c       6
-rw-r--r--  drivers/crypto/caam/regs.h        254
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h   18
14 files changed, 1300 insertions(+), 102 deletions(-)
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index b44091c47f75..493fe4073597 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_MXC
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -98,3 +98,51 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ boolean "Test caam rng"
+ depends on CRYPTO_DEV_FSL_CAAM_RNG_API
+ default n
+ help
+ Selecting this will enable self-test for caam rng.
+
+config CRYPTO_DEV_FSL_CAAM_SM
+ boolean "CAAM Secure Memory / Keystore API (EXPERIMENTAL)"
+ default n
+ help
+ Enables use of a prototype kernel-level Keystore API with CAAM
+ Secure Memory for insertion/extraction of bus-protected secrets.
+
+config CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE
+ int "Size of each keystore slot in Secure Memory"
+ depends on CRYPTO_DEV_FSL_CAAM_SM
+ range 5 9
+ default 7
+ help
+ Select size of allocation units to divide Secure Memory pages into
+ (the size of a "slot" as referenced inside the API code).
+ Established as powers of two.
+ Examples:
+ 5 => 32 bytes
+ 6 => 64 bytes
+ 7 => 128 bytes
+ 8 => 256 bytes
+ 9 => 512 bytes
+
+config CRYPTO_DEV_FSL_CAAM_SM_TEST
+ boolean "CAAM Secure Memory - Keystore Test/Example (EXPERIMENTAL)"
+ depends on CRYPTO_DEV_FSL_CAAM_SM
+ default n
+ help
+ Example thread to exercise the Keystore API and to verify that
+ stored and recovered secrets can be used for general purpose
+ encryption/decryption.
+
+config CRYPTO_DEV_FSL_CAAM_SECVIO
+ boolean "CAAM/SNVS Security Violation Handler (EXPERIMENTAL)"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default n
+ help
+ Enables installation of an interrupt handler with registrable
+ handler functions which can be specified to act on the consequences
+ of a security violation.
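
[Note: the CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE help text above defines the slot size as a power of two (5 => 32 bytes ... 9 => 512 bytes). A minimal sketch of how keystore code would presumably turn the Kconfig value into an allocation unit; the SM_SLOT_* macro names are illustrative and not taken from this patch:

    /* Illustrative only -- not part of this patch */
    #define SM_SLOT_SHIFT          CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE  /* 5..9 */
    #define SM_SLOT_SIZE           (1UL << SM_SLOT_SHIFT)                  /* 32..512 bytes */
    #define SM_SLOTS_PER_PAGE(pg)  ((pg) / SM_SLOT_SIZE)
]
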
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index b1eb44838db5..b7db9e9efea5 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,5 +6,8 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+#obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM) += sm_store.o
+#obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST) += sm_test.o
+#obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO) += secvio.o
caam-objs := ctrl.o jr.o error.o key_gen.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index bf416a8391a7..b7ed1059779f 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for crypto API
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*
* Based on talitos crypto API driver.
*
@@ -53,6 +53,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include <linux/string.h>
/*
* crypto alg
@@ -290,6 +291,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
@@ -357,6 +360,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
@@ -440,6 +445,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
@@ -523,6 +530,9 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->split_key_pad_len + enckeylen, 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->split_key_pad_len + enckeylen,
+ DMA_TO_DEVICE);
ctx->enckeylen = enckeylen;
@@ -561,6 +571,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -ENOMEM;
}
ctx->enckeylen = keylen;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -579,10 +590,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- /* Load iv */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | tfm->ivsize);
-
+ /* load IV */
+ if (strncmp(ablkcipher->base.__crt_alg->cra_name, "ctr(aes)", 8) == 0) {
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize |
+ (16 << LDST_OFFSET_SHIFT));
+ } else {
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+ }
/* Load operation */
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
@@ -602,6 +618,9 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
@@ -622,11 +641,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, jump_cmd);
/* load IV */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | tfm->ivsize);
+ if (strncmp(ablkcipher->base.__crt_alg->cra_name, "ctr(aes)", 8) == 0) {
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize |
+ (16 << LDST_OFFSET_SHIFT));
- /* Choose operation */
- append_dec_op1(desc, ctx->class1_alg_type);
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ } else {
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Choose operation */
+ append_dec_op1(desc, ctx->class1_alg_type);
+ }
/* Perform operation */
ablkcipher_append_src_dst(desc);
@@ -647,6 +675,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return ret;
}
@@ -1154,7 +1184,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
+ DMA_BIDIRECTIONAL, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1178,9 +1208,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_len += dst_nents;
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1221,6 +1252,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
return edesc;
}
@@ -1336,7 +1369,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
+ DMA_BIDIRECTIONAL, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1369,8 +1402,10 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
+
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1412,6 +1447,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
return edesc;
}
@@ -1505,6 +1542,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
* If so, include it. If not, create scatterlist.
*/
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
iv_contig = true;
else
@@ -1513,7 +1551,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ edesc = kzalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1545,6 +1583,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, DMA_TO_DEVICE);
edesc->iv_dma = iv_dma;
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
@@ -2008,6 +2049,70 @@ static struct caam_alg_template driver_algs[] = {
},
/* ablkcipher descriptor */
{
+ .name = "ecb(des)",
+ .driver_name = "ecb-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+ },
+ {
+ .name = "ecb(arc4)",
+ .driver_name = "ecb-arc4-caam",
+ .blocksize = ARC4_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ .ivsize = ARC4_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB
+ },
+ {
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
.name = "cbc(aes)",
.driver_name = "cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
@@ -2024,6 +2129,22 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-des3-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+ },
+ {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
@@ -2079,7 +2200,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
* distribute tfms across job rings to ensure in-order
* crypto request processing per tfm
*/
- ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+ ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2116,6 +2237,7 @@ static void __exit caam_algapi_exit(void)
struct device *ctrldev;
struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
+ int i, err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -2125,21 +2247,33 @@ static void __exit caam_algapi_exit(void)
}
pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!pdev) {
+ of_node_put(dev_node);
return;
+ }
ctrldev = &pdev->dev;
- of_node_put(dev_node);
priv = dev_get_drvdata(ctrldev);
- if (!priv->alg_list.next)
+ if (!priv->alg_list.next) {
+ of_node_put(dev_node);
return;
+ }
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
+
+ for (i = 0; i < priv->total_jobrs; i++) {
+ err = caam_jr_deregister(priv->algapi_jr[i]);
+ if (err < 0)
+ break;
+ }
+ kfree(priv->algapi_jr);
+
+ of_node_put(dev_node);
}
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
@@ -2167,8 +2301,12 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
+
+#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
+ alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
+#endif
+
switch (template->type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg->cra_type = &crypto_ablkcipher_type;
@@ -2192,9 +2330,11 @@ static int __init caam_algapi_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev;
+ struct device *ctrldev, **jrdev;
struct caam_drv_private *priv;
- int i = 0, err = 0;
+ int i = 0, err = 0, md_limit = 0;
+ int des_inst, aes_inst, md_inst;
+ u64 cha_inst;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -2204,21 +2344,81 @@ static int __init caam_algapi_init(void)
}
pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!pdev) {
+ of_node_put(dev_node);
return -ENODEV;
+ }
ctrldev = &pdev->dev;
priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
INIT_LIST_HEAD(&priv->alg_list);
+ jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
+ if (!jrdev) {
+ of_node_put(dev_node);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->total_jobrs; i++) {
+ err = caam_jr_register(ctrldev, &jrdev[i]);
+ if (err < 0)
+ break;
+ }
+ if (err < 0 && i == 0) {
+ dev_err(ctrldev, "algapi error in job ring registration: %d\n",
+ err);
+ of_node_put(dev_node);
+ kfree(jrdev);
+ return err;
+ }
+
+ priv->num_jrs_for_algapi = i;
+ priv->algapi_jr = jrdev;
atomic_set(&priv->tfm_count, -1);
- /* register crypto algorithms the device supports */
+ /*
+ * register crypto algorithms the device supports
+ * first, detect presence of DES, AES, and MD blocks. If MD present,
+ * determine limit of supported digest size
+ */
+ cha_inst = rd_reg64(&priv->ctrl->perfmon.cha_num);
+ des_inst = (cha_inst & CHA_ID_DES_MASK) >> CHA_ID_DES_SHIFT;
+ aes_inst = (cha_inst & CHA_ID_AES_MASK) >> CHA_ID_AES_SHIFT;
+ md_inst = (cha_inst & CHA_ID_MD_MASK) >> CHA_ID_MD_SHIFT;
+ if (md_inst) {
+ md_limit = SHA512_DIGEST_SIZE;
+ if ((rd_reg64(&priv->ctrl->perfmon.cha_id) & CHA_ID_MD_MASK)
+ == CHA_ID_MD_LP256) /* LP256 limits digest size */
+ md_limit = SHA256_DIGEST_SIZE;
+ }
+
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- /* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
+ bool done = false;
+
+authencesn:
+ /*
+ * All registrable algs in this module require a blockcipher.
+ * All aead algs require message digests, so check them for
+ * instantiation and size.
+ */
+ if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD) {
+ /* If no MD instantiated, or MD too small, skip */
+ if ((!md_inst) ||
+ (driver_algs[i].template_aead.maxauthsize >
+ md_limit))
+ continue;
+ }
+ /* If DES alg, and CHA not instantiated, skip */
+ if ((driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_3DES) ||
+ (driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_DES))
+ if (!des_inst)
+ continue;
+ /* If AES alg, and CHA not instantiated, skip */
+ if (driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_AES)
+ if (!aes_inst)
+ continue;
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
@@ -2233,13 +2433,35 @@ static int __init caam_algapi_init(void)
dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
- } else
+ } else {
list_add_tail(&t_alg->entry, &priv->alg_list);
+ dev_info(ctrldev, "%s\n",
+ t_alg->crypto_alg.cra_driver_name);
+
+ if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
+ !memcmp(driver_algs[i].name, "authenc", 7) &&
+ !done) {
+ char *name;
+
+ name = driver_algs[i].name;
+ memmove(name + 10, name + 7, strlen(name) - 7);
+ memcpy(name + 7, "esn", 3);
+
+ name = driver_algs[i].driver_name;
+ memmove(name + 10, name + 7, strlen(name) - 7);
+ memcpy(name + 7, "esn", 3);
+
+ done = true;
+ goto authencesn;
+ }
+ }
}
+
if (!list_empty(&priv->alg_list))
dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
(char *)of_get_property(dev_node, "compatible", NULL));
+ of_node_put(dev_node);
return err;
}
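
[Note: the caamalg.c hunks above add ecb(des), ecb(arc4), ecb(aes), ctr(aes) and ecb(des3_ede) ablkcipher templates and gate registration on CHA instantiation. A hedged usage sketch against the kernel 3.x ablkcipher API, assuming the "ctr(aes)" template registers as listed; a real caller would set a request callback and wait out -EINPROGRESS:

    /* Illustrative only -- not part of this patch */
    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int try_ctr_aes(const u8 *key, unsigned int keylen,
                           struct scatterlist *src, struct scatterlist *dst,
                           unsigned int nbytes, u8 *iv)
    {
            struct crypto_ablkcipher *tfm;
            struct ablkcipher_request *req;
            int ret;

            tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_ablkcipher_setkey(tfm, key, keylen);
            if (ret)
                    goto out_free_tfm;

            req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_free_tfm;
            }

            /* completion callback omitted; async tfms return -EINPROGRESS */
            ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
            ret = crypto_ablkcipher_encrypt(req);

            ablkcipher_request_free(req);
    out_free_tfm:
            crypto_free_ablkcipher(tfm);
            return ret;
    }
]
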
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 84573b4d6f92..f90449943e31 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
- * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
*
* Based on caamalg.c crypto API driver.
*
@@ -62,6 +62,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include <linux/string.h>
#define CAAM_CRA_PRIORITY 3000
@@ -116,6 +117,7 @@ struct caam_hash_ctx {
u8 key[CAAM_MAX_HASH_KEY_SIZE];
dma_addr_t key_dma;
int ctx_len;
+ unsigned int key_len;
unsigned int split_key_len;
unsigned int split_key_pad_len;
};
@@ -167,6 +169,7 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
dma_addr_t buf_dma;
buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, buf_dma, buflen, DMA_TO_DEVICE);
dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
return buf_dma;
@@ -209,6 +212,9 @@ static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
u32 flag)
{
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ if ((flag == DMA_TO_DEVICE) || (flag == DMA_BIDIRECTIONAL))
+ dma_sync_single_for_device(jrdev, state->ctx_dma, ctx_len,
+ flag);
dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}
@@ -220,6 +226,13 @@ static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
+static inline void append_key_axcbc(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ append_key_as_imm(desc, ctx->key, ctx->key_len,
+ ctx->key_len, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+}
+
/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
@@ -241,6 +254,25 @@ static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
+static inline void init_sh_desc_key_axcbc(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ if (ctx->key_len) {
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_axcbc(desc, ctx);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+ }
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
+}
/*
* For ahash read data from seqin following state->caam_ctx,
* and write resulting class2 context to seqout, which may be state->caam_ctx
@@ -260,6 +292,20 @@ static inline void ahash_append_load_str(u32 *desc, int digestsize)
LDST_SRCDST_BYTE_CONTEXT);
}
+static inline void axcbc_append_load_str(u32 *desc, int digestsize)
+{
+ /* Calculate remaining bytes to read */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+
+ /* Store class1 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+
/*
* For ahash update, final and finup, import context, read and write to seqout
*/
@@ -282,6 +328,27 @@ static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
ahash_append_load_str(desc, digestsize);
}
+/*
+ * For ahash update, final and finup, import context, read and write to seqout
+ */
+static inline void axcbc_ctx_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize,
+ struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_axcbc(desc, ctx);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ctx->ctx_len);
+
+ /* Class 1 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ axcbc_append_load_str(desc, digestsize);
+}
/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
int digestsize, struct caam_hash_ctx *ctx)
@@ -297,6 +364,21 @@ static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
ahash_append_load_str(desc, digestsize);
}
+/* For ahash firsts and digest, read and write to seqout */
+static inline void axcbc_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize, struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_axcbc(desc, ctx);
+
+ /* Class 1 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ axcbc_append_load_str(desc, digestsize);
+}
+
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -352,6 +434,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
@@ -370,6 +454,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* ahash_finup shared descriptor */
desc = ctx->sh_desc_finup;
@@ -388,6 +474,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
@@ -407,10 +495,130 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 have_key = 0;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ desc = ctx->sh_desc_update;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ctx->ctx_len);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+ OP_ALG_ENCRYPT);
+
+ /* Load data and write to result or context */
+ axcbc_append_load_str(desc, ctx->ctx_len);
+
+ ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_update_first shared descriptor */
+ desc = ctx->sh_desc_update_first;
+
+ axcbc_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+ ctx->ctx_len, ctx);
+
+ ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+
+ axcbc_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_finup shared descriptor */
+ desc = ctx->sh_desc_finup;
+
+ axcbc_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+
+ axcbc_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
+ digestsize, ctx);
+
+ ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+}
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
@@ -444,6 +652,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
kfree(desc);
return -ENOMEM;
}
+ dma_sync_single_for_device(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+
dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dst_dma)) {
@@ -487,6 +697,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
*keylen = digestsize;
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
kfree(desc);
@@ -544,6 +755,10 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dev_err(jrdev, "unable to map key i/o memory\n");
return -ENOMEM;
}
+
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -564,6 +779,25 @@ badkey:
return -EINVAL;
}
+static int axcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ret = 0;
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key, key, keylen);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->key_len, 1);
+#endif
+
+ ret = axcbc_set_sh_desc(ahash);
+
+ return ret;
+}
/*
* ahash_edesc - s/w-extended ahash descriptor
* @dst_dma: physical mapped address of req->result
@@ -591,8 +825,11 @@ static inline void ahash_unmap(struct device *dev,
if (edesc->src_nents)
dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
DMA_TO_DEVICE, edesc->chained);
- if (edesc->dst_dma)
+ if (edesc->dst_dma) {
+ dma_sync_single_for_cpu(dev, edesc->dst_dma, dst_len,
+ DMA_FROM_DEVICE);
dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+ }
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
@@ -607,8 +844,12 @@ static inline void ahash_unmap_ctx(struct device *dev,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
- if (state->ctx_dma)
+ if (state->ctx_dma) {
+ if ((flag == DMA_FROM_DEVICE) || (flag == DMA_BIDIRECTIONAL))
+ dma_sync_single_for_cpu(dev, state->ctx_dma,
+ ctx->ctx_len, flag);
dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ }
ahash_unmap(dev, edesc, req, dst_len);
}
@@ -802,7 +1043,7 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -851,6 +1092,9 @@ static int ahash_update_ctx(struct ahash_request *req)
append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -904,7 +1148,7 @@ static int ahash_final_ctx(struct ahash_request *req)
sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -936,6 +1180,9 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -980,7 +1227,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1015,6 +1262,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -1055,7 +1305,7 @@ static int ahash_digest(struct ahash_request *req)
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1065,6 +1315,7 @@ static int ahash_digest(struct ahash_request *req)
DESC_JOB_IO_LEN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->src_nents = src_nents;
edesc->chained = chained;
@@ -1082,6 +1333,9 @@ static int ahash_digest(struct ahash_request *req)
}
append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
@@ -1120,7 +1374,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1139,6 +1393,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
digestsize);
edesc->src_nents = 0;
+ dma_sync_single_for_device(jrdev, state->buf_dma, buflen,
+ DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -1191,7 +1447,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1227,6 +1483,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -1288,7 +1546,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1320,6 +1578,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
@@ -1374,7 +1635,7 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1413,6 +1674,8 @@ static int ahash_update_first(struct ahash_request *req)
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
+ sec4_sg_bytes, DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -1460,6 +1723,8 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->current_buf = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
return 0;
}
@@ -1649,6 +1914,28 @@ static struct caam_hash_template driver_hash[] = {
.alg_type = OP_ALG_ALGSEL_MD5,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
+ {
+ .name = "xcbc(aes)",
+ .driver_name = "xcbc-aes-caam",
+ .hmac_name = "xcbc(aes)",
+ .hmac_driver_name = "xcbc-aes-caam",
+ .blocksize = XCBC_MAC_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = axcbc_setkey,
+ .halg = {
+ .digestsize = XCBC_MAC_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
+ .alg_op = OP_ALG_ALGSEL_AES,
+ },
};
struct caam_hash_alg {
@@ -1702,6 +1989,39 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return ret;
}
+static int caam_axcbc_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
+ int tgt_jr = atomic_inc_return(&priv->tfm_count);
+ int ret = 0;
+
+ /*
+ * distribute tfms across job rings to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
+
+ /* copy descriptor header template value */
+ ctx->alg_type = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+ ctx->alg_op = OP_TYPE_CLASS1_ALG | caam_hash->alg_op;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ ret = axcbc_set_sh_desc(ahash);
+
+ return ret;
+}
+
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1747,21 +2067,26 @@ static void __exit caam_algapi_hash_exit(void)
}
pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!pdev) {
+ of_node_put(dev_node);
return;
+ }
ctrldev = &pdev->dev;
- of_node_put(dev_node);
priv = dev_get_drvdata(ctrldev);
- if (!priv->hash_list.next)
+ if (!priv->hash_list.next) {
+ of_node_put(dev_node);
return;
+ }
list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
+
+ of_node_put(dev_node);
}
static struct caam_hash_alg *
@@ -1794,7 +2119,11 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
template->driver_name);
}
alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_hash_cra_init;
+
+ if (strstr(alg->cra_name, "xcbc") > 0)
+ alg->cra_init = caam_axcbc_cra_init;
+ else
+ alg->cra_init = caam_hash_cra_init;
alg->cra_exit = caam_hash_cra_exit;
alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
alg->cra_priority = CAAM_CRA_PRIORITY;
@@ -1816,7 +2145,8 @@ static int __init caam_algapi_hash_init(void)
struct platform_device *pdev;
struct device *ctrldev;
struct caam_drv_private *priv;
- int i = 0, err = 0;
+ int i = 0, err = 0, md_limit = 0, md_inst;
+ u64 cha_inst;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -1826,22 +2156,36 @@ static int __init caam_algapi_hash_init(void)
}
pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!pdev) {
+ of_node_put(dev_node);
return -ENODEV;
-
+ }
ctrldev = &pdev->dev;
priv = dev_get_drvdata(ctrldev);
- of_node_put(dev_node);
INIT_LIST_HEAD(&priv->hash_list);
atomic_set(&priv->tfm_count, -1);
- /* register crypto algorithms the device supports */
+ /* register algorithms the device supports */
+ cha_inst = rd_reg64(&priv->ctrl->perfmon.cha_num);
+ md_inst = (cha_inst & CHA_ID_MD_MASK) >> CHA_ID_MD_SHIFT;
+ if (md_inst) {
+ md_limit = SHA512_DIGEST_SIZE;
+ if ((rd_reg64(&priv->ctrl->perfmon.cha_id) & CHA_ID_MD_MASK)
+ == CHA_ID_MD_LP256) /* LP256 limits digest size */
+ md_limit = SHA256_DIGEST_SIZE;
+ }
+
for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
- /* TODO: check if h/w supports alg */
struct caam_hash_alg *t_alg;
+ /* If no MD instantiated, or MD too small, skip */
+ if ((!md_inst) ||
+ (driver_hash[i].template_ahash.halg.digestsize >
+ md_limit))
+ continue;
+
/* register hmac version */
t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
if (IS_ERR(t_alg)) {
@@ -1877,6 +2221,7 @@ static int __init caam_algapi_hash_init(void)
list_add_tail(&t_alg->entry, &priv->hash_list);
}
+ of_node_put(dev_node);
return err;
}
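
[Note: with the xcbc(aes) template above registered, an in-kernel user could compute an AES-XCBC-MAC through the ahash API. A minimal sketch, assuming the template registers as listed; -EINPROGRESS completion handling is omitted for brevity:

    /* Illustrative only -- not part of this patch */
    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int xcbc_aes_digest(const u8 *key, unsigned int keylen,
                               struct scatterlist *sg, unsigned int nbytes,
                               u8 *out /* 16 bytes */)
    {
            struct crypto_ahash *tfm;
            struct ahash_request *req;
            int ret;

            tfm = crypto_alloc_ahash("xcbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_ahash_setkey(tfm, key, keylen);
            if (ret)
                    goto out_free_tfm;

            req = ahash_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_free_tfm;
            }

            /* completion callback omitted for brevity */
            ahash_request_set_crypt(req, sg, out, nbytes);
            ret = crypto_ahash_digest(req);

            ahash_request_free(req);
    out_free_tfm:
            crypto_free_ahash(tfm);
            return ret;
    }
]
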
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..795d7086ae42 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,7 +1,7 @@
/*
* caam - Freescale FSL CAAM support for hw_random
*
- * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
*
* Based on caamalg.c crypto API driver.
*
@@ -76,13 +76,16 @@ struct caam_rng_ctx {
struct buf_data bufs[2];
};
-static struct caam_rng_ctx rng_ctx;
+static struct caam_rng_ctx *rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
- if (bd->addr)
+ if (bd->addr) {
+ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
DMA_FROM_DEVICE);
+ }
}
static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
@@ -137,7 +140,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct caam_rng_ctx *ctx = &rng_ctx;
+ struct caam_rng_ctx *ctx = rng_ctx;
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
int next_buf_idx, copied_idx;
int err;
@@ -206,6 +209,9 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
+
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
@@ -237,14 +243,57 @@ static void caam_cleanup(struct hwrng *rng)
struct buf_data *bd;
for (i = 0; i < 2; i++) {
- bd = &rng_ctx.bufs[i];
+ bd = &rng_ctx->bufs[i];
if (atomic_read(&bd->empty) == BUF_PENDING)
wait_for_completion(&bd->filled);
}
- rng_unmap_ctx(&rng_ctx);
+ rng_unmap_ctx(rng_ctx);
+}
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+static inline void test_len(struct hwrng *rng, size_t len, bool wait)
+{
+ u8 *buf;
+ int real_len;
+
+ buf = kzalloc(sizeof(u8) * len, GFP_KERNEL);
+ real_len = rng->read(rng, buf, len, wait);
+ if (real_len == 0 && wait)
+ pr_err("WAITING FAILED\n");
+ pr_info("wanted %d bytes, got %d\n", len, real_len);
+ print_hex_dump(KERN_INFO, "random bytes@: ", DUMP_PREFIX_ADDRESS,
+ 16, 4, buf, real_len, 1);
+ kfree(buf);
+}
+
+static inline void test_mode_once(struct hwrng *rng, bool wait)
+{
+#define TEST_CHUNK (RN_BUF_SIZE / 4)
+
+ test_len(rng, TEST_CHUNK, wait);
+ test_len(rng, RN_BUF_SIZE * 2, wait);
+ test_len(rng, RN_BUF_SIZE * 2 - TEST_CHUNK, wait);
}
+static inline void test_mode(struct hwrng *rng, bool wait)
+{
+#define TEST_PASS 1
+ int i;
+
+ for (i = 0; i < TEST_PASS; i++)
+ test_mode_once(rng, wait);
+}
+
+static void self_test(struct hwrng *rng)
+{
+ pr_info("testing without waiting\n");
+ test_mode(rng, false);
+ pr_info("testing with waiting\n");
+ test_mode(rng, true);
+}
+#endif
+
static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
struct buf_data *bd = &ctx->bufs[buf_id];
@@ -298,7 +347,17 @@ static int __init caam_rng_init(void)
priv = dev_get_drvdata(ctrldev);
of_node_put(dev_node);
- caam_init_rng(&rng_ctx, priv->jrdev[0]);
+ /* Check RNG present in hardware before registration */
+ if (!(rd_reg64(&priv->ctrl->perfmon.cha_num) & CHA_ID_RNG_MASK))
+ return -ENODEV;
+
+ rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_KERNEL | GFP_DMA);
+
+ caam_init_rng(rng_ctx, priv->jrdev[0]);
+
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ self_test(&caam_rng);
+#endif
dev_info(priv->jrdev[0], "registering rng-caam\n");
return hwrng_register(&caam_rng);
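
[Note: once hwrng_register() succeeds, the caam rng is exposed through the hw_random core, so its output should be reachable from userspace via the standard /dev/hwrng character device. A small sanity-check program, assuming that device node exists:

    /* Illustrative only -- not part of this patch */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char buf[32];
            unsigned int i;
            int fd = open("/dev/hwrng", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/hwrng");
                    return 1;
            }
            if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
                    perror("read");
                    close(fd);
                    return 1;
            }
            for (i = 0; i < sizeof(buf); i++)
                    printf("%02x", buf[i]);
            printf("\n");
            close(fd);
            return 0;
    }
]
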
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 762aeff626ac..755524d3b0e8 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -14,6 +14,8 @@
#include <linux/hash.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
@@ -23,6 +25,10 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+
+#ifdef CONFIG_ARM /* needs the clock control subsystem */
+#include <linux/clk.h>
+#endif
#include <net/xfrm.h>
#include <crypto/algapi.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 6e94bcd94678..5bc978e8c719 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -2,7 +2,7 @@
* CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
- * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*/
#include "compat.h"
@@ -13,6 +13,9 @@
#include "error.h"
#include "ctrl.h"
+/* Used to capture the array of job rings */
+struct device **caam_jr_dev;
+
static int caam_remove(struct platform_device *pdev)
{
struct device *ctrldev;
@@ -40,6 +43,13 @@ static int caam_remove(struct platform_device *pdev)
/* Unmap controller region */
iounmap(&topregs->ctrl);
+#ifdef CONFIG_ARM
+ /* shut clocks off before finalizing shutdown */
+ clk_disable(ctrlpriv->caam_ipg);
+ clk_disable(ctrlpriv->caam_mem);
+ clk_disable(ctrlpriv->caam_aclk);
+#endif
+
kfree(ctrlpriv->jrdev);
kfree(ctrlpriv);
@@ -111,6 +121,8 @@ static int instantiate_rng(struct device *jrdev)
build_instantiation_desc(desc);
desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, desc_dma, desc_bytes(desc),
+ DMA_TO_DEVICE);
init_completion(&instantiation.completion);
ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
if (!ret) {
@@ -144,14 +156,15 @@ static void kick_trng(struct platform_device *pdev)
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
- /* 1600 clocks per sample */
+ /* Set clocks per sample to the default, and divider to zero */
val = rd_reg32(&r4tst->rtsdctl);
- val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (RNG4_ENT_CLOCKS_SAMPLE << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
/* min. freq. count */
- wr_reg32(&r4tst->rtfrqmin, 400);
+ wr_reg32(&r4tst->rtfrqmin, RNG4_ENT_CLOCKS_SAMPLE / 4);
/* max. freq. count */
- wr_reg32(&r4tst->rtfrqmax, 6400);
+ wr_reg32(&r4tst->rtfrqmax, RNG4_ENT_CLOCKS_SAMPLE * 8);
/* put RNG4 into run mode */
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}
@@ -176,7 +189,20 @@ int caam_get_era(u64 caam_id)
{0x0A14, 1, 3},
{0x0A14, 2, 4},
{0x0A16, 1, 4},
- {0x0A11, 1, 4}
+ {0x0A11, 1, 4},
+ {0x0A10, 3, 4},
+ {0x0A18, 1, 4},
+ {0x0A11, 2, 5},
+ {0x0A12, 2, 5},
+ {0x0A13, 1, 5},
+ {0x0A1C, 1, 5},
+ {0x0A12, 4, 6},
+ {0x0A13, 2, 6},
+ {0x0A16, 2, 6},
+ {0x0A18, 2, 6},
+ {0x0A1A, 1, 6},
+ {0x0A1C, 2, 6},
+ {0x0A17, 1, 6}
};
int i;
@@ -189,6 +215,18 @@ int caam_get_era(u64 caam_id)
}
EXPORT_SYMBOL(caam_get_era);
+/*
+ * Return a job ring device. This is available so outside
+ * entities can gain direct access to the job ring. For now,
+ * this function returns the first job ring (at index 0).
+ */
+struct device *caam_get_jrdev(void)
+{
+ return caam_jr_dev[0];
+}
+EXPORT_SYMBOL(caam_get_jrdev);
+
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -227,6 +265,73 @@ static int caam_probe(struct platform_device *pdev)
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
+/*
+ * ARM targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device. Turn clocking on to proceed
+ */
+#ifdef CONFIG_ARM
+ ctrlpriv->caam_ipg = devm_clk_get(&ctrlpriv->pdev->dev, "caam_ipg");
+ if (IS_ERR(ctrlpriv->caam_ipg)) {
+ ret = PTR_ERR(ctrlpriv->caam_ipg);
+ dev_err(&ctrlpriv->pdev->dev,
+ "can't identify CAAM ipg clk: %d\n", ret);
+ return -ENODEV;
+ }
+ ctrlpriv->caam_mem = devm_clk_get(&ctrlpriv->pdev->dev, "caam_mem");
+ if (IS_ERR(ctrlpriv->caam_mem)) {
+ ret = PTR_ERR(ctrlpriv->caam_mem);
+ dev_err(&ctrlpriv->pdev->dev,
+ "can't identify CAAM secure mem clk: %d\n", ret);
+ return -ENODEV;
+ }
+ ctrlpriv->caam_aclk = devm_clk_get(&ctrlpriv->pdev->dev, "caam_aclk");
+ if (IS_ERR(ctrlpriv->caam_aclk)) {
+ ret = PTR_ERR(ctrlpriv->caam_aclk);
+ dev_err(&ctrlpriv->pdev->dev,
+ "can't identify CAAM aclk clk: %d\n", ret);
+ return -ENODEV;
+ }
+
+ ret = clk_prepare(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't prepare CAAM ipg clock: %d\n", ret);
+ return -ENODEV;
+ }
+ ret = clk_prepare(ctrlpriv->caam_mem);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't prepare CAAM secure mem clock: %d\n", ret);
+ return -ENODEV;
+ }
+ ret = clk_prepare(ctrlpriv->caam_aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't prepare CAAM aclk clock: %d\n", ret);
+ return -ENODEV;
+ }
+
+ ret = clk_enable(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+ return -ENODEV;
+ }
+ ret = clk_enable(ctrlpriv->caam_mem);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", ret);
+ return -ENODEV;
+ }
+ ret = clk_enable(ctrlpriv->caam_aclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+ return -ENODEV;
+ }
+
+ pr_debug("%s caam_ipg clock:%d\n", __func__,
+ (int)clk_get_rate(ctrlpriv->caam_ipg));
+ pr_debug("%s caam_mem clock:%d\n", __func__,
+ (int)clk_get_rate(ctrlpriv->caam_mem));
+ pr_debug("%s caam_aclk clock:%d\n", __func__,
+ (int)clk_get_rate(ctrlpriv->caam_aclk));
+#endif
+
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
@@ -234,6 +339,22 @@ static int caam_probe(struct platform_device *pdev)
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+#ifdef CONFIG_ARCH_MX6
+ /*
+ * ERRATA: mx6 devices have an issue wherein AXI bus transactions
+ * may not occur in the correct order. This isn't a problem running
+ * single descriptors, but can be if running multiple concurrent
+ * descriptors. Reworking the driver to throttle to single requests
+ * is impractical, thus the workaround is to limit the AXI pipeline
+ * to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+ wr_reg32(&topregs->ctrl.mcr,
+ (rd_reg32(&topregs->ctrl.mcr) & ~(MCFGR_AXIPIPE_MASK)) |
+ ((1 << MCFGR_AXIPIPE_SHIFT) & MCFGR_AXIPIPE_MASK));
+#endif
+
+ /* Set DMA masks according to platform ranging */
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
dma_set_mask(dev, DMA_BIT_MASK(40));
@@ -265,13 +386,36 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ret = caam_jr_probe(pdev, np, ring);
+ if (ret < 0) {
+ /*
+ * Job ring not found, error out. At some
+ * point, we should enhance job ring handling
+ * to allow for non-consecutive job rings to
+ * be found.
+ */
+ pr_err("fsl,sec-v4.0-job-ring not found ");
+ pr_err("(ring %d)\n", ring);
+ return ret;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
+
if (!ring) {
for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
- caam_jr_probe(pdev, np, ring);
+ ret = caam_jr_probe(pdev, np, ring);
+ if (ret < 0) {
+ /*
+ * Job ring not found, error out. At some
+ * point, we should enhance job ring handling
+ * to allow for non-consecutive job rings to
+ * be found.
+ */
+ pr_err("fsl,sec4.0-job-ring not found ");
+ pr_err("(ring %d)\n", ring);
+ return ret;
+ }
ctrlpriv->total_jobrs++;
ring++;
}
@@ -294,19 +438,35 @@ static int caam_probe(struct platform_device *pdev)
}
/*
- * RNG4 based SECs (v5+) need special initialization prior
- * to executing any descriptors
+ * RNG4 based SECs (v5+ | >= i.MX6) need special initialization prior
+ * to executing any descriptors. If there's a problem with init,
+ * remove other subsystems and return; internal padding functions
+ * cannot run without an RNG. This procedure assumes a single RNG4
+ * instance.
*/
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
- kick_trng(pdev);
- ret = instantiate_rng(ctrlpriv->jrdev[0]);
- if (ret) {
- caam_remove(pdev);
- return ret;
+ if ((rd_reg64(&topregs->ctrl.perfmon.cha_id) & CHA_ID_RNG_MASK)
+ == CHA_ID_RNG_4) {
+ struct rng4tst __iomem *r4tst;
+ u32 rng_if;
+
+ /*
+ * Check to see if the RNG has already been instantiated.
+ * If either the state 0 or 1 instantiated flags are set,
+ * then don't continue on and try to instantiate the RNG
+ * again.
+ */
+ r4tst = &topregs->ctrl.r4tst[0];
+ rng_if = rd_reg32(&r4tst->rdsta);
+ rng_if = rng_if & RDSTA_IF;
+ if (!rng_if) {
+ kick_trng(pdev);
+ ret = instantiate_rng(ctrlpriv->jrdev[0]);
+ if (ret) {
+ caam_remove(pdev);
+ return -ENODEV;
+ }
+ ctrlpriv->rng_inst++;
}
-
- /* Enable RDB bit so that RNG works faster */
- setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
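
[Note: caam_get_jrdev() above is exported so code outside this driver can reach a job ring directly. A hedged consumer sketch, using the caam_jr_enqueue() callback signature declared in this driver's jr.h; the helper names here are illustrative:

    /* Illustrative only -- not part of this patch */
    #include <linux/completion.h>
    #include <linux/device.h>
    /* caam_get_jrdev()/caam_jr_enqueue() prototypes come from this driver */

    static void my_desc_done(struct device *jrdev, u32 *desc, u32 status,
                             void *arg)
    {
            struct completion *done = arg;

            if (status)
                    dev_err(jrdev, "job failed: 0x%08x\n", status);
            complete(done);
    }

    static int submit_my_desc(u32 *desc) /* pre-built, DMA-able descriptor */
    {
            struct device *jrdev = caam_get_jrdev(); /* first job ring */
            DECLARE_COMPLETION_ONSTACK(done);
            int ret;

            ret = caam_jr_enqueue(jrdev, desc, my_desc_done, &done);
            if (!ret)
                    wait_for_completion(&done);
            return ret;
    }
]
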
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f7f833be8c67..74bad41f7919 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -2,19 +2,35 @@
* CAAM descriptor composition header
* Definitions to support CAAM descriptor instruction generation
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*/
#ifndef DESC_H
#define DESC_H
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL-compile-time.
+ * and this selection is visible in the Compile Time Parameters Register
+ */
+
+#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
+#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
+#define SEC4_SG_BPID_MASK 0x000000ff
+#define SEC4_SG_BPID_SHIFT 16
+#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK 0x00001fff
+
struct sec4_sg_entry {
+#ifdef CONFIG_64BIT
u64 ptr;
-#define SEC4_SG_LEN_FIN 0x40000000
-#define SEC4_SG_LEN_EXT 0x80000000
+#else
+ u32 reserved;
+ u32 ptr;
+#endif
u32 len;
- u8 reserved;
- u8 buf_pool_id;
+ u16 buf_pool_id;
u16 offset;
};
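
The reworked sec4_sg_entry keeps the bus address as a native u64 on 64-bit builds, and as a reserved word plus a u32 pointer on 32-bit builds, with the EXT/FIN flags carried in the length word. A minimal sketch of filling one entry follows; the helper name sec4_sg_fill is illustrative only and not part of this patch, and any CPU-to-device endianness conversion is omitted:

static inline void sec4_sg_fill(struct sec4_sg_entry *e, dma_addr_t dma,
				u32 len, u16 offset, bool last)
{
#ifdef CONFIG_64BIT
	e->ptr = dma;
#else
	e->reserved = 0;
	e->ptr = dma;			/* low 32 bits of the bus address */
#endif
	e->len = (len & SEC4_SG_LEN_MASK) | (last ? SEC4_SG_LEN_FIN : 0);
	e->buf_pool_id = 0;		/* no buffer pool in use */
	e->offset = offset & SEC4_SG_OFFS_MASK;
}
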
@@ -1087,6 +1103,23 @@ struct sec4_sg_entry {
#define OP_PCL_PKPROT_ECC 0x0002
#define OP_PCL_PKPROT_F2M 0x0001
+/* Blob protocol protinfo bits */
+#define OP_PCL_BLOB_TK 0x0200
+#define OP_PCL_BLOB_EKT 0x0100
+
+#define OP_PCL_BLOB_K2KR_MEM 0x0000
+#define OP_PCL_BLOB_K2KR_C1KR 0x0010
+#define OP_PCL_BLOB_K2KR_C2KR 0x0030
+#define OP_PCL_BLOB_K2KR_AFHAS 0x0050
+#define OP_PCL_BLOB_K2KR_C2KR_SPLIT 0x0070
+
+#define OP_PCL_BLOB_PTXT_SECMEM 0x0008
+#define OP_PCL_BLOB_BLACK 0x0004
+
+#define OP_PCL_BLOB_FMT_NORMAL 0x0000
+#define OP_PCL_BLOB_FMT_MSTR 0x0002
+#define OP_PCL_BLOB_FMT_TEST 0x0003
+
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
@@ -1600,4 +1633,28 @@ struct sec4_sg_entry {
#define NFIFOENTRY_PLEN_SHIFT 0
#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+/*
+ * PDB internal definitions
+ */
+
+/* IPSec ESP CBC Encap/Decap Options */
+#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
+#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
+#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
+#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
+
+#define ARC4_BLOCK_SIZE 1
+#define ARC4_MAX_KEY_SIZE 256
+#define ARC4_MIN_KEY_SIZE 1
+
+#define XCBC_MAC_DIGEST_SIZE 16
+#define XCBC_MAC_BLOCK_WORDS 16
+
+
#endif /* DESC_H */
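
The PDBOPTS_ESPCBC_* bits are ORed into the options byte of an IPsec ESP-CBC protocol data block. Purely as an illustration (the PDB structures themselves are outside this patch), an encapsulation options byte asking for a 32-entry anti-replay window, a hardware-generated IV and tunnel-mode next-header handling would be composed as:

	u8 encap_opts = PDBOPTS_ESPCBC_ARS32 |	/* 32-entry anti-replay window */
			PDBOPTS_ESPCBC_IVSRC |	/* IV from the internal RNG */
			PDBOPTS_ESPCBC_TUNNEL;	/* tunnel-mode next header */
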
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e4a16b741371..0311f8572822 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -2,7 +2,7 @@
* CAAM/SEC 4.x driver backend
* Private/internal definitions between modules
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*
*/
@@ -12,6 +12,9 @@
#define JOBR_UNASSIGNED 0
#define JOBR_ASSIGNED 1
+/* Default clock/sample settings for an RNG4 entropy source */
+#define RNG4_ENT_CLOCKS_SAMPLE 1600
+
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
@@ -84,14 +87,22 @@ struct caam_drv_private {
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
+ int rng_inst; /* Total instantiated RNGs */
/* which jr allocated to scatterlist crypto */
atomic_t tfm_count ____cacheline_aligned;
+ int num_jrs_for_algapi;
+ struct device **algapi_jr;
/* list of registered crypto algorithms (mk generic context handle?) */
struct list_head alg_list;
/* list of registered hash algorithms (mk generic context handle?) */
struct list_head hash_list;
+#ifdef CONFIG_ARM
+ struct clk *caam_ipg;
+ struct clk *caam_mem;
+ struct clk *caam_aclk;
+#endif
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
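
The caam_ipg/caam_mem/caam_aclk pointers imply that the i.MX probe path gates CAAM's clocks before touching registers. The consumer names used below are assumptions for illustration only (this patch does not establish them); a rough sketch of populating one of the fields from caam_probe(), using the standard clk API from <linux/clk.h>:

#ifdef CONFIG_ARM
	ctrlpriv->caam_ipg = devm_clk_get(&pdev->dev, "caam_ipg");
	if (IS_ERR(ctrlpriv->caam_ipg))
		return PTR_ERR(ctrlpriv->caam_ipg);

	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
	if (ret)
		return ret;
	/* caam_mem and caam_aclk would be acquired and enabled the same way */
#endif
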
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b4aa773ecbc8..80ddddb63a7b 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -2,7 +2,7 @@
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
*
- * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*/
#include "compat.h"
@@ -58,6 +58,9 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
+ dma_addr_t outbusaddr;
+
+ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
while (rd_reg32(&jrp->rregs->outring_used)) {
@@ -67,6 +70,9 @@ static void caam_jr_dequeue(unsigned long devarg)
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
+ dma_sync_single_for_cpu(dev, outbusaddr,
+ sizeof(struct jr_outentry) * JOBR_DEPTH,
+ DMA_FROM_DEVICE);
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
@@ -94,6 +100,8 @@ static void caam_jr_dequeue(unsigned long devarg)
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
userstatus = jrp->outring[hw_idx].jrstatus;
+ smp_mb();
+
/* set done */
wr_reg32(&jrp->rregs->outring_rmvd, 1);
@@ -227,7 +235,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct caam_jrentry_info *head_entry;
int head, tail, desc_size;
- dma_addr_t desc_dma;
+ dma_addr_t desc_dma, inpbusaddr;
desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
@@ -236,6 +244,13 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
return -EIO;
}
+ dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);
+
+ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ dma_sync_single_for_device(dev, inpbusaddr,
+ sizeof(dma_addr_t) * JOBR_DEPTH,
+ DMA_TO_DEVICE);
+
spin_lock_bh(&jrp->inplock);
head = jrp->head;
@@ -257,12 +272,18 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ dma_sync_single_for_device(dev, inpbusaddr,
+ sizeof(dma_addr_t) * JOBR_DEPTH,
+ DMA_TO_DEVICE);
+
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+ wmb();
+
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_bh(&jrp->inplock);
@@ -423,7 +444,8 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
struct platform_device *jr_pdev;
struct caam_drv_private *ctrlpriv;
struct caam_drv_private_jr *jrpriv;
- u32 *jroffset;
+ const __be32 *jroffset_addr;
+ u32 jroffset;
int error;
ctrldev = &pdev->dev;
@@ -445,9 +467,21 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
* need to add in the offset to this JobR. Don't know if I
* like this long-term, but it'll run
*/
- jroffset = (u32 *)of_get_property(np, "reg", NULL);
+ jroffset_addr = of_get_property(np, "reg", NULL);
+
+ if (jroffset_addr == NULL) {
+ kfree(jrpriv);
+ return -EINVAL;
+ }
+
+ /*
+	 * Device tree properties are stored big-endian; convert this value
+	 * to CPU endianness (a no-op on Power, needed on little-endian ARM).
+ */
+ jroffset = be32_to_cpup(jroffset_addr);
+
jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
- + *jroffset);
+ + jroffset);
/* Build a local dev for each detected queue */
jr_pdev = of_platform_device_create(np, NULL, ctrldev);
@@ -471,6 +505,10 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
/* Identify the interrupt */
jrpriv->irq = of_irq_to_resource(np, 0, NULL);
+ if (jrpriv->irq <= 0) {
+ kfree(jrpriv);
+ return -EINVAL;
+ }
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index c23df395b622..0c424d86af00 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -1,7 +1,7 @@
/*
* CAAM public-level include definitions for the JobR backend
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*/
#ifndef JR_H
@@ -18,4 +18,5 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
int ring);
extern int caam_jr_shutdown(struct device *dev);
+extern struct device *caam_get_jrdev(void);
#endif /* JR_H */
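
caam_get_jrdev() exposes a job ring device to other kernel code without going through the crypto API. A minimal, illustrative synchronous caller is sketched below; the callback signature matches the one declared for caam_jr_enqueue(), the wait uses the same completion-based pattern the driver already uses for split-key jobs, and the helper names, descriptor contents and status decoding are hypothetical:

struct jr_job_result {
	struct completion done;
	int err;
};

static void jr_job_done(struct device *dev, u32 *desc, u32 status, void *arg)
{
	struct jr_job_result *res = arg;

	res->err = status ? -EIO : 0;	/* real code would decode the status */
	complete(&res->done);
}

static int run_one_desc(u32 *desc)
{
	struct device *jrdev = caam_get_jrdev();
	struct jr_job_result res;
	int ret;

	init_completion(&res.done);
	ret = caam_jr_enqueue(jrdev, desc, jr_job_done, &res);
	if (ret)
		return ret;

	wait_for_completion(&res.done);
	return res.err;
}
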
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 87138d2adb5f..2f4438012aae 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -1,7 +1,7 @@
/*
* CAAM/SEC 4.x functions for handling key-generation jobs
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*
*/
#include "compat.h"
@@ -68,6 +68,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
kfree(desc);
return -ENOMEM;
}
+ dma_sync_single_for_device(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
@@ -115,7 +116,8 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
split_key_pad_len, 1);
#endif
}
-
+ dma_sync_single_for_cpu(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE);
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
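
The added dma_sync_single_for_device()/dma_sync_single_for_cpu() calls follow the usual streaming-DMA ownership rules on non-coherent ARM: flush CPU writes before the device reads a buffer, and invalidate before the CPU reads back what the device wrote. Stripped of descriptor construction and error handling, the buffer handling in gen_split_key() reduces to roughly the following sketch:

	dma_addr_t dma_in, dma_out;

	dma_in = dma_map_single(jrdev, (void *)key_in, keylen, DMA_TO_DEVICE);
	/* CPU filled key_in: push it out of the cache before the job runs */
	dma_sync_single_for_device(jrdev, dma_in, keylen, DMA_TO_DEVICE);

	dma_out = dma_map_single(jrdev, key_out, split_key_pad_len,
				 DMA_FROM_DEVICE);

	/* ... enqueue the split-key descriptor and wait for completion ... */

	/* CAAM wrote key_out: invalidate stale cache lines before reading */
	dma_sync_single_for_cpu(jrdev, dma_out, split_key_pad_len,
				DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_out, split_key_pad_len, DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_in, keylen, DMA_TO_DEVICE);
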
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index cd6fedad9935..4bfc875af0dc 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -1,7 +1,7 @@
/*
* CAAM hardware register-level view
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*/
#ifndef REGS_H
@@ -74,15 +74,21 @@
#endif
#else
#ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(reg, data)
-#define rd_reg32(reg) __raw_readl(reg)
+#define wr_reg32(reg, data) writel(data, reg)
+#define rd_reg32(reg) readl(reg)
#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(reg, data)
-#define rd_reg64(reg) __raw_readq(reg)
+#define wr_reg64(reg, data) writeq(data, reg)
+#define rd_reg64(reg) readq(reg)
#endif
#endif
#endif
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARMs */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+#endif
+
#ifndef CONFIG_64BIT
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
@@ -107,6 +113,98 @@ struct jr_outentry {
} __packed;
/*
+ * CHA version ID / instantiation bitfields
+ * Defined for use within cha_id in perfmon
+ * Note that the same shift/mask selectors can be used to pull out number
+ * of instantiated blocks within cha_num in perfmon, the locations are
+ * the same.
+ */
+
+/* Job Ring */
+#define CHA_ID_JR_SHIFT 60
+#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
+
+/* DEscriptor COntroller */
+#define CHA_ID_DECO_SHIFT 56
+#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_NUM_DECONUM_SHIFT 56 /* legacy definition */
+#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
+
+/* ZUC-Authentication */
+#define CHA_ID_ZA_SHIFT 44
+#define CHA_ID_ZA_MASK (0xfull << CHA_ID_ZA_SHIFT)
+
+/* ZUC-Encryption */
+#define CHA_ID_ZE_SHIFT 40
+#define CHA_ID_ZE_MASK (0xfull << CHA_ID_ZE_SHIFT)
+
+/* SNOW f9 */
+#define CHA_ID_SNW9_SHIFT 36
+#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
+
+/* CRC */
+#define CHA_ID_CRC_SHIFT 32
+#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
+
+/* Public Key */
+#define CHA_ID_PK_SHIFT 28
+#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
+
+/* Kasumi */
+#define CHA_ID_KAS_SHIFT 24
+#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
+
+/* SNOW f8 */
+#define CHA_ID_SNW8_SHIFT 20
+#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
+
+/*
+ * Random Generator
+ * RNG4 = FIPS-verification-compliant, requires init kickstart for use
+ */
+#define CHA_ID_RNG_SHIFT 16
+#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_A (0x1ull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_B (0x2ull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_C (0x3ull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_RNG_4 (0x4ull << CHA_ID_RNG_SHIFT)
+
+/*
+ * Message Digest
+ * LP256 = Low Power (MD5/SHA1/SHA224/SHA256 + HMAC)
+ * LP512 = Low Power (LP256 + SHA384/SHA512)
+ * HP = High Power (LP512 + SMAC)
+ */
+#define CHA_ID_MD_SHIFT 12
+#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_MD_LP256 (0x0ull << CHA_ID_MD_SHIFT)
+#define CHA_ID_MD_LP512 (0x1ull << CHA_ID_MD_SHIFT)
+#define CHA_ID_MD_HP (0x2ull << CHA_ID_MD_SHIFT)
+
+/* ARC4 Streamcipher */
+#define CHA_ID_ARC4_SHIFT 8
+#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_ARC4_LP (0x0ull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_ARC4_HP (0x1ull << CHA_ID_ARC4_SHIFT)
+
+/* DES Blockcipher Accelerator */
+#define CHA_ID_DES_SHIFT 4
+#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
+
+/*
+ * AES Blockcipher + Combo Mode Accelerator
+ * LP = Low Power (includes ECB/CBC/CFB128/OFB/CTR/CCM/CMAC/XCBC-MAC)
+ * HP = High Power (LP + CBCXCBC/CTRXCBC/XTS/GCM)
+ * DIFFPWR = ORed in if differential-power-analysis resistance implemented
+ */
+#define CHA_ID_AES_SHIFT 0
+#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_AES_LP (0x3ull << CHA_ID_AES_SHIFT)
+#define CHA_ID_AES_HP (0x4ull << CHA_ID_AES_SHIFT)
+#define CHA_ID_AES_DIFFPWR (0x1ull << CHA_ID_AES_SHIFT)
+
+
+/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
*
@@ -123,6 +221,10 @@ struct sec_vid {
u8 min_rev;
};
+#define SEC_VID_IPID_SHIFT 16
+#define SEC_VID_MAJ_SHIFT 8
+#define SEC_VID_MAJ_MASK 0xFF00
+
struct caam_perfmon {
/* Performance Monitor Registers f00-f9f */
u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
@@ -139,15 +241,21 @@ struct caam_perfmon {
#define CTPR_QI_SHIFT 57
#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
u64 comp_parms; /* CTPR - Compile Parameters Register */
- u64 rsvd1[2];
+
+ /* Secure Memory State Visibility */
+ u32 rsvd1;
+ u32 smstatus; /* Secure memory status */
+ u32 rsvd2;
+ u32 smpartown; /* Secure memory partition owner */
/* CAAM Global Status fc0-fdf */
u64 faultaddr; /* FAR - Fault Address */
u32 faultliodn; /* FALR - Fault Address LIODN */
u32 faultdetail; /* FADR - Fault Addr Detail */
- u32 rsvd2;
+ u32 rsvd3;
u32 status; /* CSTA - CAAM Status */
- u64 rsvd3;
+ u32 smpart; /* Secure Memory Partition Parameters */
+ u32 smvid; /* Secure Memory Version ID */
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
@@ -157,6 +265,62 @@ struct caam_perfmon {
u64 caam_id; /* CAAMVID - CAAM Version ID */
};
+#define SMSTATUS_PART_SHIFT 28
+#define SMSTATUS_PART_MASK (0xf << SMSTATUS_PART_SHIFT)
+#define SMSTATUS_PAGE_SHIFT 16
+#define SMSTATUS_PAGE_MASK (0x7ff << SMSTATUS_PAGE_SHIFT)
+#define SMSTATUS_MID_SHIFT 8
+#define SMSTATUS_MID_MASK (0x3f << SMSTATUS_MID_SHIFT)
+#define SMSTATUS_ACCERR_SHIFT 4
+#define SMSTATUS_ACCERR_MASK (0xf << SMSTATUS_ACCERR_SHIFT)
+#define SMSTATUS_ACCERR_NONE 0
+#define SMSTATUS_ACCERR_ALLOC 1 /* Page not allocated */
+#define SMSTATUS_ACCESS_ID 2 /* Not granted by ID */
+#define SMSTATUS_ACCESS_WRITE 3 /* Writes not allowed */
+#define SMSTATUS_ACCESS_READ 4 /* Reads not allowed */
+#define SMSTATUS_ACCESS_NONKEY 6 /* Non-key reads not allowed */
+#define SMSTATUS_ACCESS_BLOB 9 /* Blob access not allowed */
+#define SMSTATUS_ACCESS_DESCB 10 /* Descriptor Blob access spans pages */
+#define SMSTATUS_ACCESS_NON_SM 11 /* Outside Secure Memory range */
+#define SMSTATUS_ACCESS_XPAGE 12 /* Access crosses pages */
+#define SMSTATUS_ACCESS_INITPG 13 /* Page still initializing */
+#define SMSTATUS_STATE_SHIFT 0
+#define SMSTATUS_STATE_MASK (0xf << SMSTATUS_STATE_SHIFT)
+#define SMSTATUS_STATE_RESET 0
+#define SMSTATUS_STATE_INIT 1
+#define SMSTATUS_STATE_NORMAL 2
+#define SMSTATUS_STATE_FAIL 3
+
+/* up to 15 rings, 2 bits shifted by ring number */
+#define SMPARTOWN_RING_SHIFT 2
+#define SMPARTOWN_RING_MASK 3
+#define SMPARTOWN_AVAILABLE 0
+#define SMPARTOWN_NOEXIST 1
+#define SMPARTOWN_UNAVAILABLE 2
+#define SMPARTOWN_OURS 3
+
+/* Maximum number of pages possible */
+#define SMPART_MAX_NUMPG_SHIFT 16
+#define SMPART_MAX_NUMPG_MASK (0x3f << SMPART_MAX_NUMPG_SHIFT)
+
+/* Maximum partition number */
+#define SMPART_MAX_PNUM_SHIFT 12
+#define SMPART_MAX_PNUM_MASK (0xf << SMPART_MAX_PNUM_SHIFT)
+
+/* Highest possible page number */
+#define SMPART_MAX_PG_SHIFT 0
+#define SMPART_MAX_PG_MASK (0x3f << SMPART_MAX_PG_SHIFT)
+
+/* Max size of a page */
+#define SMVID_PG_SIZE_SHIFT 16
+#define SMVID_PG_SIZE_MASK (0x7 << SMVID_PG_SIZE_SHIFT)
+
+/* Major/Minor Version ID */
+#define SMVID_MAJ_VERS_SHIFT 8
+#define SMVID_MAJ_VERS (0xf << SMVID_MAJ_VERS_SHIFT)
+#define SMVID_MIN_VERS_SHIFT 0
+#define SMVID_MIN_VERS (0xf << SMVID_MIN_VERS_SHIFT)
+
/* LIODN programming for DMA configuration */
#define MSTRID_LOCK_LIODN 0x80000000
#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
@@ -228,7 +392,10 @@ struct rng4tst {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
- u32 rsvd1[56];
+ u32 rsvd1[40];
+#define RDSTA_IF 0x00000003 /* state handle instantiated flags 0 and 1 */
+ u32 rdsta; /* DRNG status register */
+ u32 rsvd2[15];
};
/*
@@ -359,7 +526,18 @@ struct caam_job_ring {
u32 rsvd11;
u32 jrcommand; /* JRCRx - JobR command */
- u32 rsvd12[932];
+ u32 rsvd12[33];
+
+ /* Secure Memory Configuration - if you have it */
+ u32 sm_cmd; /* SMCJRx - Secure memory command */
+ u32 rsvd13;
+ u32 sm_status; /* SMCSJRx - Secure memory status */
+ u32 rsvd14;
+ u32 sm_perm; /* SMAPJRx - Secure memory access perms */
+ u32 sm_group2; /* SMAP2JRx - Secure memory access group 2 */
+ u32 sm_group1; /* SMAP1JRx - Secure memory access group 1 */
+
+	u32 rsvd15[892];
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
@@ -482,6 +660,62 @@ struct caam_job_ring {
#define JRCR_RESET 0x01
+/* secure memory command */
+#define SMC_PAGE_SHIFT 16
+#define SMC_PAGE_MASK (0xffff << SMC_PAGE_SHIFT)
+#define SMC_PART_SHIFT 8
+#define SMC_PART_MASK (0x0f << SMC_PART_SHIFT)
+#define SMC_CMD_SHIFT 0
+#define SMC_CMD_MASK (0x0f << SMC_CMD_SHIFT)
+
+#define SMC_CMD_ALLOC_PAGE 0x01 /* allocate page to this partition */
+#define SMC_CMD_DEALLOC_PAGE 0x02 /* deallocate page from partition */
+#define SMC_CMD_DEALLOC_PART 0x03 /* deallocate partition */
+#define SMC_CMD_PAGE_INQUIRY	0x05	/* find partition associated with page */
+
+/* secure memory (command) status */
+#define SMCS_PAGE_SHIFT 16
+#define SMCS_PAGE_MASK (0x0fff << SMCS_PAGE_SHIFT)
+#define SMCS_CMDERR_SHIFT 14
+#define SMCS_CMDERR_MASK (3 << SMCS_CMDERR_SHIFT)
+#define SMCS_ALCERR_SHIFT 12
+#define SMCS_ALCERR_MASK (3 << SMCS_ALCERR_SHIFT)
+#define SMCS_PGOWN_SHIFT 6
+#define SMCS_PGOWN_MASK	(3 << SMCS_PGOWN_SHIFT)
+#define SMCS_PART_SHIFT 0
+#define SMCS_PART_MASK (0xf << SMCS_PART_SHIFT)
+
+#define SMCS_CMDERR_NONE 0
+#define SMCS_CMDERR_INCOMP 1 /* Command not yet complete */
+#define SMCS_CMDERR_SECFAIL 2 /* Security failure occurred */
+#define SMCS_CMDERR_OVERFLOW 3 /* Command overflow */
+
+#define SMCS_ALCERR_NONE 0
+#define SMCS_ALCERR_PSPERR	1	/* Partition marked PSP (dealloc only) */
+#define SMCS_ALCERR_PAGEAVAIL 2 /* Page not available */
+#define SMCS_ALCERR_PARTOWN 3 /* Partition ownership error */
+
+#define SMCS_PGOWN_AVAIL 0 /* Page is available */
+#define SMCS_PGOWN_NOEXIST 1 /* Page initializing or nonexistent */
+#define SMCS_PGOWN_NOOWN 2 /* Page owned by another processor */
+#define SMCS_PGOWN_OWNED 3 /* Page belongs to this processor */
+
+/* secure memory access permissions */
+#define SMCS_PERM_KEYMOD_SHIFT 16
+#define SMCA_PERM_KEYMOD_MASK (0xff << SMCS_PERM_KEYMOD_SHIFT)
+#define SMCA_PERM_CSP_ZERO 0x8000 /* Zero when deallocated or released */
+#define SMCA_PERM_PSP_LOCK 0x4000 /* Part./pages can't be deallocated */
+#define SMCA_PERM_PERM_LOCK 0x2000 /* Lock permissions */
+#define SMCA_PERM_GRP_LOCK 0x1000 /* Lock access groups */
+#define SMCA_PERM_RINGID_SHIFT 10
+#define SMCA_PERM_RINGID_MASK (3 << SMCA_PERM_RINGID_SHIFT)
+#define SMCA_PERM_G2_BLOB 0x0080 /* Group 2 blob import/export */
+#define SMCA_PERM_G2_WRITE 0x0020 /* Group 2 write */
+#define SMCA_PERM_G2_READ 0x0010 /* Group 2 read */
+#define SMCA_PERM_G1_BLOB 0x0008 /* Group 1... */
+#define SMCA_PERM_G1_WRITE 0x0002
+#define SMCA_PERM_G1_READ 0x0001
+
/*
* caam_assurance - Assurance Controller View
* base + 0x6000 padded out to 0x1000
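
The SMC_*/SMCS_* values drive the per-ring Secure Memory command and status registers added to struct caam_job_ring above. As an illustration only (the keystore code that uses them is not in this file), allocating a page to a partition and checking the outcome might look like the following, where jrregs, page and part are assumed to be provided by the caller:

	u32 status;

	/* request: allocate 'page' to partition 'part' */
	wr_reg32(&jrregs->sm_cmd,
		 ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
		 ((part << SMC_PART_SHIFT) & SMC_PART_MASK) |
		 SMC_CMD_ALLOC_PAGE);

	/* poll until the command is no longer reported as incomplete */
	do {
		status = rd_reg32(&jrregs->sm_status);
	} while (((status & SMCS_CMDERR_MASK) >> SMCS_CMDERR_SHIFT) ==
		 SMCS_CMDERR_INCOMP);

	if ((status & SMCS_ALCERR_MASK) >> SMCS_ALCERR_SHIFT)
		return -EIO;	/* page unavailable or owned elsewhere */
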
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index e0037c8ee243..86673cb4ec6d 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -1,7 +1,7 @@
/*
* CAAM/SEC 4.x functions for using scatterlists in caam driver
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
*
*/
@@ -91,13 +91,22 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
{
if (unlikely(chained)) {
int i;
+ struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
+		 */
for (i = 0; i < nents; i++) {
- dma_map_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ dma_map_sg(dev, tsg, 1, dir);
+ tsg = scatterwalk_sg_next(tsg);
}
} else {
dma_map_sg(dev, sg, nents, dir);
}
+
+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ dma_sync_sg_for_device(dev, sg, nents, dir);
+
return nents;
}
@@ -105,6 +114,9 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
bool chained)
{
+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+ dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
if (unlikely(chained)) {
int i;
for (i = 0; i < nents; i++) {