Diffstat (limited to 'drivers/crypto/allwinner/sun8i-ce')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c |  85
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c   |  35
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c   | 145
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c   |   1
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c   |   1
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h        |  27
6 files changed, 147 insertions(+), 147 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 5663df49dd81..021614b65e39 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -111,7 +111,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
 
 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
 		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-		struct sun8i_ce_alg_template *algt __maybe_unused;
+		struct sun8i_ce_alg_template *algt;
 
 		algt = container_of(alg, struct sun8i_ce_alg_template,
 				    alg.skcipher.base);
@@ -131,21 +131,19 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
 	return err;
 }
 
-static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
+static int sun8i_ce_cipher_prepare(struct skcipher_request *areq,
+				   struct ce_task *cet)
 {
-	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
 	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
 	struct sun8i_ce_dev *ce = op->ce;
 	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct sun8i_ce_alg_template *algt;
-	struct sun8i_ce_flow *chan;
-	struct ce_task *cet;
 	struct scatterlist *sg;
 	unsigned int todo, len, offset, ivsize;
 	u32 common, sym;
-	int flow, i;
+	int i;
 	int nr_sgs = 0;
 	int nr_sgd = 0;
 	int err = 0;
@@ -163,14 +161,9 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
 		algt->stat_req++;
 
-	flow = rctx->flow;
-
-	chan = &ce->chanlist[flow];
-
-	cet = chan->tl;
 	memset(cet, 0, sizeof(struct ce_task));
 
-	cet->t_id = cpu_to_le32(flow);
+	cet->t_id = cpu_to_le32(rctx->flow);
 	common = ce->variant->alg_cipher[algt->ce_algo_id];
 	common |= rctx->op_dir | CE_COMM_INT;
 	cet->t_common_ctl = cpu_to_le32(common);
@@ -209,11 +202,11 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
 	if (areq->iv && ivsize > 0) {
 		if (rctx->op_dir & CE_DECRYPTION) {
 			offset = areq->cryptlen - ivsize;
-			scatterwalk_map_and_copy(chan->backup_iv, areq->src,
+			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
 						 offset, ivsize, 0);
 		}
-		memcpy(chan->bounce_iv, areq->iv, ivsize);
-		rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
+		memcpy(rctx->bounce_iv, areq->iv, ivsize);
+		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, ivsize,
 					       DMA_TO_DEVICE);
 		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
 			dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -276,7 +269,6 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
 		goto theend_sgs;
 	}
 
-	chan->timeout = areq->cryptlen;
 	rctx->nr_sgs = ns;
 	rctx->nr_sgd = nd;
 	return 0;
@@ -300,13 +292,13 @@ theend_iv:
 		offset = areq->cryptlen - ivsize;
 		if (rctx->op_dir & CE_DECRYPTION) {
-			memcpy(areq->iv, chan->backup_iv, ivsize);
-			memzero_explicit(chan->backup_iv, ivsize);
+			memcpy(areq->iv, rctx->backup_iv, ivsize);
+			memzero_explicit(rctx->backup_iv, ivsize);
 		} else {
 			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
 						 ivsize, 0);
 		}
-		memzero_explicit(chan->bounce_iv, ivsize);
+		memzero_explicit(rctx->bounce_iv, ivsize);
 	}
 
 	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -315,24 +307,17 @@ theend:
 	return err;
 }
 
-static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
-				      void *async_req)
+static void sun8i_ce_cipher_unprepare(struct skcipher_request *areq,
+				      struct ce_task *cet)
 {
-	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
 	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
 	struct sun8i_ce_dev *ce = op->ce;
 	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
-	struct sun8i_ce_flow *chan;
-	struct ce_task *cet;
 	unsigned int ivsize, offset;
 	int nr_sgs = rctx->nr_sgs;
 	int nr_sgd = rctx->nr_sgd;
-	int flow;
 
-	flow = rctx->flow;
-	chan = &ce->chanlist[flow];
-	cet = chan->tl;
 	ivsize = crypto_skcipher_ivsize(tfm);
@@ -349,43 +334,43 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
 			       DMA_TO_DEVICE);
 		offset = areq->cryptlen - ivsize;
 		if (rctx->op_dir & CE_DECRYPTION) {
-			memcpy(areq->iv, chan->backup_iv, ivsize);
-			memzero_explicit(chan->backup_iv, ivsize);
+			memcpy(areq->iv, rctx->backup_iv, ivsize);
+			memzero_explicit(rctx->backup_iv, ivsize);
 		} else {
 			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
 						 ivsize, 0);
 		}
-		memzero_explicit(chan->bounce_iv, ivsize);
+		memzero_explicit(rctx->bounce_iv, ivsize);
 	}
 
 	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 }
 
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
-	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
-	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
-	struct sun8i_ce_dev *ce = op->ce;
-	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
-	int flow, err;
-
-	flow = rctx->flow;
-	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
-	sun8i_ce_cipher_unprepare(engine, areq);
-	local_bh_disable();
-	crypto_finalize_skcipher_request(engine, breq, err);
-	local_bh_enable();
-}
-
 int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
 {
-	int err = sun8i_ce_cipher_prepare(engine, areq);
+	struct skcipher_request *req = skcipher_request_cast(areq);
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct sun8i_cipher_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct sun8i_ce_dev *ce = ctx->ce;
+	struct sun8i_ce_flow *chan;
+	int err;
+
+	chan = &ce->chanlist[rctx->flow];
+	err = sun8i_ce_cipher_prepare(req, chan->tl);
 	if (err)
 		return err;
 
-	sun8i_ce_cipher_run(engine, areq);
+	err = sun8i_ce_run_task(ce, rctx->flow,
+				crypto_tfm_alg_name(req->base.tfm));
+
+	sun8i_ce_cipher_unprepare(req, chan->tl);
+
+	local_bh_disable();
+	crypto_finalize_skcipher_request(engine, req, err);
+	local_bh_enable();
+
 	return 0;
 }
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 658f520cee0c..c16bb6ce6ee3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -169,6 +169,12 @@ static const struct ce_variant ce_r40_variant = {
 	.trng = CE_ID_NOTSUPP,
 };
 
+static void sun8i_ce_dump_task_descriptors(struct sun8i_ce_flow *chan)
+{
+	print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+		       chan->tl, sizeof(struct ce_task), false);
+}
+
 /*
  * sun8i_ce_get_engine_number() get the next channel slot
  * This is a simple round-robin way of getting the next channel
@@ -183,7 +189,6 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 {
 	u32 v;
 	int err = 0;
-	struct ce_task *cet = ce->chanlist[flow].tl;
 
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
 	ce->chanlist[flow].stat_req++;
@@ -210,11 +215,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 	mutex_unlock(&ce->mlock);
 
 	wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
-			msecs_to_jiffies(ce->chanlist[flow].timeout));
+			msecs_to_jiffies(CE_DMA_TIMEOUT_MS));
 
 	if (ce->chanlist[flow].status == 0) {
-		dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
-			ce->chanlist[flow].timeout, flow);
+		dev_err(ce->dev, "DMA timeout for %s on flow %d\n", name, flow);
 		err = -EFAULT;
 	}
 	/* No need to lock for this read, the channel is locked so
@@ -226,9 +230,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 	/* Sadly, the error bit is not per flow */
 	if (v) {
 		dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+		sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
 		err = -EFAULT;
-		print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
-			       cet, sizeof(struct ce_task), false);
 	}
 	if (v & CE_ERR_ALGO_NOTSUP)
 		dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -245,9 +248,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 	v &= 0xF;
 	if (v) {
 		dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+		sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
 		err = -EFAULT;
-		print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
-			       cet, sizeof(struct ce_task), false);
 	}
 	if (v & CE_ERR_ALGO_NOTSUP)
 		dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -261,9 +263,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
 	v &= 0xFF;
 	if (v) {
 		dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+		sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
 		err = -EFAULT;
-		print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
-			       cet, sizeof(struct ce_task), false);
 	}
 	if (v & CE_ERR_ALGO_NOTSUP)
 		dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -758,18 +759,6 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
 			err = -ENOMEM;
 			goto error_engine;
 		}
-		ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
-							 GFP_KERNEL | GFP_DMA);
-		if (!ce->chanlist[i].bounce_iv) {
-			err = -ENOMEM;
-			goto error_engine;
-		}
-		ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
-							 GFP_KERNEL);
-		if (!ce->chanlist[i].backup_iv) {
-			err = -ENOMEM;
-			goto error_engine;
-		}
 	}
 	return 0;
 error_engine:
@@ -1063,7 +1052,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
 	pm_runtime_put_sync(ce->dev);
 
 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
-		struct dentry *dbgfs_dir __maybe_unused;
+		struct dentry *dbgfs_dir;
 		struct dentry *dbgfs_stats __maybe_unused;
 
 		/* Ignore error of debugfs */
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 13bdfb8a2c62..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -26,7 +26,7 @@
 static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
 {
 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
-		struct sun8i_ce_alg_template *algt __maybe_unused;
+		struct sun8i_ce_alg_template *algt;
 		struct ahash_alg *alg = crypto_ahash_alg(tfm);
 
 		algt = container_of(alg, struct sun8i_ce_alg_template,
@@ -58,7 +58,8 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
 
 	crypto_ahash_set_reqsize(tfm,
 				 sizeof(struct sun8i_ce_hash_reqctx) +
-				 crypto_ahash_reqsize(op->fallback_tfm));
+				 crypto_ahash_reqsize(op->fallback_tfm) +
+				 CRYPTO_DMA_PADDING);
 
 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
 		memcpy(algt->fbname,
@@ -84,7 +85,7 @@ void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
 
 int sun8i_ce_hash_init(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -100,7 +101,7 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -114,7 +115,7 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -128,7 +129,7 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -145,7 +146,7 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -160,7 +161,7 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -178,7 +179,7 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
 {
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -238,19 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
 int sun8i_ce_hash_digest(struct ahash_request *areq)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
-	struct sun8i_ce_alg_template *algt;
-	struct sun8i_ce_dev *ce;
+	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+	struct sun8i_ce_dev *ce = ctx->ce;
 	struct crypto_engine *engine;
 	int e;
 
 	if (sun8i_ce_hash_need_fallback(areq))
 		return sun8i_ce_hash_digest_fb(areq);
 
-	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
-	ce = algt->ce;
-
 	e = sun8i_ce_get_engine_number(ce);
 	rctx->flow = e;
 	engine = ce->chanlist[e].engine;
@@ -316,28 +313,22 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
 	return j;
 }
 
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
 {
-	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
 	struct sun8i_ce_alg_template *algt;
 	struct sun8i_ce_dev *ce;
-	struct sun8i_ce_flow *chan;
-	struct ce_task *cet;
 	struct scatterlist *sg;
-	int nr_sgs, flow, err;
+	int nr_sgs, err;
 	unsigned int len;
 	u32 common;
 	u64 byte_count;
 	__le32 *bf;
-	void *buf, *result;
 	int j, i, todo;
 	u64 bs;
 	int digestsize;
-	dma_addr_t addr_res, addr_pad;
-	int ns = sg_nents_for_len(areq->src, areq->nbytes);
 
 	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
 	ce = algt->ce;
@@ -349,32 +340,16 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	if (digestsize == SHA384_DIGEST_SIZE)
 		digestsize = SHA512_DIGEST_SIZE;
 
-	/* the padding could be up to two block. */
-	buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
-	if (!buf) {
-		err = -ENOMEM;
-		goto err_out;
-	}
-	bf = (__le32 *)buf;
-
-	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
-	if (!result) {
-		err = -ENOMEM;
-		goto err_free_buf;
-	}
-
-	flow = rctx->flow;
-	chan = &ce->chanlist[flow];
+	bf = (__le32 *)rctx->pad;
 
 	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
 		algt->stat_req++;
 
 	dev_dbg(ce->dev, "%s %s len=%d\n", __func__,
 		crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
 
-	cet = chan->tl;
 	memset(cet, 0, sizeof(struct ce_task));
 
-	cet->t_id = cpu_to_le32(flow);
+	cet->t_id = cpu_to_le32(rctx->flow);
 	common = ce->variant->alg_hash[algt->ce_algo_id];
 	common |= CE_COMM_INT;
 	cet->t_common_ctl = cpu_to_le32(common);
@@ -382,11 +357,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	cet->t_sym_ctl = 0;
 	cet->t_asym_ctl = 0;
 
-	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+	rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+	nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
 	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
 		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
 		err = -EINVAL;
-		goto err_free_result;
+		goto err_out;
 	}
 
 	len = areq->nbytes;
@@ -401,10 +377,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 		err = -EINVAL;
 		goto err_unmap_src;
 	}
-	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
-	cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
-	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
-	if (dma_mapping_error(ce->dev, addr_res)) {
+
+	rctx->result_len = digestsize;
+	rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+					DMA_FROM_DEVICE);
+	cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+	cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+	if (dma_mapping_error(ce->dev, rctx->addr_res)) {
 		dev_err(ce->dev, "DMA map dest\n");
 		err = -EINVAL;
 		goto err_unmap_src;
@@ -432,10 +411,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 		goto err_unmap_result;
 	}
 
-	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
-	cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
+	rctx->pad_len = j * 4;
+	rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+					DMA_TO_DEVICE);
+	cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
 	cet->t_src[i].len = cpu_to_le32(j);
-	if (dma_mapping_error(ce->dev, addr_pad)) {
+	if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
 		dev_err(ce->dev, "DMA error on padding SG\n");
 		err = -EINVAL;
 		goto err_unmap_result;
@@ -446,29 +427,59 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	else
 		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
 
-	chan->timeout = areq->nbytes;
-
-	err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
-
-	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+	return 0;
 
 err_unmap_result:
-	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
-	if (!err)
-		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
+	dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+			 DMA_FROM_DEVICE);
 
 err_unmap_src:
-	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+	dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
 
-err_free_result:
-	kfree(result);
+err_out:
+	return err;
+}
 
-err_free_buf:
-	kfree(buf);
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+				    struct ce_task *cet)
+{
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sun8i_ce_dev *ce = ctx->ce;
+
+	dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+	dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+			 DMA_FROM_DEVICE);
+	dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+	struct ahash_request *areq = ahash_request_cast(async_req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+	struct sun8i_ce_dev *ce = ctx->ce;
+	struct sun8i_ce_flow *chan;
+	int err;
+
+	chan = &ce->chanlist[rctx->flow];
+
+	err = sun8i_ce_hash_prepare(areq, chan->tl);
+	if (err)
+		return err;
+
+	err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+	sun8i_ce_hash_unprepare(areq, chan->tl);
+
+	if (!err)
+		memcpy(areq->result, rctx->result,
+		       crypto_ahash_digestsize(tfm));
 
-err_out:
 	local_bh_disable();
-	crypto_finalize_hash_request(engine, breq, err);
+	crypto_finalize_hash_request(engine, async_req, err);
 	local_bh_enable();
 
 	return 0;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index 762459867b6c..d0a1ac66738b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -137,7 +137,6 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
 	cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
 	cet->t_dst[0].len = cpu_to_le32(todo / 4);
-	ce->chanlist[flow].timeout = 2000;
 
 	err = sun8i_ce_run_task(ce, 3, "PRNG");
 	mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index e1e8bc15202e..244529bf0616 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -79,7 +79,6 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
 	cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
 	cet->t_dst[0].len = cpu_to_le32(todo / 4);
-	ce->chanlist[flow].timeout = todo;
 
 	err = sun8i_ce_run_task(ce, 3, "TRNG");
 	mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 0f9a89067016..71f5a0cd3d45 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -106,9 +106,13 @@
 #define MAX_SG 8
 
 #define CE_MAX_CLOCKS 4
+#define CE_DMA_TIMEOUT_MS 3000
 
 #define MAXFLOW 4
 
+#define CE_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CE_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
 /*
  * struct ce_clock - Describe clocks used by sun8i-ce
  * @name: Name of clock needed by this variant
@@ -187,8 +191,6 @@ struct ce_task {
  * @status: set to 1 by interrupt if task is done
  * @t_phy: Physical address of task
  * @tl: pointer to the current ce_task for this flow
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
  * @stat_req: number of request done by this flow
  */
@@ -196,10 +198,7 @@ struct sun8i_ce_flow {
 	struct completion complete;
 	int status;
 	dma_addr_t t_phy;
-	int timeout;
 	struct ce_task *tl;
-	void *backup_iv;
-	void *bounce_iv;
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
 	unsigned long stat_req;
 #endif
@@ -264,6 +263,8 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
  * @nr_sgd: The number of destination SG (as given by dma_map_sg())
  * @addr_iv: The IV addr returned by dma_map_single, need to unmap later
  * @addr_key: The key addr returned by dma_map_single, need to unmap later
+ * @bounce_iv: Current IV buffer
+ * @backup_iv: Next IV buffer
 * @fallback_req: request struct for invoking the fallback skcipher TFM
 */
@@ -273,6 +274,8 @@ struct sun8i_cipher_req_ctx {
 	int nr_sgd;
 	dma_addr_t addr_iv;
 	dma_addr_t addr_key;
+	u8 bounce_iv[AES_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 backup_iv[AES_BLOCK_SIZE];
 	struct skcipher_request fallback_req; // keep at the end
 };
@@ -304,9 +307,23 @@ struct sun8i_ce_hash_tfm_ctx {
 * struct sun8i_ce_hash_reqctx - context for an ahash request
 * @fallback_req: pre-allocated fallback request
 * @flow: the flow to use for this request
+ * @nr_sgs: number of entries in the source scatterlist
+ * @result_len: result length in bytes
+ * @pad_len: padding length in bytes
+ * @addr_res: DMA address of the result buffer, returned by dma_map_single()
+ * @addr_pad: DMA address of the padding buffer, returned by dma_map_single()
+ * @result: per-request result buffer
+ * @pad: per-request padding buffer (up to 2 blocks)
 */
 struct sun8i_ce_hash_reqctx {
 	int flow;
+	int nr_sgs;
+	size_t result_len;
+	size_t pad_len;
+	dma_addr_t addr_res;
+	dma_addr_t addr_pad;
+	u8 result[CE_MAX_HASH_DIGEST_SIZE] __aligned(CRYPTO_DMA_ALIGN);
+	u8 pad[2 * CE_MAX_HASH_BLOCK_SIZE];
 	struct ahash_request fallback_req; // keep at the end
 };
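
For orientation, both the cipher and hash paths above converge on the same crypto_engine shape: a single do_one_request() handler that builds per-request DMA state, runs the hardware task with the fixed CE_DMA_TIMEOUT_MS wait, always unwinds the mappings, and finalizes the request on the engine. The sketch below is illustrative only and is not part of the patch; every my_* identifier is a hypothetical stand-in for the driver specifics.

/*
 * Minimal sketch of the do_one_request() pattern, under the assumption
 * of three hypothetical helpers: my_prepare() maps the SG lists/IV/key
 * into the per-request context, my_run_task() waits for completion with
 * a fixed timeout, and my_unprepare() unmaps everything again.
 */
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/bottom_half.h>

struct my_req_ctx {
	int flow;	/* channel chosen when the request was queued */
	/* DMA addresses and bounce buffers live here, per request, so
	 * concurrent requests cannot clobber shared channel state. */
};

static int my_prepare(struct skcipher_request *req);	/* hypothetical */
static int my_run_task(int flow);			/* hypothetical */
static void my_unprepare(struct skcipher_request *req);	/* hypothetical */

static int my_do_one(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct my_req_ctx *rctx = skcipher_request_ctx(req);
	int err;

	err = my_prepare(req);
	if (err)
		return err;	/* nothing mapped; engine reports the error */

	err = my_run_task(rctx->flow);

	my_unprepare(req);	/* unwind mappings on success and on error */

	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();

	return 0;	/* the request was already finalized above */
}

On the hash side, the per-request result and padding buffers are kept DMA-safe by growing the request context by CRYPTO_DMA_PADDING in sun8i_ce_hash_init_tfm() and fetching it with ahash_request_ctx_dma(), which returns the context aligned to CRYPTO_DMA_ALIGN.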