Diffstat (limited to 'drivers/crypto/sahara.c')
-rw-r--r--  drivers/crypto/sahara.c  248
1 file changed, 108 insertions(+), 140 deletions(-)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index b07ae4ba165e..b9832978b935 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -44,7 +44,6 @@
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
-#define FLAGS_NEW_KEY BIT(3)
#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -142,8 +141,6 @@ struct sahara_hw_link {
};
struct sahara_ctx {
- unsigned long flags;
-
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
@@ -152,6 +149,7 @@ struct sahara_ctx {
struct sahara_aes_reqctx {
unsigned long mode;
+ u8 iv_out[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
@@ -447,27 +445,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
int ret;
int i, j;
int idx = 0;
+ u32 len;
- /* Copy new key if necessary */
- if (ctx->flags & FLAGS_NEW_KEY) {
- memcpy(dev->key_base, ctx->key, ctx->keylen);
- ctx->flags &= ~FLAGS_NEW_KEY;
+ memcpy(dev->key_base, ctx->key, ctx->keylen);
- if (dev->flags & FLAGS_CBC) {
- dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
- dev->hw_desc[idx]->p1 = dev->iv_phys_base;
- } else {
- dev->hw_desc[idx]->len1 = 0;
- dev->hw_desc[idx]->p1 = 0;
- }
- dev->hw_desc[idx]->len2 = ctx->keylen;
- dev->hw_desc[idx]->p2 = dev->key_phys_base;
- dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ if (dev->flags & FLAGS_CBC) {
+ dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[idx]->p1 = dev->iv_phys_base;
+ } else {
+ dev->hw_desc[idx]->len1 = 0;
+ dev->hw_desc[idx]->p1 = 0;
+ }
+ dev->hw_desc[idx]->len2 = ctx->keylen;
+ dev->hw_desc[idx]->p2 = dev->key_phys_base;
+ dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
+ dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
- dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
+ idx++;
- idx++;
- }
dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
if (dev->nb_in_sg < 0) {
@@ -489,24 +484,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
DMA_TO_DEVICE);
if (ret != dev->nb_in_sg) {
dev_err(dev->device, "couldn't map in sg\n");
- goto unmap_in;
+ return -EINVAL;
}
+
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
if (ret != dev->nb_out_sg) {
dev_err(dev->device, "couldn't map out sg\n");
- goto unmap_out;
+ goto unmap_in;
}
/* Create input links */
dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
sg = dev->in_sg;
+ len = dev->total;
for (i = 0; i < dev->nb_in_sg; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -515,12 +513,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
/* Create output links */
dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
sg = dev->out_sg;
+ len = dev->total;
for (j = i; j < dev->nb_out_sg + i; j++) {
- dev->hw_link[j]->len = sg->length;
+ dev->hw_link[j]->len = min(len, sg->length);
dev->hw_link[j]->p = sg->dma_address;
if (j == (dev->nb_out_sg + i - 1)) {
dev->hw_link[j]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
sg = sg_next(sg);
}
@@ -539,9 +539,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
return 0;
-unmap_out:
- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
@@ -549,8 +546,24 @@ unmap_in:
return -EINVAL;
}
+static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ /* Update IV buffer to contain the last ciphertext block */
+ if (rctx->mode & FLAGS_ENCRYPT) {
+ sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
+ ivsize, req->cryptlen - ivsize);
+ } else {
+ memcpy(req->iv, rctx->iv_out, ivsize);
+ }
+}
+
static int sahara_aes_process(struct skcipher_request *req)
{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
@@ -572,8 +585,17 @@ static int sahara_aes_process(struct skcipher_request *req)
rctx->mode &= FLAGS_MODE_MASK;
dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
- if ((dev->flags & FLAGS_CBC) && req->iv)
- memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
+ if ((dev->flags & FLAGS_CBC) && req->iv) {
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ memcpy(dev->iv_base, req->iv, ivsize);
+
+ if (!(dev->flags & FLAGS_ENCRYPT)) {
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ rctx->iv_out, ivsize,
+ req->cryptlen - ivsize);
+ }
+ }
/* assign new context to device */
dev->ctx = ctx;
@@ -586,16 +608,20 @@ static int sahara_aes_process(struct skcipher_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "AES timeout\n");
- return -ETIMEDOUT;
- }
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
+ if (!timeout) {
+ dev_err(dev->device, "AES timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if ((dev->flags & FLAGS_CBC) && req->iv)
+ sahara_aes_cbc_update_iv(req);
+
return 0;
}
@@ -609,7 +635,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
/* SAHARA only supports 128bit keys */
if (keylen == AES_KEYSIZE_128) {
memcpy(ctx->key, key, keylen);
- ctx->flags |= FLAGS_NEW_KEY;
return 0;
}
@@ -625,12 +650,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
+static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
+{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ if (mode & FLAGS_ENCRYPT)
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
+
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
+}
+
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+ struct sahara_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
struct sahara_dev *dev = dev_ptr;
int err = 0;
+ if (!req->cryptlen)
+ return 0;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128))
+ return sahara_aes_fallback(req, mode);
+
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -653,81 +706,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_encrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_decrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, 0);
}
static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_encrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
- struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct sahara_ctx *ctx = crypto_skcipher_ctx(
- crypto_skcipher_reqtfm(req));
-
- if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- skcipher_request_set_callback(&rctx->fallback_req,
- req->base.flags,
- req->base.complete,
- req->base.data);
- skcipher_request_set_crypt(&rctx->fallback_req, req->src,
- req->dst, req->cryptlen, req->iv);
- return crypto_skcipher_decrypt(&rctx->fallback_req);
- }
-
return sahara_aes_crypt(req, FLAGS_CBC);
}
@@ -784,6 +777,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
int start)
{
struct scatterlist *sg;
+ unsigned int len;
unsigned int i;
int ret;
@@ -805,12 +799,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev,
if (!ret)
return -EFAULT;
+ len = rctx->total;
for (i = start; i < dev->nb_in_sg + start; i++) {
- dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->len = min(len, sg->length);
dev->hw_link[i]->p = sg->dma_address;
if (i == (dev->nb_in_sg + start - 1)) {
dev->hw_link[i]->next = 0;
} else {
+ len -= min(len, sg->length);
dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
sg = sg_next(sg);
}
@@ -891,24 +887,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
return 0;
}
-static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
-{
- if (!sg || !sg->length)
- return nbytes;
-
- while (nbytes && sg) {
- if (nbytes <= sg->length) {
- sg->length = nbytes;
- sg_mark_end(sg);
- break;
- }
- nbytes -= sg->length;
- sg = sg_next(sg);
- }
-
- return nbytes;
-}
-
static int sahara_sha_prepare_request(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -945,36 +923,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
hash_later, 0);
}
- /* nbytes should now be multiple of blocksize */
- req->nbytes = req->nbytes - hash_later;
-
- sahara_walk_and_recalc(req->src, req->nbytes);
-
+ rctx->total = len - hash_later;
/* have data from previous operation and current */
if (rctx->buf_cnt && req->nbytes) {
sg_init_table(rctx->in_sg_chain, 2);
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
-
sg_chain(rctx->in_sg_chain, 2, req->src);
-
- rctx->total = req->nbytes + rctx->buf_cnt;
rctx->in_sg = rctx->in_sg_chain;
-
- req->src = rctx->in_sg_chain;
/* only data from previous operation */
} else if (rctx->buf_cnt) {
- if (req->src)
- rctx->in_sg = req->src;
- else
- rctx->in_sg = rctx->in_sg_chain;
- /* buf was copied into rembuf above */
+ rctx->in_sg = rctx->in_sg_chain;
sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
- rctx->total = rctx->buf_cnt;
/* no data from previous operation */
} else {
rctx->in_sg = req->src;
- rctx->total = req->nbytes;
- req->src = rctx->in_sg;
}
/* on next call, we only have the remaining data in the buffer */
@@ -995,7 +957,10 @@ static int sahara_sha_process(struct ahash_request *req)
return ret;
if (rctx->first) {
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ if (ret)
+ return ret;
+
dev->hw_desc[0]->next = 0;
rctx->first = 0;
} else {
@@ -1003,7 +968,10 @@ static int sahara_sha_process(struct ahash_request *req)
sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
dev->hw_desc[0]->next = dev->hw_phys_desc[1];
- sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ if (ret)
+ return ret;
+
dev->hw_desc[1]->next = 0;
}
@@ -1016,18 +984,19 @@ static int sahara_sha_process(struct ahash_request *req)
timeout = wait_for_completion_timeout(&dev->dma_completion,
msecs_to_jiffies(SAHARA_TIMEOUT_MS));
- if (!timeout) {
- dev_err(dev->device, "SHA timeout\n");
- return -ETIMEDOUT;
- }
if (rctx->sg_in_idx)
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
+ if (!timeout) {
+ dev_err(dev->device, "SHA timeout\n");
+ return -ETIMEDOUT;
+ }
+
memcpy(rctx->context, dev->context_base, rctx->context_size);
- if (req->result)
+ if (req->result && rctx->last)
memcpy(req->result, rctx->context, rctx->digest_size);
return 0;
@@ -1171,8 +1140,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct sahara_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ sizeof(struct sahara_sha_reqctx));
return 0;
}
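
Illustrative notes (not part of the patch):

The standalone sketch below shows the CBC chaining rule that the new sahara_aes_cbc_update_iv() implements: after an operation, the request IV must hold the last ciphertext block so a follow-up request can continue the chain. For encryption that block is the tail of the output; for decryption it is the tail of the input, which is why the patch snapshots it into rctx->iv_out before the hardware overwrites the buffer. The helper name and demo values here are made up for illustration.

#include <stdio.h>
#include <string.h>

#define BLK 16 /* AES_BLOCK_SIZE */

/* Copy the last ciphertext block into iv; dst carries it after
 * encryption, src carried it before decryption. */
static void cbc_update_iv(unsigned char *iv, const unsigned char *src,
                          const unsigned char *dst, size_t len, int encrypt)
{
	const unsigned char *last = (encrypt ? dst : src) + len - BLK;

	memcpy(iv, last, BLK);
}

int main(void)
{
	unsigned char iv[BLK] = { 0 };
	unsigned char in[32] = { 0 };
	unsigned char out[32] = { 0 };

	out[16] = 0xaa; /* first byte of the last ciphertext block */
	cbc_update_iv(iv, in, out, sizeof(out), 1); /* encrypt case */
	printf("iv[0] after encrypt: %#x\n", iv[0]); /* prints 0xaa */
	return 0;
}

A second standalone sketch (again illustrative, with made-up structures) shows why the link-table loops now set each entry to min(len, sg->length): a scatterlist entry can be longer than the bytes the request actually covers, so clamping against the remaining length keeps the last hardware link from reaching past the payload.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct fake_sg { unsigned int length; };

int main(void)
{
	struct fake_sg sg[3] = { { 4096 }, { 4096 }, { 4096 } };
	unsigned int total = 6000; /* bytes the request actually covers */
	unsigned int len = total;
	int i;

	for (i = 0; i < 3 && len; i++) {
		unsigned int link_len = MIN(len, sg[i].length);

		printf("link %d: %u bytes\n", i, link_len); /* 4096, then 1904 */
		len -= link_len;
	}
	return 0;
}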