| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-12-28 11:43:22 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-12-28 11:43:22 -0800 |
| commit | e14e61e967f2b3bdf23f05e4ae5b9aa830151a44 (patch) | |
| tree | 9412c94cbe37bf6f0d0bd9ad2d8b907ce23eb1db /drivers | |
| parent | cb10ea549fdc0ab2dd8988adab5bf40b4fa642f3 (diff) | |
| parent | 0ee4a96902dd7858e65f378c86f428a0355bd841 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (57 commits)
crypto: aes - Precompute tables
crypto: talitos - Ack done interrupt in isr instead of tasklet
crypto: testmgr - Correct comment about deflate parameters
crypto: salsa20 - Remove private wrappers around various operations
crypto: des3_ede - permit weak keys unless REQ_WEAK_KEY set
crypto: sha512 - Switch to shash
crypto: sha512 - Move message schedule W[80] to static percpu area
crypto: michael_mic - Switch to shash
crypto: wp512 - Switch to shash
crypto: tgr192 - Switch to shash
crypto: sha256 - Switch to shash
crypto: md5 - Switch to shash
crypto: md4 - Switch to shash
crypto: sha1 - Switch to shash
crypto: rmd320 - Switch to shash
crypto: rmd256 - Switch to shash
crypto: rmd160 - Switch to shash
crypto: rmd128 - Switch to shash
crypto: null - Switch to shash
crypto: hash - Make setkey optional
...
Diffstat (limited to 'drivers')

| mode | path | changed |
|---|---|---|
| -rw-r--r-- | drivers/crypto/hifn_795x.c | 494 |
| -rw-r--r-- | drivers/crypto/padlock-aes.c | 52 |
| -rw-r--r-- | drivers/crypto/talitos.c | 153 |
| -rw-r--r-- | drivers/crypto/talitos.h | 85 |

4 files changed, 419 insertions, 365 deletions
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 4d22b21bd3e3..0c79fe7f1567 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -38,9 +38,6 @@
 #include <asm/kmap_types.h>
 
-#undef dprintk
-
-#define HIFN_TEST
 //#define HIFN_DEBUG
 
 #ifdef HIFN_DEBUG
@@ -363,14 +360,14 @@ static atomic_t hifn_dev_number;
 #define HIFN_NAMESIZE			32
 #define HIFN_MAX_RESULT_ORDER		5
 
-#define	HIFN_D_CMD_RSIZE		24*4
-#define	HIFN_D_SRC_RSIZE		80*4
-#define	HIFN_D_DST_RSIZE		80*4
-#define	HIFN_D_RES_RSIZE		24*4
+#define	HIFN_D_CMD_RSIZE		24*1
+#define	HIFN_D_SRC_RSIZE		80*1
+#define	HIFN_D_DST_RSIZE		80*1
+#define	HIFN_D_RES_RSIZE		24*1
 
 #define HIFN_D_DST_DALIGN		4
 
-#define HIFN_QUEUE_LENGTH	HIFN_D_CMD_RSIZE-1
+#define HIFN_QUEUE_LENGTH	(HIFN_D_CMD_RSIZE - 1)
 
 #define AES_MIN_KEY_SIZE		16
 #define AES_MAX_KEY_SIZE		32
@@ -406,8 +403,6 @@ struct hifn_dma {
 	u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
 	u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
 
-	u64			test_src, test_dst;
-
 	/*
 	 * Our current positions for insertion and removal from the descriptor
 	 * rings.
@@ -434,9 +429,6 @@ struct hifn_device
 	struct pci_dev		*pdev;
 	void __iomem		*bar[3];
 
-	unsigned long		result_mem;
-	dma_addr_t		dst;
-
 	void			*desc_virt;
 	dma_addr_t		desc_dma;
 
@@ -446,8 +438,6 @@ struct hifn_device
 
 	spinlock_t		lock;
 
-	void			*priv;
-
 	u32			flags;
 	int			active, started;
 	struct delayed_work	work;
@@ -657,12 +647,17 @@ struct ablkcipher_walk
 
 struct hifn_context
 {
-	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
+	u8			key[HIFN_MAX_CRYPT_KEY_LENGTH];
 	struct hifn_device	*dev;
-	unsigned int		keysize, ivsize;
+	unsigned int		keysize;
+};
+
+struct hifn_request_context
+{
+	u8			*iv;
+	unsigned int		ivsize;
 	u8			op, type, mode, unused;
 	struct ablkcipher_walk	walk;
-	atomic_t		sg_num;
 };
 
 #define crypto_alg_to_hifn(a)	container_of(a, struct hifn_crypto_alg, alg)
@@ -1168,7 +1163,8 @@ static int hifn_setup_crypto_command(struct hifn_device *dev,
 }
 
 static int hifn_setup_cmd_desc(struct hifn_device *dev,
-		struct hifn_context *ctx, void *priv, unsigned int nbytes)
+		struct hifn_context *ctx, struct hifn_request_context *rctx,
+		void *priv, unsigned int nbytes)
 {
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int cmd_len, sa_idx;
@@ -1179,7 +1175,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 	buf_pos = buf = dma->command_bufs[dma->cmdi];
 
 	mask = 0;
-	switch (ctx->op) {
+	switch (rctx->op) {
 	case ACRYPTO_OP_DECRYPT:
 		mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
 		break;
@@ -1196,15 +1192,15 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
 			nbytes, mask, dev->snum);
 
-	if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
+	if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) {
 		u16 md = 0;
 
 		if (ctx->keysize)
 			md |= HIFN_CRYPT_CMD_NEW_KEY;
-		if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
+		if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB)
 			md |= HIFN_CRYPT_CMD_NEW_IV;
 
-		switch (ctx->mode) {
+		switch (rctx->mode) {
 		case ACRYPTO_MODE_ECB:
 			md |= HIFN_CRYPT_CMD_MODE_ECB;
 			break;
@@ -1221,7 +1217,7 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 			goto err_out;
 		}
 
-		switch (ctx->type) {
+		switch (rctx->type) {
 		case ACRYPTO_TYPE_AES_128:
 			if (ctx->keysize != 16)
 				goto err_out;
@@ -1256,17 +1252,18 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev,
 
 		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
 				nbytes, nbytes, ctx->key, ctx->keysize,
-				ctx->iv, ctx->ivsize, md);
+				rctx->iv, rctx->ivsize, md);
 	}
 
 	dev->sa[sa_idx] = priv;
+	dev->started++;
 
 	cmd_len = buf_pos - buf;
 	dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
 			HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
 
 	if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
-		dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
+		dma->cmdr[dma->cmdi].l = __cpu_to_le32(
 			HIFN_D_VALID | HIFN_D_LAST |
 			HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
 		dma->cmdi = 0;
@@ -1284,7 +1281,7 @@ err_out:
 }
 
 static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
-		unsigned int offset, unsigned int size)
+		unsigned int offset, unsigned int size, int last)
 {
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int idx;
@@ -1296,12 +1293,12 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
 
 	dma->srcr[idx].p = __cpu_to_le32(addr);
 	dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
-			HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+			HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
 
 	if (++idx == HIFN_D_SRC_RSIZE) {
 		dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
-				HIFN_D_JUMP |
-				HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
+				(last ? HIFN_D_LAST : 0));
 		idx = 0;
 	}
 
@@ -1342,7 +1339,7 @@ static void hifn_setup_res_desc(struct hifn_device *dev)
 }
 
 static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
-		unsigned offset, unsigned size)
+		unsigned offset, unsigned size, int last)
 {
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int idx;
@@ -1353,12 +1350,12 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
 	idx = dma->dsti;
 	dma->dstr[idx].p = __cpu_to_le32(addr);
 	dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
-			HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
+			HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
 
 	if (++idx == HIFN_D_DST_RSIZE) {
 		dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
 				HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
-				HIFN_D_LAST);
+				(last ? HIFN_D_LAST : 0));
 		idx = 0;
 	}
 	dma->dsti = idx;
@@ -1370,16 +1367,52 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
 	}
 }
 
-static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
-		struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
-		struct hifn_context *ctx)
+static int hifn_setup_dma(struct hifn_device *dev,
+		struct hifn_context *ctx, struct hifn_request_context *rctx,
+		struct scatterlist *src, struct scatterlist *dst,
+		unsigned int nbytes, void *priv)
 {
-	dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
-			dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
+	struct scatterlist *t;
+	struct page *spage, *dpage;
+	unsigned int soff, doff;
+	unsigned int n, len;
 
-	hifn_setup_src_desc(dev, spage, soff, nbytes);
-	hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
-	hifn_setup_dst_desc(dev, dpage, doff, nbytes);
+	n = nbytes;
+	while (n) {
+		spage = sg_page(src);
+		soff = src->offset;
+		len = min(src->length, n);
+
+		hifn_setup_src_desc(dev, spage, soff, len, n - len == 0);
+
+		src++;
+		n -= len;
+	}
+
+	t = &rctx->walk.cache[0];
+	n = nbytes;
+	while (n) {
+		if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
+			BUG_ON(!sg_page(t));
+			dpage = sg_page(t);
+			doff = 0;
+			len = t->length;
+		} else {
+			BUG_ON(!sg_page(dst));
+			dpage = sg_page(dst);
+			doff = dst->offset;
+			len = dst->length;
+		}
+		len = min(len, n);
+
+		hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0);
+
+		dst++;
+		t++;
+		n -= len;
+	}
+
+	hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes);
 	hifn_setup_res_desc(dev);
 	return 0;
 }
@@ -1424,32 +1457,26 @@ static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
 	w->num = 0;
 }
 
-static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
+static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
 		unsigned int size, unsigned int *nbytesp)
 {
 	unsigned int copy, drest = *drestp, nbytes = *nbytesp;
 	int idx = 0;
-	void *saddr;
 
 	if (drest < size || size > nbytes)
 		return -EINVAL;
 
 	while (size) {
-		copy = min(drest, min(size, src->length));
-
-		saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
-		memcpy(daddr, saddr + src->offset, copy);
-		kunmap_atomic(saddr, KM_SOFTIRQ1);
+		copy = min(drest, min(size, dst->length));
 
 		size -= copy;
 		drest -= copy;
 		nbytes -= copy;
-		daddr += copy;
 
 		dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
 				__func__, copy, size, drest, nbytes);
 
-		src++;
+		dst++;
 		idx++;
 	}
 
@@ -1462,8 +1489,7 @@ static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist
 static int ablkcipher_walk(struct ablkcipher_request *req,
 		struct ablkcipher_walk *w)
 {
-	struct scatterlist *src, *dst, *t;
-	void *daddr;
+	struct scatterlist *dst, *t;
 	unsigned int nbytes = req->nbytes, offset, copy, diff;
 	int idx, tidx, err;
 
@@ -1473,26 +1499,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 		if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
 			return -EINVAL;
 
-		src = &req->src[idx];
 		dst = &req->dst[idx];
 
-		dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
-				"nbytes: %u.\n",
-				__func__, src->length, dst->length, src->offset,
-				dst->offset, offset, nbytes);
+		dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
+			__func__, dst->length, dst->offset, offset, nbytes);
 
 		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
 		    !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
 		    offset) {
-			unsigned slen = min(src->length - offset, nbytes);
+			unsigned slen = min(dst->length - offset, nbytes);
 			unsigned dlen = PAGE_SIZE;
 
 			t = &w->cache[idx];
 
-			daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
-			err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
+			err = ablkcipher_add(&dlen, dst, slen, &nbytes);
 			if (err < 0)
-				goto err_out_unmap;
+				return err;
 
 			idx += err;
@@ -1528,21 +1550,19 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 			} else {
 				copy += diff + nbytes;
 
-				src = &req->src[idx];
+				dst = &req->dst[idx];
 
-				err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
+				err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
 				if (err < 0)
-					goto err_out_unmap;
+					return err;
 
 				idx += err;
 			}
 
 			t->length = copy;
 			t->offset = offset;
-
-			kunmap_atomic(daddr, KM_SOFTIRQ0);
 		} else {
-			nbytes -= min(src->length, nbytes);
+			nbytes -= min(dst->length, nbytes);
 			idx++;
 		}
 
@@ -1550,26 +1570,22 @@ static int ablkcipher_walk(struct ablkcipher_request *req,
 	}
 
 	return tidx;
-
-err_out_unmap:
-	kunmap_atomic(daddr, KM_SOFTIRQ0);
-	return err;
 }
 
 static int hifn_setup_session(struct ablkcipher_request *req)
 {
 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
 	struct hifn_device *dev = ctx->dev;
-	struct page *spage, *dpage;
-	unsigned long soff, doff, dlen, flags;
-	unsigned int nbytes = req->nbytes, idx = 0, len;
+	unsigned long dlen, flags;
+	unsigned int nbytes = req->nbytes, idx = 0;
 	int err = -EINVAL, sg_num;
-	struct scatterlist *src, *dst, *t;
+	struct scatterlist *dst;
 
-	if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
+	if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB)
 		goto err_out_exit;
 
-	ctx->walk.flags = 0;
+	rctx->walk.flags = 0;
 
 	while (nbytes) {
 		dst = &req->dst[idx];
@@ -1577,27 +1593,23 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 
 		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
 		    !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
-			ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
+			rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
 
 		nbytes -= dlen;
 		idx++;
 	}
 
-	if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-		err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
+	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
+		err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
 		if (err < 0)
 			return err;
 	}
 
-	nbytes = req->nbytes;
-	idx = 0;
-
-	sg_num = ablkcipher_walk(req, &ctx->walk);
+	sg_num = ablkcipher_walk(req, &rctx->walk);
 	if (sg_num < 0) {
 		err = sg_num;
 		goto err_out_exit;
 	}
-	atomic_set(&ctx->sg_num, sg_num);
 
 	spin_lock_irqsave(&dev->lock, flags);
 	if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
@@ -1605,37 +1617,11 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 		goto err_out;
 	}
 
-	dev->snum++;
-	dev->started += sg_num;
-
-	while (nbytes) {
-		src = &req->src[idx];
-		dst = &req->dst[idx];
-		t = &ctx->walk.cache[idx];
-
-		if (t->length) {
-			spage = dpage = sg_page(t);
-			soff = doff = 0;
-			len = t->length;
-		} else {
-			spage = sg_page(src);
-			soff = src->offset;
-
-			dpage = sg_page(dst);
-			doff = dst->offset;
-
-			len = dst->length;
-		}
-
-		idx++;
-
-		err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
-				req, ctx);
-		if (err)
-			goto err_out;
+	err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
+	if (err)
+		goto err_out;
 
-		nbytes -= min(len, nbytes);
-	}
+	dev->snum++;
 
 	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
 	spin_unlock_irqrestore(&dev->lock, flags);
@@ -1645,12 +1631,13 @@ static int hifn_setup_session(struct ablkcipher_request *req)
 err_out:
 	spin_unlock_irqrestore(&dev->lock, flags);
 err_out_exit:
-	if (err)
-		dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
+	if (err) {
+		printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
 			"type: %u, err: %d.\n",
-			dev->name, ctx->iv, ctx->ivsize,
+			dev->name, rctx->iv, rctx->ivsize,
 			ctx->key, ctx->keysize,
-			ctx->mode, ctx->op, ctx->type, err);
+			rctx->mode, rctx->op, rctx->type, err);
+	}
 
 	return err;
 }
@@ -1660,31 +1647,33 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
 	int n, err;
 	u8 src[16];
 	struct hifn_context ctx;
+	struct hifn_request_context rctx;
 	u8 fips_aes_ecb_from_zero[16] = {
 		0x66, 0xE9, 0x4B, 0xD4, 0xEF, 0x8A, 0x2C, 0x3B,
 		0x88, 0x4C, 0xFA, 0x59, 0xCA, 0x34, 0x2B, 0x2E};
+	struct scatterlist sg;
 
 	memset(src, 0, sizeof(src));
 	memset(ctx.key, 0, sizeof(ctx.key));
 
 	ctx.dev = dev;
 	ctx.keysize = 16;
-	ctx.ivsize = 0;
-	ctx.iv = NULL;
-	ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
-	ctx.mode = ACRYPTO_MODE_ECB;
-	ctx.type = ACRYPTO_TYPE_AES_128;
-	atomic_set(&ctx.sg_num, 1);
-
-	err = hifn_setup_dma(dev,
-			virt_to_page(src), offset_in_page(src),
-			virt_to_page(src), offset_in_page(src),
-			sizeof(src), NULL, &ctx);
+	rctx.ivsize = 0;
+	rctx.iv = NULL;
+	rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
+	rctx.mode = ACRYPTO_MODE_ECB;
+	rctx.type = ACRYPTO_TYPE_AES_128;
+	rctx.walk.cache[0].length = 0;
+
+	sg_init_one(&sg, &src, sizeof(src));
+
+	err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL);
 	if (err)
 		goto err_out;
 
+	dev->started = 0;
 	msleep(200);
 
 	dprintk("%s: decoded: ", dev->name);
@@ -1711,6 +1700,7 @@ static int hifn_start_device(struct hifn_device *dev)
 {
 	int err;
 
+	dev->started = dev->active = 0;
 	hifn_reset_dma(dev, 1);
 
 	err = hifn_enable_crypto(dev);
@@ -1764,90 +1754,65 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
 	return idx;
 }
 
-static void hifn_process_ready(struct ablkcipher_request *req, int error)
+static inline void hifn_complete_sa(struct hifn_device *dev, int i)
 {
-	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
-	struct hifn_device *dev;
-
-	dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
+	unsigned long flags;
 
-	dev = ctx->dev;
-	dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
-		__func__, req, dev->started, atomic_read(&ctx->sg_num));
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->sa[i] = NULL;
+	dev->started--;
+	if (dev->started < 0)
+		printk("%s: started: %d.\n", __func__, dev->started);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	BUG_ON(dev->started < 0);
+}
 
-	if (--dev->started < 0)
-		BUG();
+static void hifn_process_ready(struct ablkcipher_request *req, int error)
+{
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
 
-	if (atomic_dec_and_test(&ctx->sg_num)) {
+	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
 		unsigned int nbytes = req->nbytes;
 		int idx = 0, err;
 		struct scatterlist *dst, *t;
 		void *saddr;
 
-		if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
-			while (nbytes) {
-				t = &ctx->walk.cache[idx];
-				dst = &req->dst[idx];
-
-				dprintk("\n%s: sg_page(t): %p, t->length: %u, "
-					"sg_page(dst): %p, dst->length: %u, "
-					"nbytes: %u.\n",
-					__func__, sg_page(t), t->length,
-					sg_page(dst), dst->length, nbytes);
-
-				if (!t->length) {
-					nbytes -= min(dst->length, nbytes);
-					idx++;
-					continue;
-				}
-
-				saddr = kmap_atomic(sg_page(t), KM_IRQ1);
-
-				err = ablkcipher_get(saddr, &t->length, t->offset,
-						dst, nbytes, &nbytes);
-				if (err < 0) {
-					kunmap_atomic(saddr, KM_IRQ1);
-					break;
-				}
-
-				idx += err;
-				kunmap_atomic(saddr, KM_IRQ1);
-			}
-
-			ablkcipher_walk_exit(&ctx->walk);
-		}
-
-		req->base.complete(&req->base, error);
-	}
-}
+		while (nbytes) {
+			t = &rctx->walk.cache[idx];
+			dst = &req->dst[idx];
+
+			dprintk("\n%s: sg_page(t): %p, t->length: %u, "
+				"sg_page(dst): %p, dst->length: %u, "
+				"nbytes: %u.\n",
+				__func__, sg_page(t), t->length,
+				sg_page(dst), dst->length, nbytes);
+
+			if (!t->length) {
+				nbytes -= min(dst->length, nbytes);
+				idx++;
+				continue;
+			}
+
+			saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
+
+			err = ablkcipher_get(saddr, &t->length, t->offset,
+					dst, nbytes, &nbytes);
+			if (err < 0) {
+				kunmap_atomic(saddr, KM_SOFTIRQ0);
+				break;
+			}
+
+			idx += err;
+			kunmap_atomic(saddr, KM_SOFTIRQ0);
+		}
+
+		ablkcipher_walk_exit(&rctx->walk);
+	}
 
-static void hifn_check_for_completion(struct hifn_device *dev, int error)
-{
-	int i;
-	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
-
-	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
-		struct hifn_desc *d = &dma->resr[i];
-
-		if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
-			dev->success++;
-			dev->reset = 0;
-			hifn_process_ready(dev->sa[i], error);
-			dev->sa[i] = NULL;
-		}
-
-		if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
-			if (printk_ratelimit())
-				printk("%s: overflow detected [d: %u, o: %u] "
-					"at %d resr: l: %08x, p: %08x.\n",
-					dev->name,
-					!!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
-					!!(d->l & __cpu_to_le32(HIFN_D_OVER)),
-					i, d->l, d->p);
-	}
+	req->base.complete(&req->base, error);
 }
 
-static void hifn_clear_rings(struct hifn_device *dev)
+static void hifn_clear_rings(struct hifn_device *dev, int error)
 {
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i, u;
@@ -1864,21 +1829,26 @@ static void hifn_clear_rings(struct hifn_device *dev)
 		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
 			break;
 
-		if (i != HIFN_D_RES_RSIZE)
-			u--;
+		if (dev->sa[i]) {
+			dev->success++;
+			dev->reset = 0;
+			hifn_process_ready(dev->sa[i], error);
+			hifn_complete_sa(dev, i);
+		}
 
-		if (++i == (HIFN_D_RES_RSIZE + 1))
+		if (++i == HIFN_D_RES_RSIZE)
 			i = 0;
+		u--;
 	}
 	dma->resk = i; dma->resu = u;
 
 	i = dma->srck; u = dma->srcu;
 	while (u != 0) {
-		if (i == HIFN_D_SRC_RSIZE)
-			i = 0;
 		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
 			break;
-		i++, u--;
+		if (++i == HIFN_D_SRC_RSIZE)
+			i = 0;
+		u--;
 	}
 	dma->srck = i; dma->srcu = u;
 
@@ -1886,20 +1856,19 @@ static void hifn_clear_rings(struct hifn_device *dev)
 	while (u != 0) {
 		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
 			break;
-		if (i != HIFN_D_CMD_RSIZE)
-			u--;
-		if (++i == (HIFN_D_CMD_RSIZE + 1))
+		if (++i == HIFN_D_CMD_RSIZE)
 			i = 0;
+		u--;
 	}
 	dma->cmdk = i; dma->cmdu = u;
 
 	i = dma->dstk; u = dma->dstu;
 	while (u != 0) {
-		if (i == HIFN_D_DST_RSIZE)
-			i = 0;
 		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
 			break;
-		i++, u--;
+		if (++i == HIFN_D_DST_RSIZE)
+			i = 0;
+		u--;
 	}
 	dma->dstk = i; dma->dstu = u;
@@ -1944,30 +1913,39 @@ static void hifn_work(struct work_struct *work)
 	} else
 		dev->active--;
 
-	if (dev->prev_success == dev->success && dev->started)
+	if ((dev->prev_success == dev->success) && dev->started)
 		reset = 1;
 	dev->prev_success = dev->success;
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (reset) {
-		dprintk("%s: r: %08x, active: %d, started: %d, "
-				"success: %lu: reset: %d.\n",
-			dev->name, r, dev->active, dev->started,
-			dev->success, reset);
-
 		if (++dev->reset >= 5) {
-			dprintk("%s: really hard reset.\n", dev->name);
+			int i;
+			struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
+
+			printk("%s: r: %08x, active: %d, started: %d, "
+				"success: %lu: qlen: %u/%u, reset: %d.\n",
+				dev->name, r, dev->active, dev->started,
+				dev->success, dev->queue.qlen, dev->queue.max_qlen,
+				reset);
+
+			printk("%s: res: ", __func__);
+			for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
+				printk("%x.%p ", dma->resr[i].l, dev->sa[i]);
+				if (dev->sa[i]) {
+					hifn_process_ready(dev->sa[i], -ENODEV);
+					hifn_complete_sa(dev, i);
+				}
+			}
+			printk("\n");
+
 			hifn_reset_dma(dev, 1);
 			hifn_stop_device(dev);
 			hifn_start_device(dev);
 			dev->reset = 0;
 		}
 
-		spin_lock_irqsave(&dev->lock, flags);
-		hifn_check_for_completion(dev, -EBUSY);
-		hifn_clear_rings(dev);
-		dev->started = 0;
-		spin_unlock_irqrestore(&dev->lock, flags);
+		tasklet_schedule(&dev->tasklet);
 	}
 
 	schedule_delayed_work(&dev->work, HZ);
@@ -1984,8 +1962,8 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
 			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
 		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
-		dma->cmdu, dma->srcu, dma->dstu, dma->resu,
-		dma->cmdi, dma->srci, dma->dsti, dma->resi);
+		dma->cmdi, dma->srci, dma->dsti, dma->resi,
+		dma->cmdu, dma->srcu, dma->dstu, dma->resu);
 
 	if ((dmacsr & dev->dmareg) == 0)
 		return IRQ_NONE;
@@ -2002,11 +1980,10 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	if (restart) {
 		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
 
-		if (printk_ratelimit())
-			printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
-				dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
-				!!(dmacsr & HIFN_DMACSR_D_OVER),
-				puisr, !!(puisr & HIFN_PUISR_DSTOVER));
+		printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
+			dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
+			!!(dmacsr & HIFN_DMACSR_D_OVER),
+			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
 		if (!!(puisr & HIFN_PUISR_DSTOVER))
 			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
 		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
@@ -2016,12 +1993,11 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
 			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
 	if (restart) {
-		if (printk_ratelimit())
-			printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
-				dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
-				!!(dmacsr & HIFN_DMACSR_S_ABORT),
-				!!(dmacsr & HIFN_DMACSR_D_ABORT),
-				!!(dmacsr & HIFN_DMACSR_R_ABORT));
+		printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
+			dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
+			!!(dmacsr & HIFN_DMACSR_S_ABORT),
+			!!(dmacsr & HIFN_DMACSR_D_ABORT),
+			!!(dmacsr & HIFN_DMACSR_R_ABORT));
 		hifn_reset_dma(dev, 1);
 		hifn_init_dma(dev);
 		hifn_init_registers(dev);
@@ -2034,7 +2010,6 @@ static irqreturn_t hifn_interrupt(int irq, void *data)
 	}
 
 	tasklet_schedule(&dev->tasklet);
-	hifn_clear_rings(dev);
 
 	return IRQ_HANDLED;
 }
@@ -2048,21 +2023,25 @@ static void hifn_flush(struct hifn_device *dev)
 	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
 	int i;
 
-	spin_lock_irqsave(&dev->lock, flags);
 	for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
 		struct hifn_desc *d = &dma->resr[i];
 
 		if (dev->sa[i]) {
 			hifn_process_ready(dev->sa[i],
 				(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
+			hifn_complete_sa(dev, i);
 		}
 	}
 
+	spin_lock_irqsave(&dev->lock, flags);
 	while ((async_req = crypto_dequeue_request(&dev->queue))) {
 		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
+		spin_unlock_irqrestore(&dev->lock, flags);
 
 		hifn_process_ready(req, -ENODEV);
+
+		spin_lock_irqsave(&dev->lock, flags);
 	}
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
@@ -2121,6 +2100,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 		u8 type, u8 mode)
 {
 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
 	unsigned ivsize;
 
 	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
@@ -2141,11 +2121,11 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 			type = ACRYPTO_TYPE_AES_256;
 	}
 
-	ctx->op = op;
-	ctx->mode = mode;
-	ctx->type = type;
-	ctx->iv = req->info;
-	ctx->ivsize = ivsize;
+	rctx->op = op;
+	rctx->mode = mode;
+	rctx->type = type;
+	rctx->iv = req->info;
+	rctx->ivsize = ivsize;
 
 	/*
 	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
@@ -2158,7 +2138,7 @@ static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
 
 static int hifn_process_queue(struct hifn_device *dev)
 {
-	struct crypto_async_request *async_req;
+	struct crypto_async_request *async_req, *backlog;
 	struct hifn_context *ctx;
 	struct ablkcipher_request *req;
 	unsigned long flags;
@@ -2166,12 +2146,16 @@ static int hifn_process_queue(struct hifn_device *dev)
 
 	while (dev->started < HIFN_QUEUE_LENGTH) {
 		spin_lock_irqsave(&dev->lock, flags);
+		backlog = crypto_get_backlog(&dev->queue);
 		async_req = crypto_dequeue_request(&dev->queue);
 		spin_unlock_irqrestore(&dev->lock, flags);
 
 		if (!async_req)
 			break;
 
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
 		ctx = crypto_tfm_ctx(async_req->tfm);
 		req = container_of(async_req, struct ablkcipher_request, base);
 
@@ -2496,7 +2480,7 @@ static int hifn_cra_init(struct crypto_tfm *tfm)
 	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
 
 	ctx->dev = ha->dev;
-
+	tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
 	return 0;
 }
@@ -2574,7 +2558,10 @@ static void hifn_tasklet_callback(unsigned long data)
 	 * (like dev->success), but they are used in process
 	 * context or update is atomic (like setting dev->sa[i] to NULL).
 	 */
-	hifn_check_for_completion(dev, 0);
+	hifn_clear_rings(dev, 0);
+
+	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
+		hifn_process_queue(dev);
 }
 
 static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2631,22 +2618,11 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_out_unmap_bars;
 	}
 
-	dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
-	if (!dev->result_mem) {
-		dprintk("Failed to allocate %d pages for result_mem.\n",
-				HIFN_MAX_RESULT_ORDER);
-		goto err_out_unmap_bars;
-	}
-	memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
-
-	dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
-			PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
-
 	dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
 			&dev->desc_dma);
 	if (!dev->desc_virt) {
 		dprintk("Failed to allocate descriptor rings.\n");
-		goto err_out_free_result_pages;
+		goto err_out_unmap_bars;
 	}
 	memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
 
@@ -2706,11 +2682,6 @@ err_out_free_desc:
 	pci_free_consistent(pdev, sizeof(struct hifn_dma),
 			dev->desc_virt, dev->desc_dma);
 
-err_out_free_result_pages:
-	pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
-			PCI_DMA_FROMDEVICE);
-	free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
-
 err_out_unmap_bars:
 	for (i=0; i<3; ++i)
 		if (dev->bar[i])
@@ -2748,10 +2719,6 @@ static void hifn_remove(struct pci_dev *pdev)
 
 		pci_free_consistent(pdev, sizeof(struct hifn_dma),
 				dev->desc_virt, dev->desc_dma);
-		pci_unmap_single(pdev, dev->dst,
-				PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
-				PCI_DMA_FROMDEVICE);
-		free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
 		for (i=0; i<3; ++i)
 			if (dev->bar[i])
 				iounmap(dev->bar[i]);
@@ -2782,6 +2749,11 @@ static int __devinit hifn_init(void)
 	unsigned int freq;
 	int err;
 
+	if (sizeof(dma_addr_t) > 4) {
+		printk(KERN_INFO "HIFN supports only 32-bit addresses.\n");
+		return -EINVAL;
+	}
+
 	if (strncmp(hifn_pll_ref, "ext", 3) &&
 	    strncmp(hifn_pll_ref, "pci", 3)) {
 		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
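The hifn rework above moves per-request state (IV, operation, mode, walk cache) out of the shared `hifn_context` and into a `hifn_request_context` allocated per request via `tfm->crt_ablkcipher.reqsize`. Below is a minimal userspace sketch of why that split matters; the `hifn_like_*` structs and the `submit()` helper are invented for illustration and are not the driver's API.

```c
/*
 * Sketch: two in-flight requests on the same transform must not share
 * mutable per-request state, or one would clobber the other's IV/op.
 */
#include <stdio.h>

struct hifn_like_ctx {           /* transform-lifetime: one per key */
	unsigned char key[32];
	unsigned int keysize;
};

struct hifn_like_rctx {          /* request-lifetime: one per request */
	unsigned char *iv;
	unsigned int ivsize;
	unsigned char op;
};

struct request {
	struct hifn_like_ctx *ctx;   /* shared */
	struct hifn_like_rctx rctx;  /* private to this request */
};

static void submit(struct request *req, unsigned char *iv,
		   unsigned int ivsize, unsigned char op)
{
	/* per-request parameters land in the request context only */
	req->rctx.iv = iv;
	req->rctx.ivsize = ivsize;
	req->rctx.op = op;
}

int main(void)
{
	struct hifn_like_ctx ctx = { .keysize = 16 };
	unsigned char iv1[16] = { 1 }, iv2[16] = { 2 };
	struct request a = { .ctx = &ctx }, b = { .ctx = &ctx };

	submit(&a, iv1, sizeof(iv1), 0 /* encrypt */);
	submit(&b, iv2, sizeof(iv2), 1 /* decrypt */);

	/* both requests keep distinct IVs while sharing one key */
	printf("a.iv[0]=%u b.iv[0]=%u\n", a.rctx.iv[0], b.rctx.iv[0]);
	return 0;
}
```

With the old layout, `ctx->iv` in `hifn_setup_crypto_req()` was overwritten by whichever request was prepared last; after the change each request carries its own copy.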
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index bf2917d197a0..856b3cc25583 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -15,6 +15,8 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
 #include <asm/byteorder.h>
 #include <asm/i387.h>
 #include "padlock.h"
@@ -49,6 +51,8 @@ struct aes_ctx {
 	u32 *D;
 };
 
+static DEFINE_PER_CPU(struct cword *, last_cword);
+
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
 static inline int
@@ -89,6 +93,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	const __le32 *key = (const __le32 *)in_key;
 	u32 *flags = &tfm->crt_flags;
 	struct crypto_aes_ctx gen_aes;
+	int cpu;
 
 	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -118,7 +123,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 	/* Don't generate extended keys if the hardware can do it. */
 	if (aes_hw_extkey_available(key_len))
-		return 0;
+		goto ok;
 
 	ctx->D = ctx->d_data;
 	ctx->cword.encrypt.keygen = 1;
@@ -131,15 +136,30 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
 	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
 
+ok:
+	for_each_online_cpu(cpu)
+		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
+			per_cpu(last_cword, cpu) = NULL;
+
 	return 0;
 }
 
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock. */
-static inline void padlock_reset_key(void)
+static inline void padlock_reset_key(struct cword *cword)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (cword != per_cpu(last_cword, cpu))
+		asm volatile ("pushfl; popfl");
+}
+
+static inline void padlock_store_cword(struct cword *cword)
 {
-	asm volatile ("pushfl; popfl");
+	per_cpu(last_cword, raw_smp_processor_id()) = cword;
 }
 
 /*
@@ -149,7 +169,7 @@ static inline void padlock_reset_key(void)
  */
 
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  void *control_word)
+				  struct cword *control_word)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
@@ -213,22 +233,24 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	int ts_state;
-	padlock_reset_key();
+
+	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
 	irq_ts_restore(ts_state);
+	padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
 	int ts_state;
-	padlock_reset_key();
+
+	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
 	irq_ts_restore(ts_state);
+	padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static struct crypto_alg aes_alg = {
@@ -261,7 +283,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -276,6 +298,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);
 
+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }
 
@@ -288,7 +312,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.decrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -302,6 +326,9 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	irq_ts_restore(ts_state);
+
+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
 }
 
@@ -336,7 +363,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -353,6 +380,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);
 
+	padlock_store_cword(&ctx->cword.decrypt);
+
 	return err;
 }
 
@@ -365,7 +394,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	int err;
 	int ts_state;
 
-	padlock_reset_key();
+	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
@@ -380,6 +409,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	}
 	irq_ts_restore(ts_state);
 
+	padlock_store_cword(&ctx->cword.encrypt);
+
 	return err;
}
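The padlock change caches, per CPU, a pointer to the control word last loaded into the engine (`last_cword`), so the expensive `pushfl; popfl` key-reload sequence only runs when a different cword is about to be used on that CPU. A rough userspace model of the idea follows; `reset_engine()` stands in for the real reset, `__thread` stands in for `DEFINE_PER_CPU`, and the counter is only there to make the effect visible.

```c
#include <stdio.h>

struct cword { unsigned int rounds; };

static __thread const struct cword *last_cword; /* per-"CPU" cache */
static unsigned long resets;

static void reset_engine(void)        /* models "pushfl; popfl" */
{
	resets++;
}

static void crypt_block(const struct cword *cword)
{
	if (cword != last_cword)      /* padlock_reset_key() */
		reset_engine();
	/* ... the rep xcrypt instruction would run here ... */
	last_cword = cword;           /* padlock_store_cword() */
}

int main(void)
{
	struct cword enc = { 10 }, dec = { 10 };

	crypt_block(&enc);  /* reset: first use */
	crypt_block(&enc);  /* cached: no reset */
	crypt_block(&dec);  /* reset: cword changed */
	printf("resets: %lu (expect 2 for 3 blocks)\n", resets);
	return 0;
}
```

The `aes_set_key()` hunk clears any cached pointer to this context on every online CPU, since a rekeyed transform must force a reload even if the pointer value happens to match.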
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 24607669a52b..a3918c16b3db 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -127,7 +127,6 @@ struct talitos_private {
 
 	/* request callback tasklet */
 	struct tasklet_struct done_task;
-	struct tasklet_struct error_task;
 
 	/* list of registered algorithms */
 	struct list_head alg_list;
@@ -138,6 +137,7 @@ struct talitos_private {
 
 /* .features flag */
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
 
 /*
  * map virtual single (contiguous) pointer to h/w descriptor pointer
@@ -184,6 +184,11 @@ static int reset_channel(struct device *dev, int ch)
 	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
 		  TALITOS_CCCR_LO_CDIE);
 
+	/* and ICCR writeback, if available */
+	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
+			  TALITOS_CCCR_LO_IWSE);
+
 	return 0;
 }
 
@@ -239,6 +244,11 @@ static int init_device(struct device *dev)
 	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 
+	/* disable integrity check error interrupts (use writeback instead) */
+	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
+		setbits32(priv->reg + TALITOS_MDEUICR_LO,
+			  TALITOS_MDEUICR_LO_ICE);
+
 	return 0;
 }
 
@@ -370,6 +380,12 @@ static void talitos_done(unsigned long data)
 
 	for (ch = 0; ch < priv->num_channels; ch++)
 		flush_channel(dev, ch, 0, 0);
+
+	/* At this point, all completed channels have been processed.
+	 * Unmask done interrupts for channels completed later on.
+	 */
+	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
+	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 }
 
 /*
@@ -469,16 +485,13 @@ static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc)
 /*
  * recover from error interrupts
  */
-static void talitos_error(unsigned long data)
+static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 {
 	struct device *dev = (struct device *)data;
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	unsigned int timeout = TALITOS_TIMEOUT;
 	int ch, error, reset_dev = 0, reset_ch = 0;
-	u32 isr, isr_lo, v, v_lo;
-
-	isr = in_be32(priv->reg + TALITOS_ISR);
-	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
+	u32 v, v_lo;
 
 	for (ch = 0; ch < priv->num_channels; ch++) {
 		/* skip channels without errors */
@@ -560,16 +573,19 @@ static irqreturn_t talitos_interrupt(int irq, void *data)
 
 	isr = in_be32(priv->reg + TALITOS_ISR);
 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
-
-	/* ack */
+	/* Acknowledge interrupt */
 	out_be32(priv->reg + TALITOS_ICR, isr);
 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
 
 	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
-		talitos_error((unsigned long)data);
+		talitos_error((unsigned long)data, isr, isr_lo);
 	else
-		if (likely(isr & TALITOS_ISR_CHDONE))
+		if (likely(isr & TALITOS_ISR_CHDONE)) {
+			/* mask further done interrupts. */
+			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
+			/* done_task will unmask done interrupts at exit */
 			tasklet_schedule(&priv->done_task);
+		}
 
 	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -802,7 +818,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
 	aead_request_complete(areq, err);
 }
 
-static void ipsec_esp_decrypt_done(struct device *dev,
+static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 				   struct talitos_desc *desc, void *context,
 				   int err)
 {
@@ -834,6 +850,27 @@ static void ipsec_esp_decrypt_done(struct device *dev,
 	aead_request_complete(req, err);
 }
 
+static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+					  struct talitos_desc *desc, void *context,
+					  int err)
+{
+	struct aead_request *req = context;
+	struct ipsec_esp_edesc *edesc =
+		container_of(desc, struct ipsec_esp_edesc, desc);
+
+	ipsec_esp_unmap(dev, edesc, req);
+
+	/* check ICV auth status */
+	if (!err)
+		if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+		    DESC_HDR_LO_ICCR1_PASS)
+			err = -EBADMSG;
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
 /*
  * convert scatterlist to SEC h/w link table format
  * stop at cryptlen bytes
@@ -887,6 +924,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	unsigned int authsize = ctx->authsize;
 	unsigned int ivsize;
 	int sg_count, ret;
+	int sg_link_tbl_len;
 
 	/* hmac key */
 	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
@@ -924,33 +962,19 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 	if (sg_count == 1) {
 		desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
 	} else {
-		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
+		sg_link_tbl_len = cryptlen;
+
+		if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) &&
+		    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
+			sg_link_tbl_len = cryptlen + authsize;
+		}
+		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
 					  &edesc->link_tbl[0]);
 		if (sg_count > 1) {
-			struct talitos_ptr *link_tbl_ptr =
-				&edesc->link_tbl[sg_count-1];
-			struct scatterlist *sg;
-			struct talitos_private *priv = dev_get_drvdata(dev);
-
 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 			desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
 			dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 						   edesc->dma_len, DMA_BIDIRECTIONAL);
-			/* If necessary for this SEC revision,
-			 * add a link table entry for ICV.
-			 */
-			if ((priv->features &
-			     TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
-			    (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
-				link_tbl_ptr->j_extent = 0;
-				link_tbl_ptr++;
-				link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-				link_tbl_ptr->len = cpu_to_be16(authsize);
-				sg = sg_last(areq->src, edesc->src_nents ? : 1);
-				link_tbl_ptr->ptr = cpu_to_be32(
-						(char *)sg_dma_address(sg)
-						+ sg->length - authsize);
-			}
 		} else {
 			/* Only one segment now, so no link tbl needed */
 			desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -975,13 +999,9 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
 		desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
 					       edesc->dma_link_tbl +
 					       edesc->src_nents + 1);
-		if (areq->src == areq->dst) {
-			memcpy(link_tbl_ptr, &edesc->link_tbl[0],
-			       edesc->src_nents * sizeof(struct talitos_ptr));
-		} else {
-			sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
-						  link_tbl_ptr);
-		}
+		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
+					  link_tbl_ptr);
+
 		/* Add an entry to the link table for ICV data */
 		link_tbl_ptr += sg_count - 1;
 		link_tbl_ptr->j_extent = 0;
@@ -1106,11 +1126,14 @@ static int aead_authenc_encrypt(struct aead_request *req)
 	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
 }
 
+
+
 static int aead_authenc_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	unsigned int authsize = ctx->authsize;
+	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
 	struct ipsec_esp_edesc *edesc;
 	struct scatterlist *sg;
 	void *icvdata;
@@ -1122,22 +1145,39 @@ static int aead_authenc_decrypt(struct aead_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	/* stash incoming ICV for later cmp with ICV generated by the h/w */
-	if (edesc->dma_len)
-		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 2];
-	else
-		icvdata = &edesc->link_tbl[0];
+	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+	    (((!edesc->src_nents && !edesc->dst_nents) ||
+	      priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) {
+
+		/* decrypt and check the ICV */
+		edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND |
+				  DESC_HDR_MODE1_MDEU_CICV;
+
+		/* reset integrity check result bits */
+		edesc->desc.hdr_lo = 0;
+
+		return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done);
+
+	} else {
+
+		/* Have to check the ICV with software */
 
-	sg = sg_last(req->src, edesc->src_nents ? : 1);
+		edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+		/* stash incoming ICV for later cmp with ICV generated by the h/w */
+		if (edesc->dma_len)
+			icvdata = &edesc->link_tbl[edesc->src_nents +
+						   edesc->dst_nents + 2];
+		else
+			icvdata = &edesc->link_tbl[0];
+
+		sg = sg_last(req->src, edesc->src_nents ? : 1);
 
-	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
-	       ctx->authsize);
+		memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
+		       ctx->authsize);
 
-	/* decrypt */
-	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+		return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
+	}
-	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_done);
 }
 
 static int aead_authenc_givencrypt(
@@ -1391,7 +1431,6 @@ static int talitos_remove(struct of_device *ofdev)
 	}
 
 	tasklet_kill(&priv->done_task);
-	tasklet_kill(&priv->error_task);
 
 	iounmap(priv->reg);
 
@@ -1451,10 +1490,9 @@ static int talitos_probe(struct of_device *ofdev,
 
 	priv->ofdev = ofdev;
 
-	INIT_LIST_HEAD(&priv->alg_list);
-
 	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
-	tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
+
+	INIT_LIST_HEAD(&priv->alg_list);
 
 	priv->irq = irq_of_parse_and_map(np, 0);
 
@@ -1508,6 +1546,9 @@ static int talitos_probe(struct of_device *ofdev,
 	if (of_device_is_compatible(np, "fsl,sec3.0"))
 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
 
+	if (of_device_is_compatible(np, "fsl,sec2.1"))
+		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+
 	priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
				  GFP_KERNEL);
 	priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
@@ -1551,7 +1592,7 @@ static int talitos_probe(struct of_device *ofdev,
 		goto err_out;
 	}
 	for (i = 0; i < priv->num_channels; i++)
-		atomic_set(&priv->submit_count[i], -priv->chfifo_len);
+		atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));
 
 	priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
 	priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
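talitos now masks channel-done interrupts in the ISR and re-enables them only when the `done_task` tasklet has finished draining the channels, so the interrupt cannot keep re-firing while completions are still being processed. A stripped-down model of that mask/unmask handshake is below; the register variable, bit value and helper names are stand-ins, not the driver's API.

```c
#include <stdio.h>

#define IMR_DONE 0x55              /* "done" IRQ enable bits, cf. TALITOS_IMR_DONE */

static unsigned int imr = 0xff;    /* model of the interrupt mask register */
static int tasklet_pending;

static void isr(unsigned int status)
{
	if (status & IMR_DONE) {
		imr &= ~IMR_DONE;      /* mask further done IRQs... */
		tasklet_pending = 1;   /* ...and defer work to the bottom half */
	}
}

static void done_tasklet(void)
{
	/* drain completed descriptors here (flush_channel() in the driver) */
	imr |= IMR_DONE;               /* unmask done IRQs on exit */
	tasklet_pending = 0;
}

int main(void)
{
	isr(IMR_DONE);
	printf("after isr:     imr=%#x pending=%d\n", imr, tasklet_pending);
	done_tasklet();
	printf("after tasklet: imr=%#x pending=%d\n", imr, tasklet_pending);
	return 0;
}
```

The same commit folds error handling into the ISR path (`talitos_error()` now receives the already-read and already-acked `isr`/`isr_lo` values), which is why the separate `error_task` tasklet could be deleted.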
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index c48a405abf70..575981f0cfda 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -37,7 +37,8 @@
 #define TALITOS_MCR_LO			0x1038
 #define   TALITOS_MCR_SWR		0x1 /* s/w reset */
 #define TALITOS_IMR			0x1008 /* interrupt mask register */
-#define   TALITOS_IMR_INIT		0x10fff /* enable channel IRQs */
+#define   TALITOS_IMR_INIT		0x100ff /* enable channel IRQs */
+#define   TALITOS_IMR_DONE		0x00055 /* done IRQs */
 #define TALITOS_IMR_LO			0x100C
 #define   TALITOS_IMR_LO_INIT		0x20000 /* allow RNGU error IRQs */
 #define TALITOS_ISR			0x1010 /* interrupt status register */
@@ -55,6 +56,7 @@
 #define   TALITOS_CCCR_CONT		0x2    /* channel continue */
 #define   TALITOS_CCCR_RESET		0x1    /* channel reset */
 #define TALITOS_CCCR_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x110c)
+#define   TALITOS_CCCR_LO_IWSE		0x80   /* chan. ICCR writeback enab. */
 #define   TALITOS_CCCR_LO_CDWE		0x10   /* chan. done writeback enab. */
 #define   TALITOS_CCCR_LO_NT		0x4    /* notification type */
 #define   TALITOS_CCCR_LO_CDIE		0x2    /* channel done IRQ enable */
@@ -102,6 +104,9 @@
 #define TALITOS_AESUISR_LO		0x4034
 #define TALITOS_MDEUISR			0x6030 /* message digest unit */
 #define TALITOS_MDEUISR_LO		0x6034
+#define TALITOS_MDEUICR			0x6038 /* interrupt control */
+#define TALITOS_MDEUICR_LO		0x603c
+#define   TALITOS_MDEUICR_LO_ICE	0x4000 /* integrity check IRQ enable */
 #define TALITOS_AFEUISR			0x8030 /* arc4 unit */
 #define TALITOS_AFEUISR_LO		0x8034
 #define TALITOS_RNGUISR			0xa030 /* random number unit */
@@ -129,31 +134,34 @@
  */
 
 /* written back when done */
-#define DESC_HDR_DONE			__constant_cpu_to_be32(0xff000000)
+#define DESC_HDR_DONE			cpu_to_be32(0xff000000)
+#define DESC_HDR_LO_ICCR1_MASK		cpu_to_be32(0x00180000)
+#define DESC_HDR_LO_ICCR1_PASS		cpu_to_be32(0x00080000)
+#define DESC_HDR_LO_ICCR1_FAIL		cpu_to_be32(0x00100000)
 
 /* primary execution unit select */
-#define	DESC_HDR_SEL0_MASK		__constant_cpu_to_be32(0xf0000000)
-#define	DESC_HDR_SEL0_AFEU		__constant_cpu_to_be32(0x10000000)
-#define	DESC_HDR_SEL0_DEU		__constant_cpu_to_be32(0x20000000)
-#define	DESC_HDR_SEL0_MDEUA		__constant_cpu_to_be32(0x30000000)
-#define	DESC_HDR_SEL0_MDEUB		__constant_cpu_to_be32(0xb0000000)
-#define	DESC_HDR_SEL0_RNG		__constant_cpu_to_be32(0x40000000)
-#define	DESC_HDR_SEL0_PKEU		__constant_cpu_to_be32(0x50000000)
-#define	DESC_HDR_SEL0_AESU		__constant_cpu_to_be32(0x60000000)
-#define	DESC_HDR_SEL0_KEU		__constant_cpu_to_be32(0x70000000)
-#define	DESC_HDR_SEL0_CRCU		__constant_cpu_to_be32(0x80000000)
+#define	DESC_HDR_SEL0_MASK		cpu_to_be32(0xf0000000)
+#define	DESC_HDR_SEL0_AFEU		cpu_to_be32(0x10000000)
+#define	DESC_HDR_SEL0_DEU		cpu_to_be32(0x20000000)
+#define	DESC_HDR_SEL0_MDEUA		cpu_to_be32(0x30000000)
+#define	DESC_HDR_SEL0_MDEUB		cpu_to_be32(0xb0000000)
+#define	DESC_HDR_SEL0_RNG		cpu_to_be32(0x40000000)
+#define	DESC_HDR_SEL0_PKEU		cpu_to_be32(0x50000000)
+#define	DESC_HDR_SEL0_AESU		cpu_to_be32(0x60000000)
+#define	DESC_HDR_SEL0_KEU		cpu_to_be32(0x70000000)
+#define	DESC_HDR_SEL0_CRCU		cpu_to_be32(0x80000000)
 
 /* primary execution unit mode (MODE0) and derivatives */
-#define	DESC_HDR_MODE0_ENCRYPT		__constant_cpu_to_be32(0x00100000)
-#define	DESC_HDR_MODE0_AESU_CBC		__constant_cpu_to_be32(0x00200000)
-#define	DESC_HDR_MODE0_DEU_CBC		__constant_cpu_to_be32(0x00400000)
-#define	DESC_HDR_MODE0_DEU_3DES		__constant_cpu_to_be32(0x00200000)
-#define	DESC_HDR_MODE0_MDEU_INIT	__constant_cpu_to_be32(0x01000000)
-#define	DESC_HDR_MODE0_MDEU_HMAC	__constant_cpu_to_be32(0x00800000)
-#define	DESC_HDR_MODE0_MDEU_PAD		__constant_cpu_to_be32(0x00400000)
-#define	DESC_HDR_MODE0_MDEU_MD5		__constant_cpu_to_be32(0x00200000)
-#define	DESC_HDR_MODE0_MDEU_SHA256	__constant_cpu_to_be32(0x00100000)
-#define	DESC_HDR_MODE0_MDEU_SHA1	__constant_cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE0_ENCRYPT		cpu_to_be32(0x00100000)
+#define	DESC_HDR_MODE0_AESU_CBC		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_DEU_CBC		cpu_to_be32(0x00400000)
+#define	DESC_HDR_MODE0_DEU_3DES		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_MDEU_INIT	cpu_to_be32(0x01000000)
+#define	DESC_HDR_MODE0_MDEU_HMAC	cpu_to_be32(0x00800000)
+#define	DESC_HDR_MODE0_MDEU_PAD		cpu_to_be32(0x00400000)
+#define	DESC_HDR_MODE0_MDEU_MD5		cpu_to_be32(0x00200000)
+#define	DESC_HDR_MODE0_MDEU_SHA256	cpu_to_be32(0x00100000)
+#define	DESC_HDR_MODE0_MDEU_SHA1	cpu_to_be32(0x00000000)
 #define	DESC_HDR_MODE0_MDEU_MD5_HMAC	(DESC_HDR_MODE0_MDEU_MD5 | \
 					 DESC_HDR_MODE0_MDEU_HMAC)
 #define	DESC_HDR_MODE0_MDEU_SHA256_HMAC	(DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -162,18 +170,19 @@
 					 DESC_HDR_MODE0_MDEU_HMAC)
 
 /* secondary execution unit select (SEL1) */
-#define	DESC_HDR_SEL1_MASK		__constant_cpu_to_be32(0x000f0000)
-#define	DESC_HDR_SEL1_MDEUA		__constant_cpu_to_be32(0x00030000)
-#define	DESC_HDR_SEL1_MDEUB		__constant_cpu_to_be32(0x000b0000)
-#define	DESC_HDR_SEL1_CRCU		__constant_cpu_to_be32(0x00080000)
+#define	DESC_HDR_SEL1_MASK		cpu_to_be32(0x000f0000)
+#define	DESC_HDR_SEL1_MDEUA		cpu_to_be32(0x00030000)
+#define	DESC_HDR_SEL1_MDEUB		cpu_to_be32(0x000b0000)
+#define	DESC_HDR_SEL1_CRCU		cpu_to_be32(0x00080000)
 
 /* secondary execution unit mode (MODE1) and derivatives */
-#define	DESC_HDR_MODE1_MDEU_INIT	__constant_cpu_to_be32(0x00001000)
-#define	DESC_HDR_MODE1_MDEU_HMAC	__constant_cpu_to_be32(0x00000800)
-#define	DESC_HDR_MODE1_MDEU_PAD		__constant_cpu_to_be32(0x00000400)
-#define	DESC_HDR_MODE1_MDEU_MD5		__constant_cpu_to_be32(0x00000200)
-#define	DESC_HDR_MODE1_MDEU_SHA256	__constant_cpu_to_be32(0x00000100)
-#define	DESC_HDR_MODE1_MDEU_SHA1	__constant_cpu_to_be32(0x00000000)
+#define	DESC_HDR_MODE1_MDEU_CICV	cpu_to_be32(0x00004000)
+#define	DESC_HDR_MODE1_MDEU_INIT	cpu_to_be32(0x00001000)
+#define	DESC_HDR_MODE1_MDEU_HMAC	cpu_to_be32(0x00000800)
+#define	DESC_HDR_MODE1_MDEU_PAD		cpu_to_be32(0x00000400)
+#define	DESC_HDR_MODE1_MDEU_MD5		cpu_to_be32(0x00000200)
+#define	DESC_HDR_MODE1_MDEU_SHA256	cpu_to_be32(0x00000100)
+#define	DESC_HDR_MODE1_MDEU_SHA1	cpu_to_be32(0x00000000)
 #define	DESC_HDR_MODE1_MDEU_MD5_HMAC	(DESC_HDR_MODE1_MDEU_MD5 | \
 					 DESC_HDR_MODE1_MDEU_HMAC)
 #define	DESC_HDR_MODE1_MDEU_SHA256_HMAC	(DESC_HDR_MODE1_MDEU_SHA256 | \
@@ -182,16 +191,16 @@
 					 DESC_HDR_MODE1_MDEU_HMAC)
 
 /* direction of overall data flow (DIR) */
-#define	DESC_HDR_DIR_INBOUND		__constant_cpu_to_be32(0x00000002)
+#define	DESC_HDR_DIR_INBOUND		cpu_to_be32(0x00000002)
 
 /* request done notification (DN) */
-#define	DESC_HDR_DONE_NOTIFY		__constant_cpu_to_be32(0x00000001)
+#define	DESC_HDR_DONE_NOTIFY		cpu_to_be32(0x00000001)
 
 /* descriptor types */
-#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP	__constant_cpu_to_be32(0 << 3)
-#define DESC_HDR_TYPE_IPSEC_ESP		__constant_cpu_to_be32(1 << 3)
-#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU	__constant_cpu_to_be32(2 << 3)
-#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU	__constant_cpu_to_be32(4 << 3)
+#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP	cpu_to_be32(0 << 3)
+#define DESC_HDR_TYPE_IPSEC_ESP		cpu_to_be32(1 << 3)
+#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU	cpu_to_be32(2 << 3)
+#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU	cpu_to_be32(4 << 3)
 
 /* link table extent field bits */
 #define DESC_PTR_LNKTBL_JUMP			0x80
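The new `DESC_HDR_LO_ICCR1_*` values let `ipsec_esp_decrypt_hwauth_done()` read the integrity-check verdict the SEC writes back into the low header word, instead of comparing ICVs in software. A small sketch of that status decode follows; the constants mirror the talitos.h additions but are used here as plain host-endian values for simplicity, whereas the driver compares `__be32` quantities.

```c
#include <errno.h>
#include <stdio.h>

#define DESC_HDR_LO_ICCR1_MASK	0x00180000u
#define DESC_HDR_LO_ICCR1_PASS	0x00080000u
#define DESC_HDR_LO_ICCR1_FAIL	0x00100000u

/* mimics the check in ipsec_esp_decrypt_hwauth_done() */
static int icv_status(unsigned int hdr_lo)
{
	if ((hdr_lo & DESC_HDR_LO_ICCR1_MASK) != DESC_HDR_LO_ICCR1_PASS)
		return -EBADMSG;	/* authentication failed */
	return 0;
}

int main(void)
{
	printf("pass -> %d\n", icv_status(DESC_HDR_LO_ICCR1_PASS));
	printf("fail -> %d\n", icv_status(DESC_HDR_LO_ICCR1_FAIL));
	return 0;
}
```

Treating anything other than an explicit PASS as `-EBADMSG` means an unset or garbled status field also fails closed, which is why the decrypt path zeroes `hdr_lo` before submitting the descriptor.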