| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-05 12:12:33 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-05 12:12:33 -0700 |
| commit | b2c311075db578f1433d9b303698491bfa21279a (patch) | |
| tree | 41d5f1b5ad6f45be7211f524328de81f7e9754be /drivers/crypto | |
| parent | 45175476ae2dbebc860d5cf486f2916044343513 (diff) | |
| parent | 02c0241b600e4ab8a732c89749e252165145d60c (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- Do not idle omap device between crypto operations in one session.
- Added sha224/sha384 shims for SSSE3.
- More optimisations for camellia-aesni-avx2.
- Removed defunct blowfish/twofish AVX2 implementations.
- Added unaligned buffer self-tests (see the sketch after this list).
- Added PCLMULQDQ optimisation for CRCT10DIF.
- Added support for Freescale's DCP co-processor.
- Misc fixes.
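For context on the unaligned-buffer self-tests (the first testmgr commits in the list below): the new cases run each algorithm over buffers deliberately offset from their natural alignment, the kind of input SIMD implementations are most likely to mishandle. A minimal user-space sketch of the idea — hypothetical code, not the actual testmgr implementation; `digest_fn` stands in for whatever hash is under test:

```c
#include <stddef.h>
#include <string.h>

/* Run the same vector through the hash at several misaligned offsets;
 * a correct implementation must reproduce the aligned reference digest. */
static int check_unaligned(void (*digest_fn)(const unsigned char *, size_t,
					     unsigned char *),
			   const unsigned char *vec, size_t len,
			   size_t digest_len)
{
	unsigned char buf[256 + 8];
	unsigned char ref[64], out[64];
	size_t offset;

	if (len > 256 || digest_len > sizeof(ref))
		return -1;

	digest_fn(vec, len, ref);		/* aligned reference run */

	for (offset = 1; offset < 8; offset++) {
		memcpy(buf + offset, vec, len);	/* force a misaligned start */
		digest_fn(buf + offset, len, out);
		if (memcmp(out, ref, digest_len))
			return -1;	/* mishandles unaligned input */
	}
	return 0;
}
```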
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (44 commits)
crypto: testmgr - test hash implementations with unaligned buffers
crypto: testmgr - test AEADs with unaligned buffers
crypto: testmgr - test skciphers with unaligned buffers
crypto: testmgr - check that entries in alg_test_descs are in correct order
Revert "crypto: twofish - add AVX2/x86_64 assembler implementation of twofish cipher"
Revert "crypto: blowfish - add AVX2/x86_64 implementation of blowfish cipher"
crypto: camellia-aesni-avx2 - tune assembly code for more performance
hwrng: bcm2835 - fix MODULE_LICENSE tag
hwrng: nomadik - use clk_prepare_enable()
crypto: picoxcell - replace strict_strtoul() with kstrtoul()
crypto: dcp - Staticize local symbols
crypto: dcp - Use NULL instead of 0
crypto: dcp - Use devm_* APIs
crypto: dcp - Remove redundant platform_set_drvdata()
hwrng: use platform_{get,set}_drvdata()
crypto: omap-aes - Don't idle/start AES device between Encrypt operations
crypto: crct10dif - Use PTR_RET
crypto: ux500 - Cocci spatch "resource_size.spatch"
crypto: sha256_ssse3 - add sha224 support
crypto: sha512_ssse3 - add sha384 support
...
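The dcp commits above add Freescale's DCP co-processor driver (drivers/crypto/dcp.c in the diff below), including a "dcpboot" misc device that encrypts or decrypts a single 16-byte block in place with the on-chip OTP key. A hypothetical user-space sketch of that interface — the ioctl numbers mirror the driver's DBS_ENC/DBS_DEC definitions, and the /dev/dcpboot path assumes the usual udev naming for a misc device:

```c
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define DBS_IOCTL_BASE	'd'
#define DBS_ENC	_IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
#define DBS_DEC	_IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])

int main(void)
{
	uint8_t block[16] = { 0 };	/* one AES block, transformed in place */
	int fd = open("/dev/dcpboot", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DBS_ENC, block) != 0)	/* encrypt with the OTP key */
		perror("DBS_ENC");
	close(fd);
	return 0;
}
```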
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/Kconfig | 12
-rw-r--r-- | drivers/crypto/Makefile | 1
-rw-r--r-- | drivers/crypto/caam/ctrl.c | 10
-rw-r--r-- | drivers/crypto/caam/desc.h | 22
-rw-r--r-- | drivers/crypto/caam/desc_constr.h | 81
-rw-r--r-- | drivers/crypto/caam/pdb.h | 1
-rw-r--r-- | drivers/crypto/caam/regs.h | 42
-rw-r--r-- | drivers/crypto/dcp.c | 912
-rw-r--r-- | drivers/crypto/hifn_795x.c | 4
-rw-r--r-- | drivers/crypto/mv_cesa.c | 1
-rw-r--r-- | drivers/crypto/omap-aes.c | 36
-rw-r--r-- | drivers/crypto/omap-sham.c | 7
-rw-r--r-- | drivers/crypto/picoxcell_crypto.c | 2
-rw-r--r-- | drivers/crypto/s5p-sss.c | 2
-rw-r--r-- | drivers/crypto/ux500/cryp/cryp_core.c | 2
15 files changed, 1099 insertions, 36 deletions
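One note before the diff itself: the caam/ctrl.c hunk below stops keying RNG initialisation off a "fsl,sec-v5.0" compatible string and instead reads the 64-bit CHA version-ID register, checking the RNG block's 4-bit version field directly. A small sketch of that extraction, using the CHA_ID_RNG_* constants the patch adds to regs.h:

```c
#include <stdint.h>

/* From the regs.h hunk below: each accelerator block (CHA) reports a
 * 4-bit version inside the 64-bit cha_id register. */
#define CHA_ID_RNG_SHIFT	16
#define CHA_ID_RNG_MASK		(0xfull << CHA_ID_RNG_SHIFT)

/* RNG4-based SECs (RNG version >= 4) need the special instantiation. */
static inline int rng_needs_instantiation(uint64_t cha_vid)
{
	return ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT) >= 4;
}
```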
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dffb85525368..8ff7c230d82e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -278,7 +278,7 @@ config CRYPTO_DEV_PICOXCELL
 config CRYPTO_DEV_SAHARA
 	tristate "Support for SAHARA crypto accelerator"
-	depends on ARCH_MXC && EXPERIMENTAL && OF
+	depends on ARCH_MXC && OF
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES
 	select CRYPTO_ECB
@@ -286,6 +286,16 @@ config CRYPTO_DEV_SAHARA
 	  This option enables support for the SAHARA HW crypto accelerator
 	  found in some Freescale i.MX chips.
 
+config CRYPTO_DEV_DCP
+	tristate "Support for the DCP engine"
+	depends on ARCH_MXS && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	help
+	  This options enables support for the hardware crypto-acceleration
+	  capabilities of the DCP co-processor
+
 config CRYPTO_DEV_S5P
 	tristate "Support for Samsung S5PV210 crypto accelerator"
 	depends on ARCH_S5PV210
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 38ce13d3b79b..b4946ddd2550 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 6e94bcd94678..f5d6deced1cb 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -202,6 +202,7 @@ static int caam_probe(struct platform_device *pdev)
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
+	u64 cha_vid;
 
 	ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
 	if (!ctrlpriv)
@@ -293,11 +294,14 @@ static int caam_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+
 	/*
-	 * RNG4 based SECs (v5+) need special initialization prior
-	 * to executing any descriptors
+	 * If SEC has RNG version >= 4 and RNG state handle has not been
+	 * already instantiated, do RNG instantiation
 	 */
-	if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
+	if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
+	    !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
 		kick_trng(pdev);
 		ret = instantiate_rng(ctrlpriv->jrdev[0]);
 		if (ret) {
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f7f833be8c67..53b296f78b0d 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -231,7 +231,12 @@ struct sec4_sg_entry {
 #define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_N_SZ	(0x12 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_E_SZ	(0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX	(0x20 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_DESCBUF	(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB	(0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED	(0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE	(0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_INFO_FIFO	(0x7a << LDST_SRCDST_SHIFT)
 
 /* Offset in source/destination */
@@ -366,6 +371,7 @@ struct sec4_sg_entry {
 #define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_LASTBOTH	(0x06 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_LASTBOTHFL	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO	(0x0F << FIFOLD_TYPE_SHIFT)
 
 #define FIFOLDST_LEN_MASK	0xffff
 #define FIFOLDST_EXT_LEN_MASK	0xffffffff
@@ -1294,10 +1300,10 @@ struct sec4_sg_entry {
 #define SQOUT_SGF	0x01000000
 
 /* Appends to a previous pointer */
-#define SQOUT_PRE	0x00800000
+#define SQOUT_PRE	SQIN_PRE
 
 /* Restore sequence with pointer/length */
-#define SQOUT_RTO	0x00200000
+#define SQOUT_RTO	SQIN_RTO
 
 /* Use extended length following pointer */
 #define SQOUT_EXT	0x00400000
@@ -1359,6 +1365,7 @@ struct sec4_sg_entry {
 #define MOVE_DEST_MATH3		(0x07 << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS1INFIFO	(0x08 << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS2INFIFO	(0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO_NOINFO	(0x0a << MOVE_DEST_SHIFT)
 #define MOVE_DEST_PK_A		(0x0c << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS1KEY	(0x0d << MOVE_DEST_SHIFT)
 #define MOVE_DEST_CLASS2KEY	(0x0e << MOVE_DEST_SHIFT)
@@ -1411,6 +1418,7 @@ struct sec4_sg_entry {
 #define MATH_SRC0_REG2		(0x02 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_REG3		(0x03 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_IMM		(0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD	(0x07 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_SEQINLEN	(0x08 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_SEQOUTLEN	(0x09 << MATH_SRC0_SHIFT)
 #define MATH_SRC0_VARSEQINLEN	(0x0a << MATH_SRC0_SHIFT)
@@ -1425,6 +1433,7 @@ struct sec4_sg_entry {
 #define MATH_SRC1_REG2		(0x02 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_REG3		(0x03 << MATH_SRC1_SHIFT)
 #define MATH_SRC1_IMM		(0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD	(0x07 << MATH_SRC0_SHIFT)
 #define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
 #define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
 #define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)
@@ -1600,4 +1609,13 @@ struct sec4_sg_entry {
 #define NFIFOENTRY_PLEN_SHIFT	0
 #define NFIFOENTRY_PLEN_MASK	(0xFF << NFIFOENTRY_PLEN_SHIFT)
 
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE	0x80000000
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN	0x40000000
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC		0x20000000
+
 #endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index c85c1f058401..fe3bfd1b08ca 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -110,6 +110,26 @@ static inline void append_cmd(u32 *desc, u32 command)
 	(*desc)++;
 }
 
+#define append_u32 append_cmd
+
+static inline void append_u64(u32 *desc, u64 data)
+{
+	u32 *offset = desc_end(desc);
+
+	*offset = upper_32_bits(data);
+	*(++offset) = lower_32_bits(data);
+
+	(*desc) += 2;
+}
+
+/* Write command without affecting header, and return pointer to next word */
+static inline u32 *write_cmd(u32 *desc, u32 command)
+{
+	*desc = command;
+
+	return desc + 1;
+}
+
 static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
 				  u32 command)
 {
@@ -122,7 +142,8 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
 					 unsigned int len, u32 command)
 {
 	append_cmd(desc, command);
-	append_ptr(desc, ptr);
+	if (!(command & (SQIN_RTO | SQIN_PRE)))
+		append_ptr(desc, ptr);
 	append_cmd(desc, len);
 }
 
@@ -176,17 +197,36 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
 }
 APPEND_CMD_PTR(key, KEY)
 APPEND_CMD_PTR(load, LOAD)
-APPEND_CMD_PTR(store, STORE)
 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
 
+static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
+				u32 options)
+{
+	u32 cmd_src;
+
+	cmd_src = options & LDST_SRCDST_MASK;
+
+	append_cmd(desc, CMD_STORE | options | len);
+
+	/* The following options do not require pointer */
+	if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+	      cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+		append_ptr(desc, ptr);
+}
+
 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
 static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
 						 unsigned int len, \
 						 u32 options) \
 { \
 	PRINT_POS; \
-	append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+	if (options & (SQIN_RTO | SQIN_PRE)) \
+		append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+	else \
+		append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
 }
 APPEND_SEQ_PTR_INTLEN(in, IN)
 APPEND_SEQ_PTR_INTLEN(out, OUT)
@@ -259,7 +299,7 @@ APPEND_CMD_RAW_IMM(load, LOAD, u32);
  */
 #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
 append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
-	MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK));
+	MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
 
 #define append_math_add(desc, dest, src0, src1, len) \
 	APPEND_MATH(ADD, desc, dest, src0, src1, len)
@@ -279,6 +319,8 @@ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
 	APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
 #define append_math_rshift(desc, dest, src0, src1, len) \
 	APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+#define append_math_ldshift(desc, dest, src0, src1, len) \
+	APPEND_MATH(SHLD, desc, dest, src0, src1, len)
 
 /* Exactly one source is IMM. Data is passed in as u32 value */
 #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
@@ -305,3 +347,34 @@ do { \
 	APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
 #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
 	APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
+
+/* Exactly one source is IMM. Data is passed in as u64 value */
+#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
+do { \
+	u32 upper = (data >> 16) >> 16; \
+	APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
+		    (upper ? 0 : MATH_IFB)); \
+	if (upper) \
+		append_u64(desc, data); \
+	else \
+		append_u32(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 62950d22ac13..3a87c0cf879a 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -44,6 +44,7 @@
 #define PDBOPTS_ESP_IPHDRSRC	0x08 /* IP header comes from PDB (encap) */
 #define PDBOPTS_ESP_INCIPHDR	0x04 /* Prepend IP header to output frame */
 #define PDBOPTS_ESP_IPVSN	0x02 /* process IPv6 header */
+#define PDBOPTS_ESP_AOFL	0x04 /* adjust out frame len (decap, SEC>=5.3)*/
 #define PDBOPTS_ESP_TUNNEL	0x01 /* tunnel mode next-header byte */
 #define PDBOPTS_ESP_IPV6	0x02 /* ip header version is V6 */
 #define PDBOPTS_ESP_DIFFSERV	0x40 /* copy TOS/TC from inner iphdr */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index cd6fedad9935..c09142fc13e3 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -117,6 +117,43 @@ struct jr_outentry {
 #define CHA_NUM_DECONUM_SHIFT	56
 #define CHA_NUM_DECONUM_MASK	(0xfull << CHA_NUM_DECONUM_SHIFT)
 
+/* CHA Version IDs */
+#define CHA_ID_AES_SHIFT	0
+#define CHA_ID_AES_MASK		(0xfull << CHA_ID_AES_SHIFT)
+
+#define CHA_ID_DES_SHIFT	4
+#define CHA_ID_DES_MASK		(0xfull << CHA_ID_DES_SHIFT)
+
+#define CHA_ID_ARC4_SHIFT	8
+#define CHA_ID_ARC4_MASK	(0xfull << CHA_ID_ARC4_SHIFT)
+
+#define CHA_ID_MD_SHIFT		12
+#define CHA_ID_MD_MASK		(0xfull << CHA_ID_MD_SHIFT)
+
+#define CHA_ID_RNG_SHIFT	16
+#define CHA_ID_RNG_MASK		(0xfull << CHA_ID_RNG_SHIFT)
+
+#define CHA_ID_SNW8_SHIFT	20
+#define CHA_ID_SNW8_MASK	(0xfull << CHA_ID_SNW8_SHIFT)
+
+#define CHA_ID_KAS_SHIFT	24
+#define CHA_ID_KAS_MASK		(0xfull << CHA_ID_KAS_SHIFT)
+
+#define CHA_ID_PK_SHIFT		28
+#define CHA_ID_PK_MASK		(0xfull << CHA_ID_PK_SHIFT)
+
+#define CHA_ID_CRC_SHIFT	32
+#define CHA_ID_CRC_MASK		(0xfull << CHA_ID_CRC_SHIFT)
+
+#define CHA_ID_SNW9_SHIFT	36
+#define CHA_ID_SNW9_MASK	(0xfull << CHA_ID_SNW9_SHIFT)
+
+#define CHA_ID_DECO_SHIFT	56
+#define CHA_ID_DECO_MASK	(0xfull << CHA_ID_DECO_SHIFT)
+
+#define CHA_ID_JR_SHIFT		60
+#define CHA_ID_JR_MASK		(0xfull << CHA_ID_JR_SHIFT)
+
 struct sec_vid {
 	u16 ip_id;
 	u8 maj_rev;
@@ -228,7 +265,10 @@ struct rng4tst {
 		u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
 		u32 rtfrqcnt; /* PRGM=0: freq. count register */
 	};
-	u32 rsvd1[56];
+	u32 rsvd1[40];
+#define RDSTA_IF0 0x00000001
+	u32 rdsta;
+	u32 rsvd2[15];
 };
 
 /*
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
new file mode 100644
index 000000000000..a8a7dd4b0d25
--- /dev/null
+++ b/drivers/crypto/dcp.c
@@ -0,0 +1,912 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for DCP cryptographic accelerator.
+ *
+ * Copyright (c) 2013
+ * Author: Tobias Rauter <tobias.rauter@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/miscdevice.h>
+
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+
+
+/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK*/
+#define DBS_IOCTL_BASE	'd'
+#define DBS_ENC	_IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
+#define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
+
+/* DCP channel used for AES */
+#define USED_CHANNEL 1
+/* Ring Buffers' maximum size */
+#define DCP_MAX_PKG 20
+
+/* Control Register */
+#define DCP_REG_CTRL 0x000
+#define DCP_CTRL_SFRST (1<<31)
+#define DCP_CTRL_CLKGATE (1<<30)
+#define DCP_CTRL_CRYPTO_PRESENT (1<<29)
+#define DCP_CTRL_SHA_PRESENT (1<<28)
+#define DCP_CTRL_GATHER_RES_WRITE (1<<23)
+#define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22)
+#define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21)
+#define DCP_CTRL_CH_IRQ_E_0 0x01
+#define DCP_CTRL_CH_IRQ_E_1 0x02
+#define DCP_CTRL_CH_IRQ_E_2 0x04
+#define DCP_CTRL_CH_IRQ_E_3 0x08
+
+/* Status register */
+#define DCP_REG_STAT 0x010
+#define DCP_STAT_OTP_KEY_READY (1<<28)
+#define DCP_STAT_CUR_CHANNEL(stat) ((stat>>24)&0x0F)
+#define DCP_STAT_READY_CHANNEL(stat) ((stat>>16)&0x0F)
+#define DCP_STAT_IRQ(stat) (stat&0x0F)
+#define DCP_STAT_CHAN_0 (0x01)
+#define DCP_STAT_CHAN_1 (0x02)
+#define DCP_STAT_CHAN_2 (0x04)
+#define DCP_STAT_CHAN_3 (0x08)
+
+/* Channel Control Register */
+#define DCP_REG_CHAN_CTRL 0x020
+#define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16)
+#define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100)
+#define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200)
+#define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400)
+#define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800)
+#define DCP_CHAN_CTRL_ENABLE_0 (0x01)
+#define DCP_CHAN_CTRL_ENABLE_1 (0x02)
+#define DCP_CHAN_CTRL_ENABLE_2 (0x04)
+#define DCP_CHAN_CTRL_ENABLE_3 (0x08)
+
+/*
+ * Channel Registers:
+ * The DCP has 4 channels. Each of this channels
+ * has 4 registers (command pointer, semaphore, status and options).
+ * The address of register REG of channel CHAN is obtained by
+ * dcp_chan_reg(REG, CHAN)
+ */
+#define DCP_REG_CHAN_PTR	0x00000100
+#define DCP_REG_CHAN_SEMA	0x00000110
+#define DCP_REG_CHAN_STAT	0x00000120
+#define DCP_REG_CHAN_OPT	0x00000130
+
+#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0	0x010000
+#define DCP_CHAN_STAT_NO_CHAIN		0x020000
+#define DCP_CHAN_STAT_CONTEXT_ERROR	0x030000
+#define DCP_CHAN_STAT_PAYLOAD_ERROR	0x040000
+#define DCP_CHAN_STAT_INVALID_MODE	0x050000
+#define DCP_CHAN_STAT_PAGEFAULT		0x40
+#define DCP_CHAN_STAT_DST		0x20
+#define DCP_CHAN_STAT_SRC		0x10
+#define DCP_CHAN_STAT_PACKET		0x08
+#define DCP_CHAN_STAT_SETUP		0x04
+#define DCP_CHAN_STAT_MISMATCH		0x02
+
+/* hw packet control*/
+
+#define DCP_PKT_PAYLOAD_KEY	(1<<11)
+#define DCP_PKT_OTP_KEY		(1<<10)
+#define DCP_PKT_CIPHER_INIT	(1<<9)
+#define DCP_PKG_CIPHER_ENCRYPT	(1<<8)
+#define DCP_PKT_CIPHER_ENABLE	(1<<5)
+#define DCP_PKT_DECR_SEM	(1<<1)
+#define DCP_PKT_CHAIN		(1<<2)
+#define DCP_PKT_IRQ		1
+
+#define DCP_PKT_MODE_CBC	(1<<4)
+#define DCP_PKT_KEYSELECT_OTP	(0xFF<<8)
+
+/* cipher flags */
+#define DCP_ENC		0x0001
+#define DCP_DEC		0x0002
+#define DCP_ECB		0x0004
+#define DCP_CBC		0x0008
+#define DCP_CBC_INIT	0x0010
+#define DCP_NEW_KEY	0x0040
+#define DCP_OTP_KEY	0x0080
+#define DCP_AES		0x1000
+
+/* DCP Flags */
+#define DCP_FLAG_BUSY	0x01
+#define DCP_FLAG_PRODUCING	0x02
+
+/* clock defines */
+#define CLOCK_ON	1
+#define CLOCK_OFF	0
+
+struct dcp_dev_req_ctx {
+	int mode;
+};
+
+struct dcp_op {
+	unsigned int		flags;
+	u8			key[AES_KEYSIZE_128];
+	int			keylen;
+
+	struct ablkcipher_request	*req;
+	struct crypto_ablkcipher	*fallback;
+
+	uint32_t stat;
+	uint32_t pkt1;
+	uint32_t pkt2;
+	struct ablkcipher_walk walk;
+};
+
+struct dcp_dev {
+	struct device *dev;
+	void __iomem *dcp_regs_base;
+
+	int dcp_vmi_irq;
+	int dcp_irq;
+
+	spinlock_t queue_lock;
+	struct crypto_queue queue;
+
+	uint32_t pkt_produced;
+	uint32_t pkt_consumed;
+
+	struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
+	dma_addr_t hw_phys_pkg;
+
+	/* [KEY][IV] Both with 16 Bytes */
+	u8 *payload_base;
+	dma_addr_t payload_base_dma;
+
+
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
+	struct timer_list	watchdog;
+
+	unsigned long		flags;
+
+	struct dcp_op *ctx;
+
+	struct miscdevice dcp_bootstream_misc;
+};
+
+struct dcp_hw_packet {
+	uint32_t next;
+	uint32_t pkt1;
+	uint32_t pkt2;
+	uint32_t src;
+	uint32_t dst;
+	uint32_t size;
+	uint32_t payload;
+	uint32_t stat;
+};
+
+static struct dcp_dev *global_dev;
+
+static inline u32 dcp_chan_reg(u32 reg, int chan)
+{
+	return reg + (chan) * 0x40;
+}
+
+static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
+{
+	writel(data, dev->dcp_regs_base + reg);
+}
+
+static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
+{
+	writel(data, dev->dcp_regs_base + (reg | 0x04));
+}
+
+static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
+{
+	writel(data, dev->dcp_regs_base + (reg | 0x08));
+}
+
+static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
+{
+	writel(data, dev->dcp_regs_base + (reg | 0x0C));
+}
+
+static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
+{
+	return readl(dev->dcp_regs_base + reg);
+}
+
+static void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
+{
+	dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
+	dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
+	dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt);
+}
+
+static int dcp_dma_map(struct dcp_dev *dev,
+	struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
+{
+	dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
+	/* align to length = 16 */
+	pkt->size = walk->nbytes - (walk->nbytes % 16);
+
+	pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
+		pkt->size, DMA_TO_DEVICE);
+
+	if (pkt->src == 0) {
+		dev_err(dev->dev, "Unable to map src");
+		return -ENOMEM;
+	}
+
+	pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
+		pkt->size, DMA_FROM_DEVICE);
+
+	if (pkt->dst == 0) {
+		dev_err(dev->dev, "Unable to map dst");
+		dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
+			uint8_t last)
+{
+	struct dcp_op *ctx = dev->ctx;
+	pkt->pkt1 = ctx->pkt1;
+	pkt->pkt2 = ctx->pkt2;
+
+	pkt->payload = (u32) dev->payload_base_dma;
+	pkt->stat = 0;
+
+	if (ctx->flags & DCP_CBC_INIT) {
+		pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
+		ctx->flags &= ~DCP_CBC_INIT;
+	}
+
+	mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
+	pkt->pkt1 |= DCP_PKT_IRQ;
+	if (!last)
+		pkt->pkt1 |= DCP_PKT_CHAIN;
+
+	dev->pkt_produced++;
+
+	dcp_write(dev, 1,
+		dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
+}
+
+static void dcp_op_proceed(struct dcp_dev *dev)
+{
+	struct dcp_op *ctx = dev->ctx;
+	struct dcp_hw_packet *pkt;
+
+	while (ctx->walk.nbytes) {
+		int err = 0;
+
+		pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
+		err = dcp_dma_map(dev, &ctx->walk, pkt);
+		if (err) {
+			dev->ctx->stat |= err;
+			/* start timer to wait for already set up calls */
+			mod_timer(&dev->watchdog,
+				jiffies + msecs_to_jiffies(500));
+			break;
+		}
+
+
+		err = ctx->walk.nbytes - pkt->size;
+		ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);
+
+		dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
+		/* we have to wait if no space is left in buffer */
+		if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
+			break;
+	}
+	clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
+}
+
+static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
+{
+	struct dcp_op *ctx = dev->ctx;
+
+	if (ctx->flags & DCP_NEW_KEY) {
+		memcpy(dev->payload_base, ctx->key, ctx->keylen);
+		ctx->flags &= ~DCP_NEW_KEY;
+	}
+
+	ctx->pkt1 = 0;
+	ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
+	ctx->pkt1 |= DCP_PKT_DECR_SEM;
+
+	if (ctx->flags & DCP_OTP_KEY)
+		ctx->pkt1 |= DCP_PKT_OTP_KEY;
+	else
+		ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;
+
+	if (ctx->flags & DCP_ENC)
+		ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;
+
+	ctx->pkt2 = 0;
+	if (ctx->flags & DCP_CBC)
+		ctx->pkt2 |= DCP_PKT_MODE_CBC;
+
+	dev->pkt_produced = 0;
+	dev->pkt_consumed = 0;
+
+	ctx->stat = 0;
+	dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
+	dcp_write(dev, (u32) dev->hw_phys_pkg,
+		dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));
+
+	set_bit(DCP_FLAG_PRODUCING, &dev->flags);
+
+	if (use_walk) {
+		ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
+				ctx->req->src, ctx->req->nbytes);
+		ablkcipher_walk_phys(ctx->req, &ctx->walk);
+		dcp_op_proceed(dev);
+	} else {
+		dcp_op_one(dev, dev->hw_pkg[0], 1);
+		clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
+	}
+}
+
+static void dcp_done_task(unsigned long data)
+{
+	struct dcp_dev *dev = (struct dcp_dev *)data;
+	struct dcp_hw_packet *last_packet;
+	int fin;
+	fin = 0;
+
+	for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
+		last_packet->stat == 1;
+		last_packet =
+			dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {
+
+		dcp_dma_unmap(dev, last_packet);
+		last_packet->stat = 0;
+		fin++;
+	}
+	/* the last call of this function already consumed this IRQ's packet */
+	if (fin == 0)
+		return;
+
+	dev_dbg(dev->dev,
+		"Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d",
+		dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);
+
+	last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
+	if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
+		if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
+			dcp_op_proceed(dev);
+		return;
+	}
+
+	while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
+		dcp_dma_unmap(dev,
+			dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
+	}
+
+	if (dev->ctx->flags & DCP_OTP_KEY) {
+		/* we used the miscdevice, no walk to finish */
+		clear_bit(DCP_FLAG_BUSY, &dev->flags);
+		return;
+	}
+
+	ablkcipher_walk_complete(&dev->ctx->walk);
+	dev->ctx->req->base.complete(&dev->ctx->req->base,
+			dev->ctx->stat);
+	dev->ctx->req = NULL;
+	/* in case there are other requests in the queue */
+	tasklet_schedule(&dev->queue_task);
+}
+
+static void dcp_watchdog(unsigned long data)
+{
+	struct dcp_dev *dev = (struct dcp_dev *)data;
+	dev->ctx->stat |= dcp_read(dev,
+			dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
+
+	dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);
+
+	if (!dev->ctx->stat)
+		dev->ctx->stat = -ETIMEDOUT;
+
+	dcp_done_task(data);
+}
+
+
+static irqreturn_t dcp_common_irq(int irq, void *context)
+{
+	u32 msk;
+	struct dcp_dev *dev = (struct dcp_dev *) context;
+
+	del_timer(&dev->watchdog);
+
+	msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
+	dcp_clear(dev, msk, DCP_REG_STAT);
+	if (msk == 0)
+		return IRQ_NONE;
+
+	dev->ctx->stat |= dcp_read(dev,
+			dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
+
+	if (msk & DCP_STAT_CHAN_1)
+		tasklet_schedule(&dev->done_task);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dcp_vmi_irq(int irq, void *context)
+{
+	return dcp_common_irq(irq, context);
+}
+
+static irqreturn_t dcp_irq(int irq, void *context)
+{
+	return dcp_common_irq(irq, context);
+}
+
+static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
+{
+	dev->ctx = ctx;
+
+	if ((ctx->flags & DCP_CBC) && ctx->req->info) {
+		ctx->flags |= DCP_CBC_INIT;
+		memcpy(dev->payload_base + AES_KEYSIZE_128,
+			ctx->req->info, AES_KEYSIZE_128);
+	}
+
+	dcp_op_start(dev, 1);
+}
+
+static void dcp_queue_task(unsigned long data)
+{
+	struct dcp_dev *dev = (struct dcp_dev *) data;
+	struct crypto_async_request *async_req, *backlog;
+	struct crypto_ablkcipher *tfm;
+	struct dcp_op *ctx;
+	struct dcp_dev_req_ctx *rctx;
+	struct ablkcipher_request *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->queue_lock, flags);
+
+	backlog = crypto_get_backlog(&dev->queue);
+	async_req = crypto_dequeue_request(&dev->queue);
+
+	spin_unlock_irqrestore(&dev->queue_lock, flags);
+
+	if (!async_req)
+		goto ret_nothing_done;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	req = ablkcipher_request_cast(async_req);
+	tfm = crypto_ablkcipher_reqtfm(req);
+	rctx = ablkcipher_request_ctx(req);
+	ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (!req->src || !req->dst)
+		goto ret_nothing_done;
+
+	ctx->flags |= rctx->mode;
+	ctx->req = req;
+
+	dcp_crypt(dev, ctx);
+
+	return;
+
+ret_nothing_done:
+	clear_bit(DCP_FLAG_BUSY, &dev->flags);
+}
+
+
+static int dcp_cra_init(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct dcp_op *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);
+
+	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(ctx->fallback)) {
+		dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
+			name);
+		return PTR_ERR(ctx->fallback);
+	}
+
+	return 0;
+}
+
+static void dcp_cra_exit(struct crypto_tfm *tfm)
+{
+	struct dcp_op *ctx = crypto_tfm_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_ablkcipher(ctx->fallback);
+
+	ctx->fallback = NULL;
+}
+
+/* async interface */
+static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
+	unsigned int ret = 0;
+	ctx->keylen = len;
+	ctx->flags = 0;
+	if (len == AES_KEYSIZE_128) {
+		if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
+			memcpy(ctx->key, key, len);
+			ctx->flags |= DCP_NEW_KEY;
+		}
+		return 0;
+	}
+
+	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	ctx->fallback->base.crt_flags |=
+		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
+	if (ret) {
+		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
+
+		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm_aux->crt_flags |=
+			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
+{
+	struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct dcp_dev *dev = global_dev;
+	unsigned long flags;
+	int err = 0;
+
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
+		return -EINVAL;
+
+	rctx->mode = mode;
+
+	spin_lock_irqsave(&dev->queue_lock, flags);
+	err = ablkcipher_enqueue_request(&dev->queue, req);
+	spin_unlock_irqrestore(&dev->queue_lock, flags);
+
+	flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);
+
+	if (!(flags & DCP_FLAG_BUSY))
+		tasklet_schedule(&dev->queue_task);
+
+	return err;
+}
+
+static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_tfm *tfm =
+		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct dcp_op *ctx = crypto_ablkcipher_ctx(
+		crypto_ablkcipher_reqtfm(req));
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+		int err = 0;
+		ablkcipher_request_set_tfm(req, ctx->fallback);
+		err = crypto_ablkcipher_encrypt(req);
+		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+		return err;
+	}
+
+	return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
+}
+
+static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_tfm *tfm =
+		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+	struct dcp_op *ctx = crypto_ablkcipher_ctx(
+		crypto_ablkcipher_reqtfm(req));
+
+	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+		int err = 0;
+		ablkcipher_request_set_tfm(req, ctx->fallback);
+		err = crypto_ablkcipher_decrypt(req);
+		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+		return err;
+	}
+	return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
+}
+
+static struct crypto_alg algs[] = {
+	{
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "dcp-cbc-aes",
+		.cra_alignmask = 3,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+			  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize = AES_KEYSIZE_128,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_priority = 300,
+		.cra_u.ablkcipher = {
+			.min_keysize = AES_KEYSIZE_128,
+			.max_keysize = AES_KEYSIZE_128,
+			.setkey = dcp_aes_setkey,
+			.encrypt = dcp_aes_cbc_encrypt,
+			.decrypt = dcp_aes_cbc_decrypt,
+			.ivsize = AES_KEYSIZE_128,
+		}
+
+	},
+};
+
+/* DCP bootstream verification interface: uses OTP key for crypto */
+static int dcp_bootstream_open(struct inode *inode, struct file *file)
+{
+	file->private_data = container_of((file->private_data),
+			struct dcp_dev, dcp_bootstream_misc);
+	return 0;
+}
+
+static long dcp_bootstream_ioctl(struct file *file,
+					 unsigned int cmd, unsigned long arg)
+{
+	struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
+	void __user *argp = (void __user *)arg;
+	int ret;
+
+	if (dev == NULL)
+		return -EBADF;
+
+	if (cmd != DBS_ENC && cmd != DBS_DEC)
+		return -EINVAL;
+
+	if (copy_from_user(dev->payload_base, argp, 16))
+		return -EFAULT;
+
+	if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
+		return -EAGAIN;
+
+	dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
+	if (!dev->ctx) {
+		dev_err(dev->dev,
+			"cannot allocate context for OTP crypto");
+		clear_bit(DCP_FLAG_BUSY, &dev->flags);
+		return -ENOMEM;
+	}
+
+	dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
+	dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
+	dev->hw_pkg[0]->src = dev->payload_base_dma;
+	dev->hw_pkg[0]->dst = dev->payload_base_dma;
+	dev->hw_pkg[0]->size = 16;
+
+	dcp_op_start(dev, 0);
+
+	while (test_bit(DCP_FLAG_BUSY, &dev->flags))
+		cpu_relax();
+
+	ret = dev->ctx->stat;
+	if (!ret && copy_to_user(argp, dev->payload_base, 16))
+		ret = -EFAULT;
+
+	kfree(dev->ctx);
+
+	return ret;
+}
+
+static const struct file_operations dcp_bootstream_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = dcp_bootstream_ioctl,
+	.open = dcp_bootstream_open,
+};
+
+static int dcp_probe(struct platform_device *pdev)
+{
+	struct dcp_dev *dev = NULL;
+	struct resource *r;
+	int i, ret, j;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	global_dev = dev;
+	dev->dev = &pdev->dev;
+
+	platform_set_drvdata(pdev, dev);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
+		return -ENXIO;
+	}
+	dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
+					  resource_size(r));
+
+	dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
+	udelay(10);
+	dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);
+
+	dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
+		DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
+		DCP_REG_CTRL);
+
+	dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);
+
+	for (i = 0; i < 4; i++)
+		dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));
+
+	dcp_clear(dev, -1, DCP_REG_STAT);
+
+
+	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
+		return -EIO;
+	}
+	dev->dcp_vmi_irq = r->start;
+	ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "can't request_irq (0)\n");
+		return -EIO;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	if (!r) {
+		dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
+		ret = -EIO;
+		goto err_free_irq0;
+	}
+	dev->dcp_irq = r->start;
+	ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "can't request_irq (1)\n");
+		ret = -EIO;
+		goto err_free_irq0;
+	}
+
+	dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
+			DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+			&dev->hw_phys_pkg,
+			GFP_KERNEL);
+	if (!dev->hw_pkg[0]) {
+		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
+		ret = -ENOMEM;
+		goto err_free_irq1;
+	}
+
+	for (i = 1; i < DCP_MAX_PKG; i++) {
+		dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg +
+				i * sizeof(struct dcp_hw_packet);
+		dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
+	}
+	dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;
+
+
+	dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+			&dev->payload_base_dma, GFP_KERNEL);
+	if (!dev->payload_base) {
+		dev_err(&pdev->dev, "Could not allocate memory for key\n");
+		ret = -ENOMEM;
+		goto err_free_hw_packet;
+	}
+	tasklet_init(&dev->queue_task, dcp_queue_task,
+		(unsigned long) dev);
+	tasklet_init(&dev->done_task, dcp_done_task,
+		(unsigned long) dev);
+	spin_lock_init(&dev->queue_lock);
+
+	crypto_init_queue(&dev->queue, 10);
+
+	init_timer(&dev->watchdog);
+	dev->watchdog.function = &dcp_watchdog;
+	dev->watchdog.data = (unsigned long)dev;
+
+	dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR,
+	dev->dcp_bootstream_misc.name = "dcpboot",
+	dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops,
+	ret = misc_register(&dev->dcp_bootstream_misc);
+	if (ret != 0) {
+		dev_err(dev->dev, "Unable to register misc device\n");
+		goto err_free_key_iv;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(algs); i++) {
+		algs[i].cra_priority = 300;
+		algs[i].cra_ctxsize = sizeof(struct dcp_op);
+		algs[i].cra_module = THIS_MODULE;
+		algs[i].cra_init = dcp_cra_init;
+		algs[i].cra_exit = dcp_cra_exit;
+		if (crypto_register_alg(&algs[i])) {
+			dev_err(&pdev->dev, "register algorithm failed\n");
+			ret = -ENOMEM;
+			goto err_unregister;
+		}
+	}
+	dev_notice(&pdev->dev, "DCP crypto enabled.!\n");
+
+	return 0;
+
+err_unregister:
+	for (j = 0; j < i; j++)
+		crypto_unregister_alg(&algs[j]);
+err_free_key_iv:
+	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+			dev->payload_base_dma);
+err_free_hw_packet:
+	dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
+		sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
+		dev->hw_phys_pkg);
+err_free_irq1:
+	free_irq(dev->dcp_irq, dev);
+err_free_irq0:
+	free_irq(dev->dcp_vmi_irq, dev);
+
+	return ret;
+}
+
+static int dcp_remove(struct platform_device *pdev)
+{
+	struct dcp_dev *dev;
+	int j;
+	dev = platform_get_drvdata(pdev);
+
+	dma_free_coherent(&pdev->dev,
+			DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+			dev->hw_pkg[0], dev->hw_phys_pkg);
+
+	dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+			dev->payload_base_dma);
+
+	free_irq(dev->dcp_irq, dev);
+	free_irq(dev->dcp_vmi_irq, dev);
+
+	tasklet_kill(&dev->done_task);
+	tasklet_kill(&dev->queue_task);
+
+	for (j = 0; j < ARRAY_SIZE(algs); j++)
+		crypto_unregister_alg(&algs[j]);
+
+	misc_deregister(&dev->dcp_bootstream_misc);
+
+	return 0;
+}
+
+static struct of_device_id fs_dcp_of_match[] = {
+	{	.compatible = "fsl-dcp"},
+	{},
+};
+
+static struct platform_driver fs_dcp_driver = {
+	.probe = dcp_probe,
+	.remove = dcp_remove,
+	.driver = {
+		.name = "fsl-dcp",
+		.owner = THIS_MODULE,
+		.of_match_table = fs_dcp_of_match
+	}
+};
+
+module_platform_driver(fs_dcp_driver);
+
+
+MODULE_AUTHOR("Tobias Rauter <tobias.rauter@gmail.com>");
+MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index ebf130e894b5..12fea3e22348 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2676,7 +2676,7 @@ err_out_stop_device:
 	hifn_reset_dma(dev, 1);
 	hifn_stop_device(dev);
 err_out_free_irq:
-	free_irq(dev->irq, dev->name);
+	free_irq(dev->irq, dev);
 	tasklet_kill(&dev->tasklet);
 err_out_free_desc:
 	pci_free_consistent(pdev, sizeof(struct hifn_dma),
@@ -2711,7 +2711,7 @@ static void hifn_remove(struct pci_dev *pdev)
 		hifn_reset_dma(dev, 1);
 		hifn_stop_device(dev);
 
-		free_irq(dev->irq, dev->name);
+		free_irq(dev->irq, dev);
 		tasklet_kill(&dev->tasklet);
 
 		hifn_flush(dev);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index ce6290e5471a..3374a3ebe4c7 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1146,7 +1146,6 @@ err_unmap_reg:
 err:
 	kfree(cp);
 	cpg = NULL;
-	platform_set_drvdata(pdev, NULL);
 	return ret;
 }
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ee15b0f7849a..5f7980586850 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -203,13 +203,6 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-	/*
-	 * clocks are enabled when request starts and disabled when finished.
-	 * It may be long delays between requests.
-	 * Device might go to off mode to save power.
-	 */
-	pm_runtime_get_sync(dd->dev);
-
 	if (!(dd->flags & FLAGS_INIT)) {
 		dd->flags |= FLAGS_INIT;
 		dd->err = 0;
@@ -636,7 +629,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
 	pr_debug("err: %d\n", err);
 
-	pm_runtime_put(dd->dev);
 	dd->flags &= ~FLAGS_BUSY;
 
 	req->base.complete(&req->base, err);
@@ -837,8 +829,16 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
 
 static int omap_aes_cra_init(struct crypto_tfm *tfm)
 {
-	pr_debug("enter\n");
+	struct omap_aes_dev *dd = NULL;
+
+	/* Find AES device, currently picks the first device */
+	spin_lock_bh(&list_lock);
+	list_for_each_entry(dd, &dev_list, list) {
+		break;
+	}
+	spin_unlock_bh(&list_lock);
 
+	pm_runtime_get_sync(dd->dev);
 	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
 
 	return 0;
@@ -846,7 +846,16 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
 
 static void omap_aes_cra_exit(struct crypto_tfm *tfm)
 {
-	pr_debug("enter\n");
+	struct omap_aes_dev *dd = NULL;
+
+	/* Find AES device, currently picks the first device */
+	spin_lock_bh(&list_lock);
+	list_for_each_entry(dd, &dev_list, list) {
+		break;
+	}
+	spin_unlock_bh(&list_lock);
+
+	pm_runtime_put_sync(dd->dev);
 }
 
 /* ********************** ALGS ************************************ */
@@ -1125,10 +1134,9 @@ static int omap_aes_probe(struct platform_device *pdev)
 	if (err)
 		goto err_res;
 
-	dd->io_base = devm_request_and_ioremap(dev, &res);
-	if (!dd->io_base) {
-		dev_err(dev, "can't ioremap\n");
-		err = -ENOMEM;
+	dd->io_base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
 		goto err_res;
 	}
 	dd->phys_base = res.start;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a1e1b4756ee5..4bb67652c200 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1686,10 +1686,9 @@ static int omap_sham_probe(struct platform_device *pdev)
 	if (err)
 		goto res_err;
 
-	dd->io_base = devm_request_and_ioremap(dev, &res);
-	if (!dd->io_base) {
-		dev_err(dev, "can't ioremap\n");
-		err = -ENOMEM;
+	dd->io_base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
 		goto res_err;
 	}
 	dd->phys_base = res.start;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index ac30724d923d..888f7f4a6d3f 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1298,7 +1298,7 @@ static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
 	struct spacc_engine *engine = spacc_dev_to_engine(dev);
 	unsigned long thresh;
 
-	if (strict_strtoul(buf, 0, &thresh))
+	if (kstrtoul(buf, 0, &thresh))
 		return -EINVAL;
 
 	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 4b314326f48a..cf149b19ff47 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -647,7 +647,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	clk_disable(pdata->clk);
 
 	s5p_dev = NULL;
-	platform_set_drvdata(pdev, NULL);
 
 	return err;
 }
@@ -668,7 +667,6 @@ static int s5p_aes_remove(struct platform_device *pdev)
 	clk_disable(pdata->clk);
 
 	s5p_dev = NULL;
-	platform_set_drvdata(pdev, NULL);
 
 	return 0;
 }
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 83d79b964d12..a999f537228f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1629,7 +1629,7 @@ static int ux500_cryp_remove(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res)
-		release_mem_region(res->start, res->end - res->start + 1);
+		release_mem_region(res->start, resource_size(res));
 
 	kfree(device_data);
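As a closing note, the final ux500 hunk above replaces an open-coded region length with resource_size(). The helper, as defined in include/linux/ioport.h, computes exactly the expression it replaces; resource ranges are inclusive of both endpoints, hence the + 1:

```c
/* Kernel definition, for reference: */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}
```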