author      Anish Trivedi <anish@freescale.com>          2010-02-23 15:13:11 -0600
committer   Alejandro Gonzalez <alex.gonzalez@digi.com>  2010-05-25 11:17:15 +0200
commit      65f3cd8afd4803654736df00601d7d951d62cfc2 (patch)
tree        35fe4c270ac3741a08a3f2fb30e4ea3ef576ce12
parent      4d658511fd473c68173b27ea016a7003f0ff2312 (diff)
ENGR00117755 MX28: DCP
Driver for DCP operations: AES, SHA1, and SHA256

Signed-off-by: Anish Trivedi <anish@freescale.com>
Signed-off-by: Alejandro Gonzalez <alex.gonzalez@digi.com>
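For context (not part of this patch): a minimal sketch of how an in-kernel
consumer could reach the "cbc(aes)" implementation this driver registers
(priority 400, so it is preferred over the generic software cipher), using the
synchronous blkcipher API of this kernel generation. The function name, key,
IV, and buffer contents are illustrative assumptions only.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int dcp_cbc_aes_example(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* AES-128, the only key size the DCP handles in hardware */
	u8 iv[16] = { 0 };
	u8 buf[16] = { 0 };	/* one AES block, encrypted in place */
	int ret;

	/* resolves to "dcp-cbc-aes" once this driver has registered */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
	sg_init_one(&sg, buf, sizeof(buf));
	ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
out:
	crypto_free_blkcipher(tfm);
	return ret;
}

Larger keys fall back to the software implementation via
CRYPTO_ALG_NEED_FALLBACK, and the SHA1/SHA256 hashes are reached the same way
through the shash interface ("sha1-dcp", "sha256-dcp").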
-rw-r--r--  arch/arm/configs/imx28evk_defconfig    57
-rw-r--r--  arch/arm/mach-mx28/device.c            38
-rw-r--r--  arch/arm/plat-mxs/device.c             20
-rw-r--r--  drivers/crypto/Kconfig                 16
-rw-r--r--  drivers/crypto/Makefile                 1
-rw-r--r--  drivers/crypto/dcp.c                 1547
-rw-r--r--  drivers/crypto/dcp.h                  712
7 files changed, 2360 insertions, 31 deletions
diff --git a/arch/arm/configs/imx28evk_defconfig b/arch/arm/configs/imx28evk_defconfig
index 54e4ca945f4b..83dd3de8426c 100644
--- a/arch/arm/configs/imx28evk_defconfig
+++ b/arch/arm/configs/imx28evk_defconfig
@@ -1478,9 +1478,8 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
@@ -1489,12 +1488,12 @@ CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_NULL is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_AUTHENC=m
-# CONFIG_CRYPTO_TEST is not set
-# CONFIG_CRYPTO_CRYPTODEV is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CRYPTODEV=y
#
# Authenticated Encryption with Associated Data
@@ -1506,10 +1505,10 @@ CONFIG_CRYPTO_AUTHENC=m
#
# Block modes
#
-CONFIG_CRYPTO_CBC=m
+CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_CTR is not set
# CONFIG_CRYPTO_CTS is not set
-# CONFIG_CRYPTO_ECB is not set
+CONFIG_CRYPTO_ECB=y
# CONFIG_CRYPTO_LRW is not set
# CONFIG_CRYPTO_PCBC is not set
# CONFIG_CRYPTO_XTS is not set
@@ -1523,53 +1522,53 @@ CONFIG_CRYPTO_HMAC=y
#
# Digest
#
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
# CONFIG_CRYPTO_RMD256 is not set
# CONFIG_CRYPTO_RMD320 is not set
-CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
+# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_TGR192 is not set
-CONFIG_CRYPTO_WP512=m
+# CONFIG_CRYPTO_WP512 is not set
#
# Ciphers
#
-CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANUBIS is not set
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_CAMELLIA is not set
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_SALSA20 is not set
# CONFIG_CRYPTO_SEED is not set
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
#
# Compression
#
-CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_ZLIB is not set
-# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_LZO=y
#
# Random Number Generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
-CONFIG_BINARY_PRINTF=y
+CONFIG_CRYPTO_DEV_DCP=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/arm/mach-mx28/device.c b/arch/arm/mach-mx28/device.c
index 9b2e98c3e656..e618bc5b0503 100644
--- a/arch/arm/mach-mx28/device.c
+++ b/arch/arm/mach-mx28/device.c
@@ -855,6 +855,43 @@ static inline void mx28_init_flexcan(void)
}
#endif
+#if defined(CONFIG_CRYPTO_DEV_DCP)
+
+static struct resource dcp_resources[] = {
+
+ {
+ .flags = IORESOURCE_MEM,
+ .start = DCP_PHYS_ADDR,
+ .end = DCP_PHYS_ADDR + 0x2000 - 1,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_DCP_VMI,
+ .end = IRQ_DCP_VMI,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ .start = IRQ_DCP,
+ .end = IRQ_DCP,
+ },
+};
+
+static void __init mx28_init_dcp(void)
+{
+ struct platform_device *pdev;
+
+ pdev = mxs_get_device("dcp", 0);
+ if (pdev == NULL || IS_ERR(pdev))
+ return;
+ pdev->resource = dcp_resources;
+ pdev->num_resources = ARRAY_SIZE(dcp_resources);
+ mxs_add_device(pdev, 3);
+}
+#else
+static void __init mx28_init_dcp(void)
+{
+ ;
+}
+#endif
+
int __init mx28_device_init(void)
{
mx28_init_dma();
@@ -870,6 +907,7 @@ int __init mx28_device_init(void)
mx28_init_kbd();
mx28_init_ts();
mx28_init_lcdif();
+ mx28_init_dcp();
return 0;
}
diff --git a/arch/arm/plat-mxs/device.c b/arch/arm/plat-mxs/device.c
index 9a31b0a609cd..a87e6d2904d5 100644
--- a/arch/arm/plat-mxs/device.c
+++ b/arch/arm/plat-mxs/device.c
@@ -338,6 +338,18 @@ static struct platform_device mxs_flexcan[] = {
};
#endif
+#if defined(CONFIG_CRYPTO_DEV_DCP)
+static struct platform_device mxs_dcp = {
+ .name = "dcp",
+ .id = 0,
+ .dev = {
+ .release = mxs_nop_release,
+ .dma_mask = &common_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+#endif
+
static struct mxs_dev_lookup dev_lookup[] = {
#if defined(CONFIG_SERIAL_MXS_DUART) || \
defined(CONFIG_SERIAL_MXS_DUART_MODULE)
@@ -460,6 +472,14 @@ static struct mxs_dev_lookup dev_lookup[] = {
.pdev = mxs_flexcan,
},
#endif
+
+#if defined(CONFIG_CRYPTO_DEV_DCP)
+ {
+ .name = "dcp",
+ .size = 1,
+ .pdev = &mxs_dcp,
+ },
+#endif
};
struct platform_device *mxs_get_device(char *name, int id)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0b9f988564c1..f888701b05c2 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -215,8 +215,20 @@ config CRYPTO_DEV_STMP3XXX_DCP
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
- Say 'Y' here to use the STMP3XXX DCP AES
- engine for the CryptoAPI AES algorithm.
+ Say 'Y' here to use the STMP3XXX DCP AES and SHA1
+ engine for the CryptoAPI algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stmp3xxx_dcp.
+
+config CRYPTO_DEV_DCP
+ tristate "Support for the DCP engine"
+ depends on ARCH_MX28
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ Say 'Y' here to use the MX28 DCP AES, SHA1, and SHA256
+ engine for the CryptoAPI algorithms.
To compile this driver as a module, choose M here: the module
will be called dcp.
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f69ef96f599e..0d5028b57f04 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_STMP3XXX_DCP) += stmp3xxx_dcp.o
+obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
\ No newline at end of file
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
new file mode 100644
index 000000000000..9a43c2482876
--- /dev/null
+++ b/drivers/crypto/dcp.c
@@ -0,0 +1,1547 @@
+/*
+ * Copyright (C) 2008-2010 Freescale Semiconductor, Inc.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+/*
+ * Based on geode-aes.c
+ * Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+
+#include "dcp.h"
+
+struct dcp {
+ struct device *dev;
+ spinlock_t lock;
+ struct mutex op_mutex[DCP_NUM_CHANNELS];
+ struct completion op_wait[DCP_NUM_CHANNELS];
+ int wait[DCP_NUM_CHANNELS];
+ int dcp_vmi_irq;
+ int dcp_irq;
+ u32 dcp_regs_base;
+
+ /* Following buffers used in hashing to meet 64-byte len alignment */
+ char *buf1;
+ char *buf2;
+ dma_addr_t buf1_phys;
+ dma_addr_t buf2_phys;
+ struct dcp_hash_coherent_block *buf1_desc;
+ struct dcp_hash_coherent_block *buf2_desc;
+ struct dcp_hash_coherent_block *user_buf_desc;
+};
+
+/* cipher flags */
+#define DCP_ENC 0x0001
+#define DCP_DEC 0x0002
+#define DCP_ECB 0x0004
+#define DCP_CBC 0x0008
+#define DCP_CBC_INIT 0x0010
+#define DCP_OTPKEY 0x0020
+
+/* hash flags */
+#define DCP_INIT 0x0001
+#define DCP_UPDATE 0x0002
+#define DCP_FINAL 0x0004
+
+#define DCP_AES 0x1000
+#define DCP_SHA1 0x2000
+#define DCP_CRC32 0x3000
+#define DCP_COPY 0x4000
+#define DCP_FILL 0x5000
+#define DCP_MODE_MASK 0xf000
+
+struct dcp_op {
+
+ unsigned int flags;
+
+ void *src;
+ dma_addr_t src_phys;
+
+ void *dst;
+ dma_addr_t dst_phys;
+
+ int len;
+
+ /* the key contains the IV for block modes */
+ union {
+ struct {
+ u8 key[2 * AES_KEYSIZE_128]
+ __attribute__ ((__aligned__(32)));
+ dma_addr_t key_phys;
+ int keylen;
+ } cipher;
+ struct {
+ u8 digest[SHA256_DIGEST_SIZE]
+ __attribute__ ((__aligned__(32)));
+ dma_addr_t digest_phys;
+ int digestlen;
+ int init;
+ } hash;
+ };
+
+ union {
+ struct crypto_blkcipher *blk;
+ struct crypto_cipher *cip;
+ struct crypto_hash *hash;
+ } fallback;
+
+ struct dcp_hw_packet pkt
+ __attribute__ ((__aligned__(32)));
+};
+
+struct dcp_hash_coherent_block {
+ struct dcp_hw_packet pkt[1]
+ __attribute__ ((__aligned__(32)));
+ u8 digest[SHA256_DIGEST_SIZE]
+ __attribute__ ((__aligned__(32)));
+ unsigned int len;
+ dma_addr_t src_phys;
+ void *src;
+ void *dst;
+ dma_addr_t my_phys;
+ u32 hash_sel;
+ struct dcp_hash_coherent_block *next;
+};
+
+struct dcp_hash_op {
+
+ unsigned int flags;
+
+ /* the key contains the IV for block modes */
+ union {
+ struct {
+ u8 key[2 * AES_KEYSIZE_128]
+ __attribute__ ((__aligned__(32)));
+ dma_addr_t key_phys;
+ int keylen;
+ } cipher;
+ struct {
+ u8 digest[SHA256_DIGEST_SIZE]
+ __attribute__ ((__aligned__(32)));
+ dma_addr_t digest_phys;
+ int digestlen;
+ int init;
+ } hash;
+ };
+
+ u32 length;
+ struct dcp_hash_coherent_block *head_desc;
+ struct dcp_hash_coherent_block *tail_desc;
+};
+
+/* only one */
+static struct dcp *global_sdcp;
+
+static void dcp_perform_op(struct dcp_op *op)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct mutex *mutex;
+ struct dcp_hw_packet *pkt;
+ int chan;
+ u32 pkt1, pkt2;
+ unsigned long timeout;
+ dma_addr_t pkt_phys;
+ u32 stat;
+
+ pkt1 = BM_DCP_PACKET1_DECR_SEMAPHORE | BM_DCP_PACKET1_INTERRUPT;
+
+ switch (op->flags & DCP_MODE_MASK) {
+
+ case DCP_AES:
+
+ chan = CIPHER_CHAN;
+
+ /* key is at the payload */
+ pkt1 |= BM_DCP_PACKET1_ENABLE_CIPHER;
+ if ((op->flags & DCP_OTPKEY) == 0)
+ pkt1 |= BM_DCP_PACKET1_PAYLOAD_KEY;
+ if (op->flags & DCP_ENC)
+ pkt1 |= BM_DCP_PACKET1_CIPHER_ENCRYPT;
+ if (op->flags & DCP_CBC_INIT)
+ pkt1 |= BM_DCP_PACKET1_CIPHER_INIT;
+
+ pkt2 = BF(0, DCP_PACKET2_CIPHER_CFG) |
+ BF(0, DCP_PACKET2_KEY_SELECT) |
+ BF(BV_DCP_PACKET2_CIPHER_SELECT__AES128,
+ DCP_PACKET2_CIPHER_SELECT);
+
+ if (op->flags & DCP_ECB)
+ pkt2 |= BF(BV_DCP_PACKET2_CIPHER_MODE__ECB,
+ DCP_PACKET2_CIPHER_MODE);
+ else if (op->flags & DCP_CBC)
+ pkt2 |= BF(BV_DCP_PACKET2_CIPHER_MODE__CBC,
+ DCP_PACKET2_CIPHER_MODE);
+
+ break;
+
+ case DCP_SHA1:
+
+ chan = HASH_CHAN;
+
+ pkt1 |= BM_DCP_PACKET1_ENABLE_HASH;
+ if (op->flags & DCP_INIT)
+ pkt1 |= BM_DCP_PACKET1_HASH_INIT;
+ if (op->flags & DCP_FINAL) {
+ pkt1 |= BM_DCP_PACKET1_HASH_TERM;
+ BUG_ON(op->hash.digest == NULL);
+ }
+
+ pkt2 = BF(BV_DCP_PACKET2_HASH_SELECT__SHA1,
+ DCP_PACKET2_HASH_SELECT);
+ break;
+
+ default:
+ dev_err(sdcp->dev, "Unsupported mode\n");
+ return;
+ }
+
+ mutex = &sdcp->op_mutex[chan];
+ pkt = &op->pkt;
+
+ pkt->pNext = 0;
+ pkt->pkt1 = pkt1;
+ pkt->pkt2 = pkt2;
+ pkt->pSrc = (u32)op->src_phys;
+ pkt->pDst = (u32)op->dst_phys;
+ pkt->size = op->len;
+ pkt->pPayload = chan == CIPHER_CHAN ?
+ (u32)op->cipher.key_phys : (u32)op->hash.digest_phys;
+ pkt->stat = 0;
+
+ pkt_phys = dma_map_single(sdcp->dev, pkt, sizeof(*pkt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(sdcp->dev, pkt_phys)) {
+ dev_err(sdcp->dev, "Unable to map packet descriptor\n");
+ return;
+ }
+
+ /* submit the work */
+ mutex_lock(mutex);
+
+ __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_CHnSTAT_CLR(chan));
+
+ /* Load the work packet pointer and bump the channel semaphore */
+ __raw_writel((u32)pkt_phys, sdcp->dcp_regs_base +
+ HW_DCP_CHnCMDPTR(chan));
+
+ /* XXX wake from interrupt instead of looping */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ sdcp->wait[chan] = 0;
+ __raw_writel(BF(1, DCP_CHnSEMA_INCREMENT), sdcp->dcp_regs_base
+ + HW_DCP_CHnSEMA(chan));
+ while (time_before(jiffies, timeout) && sdcp->wait[chan] == 0)
+ cpu_relax();
+
+ if (!time_before(jiffies, timeout)) {
+ dev_err(sdcp->dev, "Timeout while waiting STAT 0x%08x\n",
+ __raw_readl(sdcp->dcp_regs_base + HW_DCP_STAT));
+ goto out;
+ }
+
+ stat = __raw_readl(sdcp->dcp_regs_base + HW_DCP_CHnSTAT(chan));
+ if ((stat & 0xff) != 0)
+ dev_err(sdcp->dev, "Channel stat error 0x%02x\n",
+ __raw_readl(sdcp->dcp_regs_base +
+ HW_DCP_CHnSTAT(chan)) & 0xff);
+out:
+ mutex_unlock(mutex);
+
+ dma_unmap_single(sdcp->dev, pkt_phys, sizeof(*pkt), DMA_BIDIRECTIONAL);
+}
+
+static int dcp_aes_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+ unsigned int ret;
+
+ op->cipher.keylen = len;
+
+ if (len == AES_KEYSIZE_128) {
+ memcpy(op->cipher.key, key, len);
+ return 0;
+ }
+
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ /* not supported at all */
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+ op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ op->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_cipher_setkey(op->fallback.cip, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (op->fallback.blk->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static void dcp_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+ if (unlikely(op->cipher.keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_encrypt_one(op->fallback.cip, out, in);
+ return;
+ }
+
+ op->src = (void *) in;
+ op->dst = (void *) out;
+ op->flags = DCP_AES | DCP_ENC | DCP_ECB;
+ op->len = AES_KEYSIZE_128;
+
+ /* map the data */
+ op->src_phys = dma_map_single(sdcp->dev, (void *)in, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+ dev_err(sdcp->dev, "Unable to map source\n");
+ return;
+ }
+
+ op->dst_phys = dma_map_single(sdcp->dev, out, AES_KEYSIZE_128,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+ dev_err(sdcp->dev, "Unable to map dest\n");
+ goto err_unmap_src;
+ }
+
+ op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+ dev_err(sdcp->dev, "Unable to map key\n");
+ goto err_unmap_dst;
+ }
+
+ /* perform the operation */
+ dcp_perform_op(op);
+
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+err_unmap_dst:
+ dma_unmap_single(sdcp->dev, op->dst_phys, op->len, DMA_FROM_DEVICE);
+err_unmap_src:
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len, DMA_TO_DEVICE);
+}
+
+static void dcp_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+ if (unlikely(op->cipher.keylen != AES_KEYSIZE_128)) {
+ crypto_cipher_decrypt_one(op->fallback.cip, out, in);
+ return;
+ }
+
+ op->src = (void *) in;
+ op->dst = (void *) out;
+ op->flags = DCP_AES | DCP_DEC | DCP_ECB;
+ op->len = AES_KEYSIZE_128;
+
+ /* map the data */
+ op->src_phys = dma_map_single(sdcp->dev, (void *)in, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+ dev_err(sdcp->dev, "Unable to map source\n");
+ return;
+ }
+
+ op->dst_phys = dma_map_single(sdcp->dev, out, AES_KEYSIZE_128,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+ dev_err(sdcp->dev, "Unable to map dest\n");
+ goto err_unmap_src;
+ }
+
+ op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+ dev_err(sdcp->dev, "Unable to map key\n");
+ goto err_unmap_dst;
+ }
+
+ /* perform the operation */
+ dcp_perform_op(op);
+
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+err_unmap_dst:
+ dma_unmap_single(sdcp->dev, op->dst_phys, op->len, DMA_FROM_DEVICE);
+err_unmap_src:
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len, DMA_TO_DEVICE);
+}
+
+static int fallback_init_cip(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+ op->fallback.cip = crypto_alloc_cipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(op->fallback.cip)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(op->fallback.cip);
+ }
+
+ return 0;
+}
+
+static void fallback_exit_cip(struct crypto_tfm *tfm)
+{
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(op->fallback.cip);
+ op->fallback.cip = NULL;
+}
+
+static struct crypto_alg dcp_aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "dcp-aes",
+ .cra_priority = 300,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = fallback_init_cip,
+ .cra_exit = fallback_exit_cip,
+ .cra_blocksize = AES_KEYSIZE_128,
+ .cra_ctxsize = sizeof(struct dcp_op),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(dcp_aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = dcp_aes_setkey_cip,
+ .cia_encrypt = dcp_aes_encrypt,
+ .cia_decrypt = dcp_aes_decrypt
+ }
+ }
+};
+
+static int dcp_aes_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+ unsigned int ret;
+
+ op->cipher.keylen = len;
+
+ if (len == AES_KEYSIZE_128) {
+ memcpy(op->cipher.key, key, len);
+ return 0;
+ }
+
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ /* not supported at all */
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+ op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ op->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |= (op->fallback.blk->base.crt_flags &
+ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int fallback_blk_dec(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ unsigned int ret;
+ struct crypto_blkcipher *tfm;
+ struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+ tfm = desc->tfm;
+ desc->tfm = op->fallback.blk;
+
+ ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ unsigned int ret;
+ struct crypto_blkcipher *tfm;
+ struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+
+ tfm = desc->tfm;
+ desc->tfm = op->fallback.blk;
+
+ ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
+
+ desc->tfm = tfm;
+ return ret;
+}
+
+static int fallback_init_blk(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+ op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(op->fallback.blk)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(op->fallback.blk);
+ }
+
+ return 0;
+}
+
+static void fallback_exit_blk(struct crypto_tfm *tfm)
+{
+ struct dcp_op *op = crypto_tfm_ctx(tfm);
+
+ crypto_free_blkcipher(op->fallback.blk);
+ op->fallback.blk = NULL;
+}
+
+static int
+dcp_aes_ecb_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ /* key needs to be mapped only once */
+ op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+ dev_err(sdcp->dev, "Unable to map key\n");
+ return -ENOMEM;
+ }
+
+ err = blkcipher_walk_virt(desc, &walk);
+ while (err == 0 && (nbytes = walk.nbytes) > 0) {
+ op->src = walk.src.virt.addr,
+ op->dst = walk.dst.virt.addr;
+ op->flags = DCP_AES | DCP_DEC |
+ DCP_ECB;
+ op->len = nbytes - (nbytes % AES_KEYSIZE_128);
+
+ /* map the data */
+ op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+ dev_err(sdcp->dev, "Unable to map source\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+ dev_err(sdcp->dev, "Unable to map dest\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ /* perform! */
+ dcp_perform_op(op);
+
+ dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+
+ nbytes -= op->len;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+
+ return err;
+}
+
+static int
+dcp_aes_ecb_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ /* key needs to be mapped only once */
+ op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+ dev_err(sdcp->dev, "Unable to map key\n");
+ return -ENOMEM;
+ }
+
+ err = blkcipher_walk_virt(desc, &walk);
+
+ /* any error from setting up the walk terminates the loop below */
+ while (err == 0 && (nbytes = walk.nbytes) > 0) {
+ op->src = walk.src.virt.addr,
+ op->dst = walk.dst.virt.addr;
+ op->flags = DCP_AES | DCP_ENC |
+ DCP_ECB;
+ op->len = nbytes - (nbytes % AES_KEYSIZE_128);
+
+ /* map the data */
+ op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+ dev_err(sdcp->dev, "Unable to map source\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+ dev_err(sdcp->dev, "Unable to map dest\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ /* perform! */
+ dcp_perform_op(op);
+
+ dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+
+ nbytes -= op->len;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+
+ return err;
+}
+
+
+static struct crypto_alg dcp_aes_ecb_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "dcp-ecb-aes",
+ .cra_priority = 400,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
+ .cra_blocksize = AES_KEYSIZE_128,
+ .cra_ctxsize = sizeof(struct dcp_op),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(dcp_aes_ecb_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = dcp_aes_setkey_blk,
+ .encrypt = dcp_aes_ecb_encrypt,
+ .decrypt = dcp_aes_ecb_decrypt
+ }
+ }
+};
+
+static int
+dcp_aes_cbc_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err, blockno;
+
+ if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ blockno = 0;
+ err = blkcipher_walk_virt(desc, &walk);
+ while (err == 0 && (nbytes = walk.nbytes) > 0) {
+ op->src = walk.src.virt.addr,
+ op->dst = walk.dst.virt.addr;
+ op->flags = DCP_AES | DCP_DEC |
+ DCP_CBC;
+ if (blockno == 0) {
+ op->flags |= DCP_CBC_INIT;
+ memcpy(op->cipher.key + AES_KEYSIZE_128, walk.iv,
+ AES_KEYSIZE_128);
+ }
+ op->len = nbytes - (nbytes % AES_KEYSIZE_128);
+
+ /* key (+iv) needs to be mapped only once */
+ op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+ dev_err(sdcp->dev, "Unable to map key\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ /* map the data */
+ op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ dev_err(sdcp->dev, "Unable to map source\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+ dev_err(sdcp->dev, "Unable to map dest\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ /* perform! */
+ dcp_perform_op(op);
+
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+
+ nbytes -= op->len;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+
+ blockno++;
+ }
+
+ return err;
+}
+
+static int
+dcp_aes_cbc_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err, blockno;
+
+ if (unlikely(op->cipher.keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ blockno = 0;
+
+ err = blkcipher_walk_virt(desc, &walk);
+ while (err == 0 && (nbytes = walk.nbytes) > 0) {
+ op->src = walk.src.virt.addr,
+ op->dst = walk.dst.virt.addr;
+ op->flags = DCP_AES | DCP_ENC |
+ DCP_CBC;
+ if (blockno == 0) {
+ op->flags |= DCP_CBC_INIT;
+ memcpy(op->cipher.key + AES_KEYSIZE_128, walk.iv,
+ AES_KEYSIZE_128);
+ }
+ op->len = nbytes - (nbytes % AES_KEYSIZE_128);
+
+ /* key needs to be mapped only once */
+ op->cipher.key_phys = dma_map_single(sdcp->dev, op->cipher.key,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(sdcp->dev, op->cipher.key_phys)) {
+ dev_err(sdcp->dev, "Unable to map key\n");
+ return -ENOMEM;
+ }
+
+ /* map the data */
+ op->src_phys = dma_map_single(sdcp->dev, op->src, op->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->src_phys)) {
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ dev_err(sdcp->dev, "Unable to map source\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ op->dst_phys = dma_map_single(sdcp->dev, op->dst, op->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(sdcp->dev, op->dst_phys)) {
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+ dev_err(sdcp->dev, "Unable to map dest\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ /* perform! */
+ dcp_perform_op(op);
+
+ dma_unmap_single(sdcp->dev, op->cipher.key_phys,
+ AES_KEYSIZE_128 * 2, DMA_BIDIRECTIONAL);
+ dma_unmap_single(sdcp->dev, op->dst_phys, op->len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(sdcp->dev, op->src_phys, op->len,
+ DMA_TO_DEVICE);
+
+ nbytes -= op->len;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+
+ blockno++;
+ }
+
+ return err;
+}
+
+static struct crypto_alg dcp_aes_cbc_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "dcp-cbc-aes",
+ .cra_priority = 400,
+ .cra_alignmask = 15,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
+ .cra_blocksize = AES_KEYSIZE_128,
+ .cra_ctxsize = sizeof(struct dcp_op),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(dcp_aes_cbc_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = dcp_aes_setkey_blk,
+ .encrypt = dcp_aes_cbc_encrypt,
+ .decrypt = dcp_aes_cbc_decrypt,
+ .ivsize = AES_KEYSIZE_128,
+ }
+ }
+};
+
+static int dcp_perform_hash_op(
+ struct dcp_hash_coherent_block *input,
+ u32 num_desc, bool init, bool terminate)
+{
+ struct dcp *sdcp = global_sdcp;
+ int chan;
+ struct dcp_hw_packet *pkt;
+ struct dcp_hash_coherent_block *hw;
+ unsigned long timeout;
+ u32 stat;
+ int descno, mapped;
+
+ chan = HASH_CHAN;
+
+ hw = input;
+ pkt = hw->pkt;
+
+ for (descno = 0; descno < num_desc; descno++) {
+
+ if (descno != 0) {
+
+ /* set next ptr and CHAIN bit in last packet */
+ pkt->pNext = hw->next->my_phys + offsetof(
+ struct dcp_hash_coherent_block,
+ pkt[0]);
+ pkt->pkt1 |= BM_DCP_PACKET1_CHAIN;
+
+ /* iterate to next descriptor */
+ hw = hw->next;
+ pkt = hw->pkt;
+ }
+
+ pkt->pkt1 = BM_DCP_PACKET1_DECR_SEMAPHORE |
+ BM_DCP_PACKET1_ENABLE_HASH;
+
+ if (init && descno == 0)
+ pkt->pkt1 |= BM_DCP_PACKET1_HASH_INIT;
+
+ pkt->pkt2 = BF(hw->hash_sel,
+ DCP_PACKET2_HASH_SELECT);
+
+ /* no need to flush buf1 or buf2, which are uncached */
+ if (hw->src != sdcp->buf1 && hw->src != sdcp->buf2) {
+
+ /* we have to flush the cache for the buffer */
+ hw->src_phys = dma_map_single(sdcp->dev,
+ hw->src, hw->len, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(sdcp->dev, hw->src_phys)) {
+ dev_err(sdcp->dev, "Unable to map source\n");
+
+ /* unmap any previous mapped buffers */
+ for (mapped = 0, hw = input; mapped < descno;
+ mapped++) {
+
+ if (mapped != 0)
+ hw = hw->next;
+ if (hw->src != sdcp->buf1 &&
+ hw->src != sdcp->buf2)
+ dma_unmap_single(sdcp->dev,
+ hw->src_phys, hw->len,
+ DMA_TO_DEVICE);
+ }
+
+ return -EFAULT;
+ }
+ }
+
+ pkt->pSrc = (u32)hw->src_phys;
+ pkt->pDst = 0;
+ pkt->size = hw->len;
+ pkt->pPayload = 0;
+ pkt->stat = 0;
+
+ /* set HASH_TERM bit on last buf if terminate was set */
+ if (terminate && (descno == (num_desc - 1))) {
+ pkt->pkt1 |= BM_DCP_PACKET1_HASH_TERM;
+
+ memset(input->digest, 0, sizeof(input->digest));
+
+ /* set payload ptr to the 1st buffer's digest */
+ pkt->pPayload = (u32)input->my_phys +
+ offsetof(
+ struct dcp_hash_coherent_block,
+ digest);
+ }
+ }
+
+ /* submit the work */
+
+ __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_CHnSTAT_CLR(chan));
+
+ mb();
+ /* Load the 1st descriptor's physical address */
+ __raw_writel((u32)input->my_phys +
+ offsetof(struct dcp_hash_coherent_block,
+ pkt[0]), sdcp->dcp_regs_base + HW_DCP_CHnCMDPTR(chan));
+
+ /* XXX wake from interrupt instead of looping */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* write num_desc into sema register */
+ __raw_writel(BF(num_desc, DCP_CHnSEMA_INCREMENT),
+ sdcp->dcp_regs_base + HW_DCP_CHnSEMA(chan));
+
+ while (time_before(jiffies, timeout) &&
+ ((__raw_readl(sdcp->dcp_regs_base +
+ HW_DCP_CHnSEMA(chan)) >> 16) & 0xff) != 0) {
+
+ cpu_relax();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ dev_err(sdcp->dev,
+ "Timeout while waiting STAT 0x%08x\n",
+ __raw_readl(sdcp->dcp_regs_base + HW_DCP_STAT));
+ }
+
+ stat = __raw_readl(sdcp->dcp_regs_base + HW_DCP_CHnSTAT(chan));
+ if ((stat & 0xff) != 0)
+ dev_err(sdcp->dev, "Channel stat error 0x%02x\n",
+ __raw_readl(sdcp->dcp_regs_base +
+ HW_DCP_CHnSTAT(chan)) & 0xff);
+
+ /* unmap all src buffers */
+ for (descno = 0, hw = input; descno < num_desc; descno++) {
+ if (descno != 0)
+ hw = hw->next;
+ if (hw->src != sdcp->buf1 && hw->src != sdcp->buf2)
+ dma_unmap_single(sdcp->dev, hw->src_phys, hw->len,
+ DMA_TO_DEVICE);
+ }
+
+ return 0;
+
+}
+
+static int dcp_sha_init(struct shash_desc *desc)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_hash_op *op = shash_desc_ctx(desc);
+ struct mutex *mutex = &sdcp->op_mutex[HASH_CHAN];
+
+ mutex_lock(mutex);
+
+ op->length = 0;
+
+ /* reset the lengths and the pointers of buffer descriptors */
+ sdcp->buf1_desc->len = 0;
+ sdcp->buf1_desc->src = sdcp->buf1;
+ sdcp->buf2_desc->len = 0;
+ sdcp->buf2_desc->src = sdcp->buf2;
+ op->head_desc = sdcp->buf1_desc;
+ op->tail_desc = sdcp->buf2_desc;
+
+ return 0;
+}
+
+static int dcp_sha_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_hash_op *op = shash_desc_ctx(desc);
+ struct dcp_hash_coherent_block *temp;
+ u32 rem_bytes, bytes_borrowed, hash_sel;
+ int ret = 0;
+
+ if (strcmp(desc->tfm->base.__crt_alg->cra_name, "sha1") == 0)
+ hash_sel = BV_DCP_PACKET2_HASH_SELECT__SHA1;
+ else
+ hash_sel = BV_DCP_PACKET2_HASH_SELECT__SHA256;
+
+ sdcp->user_buf_desc->src = (void *)data;
+ sdcp->user_buf_desc->len = length;
+
+ op->tail_desc->len = 0;
+
+ /* check if any pending data from previous updates */
+ if (op->head_desc->len) {
+
+ /* borrow from this buffer to make it 64 bytes */
+ bytes_borrowed = min(64 - op->head_desc->len,
+ sdcp->user_buf_desc->len);
+
+ /* copy n bytes to head */
+ memcpy(op->head_desc->src + op->head_desc->len,
+ sdcp->user_buf_desc->src, bytes_borrowed);
+ op->head_desc->len += bytes_borrowed;
+
+ /* update current buffer's src and len */
+ sdcp->user_buf_desc->src += bytes_borrowed;
+ sdcp->user_buf_desc->len -= bytes_borrowed;
+ }
+
+ /* Is the current buffer's length not a multiple of 64 bytes?
+ * Each buffer's length must be a multiple of 64 bytes for the DCP.
+ */
+ rem_bytes = sdcp->user_buf_desc->len % 64;
+
+ /* if length is unaligned, copy remainder to tail */
+ if (rem_bytes) {
+
+ memcpy(op->tail_desc->src, (sdcp->user_buf_desc->src +
+ sdcp->user_buf_desc->len - rem_bytes),
+ rem_bytes);
+
+ /* update length of current buffer */
+ sdcp->user_buf_desc->len -= rem_bytes;
+
+ op->tail_desc->len = rem_bytes;
+ }
+
+ /* do not send to DCP if length is < 64 */
+ if ((op->head_desc->len + sdcp->user_buf_desc->len) >= 64) {
+
+ /* set hash alg to be used (SHA1 or SHA256) */
+ op->head_desc->hash_sel = hash_sel;
+ sdcp->user_buf_desc->hash_sel = hash_sel;
+
+ if (op->head_desc->len) {
+ op->head_desc->next = sdcp->user_buf_desc;
+
+ ret = dcp_perform_hash_op(op->head_desc,
+ sdcp->user_buf_desc->len ? 2 : 1,
+ op->length == 0, false);
+ } else {
+ ret = dcp_perform_hash_op(sdcp->user_buf_desc, 1,
+ op->length == 0, false);
+ }
+
+ op->length += op->head_desc->len + sdcp->user_buf_desc->len;
+ op->head_desc->len = 0;
+ }
+
+ /* if tail has bytes, make it the head for next time */
+ if (op->tail_desc->len) {
+ temp = op->head_desc;
+ op->head_desc = op->tail_desc;
+ op->tail_desc = temp;
+ }
+
+ /* hash_sel to be used by final function */
+ op->head_desc->hash_sel = hash_sel;
+
+ return ret;
+}
+
+static int dcp_sha_final(struct shash_desc *desc, u8 *out)
+{
+ struct dcp_hash_op *op = shash_desc_ctx(desc);
+ const uint8_t *digest;
+ struct dcp *sdcp = global_sdcp;
+ u32 i, digest_len;
+ struct mutex *mutex = &sdcp->op_mutex[HASH_CHAN];
+ int ret = 0;
+
+ /* Send the leftover bytes in head, which may be of length 0;
+ * the DCP still produces the hash result at the payload pointer.
+ * The final chunk of data need not be a 64-byte multiple.
+ */
+ ret = dcp_perform_hash_op(op->head_desc, 1, op->length == 0, true);
+
+ op->length += op->head_desc->len;
+
+ digest_len = (op->head_desc->hash_sel ==
+ BV_DCP_PACKET2_HASH_SELECT__SHA1) ? SHA1_DIGEST_SIZE :
+ SHA256_DIGEST_SIZE;
+
+ /* hardware reverses the digest (for some inexplicable reason) */
+ digest = op->head_desc->digest + digest_len;
+ for (i = 0; i < digest_len; i++)
+ *out++ = *--digest;
+
+ mutex_unlock(mutex);
+
+ return ret;
+}
+
+static struct shash_alg dcp_sha1_alg = {
+ .init = dcp_sha_init,
+ .update = dcp_sha_update,
+ .final = dcp_sha_final,
+ .descsize = sizeof(struct dcp_hash_op),
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-dcp",
+ .cra_priority = 300,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct dcp_hash_op),
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static struct shash_alg dcp_sha256_alg = {
+ .init = dcp_sha_init,
+ .update = dcp_sha_update,
+ .final = dcp_sha_final,
+ .descsize = sizeof(struct dcp_hash_op),
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-dcp",
+ .cra_priority = 300,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct dcp_hash_op),
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static irqreturn_t dcp_common_irq(int irq, void *context)
+{
+ struct dcp *sdcp = context;
+ u32 msk;
+
+ /* check */
+ msk = __raw_readl(sdcp->dcp_regs_base + HW_DCP_STAT) &
+ BF(0x0f, DCP_STAT_IRQ);
+ if (msk == 0)
+ return IRQ_NONE;
+
+ /* clear this channel */
+ __raw_writel(msk, sdcp->dcp_regs_base + HW_DCP_STAT_CLR);
+ if (msk & BF(0x01, DCP_STAT_IRQ))
+ sdcp->wait[0]++;
+ if (msk & BF(0x02, DCP_STAT_IRQ))
+ sdcp->wait[1]++;
+ if (msk & BF(0x04, DCP_STAT_IRQ))
+ sdcp->wait[2]++;
+ if (msk & BF(0x08, DCP_STAT_IRQ))
+ sdcp->wait[3]++;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dcp_vmi_irq(int irq, void *context)
+{
+ return dcp_common_irq(irq, context);
+}
+
+static irqreturn_t dcp_irq(int irq, void *context)
+{
+ return dcp_common_irq(irq, context);
+}
+
+static int dcp_probe(struct platform_device *pdev)
+{
+ struct dcp *sdcp = NULL;
+ struct resource *r;
+ int i, ret;
+ dma_addr_t hw_phys;
+
+ if (global_sdcp != NULL) {
+ dev_err(&pdev->dev, "Only one instance allowed\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* allocate memory */
+ sdcp = kzalloc(sizeof(*sdcp), GFP_KERNEL);
+ if (sdcp == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate structure\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sdcp->dev = &pdev->dev;
+ spin_lock_init(&sdcp->lock);
+
+ for (i = 0; i < DCP_NUM_CHANNELS; i++) {
+ mutex_init(&sdcp->op_mutex[i]);
+ init_completion(&sdcp->op_wait[i]);
+ }
+
+ platform_set_drvdata(pdev, sdcp);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
+ ret = -ENXIO;
+ goto err_kfree;
+ }
+ sdcp->dcp_regs_base = IO_ADDRESS(r->start);
+
+ /* Soft reset and remove the clock gate */
+ __raw_writel(BM_DCP_CTRL_SFTRST, sdcp->dcp_regs_base + HW_DCP_CTRL_SET);
+
+ /* At 24 MHz it takes no more than 4 clocks (160 ns) for
+ * the part to reset; reading the register twice should
+ * be sufficient to get a 4-clock delay.
+ */
+ __raw_readl(sdcp->dcp_regs_base + HW_DCP_CTRL);
+ __raw_readl(sdcp->dcp_regs_base + HW_DCP_CTRL);
+
+ __raw_writel(BM_DCP_CTRL_SFTRST | BM_DCP_CTRL_CLKGATE,
+ sdcp->dcp_regs_base + HW_DCP_CTRL_CLR);
+
+ /* Initialize control registers */
+ __raw_writel(DCP_CTRL_INIT, sdcp->dcp_regs_base + HW_DCP_CTRL);
+ __raw_writel(DCP_CHANNELCTRL_INIT, sdcp->dcp_regs_base +
+ HW_DCP_CHANNELCTRL);
+
+ /* We do not enable context switching. Give the context
+ * buffer pointer an illegal address so that if context switching is
+ * inadvertently enabled, the DCP will return an error instead of
+ * trashing good memory. The DCP DMA cannot access ROM, so any ROM
+ * address will do.
+ */
+ __raw_writel(0xFFFF0000, sdcp->dcp_regs_base + HW_DCP_CONTEXT);
+
+ for (i = 0; i < DCP_NUM_CHANNELS; i++)
+ __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_CHnSTAT_CLR(i));
+ __raw_writel(-1, sdcp->dcp_regs_base + HW_DCP_STAT_CLR);
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
+ ret = -EIO;
+ goto err_kfree;
+ }
+ sdcp->dcp_vmi_irq = r->start;
+ ret = request_irq(sdcp->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp",
+ sdcp);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "can't request_irq (0)\n");
+ goto err_kfree;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
+ ret = -EIO;
+ goto err_free_irq0;
+ }
+ sdcp->dcp_irq = r->start;
+ ret = request_irq(sdcp->dcp_irq, dcp_irq, 0, "dcp", sdcp);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "can't request_irq (1)\n");
+ goto err_free_irq0;
+ }
+
+ global_sdcp = sdcp;
+
+ ret = crypto_register_alg(&dcp_aes_alg);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register aes crypto\n");
+ goto err_kfree;
+ }
+
+ ret = crypto_register_alg(&dcp_aes_ecb_alg);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register aes ecb crypto\n");
+ goto err_unregister_aes;
+ }
+
+ ret = crypto_register_alg(&dcp_aes_cbc_alg);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register aes cbc crypto\n");
+ goto err_unregister_aes_ecb;
+ }
+
+ /* Allocate the descriptor used for the user buffer passed in
+ * by the Crypto API "update" function.
+ */
+ sdcp->user_buf_desc = dma_alloc_coherent(sdcp->dev,
+ sizeof(struct dcp_hash_coherent_block), &hw_phys,
+ GFP_KERNEL);
+ if (sdcp->user_buf_desc == NULL) {
+ printk(KERN_ERR "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_unregister_aes_cbc;
+ }
+
+ sdcp->user_buf_desc->my_phys = hw_phys;
+
+ /* Allocate 2 buffers (head & tail) and their descriptors to deal
+ * with buffer lengths that are not a multiple of 64 bytes; only
+ * the last chunk may be unaligned.
+ */
+ sdcp->buf1 = dma_alloc_coherent(sdcp->dev,
+ 64, &sdcp->buf1_phys, GFP_KERNEL);
+ if (sdcp->buf1 == NULL) {
+ printk(KERN_ERR "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_unregister_aes_cbc;
+ }
+
+ sdcp->buf2 = dma_alloc_coherent(sdcp->dev,
+ 64, &sdcp->buf2_phys, GFP_KERNEL);
+ if (sdcp->buf2 == NULL) {
+ printk(KERN_ERR "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_unregister_aes_cbc;
+ }
+
+ sdcp->buf1_desc = dma_alloc_coherent(sdcp->dev,
+ sizeof(struct dcp_hash_coherent_block), &hw_phys,
+ GFP_KERNEL);
+ if (sdcp->buf1_desc == NULL) {
+ printk(KERN_ERR "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_unregister_aes_cbc;
+ }
+
+ sdcp->buf1_desc->my_phys = hw_phys;
+ sdcp->buf1_desc->src = (void *)sdcp->buf1;
+ sdcp->buf1_desc->src_phys = sdcp->buf1_phys;
+
+ sdcp->buf2_desc = dma_alloc_coherent(sdcp->dev,
+ sizeof(struct dcp_hash_coherent_block), &hw_phys,
+ GFP_KERNEL);
+ if (sdcp->buf2_desc == NULL) {
+ printk(KERN_ERR "Error allocating coherent block\n");
+ ret = -ENOMEM;
+ goto err_unregister_aes_cbc;
+ }
+
+ sdcp->buf2_desc->my_phys = hw_phys;
+ sdcp->buf2_desc->src = (void *)sdcp->buf2;
+ sdcp->buf2_desc->src_phys = sdcp->buf2_phys;
+
+
+ ret = crypto_register_shash(&dcp_sha1_alg);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register sha1 hash\n");
+ goto err_unregister_aes_cbc;
+ }
+
+ if (__raw_readl(sdcp->dcp_regs_base + HW_DCP_CAPABILITY1) &
+ BF_DCP_CAPABILITY1_HASH_ALGORITHMS(
+ BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA256)) {
+
+ ret = crypto_register_shash(&dcp_sha256_alg);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register sha256 hash\n");
+ goto err_unregister_sha1;
+ }
+ }
+
+ dev_notice(&pdev->dev, "DCP crypto enabled.\n");
+ return 0;
+
+err_unregister_sha1:
+ crypto_unregister_shash(&dcp_sha1_alg);
+err_unregister_aes_cbc:
+ crypto_unregister_alg(&dcp_aes_cbc_alg);
+err_unregister_aes_ecb:
+ crypto_unregister_alg(&dcp_aes_ecb_alg);
+err_unregister_aes:
+ crypto_unregister_alg(&dcp_aes_alg);
+err_free_irq0:
+ free_irq(sdcp->dcp_vmi_irq, sdcp);
+err_kfree:
+ kfree(sdcp);
+err:
+
+ return ret;
+}
+
+static int dcp_remove(struct platform_device *pdev)
+{
+ struct dcp *sdcp;
+
+ sdcp = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+
+ free_irq(sdcp->dcp_irq, sdcp);
+ free_irq(sdcp->dcp_vmi_irq, sdcp);
+
+ /* if head and tail buffers were allocated, free them */
+ if (sdcp->buf1) {
+ dma_free_coherent(sdcp->dev, 64, sdcp->buf1, sdcp->buf1_phys);
+ dma_free_coherent(sdcp->dev, 64, sdcp->buf2, sdcp->buf2_phys);
+
+ dma_free_coherent(sdcp->dev,
+ sizeof(struct dcp_hash_coherent_block),
+ sdcp->buf1_desc, sdcp->buf1_desc->my_phys);
+
+ dma_free_coherent(sdcp->dev,
+ sizeof(struct dcp_hash_coherent_block),
+ sdcp->buf2_desc, sdcp->buf2_desc->my_phys);
+
+ dma_free_coherent(sdcp->dev,
+ sizeof(struct dcp_hash_coherent_block),
+ sdcp->user_buf_desc, sdcp->user_buf_desc->my_phys);
+ }
+
+ crypto_unregister_shash(&dcp_sha1_alg);
+ crypto_unregister_shash(&dcp_sha256_alg);
+
+ crypto_unregister_alg(&dcp_aes_cbc_alg);
+ crypto_unregister_alg(&dcp_aes_ecb_alg);
+ crypto_unregister_alg(&dcp_aes_alg);
+ kfree(sdcp);
+ global_sdcp = NULL;
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int dcp_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int dcp_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+#else
+#define dcp_suspend NULL
+#define dcp_resume NULL
+#endif
+
+static struct platform_driver dcp_driver = {
+ .probe = dcp_probe,
+ .remove = dcp_remove,
+ .suspend = dcp_suspend,
+ .resume = dcp_resume,
+ .driver = {
+ .name = "dcp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init
+dcp_init(void)
+{
+ return platform_driver_register(&dcp_driver);
+}
+
+static void __exit
+dcp_exit(void)
+{
+ platform_driver_unregister(&dcp_driver);
+}
+
+MODULE_AUTHOR("Pantelis Antoniou <pantelis@embeddedalley.com>");
+MODULE_DESCRIPTION("DCP Crypto Driver");
+MODULE_LICENSE("GPL");
+
+module_init(dcp_init);
+module_exit(dcp_exit);
diff --git a/drivers/crypto/dcp.h b/drivers/crypto/dcp.h
new file mode 100644
index 000000000000..00cd27b479c0
--- /dev/null
+++ b/drivers/crypto/dcp.h
@@ -0,0 +1,712 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#ifndef DCP_H_
+#define DCP_H_
+
+#define CIPHER_CHAN 1
+#define CIPHER_MASK (1 << CIPHER_CHAN)
+
+#define HASH_CHAN 0
+#define HASH_MASK (1 << HASH_CHAN)
+
+#define ALL_MASK (CIPHER_MASK | HASH_MASK)
+
+/* Defines the initialization value for the dcp control register */
+#define DCP_CTRL_INIT \
+ (BM_DCP_CTRL_GATHER_RESIDUAL_WRITES | \
+ BM_DCP_CTRL_ENABLE_CONTEXT_CACHING | \
+ BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH0 | \
+ BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH1 | \
+ BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH2 | \
+ BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH3)
+
+/* Defines the initialization value for the dcp channel control register */
+#define DCP_CHANNELCTRL_INIT \
+ BF(ALL_MASK, DCP_CHANNELCTRL_ENABLE_CHANNEL)
+
+/* DCP work packet 1 value for encryption */
+#define DCP_PKT1_ENCRYPT \
+ (BM_DCP_PACKET1_DECR_SEMAPHORE | \
+ BM_DCP_PACKET1_ENABLE_CIPHER | \
+ BM_DCP_PACKET1_CIPHER_ENCRYPT | \
+ BM_DCP_PACKET1_CIPHER_INIT)
+
+/* DCP work packet 1 value for decryption */
+#define DCP_PKT1_DECRYPT \
+ (BM_DCP_PACKET1_DECR_SEMAPHORE | \
+ BM_DCP_PACKET1_ENABLE_CIPHER | \
+ BM_DCP_PACKET1_CIPHER_INIT)
+
+/* DCP work packet definition */
+struct dcp_hw_packet {
+ uint32_t pNext; /* next dcp work packet address */
+ uint32_t pkt1; /* dcp work packet 1 (control 0) */
+ uint32_t pkt2; /* dcp work packet 2 (control 1) */
+ uint32_t pSrc; /* source buffer address */
+ uint32_t pDst; /* destination buffer address */
+ uint32_t size; /* buffer size in bytes */
+ uint32_t pPayload; /* payload buffer address */
+ uint32_t stat; /* dcp status (written by dcp) */
+};
+
+#define DCP_NUM_CHANNELS 4
+
+/* DCP Register definitions */
+
+#ifndef BF
+#define BF(value, field) (((value) << BP_##field) & BM_##field)
+#endif
+
+#define REGS_DCP_SIZE 0x00002000
+
+#define HW_DCP_CTRL (0x00000000)
+#define HW_DCP_CTRL_SET (0x00000004)
+#define HW_DCP_CTRL_CLR (0x00000008)
+#define HW_DCP_CTRL_TOG (0x0000000c)
+
+#define BM_DCP_CTRL_SFTRST 0x80000000
+#define BM_DCP_CTRL_CLKGATE 0x40000000
+#define BM_DCP_CTRL_PRESENT_CRYPTO 0x20000000
+#define BV_DCP_CTRL_PRESENT_CRYPTO__Present 0x1
+#define BV_DCP_CTRL_PRESENT_CRYPTO__Absent 0x0
+#define BM_DCP_CTRL_PRESENT_CSC 0x10000000
+#define BV_DCP_CTRL_PRESENT_CSC__Present 0x1
+#define BV_DCP_CTRL_PRESENT_CSC__Absent 0x0
+#define BP_DCP_CTRL_RSVD1 24
+#define BM_DCP_CTRL_RSVD1 0x0F000000
+#define BF_DCP_CTRL_RSVD1(v) \
+ (((v) << 24) & BM_DCP_CTRL_RSVD1)
+#define BM_DCP_CTRL_GATHER_RESIDUAL_WRITES 0x00800000
+#define BM_DCP_CTRL_ENABLE_CONTEXT_CACHING 0x00400000
+#define BM_DCP_CTRL_ENABLE_CONTEXT_SWITCHING 0x00200000
+#define BP_DCP_CTRL_RSVD0 9
+#define BM_DCP_CTRL_RSVD0 0x001FFE00
+#define BF_DCP_CTRL_RSVD0(v) \
+ (((v) << 9) & BM_DCP_CTRL_RSVD0)
+#define BM_DCP_CTRL_CSC_INTERRUPT_ENABLE 0x00000100
+#define BP_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE 0
+#define BM_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE 0x000000FF
+#define BF_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE(v) \
+ (((v) << 0) & BM_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE)
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH0 0x01
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH1 0x02
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH2 0x04
+#define BV_DCP_CTRL_CHANNEL_INTERRUPT_ENABLE__CH3 0x08
+
+#define HW_DCP_STAT (0x00000010)
+#define HW_DCP_STAT_SET (0x00000014)
+#define HW_DCP_STAT_CLR (0x00000018)
+#define HW_DCP_STAT_TOG (0x0000001c)
+
+#define BP_DCP_STAT_RSVD2 29
+#define BM_DCP_STAT_RSVD2 0xE0000000
+#define BF_DCP_STAT_RSVD2(v) \
+ (((v) << 29) & BM_DCP_STAT_RSVD2)
+#define BM_DCP_STAT_OTP_KEY_READY 0x10000000
+#define BP_DCP_STAT_CUR_CHANNEL 24
+#define BM_DCP_STAT_CUR_CHANNEL 0x0F000000
+#define BF_DCP_STAT_CUR_CHANNEL(v) \
+ (((v) << 24) & BM_DCP_STAT_CUR_CHANNEL)
+#define BV_DCP_STAT_CUR_CHANNEL__None 0x0
+#define BV_DCP_STAT_CUR_CHANNEL__CH0 0x1
+#define BV_DCP_STAT_CUR_CHANNEL__CH1 0x2
+#define BV_DCP_STAT_CUR_CHANNEL__CH2 0x3
+#define BV_DCP_STAT_CUR_CHANNEL__CH3 0x4
+#define BV_DCP_STAT_CUR_CHANNEL__CSC 0x8
+#define BP_DCP_STAT_READY_CHANNELS 16
+#define BM_DCP_STAT_READY_CHANNELS 0x00FF0000
+#define BF_DCP_STAT_READY_CHANNELS(v) \
+ (((v) << 16) & BM_DCP_STAT_READY_CHANNELS)
+#define BV_DCP_STAT_READY_CHANNELS__CH0 0x01
+#define BV_DCP_STAT_READY_CHANNELS__CH1 0x02
+#define BV_DCP_STAT_READY_CHANNELS__CH2 0x04
+#define BV_DCP_STAT_READY_CHANNELS__CH3 0x08
+#define BP_DCP_STAT_RSVD1 9
+#define BM_DCP_STAT_RSVD1 0x0000FE00
+#define BF_DCP_STAT_RSVD1(v) \
+ (((v) << 9) & BM_DCP_STAT_RSVD1)
+#define BM_DCP_STAT_CSCIRQ 0x00000100
+#define BP_DCP_STAT_RSVD0 4
+#define BM_DCP_STAT_RSVD0 0x000000F0
+#define BF_DCP_STAT_RSVD0(v) \
+ (((v) << 4) & BM_DCP_STAT_RSVD0)
+#define BP_DCP_STAT_IRQ 0
+#define BM_DCP_STAT_IRQ 0x0000000F
+#define BF_DCP_STAT_IRQ(v) \
+ (((v) << 0) & BM_DCP_STAT_IRQ)
+
+#define HW_DCP_CHANNELCTRL (0x00000020)
+#define HW_DCP_CHANNELCTRL_SET (0x00000024)
+#define HW_DCP_CHANNELCTRL_CLR (0x00000028)
+#define HW_DCP_CHANNELCTRL_TOG (0x0000002c)
+
+#define BP_DCP_CHANNELCTRL_RSVD 19
+#define BM_DCP_CHANNELCTRL_RSVD 0xFFF80000
+#define BF_DCP_CHANNELCTRL_RSVD(v) \
+ (((v) << 19) & BM_DCP_CHANNELCTRL_RSVD)
+#define BP_DCP_CHANNELCTRL_CSC_PRIORITY 17
+#define BM_DCP_CHANNELCTRL_CSC_PRIORITY 0x00060000
+#define BF_DCP_CHANNELCTRL_CSC_PRIORITY(v) \
+ (((v) << 17) & BM_DCP_CHANNELCTRL_CSC_PRIORITY)
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__HIGH 0x3
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__MED 0x2
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__LOW 0x1
+#define BV_DCP_CHANNELCTRL_CSC_PRIORITY__BACKGROUND 0x0
+#define BM_DCP_CHANNELCTRL_CH0_IRQ_MERGED 0x00010000
+#define BP_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL 8
+#define BM_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL 0x0000FF00
+#define BF_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL(v) \
+ (((v) << 8) & BM_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL)
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH0 0x01
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH1 0x02
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH2 0x04
+#define BV_DCP_CHANNELCTRL_HIGH_PRIORITY_CHANNEL__CH3 0x08
+#define BP_DCP_CHANNELCTRL_ENABLE_CHANNEL 0
+#define BM_DCP_CHANNELCTRL_ENABLE_CHANNEL 0x000000FF
+#define BF_DCP_CHANNELCTRL_ENABLE_CHANNEL(v) \
+ (((v) << 0) & BM_DCP_CHANNELCTRL_ENABLE_CHANNEL)
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH0 0x01
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH1 0x02
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH2 0x04
+#define BV_DCP_CHANNELCTRL_ENABLE_CHANNEL__CH3 0x08
+
+#define HW_DCP_CAPABILITY0 (0x00000030)
+
+#define BM_DCP_CAPABILITY0_DISABLE_DECRYPT 0x80000000
+#define BM_DCP_CAPABILITY0_ENABLE_TZONE 0x40000000
+#define BP_DCP_CAPABILITY0_RSVD 12
+#define BM_DCP_CAPABILITY0_RSVD 0x3FFFF000
+#define BF_DCP_CAPABILITY0_RSVD(v) \
+ (((v) << 12) & BM_DCP_CAPABILITY0_RSVD)
+#define BP_DCP_CAPABILITY0_NUM_CHANNELS 8
+#define BM_DCP_CAPABILITY0_NUM_CHANNELS 0x00000F00
+#define BF_DCP_CAPABILITY0_NUM_CHANNELS(v) \
+ (((v) << 8) & BM_DCP_CAPABILITY0_NUM_CHANNELS)
+#define BP_DCP_CAPABILITY0_NUM_KEYS 0
+#define BM_DCP_CAPABILITY0_NUM_KEYS 0x000000FF
+#define BF_DCP_CAPABILITY0_NUM_KEYS(v) \
+ (((v) << 0) & BM_DCP_CAPABILITY0_NUM_KEYS)
+
+#define HW_DCP_CAPABILITY1 (0x00000040)
+
+#define BP_DCP_CAPABILITY1_HASH_ALGORITHMS 16
+#define BM_DCP_CAPABILITY1_HASH_ALGORITHMS 0xFFFF0000
+#define BF_DCP_CAPABILITY1_HASH_ALGORITHMS(v) \
+ (((v) << 16) & BM_DCP_CAPABILITY1_HASH_ALGORITHMS)
+#define BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA1 0x0001
+#define BV_DCP_CAPABILITY1_HASH_ALGORITHMS__CRC32 0x0002
+#define BV_DCP_CAPABILITY1_HASH_ALGORITHMS__SHA256 0x0004
+#define BP_DCP_CAPABILITY1_CIPHER_ALGORITHMS 0
+#define BM_DCP_CAPABILITY1_CIPHER_ALGORITHMS 0x0000FFFF
+#define BF_DCP_CAPABILITY1_CIPHER_ALGORITHMS(v) \
+ (((v) << 0) & BM_DCP_CAPABILITY1_CIPHER_ALGORITHMS)
+#define BV_DCP_CAPABILITY1_CIPHER_ALGORITHMS__AES128 0x0001
+
+#define HW_DCP_CONTEXT (0x00000050)
+
+#define BP_DCP_CONTEXT_ADDR 0
+#define BM_DCP_CONTEXT_ADDR 0xFFFFFFFF
+#define BF_DCP_CONTEXT_ADDR(v) (v)
+
+#define HW_DCP_KEY (0x00000060)
+
+#define BP_DCP_KEY_RSVD 8
+#define BM_DCP_KEY_RSVD 0xFFFFFF00
+#define BF_DCP_KEY_RSVD(v) \
+ (((v) << 8) & BM_DCP_KEY_RSVD)
+#define BP_DCP_KEY_RSVD_INDEX 6
+#define BM_DCP_KEY_RSVD_INDEX 0x000000C0
+#define BF_DCP_KEY_RSVD_INDEX(v) \
+ (((v) << 6) & BM_DCP_KEY_RSVD_INDEX)
+#define BP_DCP_KEY_INDEX 4
+#define BM_DCP_KEY_INDEX 0x00000030
+#define BF_DCP_KEY_INDEX(v) \
+ (((v) << 4) & BM_DCP_KEY_INDEX)
+#define BP_DCP_KEY_RSVD_SUBWORD 2
+#define BM_DCP_KEY_RSVD_SUBWORD 0x0000000C
+#define BF_DCP_KEY_RSVD_SUBWORD(v) \
+ (((v) << 2) & BM_DCP_KEY_RSVD_SUBWORD)
+#define BP_DCP_KEY_SUBWORD 0
+#define BM_DCP_KEY_SUBWORD 0x00000003
+#define BF_DCP_KEY_SUBWORD(v) \
+ (((v) << 0) & BM_DCP_KEY_SUBWORD)
+
+#define HW_DCP_KEYDATA (0x00000070)
+
+#define BP_DCP_KEYDATA_DATA 0
+#define BM_DCP_KEYDATA_DATA 0xFFFFFFFF
+#define BF_DCP_KEYDATA_DATA(v) (v)
+
+#define HW_DCP_PACKET0 (0x00000080)
+
+#define BP_DCP_PACKET0_ADDR 0
+#define BM_DCP_PACKET0_ADDR 0xFFFFFFFF
+#define BF_DCP_PACKET0_ADDR(v) (v)
+
+#define HW_DCP_PACKET1 (0x00000090)
+
+#define BP_DCP_PACKET1_TAG 24
+#define BM_DCP_PACKET1_TAG 0xFF000000
+#define BF_DCP_PACKET1_TAG(v) \
+ (((v) << 24) & BM_DCP_PACKET1_TAG)
+#define BM_DCP_PACKET1_OUTPUT_WORDSWAP 0x00800000
+#define BM_DCP_PACKET1_OUTPUT_BYTESWAP 0x00400000
+#define BM_DCP_PACKET1_INPUT_WORDSWAP 0x00200000
+#define BM_DCP_PACKET1_INPUT_BYTESWAP 0x00100000
+#define BM_DCP_PACKET1_KEY_WORDSWAP 0x00080000
+#define BM_DCP_PACKET1_KEY_BYTESWAP 0x00040000
+#define BM_DCP_PACKET1_TEST_SEMA_IRQ 0x00020000
+#define BM_DCP_PACKET1_CONSTANT_FILL 0x00010000
+#define BM_DCP_PACKET1_HASH_OUTPUT 0x00008000
+#define BV_DCP_PACKET1_HASH_OUTPUT__INPUT 0x00
+#define BV_DCP_PACKET1_HASH_OUTPUT__OUTPUT 0x01
+#define BM_DCP_PACKET1_CHECK_HASH 0x00004000
+#define BM_DCP_PACKET1_HASH_TERM 0x00002000
+#define BM_DCP_PACKET1_HASH_INIT 0x00001000
+#define BM_DCP_PACKET1_PAYLOAD_KEY 0x00000800
+#define BM_DCP_PACKET1_OTP_KEY 0x00000400
+#define BM_DCP_PACKET1_CIPHER_INIT 0x00000200
+#define BM_DCP_PACKET1_CIPHER_ENCRYPT 0x00000100
+#define BV_DCP_PACKET1_CIPHER_ENCRYPT__ENCRYPT 0x01
+#define BV_DCP_PACKET1_CIPHER_ENCRYPT__DECRYPT 0x00
+#define BM_DCP_PACKET1_ENABLE_BLIT 0x00000080
+#define BM_DCP_PACKET1_ENABLE_HASH 0x00000040
+#define BM_DCP_PACKET1_ENABLE_CIPHER 0x00000020
+#define BM_DCP_PACKET1_ENABLE_MEMCOPY 0x00000010
+#define BM_DCP_PACKET1_CHAIN_CONTIGUOUS 0x00000008
+#define BM_DCP_PACKET1_CHAIN 0x00000004
+#define BM_DCP_PACKET1_DECR_SEMAPHORE 0x00000002
+#define BM_DCP_PACKET1_INTERRUPT 0x00000001
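+
+/*
+ * Illustrative sketch only (not part of this header): the PACKET1 bits
+ * above form the CONTROL0 word of a DCP work packet.  For a single AES
+ * encryption descriptor that raises an interrupt and decrements the
+ * channel semaphore on completion, the word might be composed as:
+ *
+ *	u32 ctrl0 = BM_DCP_PACKET1_DECR_SEMAPHORE |
+ *		    BM_DCP_PACKET1_INTERRUPT |
+ *		    BM_DCP_PACKET1_ENABLE_CIPHER |
+ *		    BM_DCP_PACKET1_CIPHER_ENCRYPT |
+ *		    BM_DCP_PACKET1_CIPHER_INIT;
+ */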
+
+#define HW_DCP_PACKET2 (0x000000a0)
+
+#define BP_DCP_PACKET2_CIPHER_CFG 24
+#define BM_DCP_PACKET2_CIPHER_CFG 0xFF000000
+#define BF_DCP_PACKET2_CIPHER_CFG(v) \
+ (((v) << 24) & BM_DCP_PACKET2_CIPHER_CFG)
+#define BP_DCP_PACKET2_RSVD 20
+#define BM_DCP_PACKET2_RSVD 0x00F00000
+#define BF_DCP_PACKET2_RSVD(v) \
+ (((v) << 20) & BM_DCP_PACKET2_RSVD)
+#define BP_DCP_PACKET2_HASH_SELECT 16
+#define BM_DCP_PACKET2_HASH_SELECT 0x000F0000
+#define BF_DCP_PACKET2_HASH_SELECT(v) \
+ (((v) << 16) & BM_DCP_PACKET2_HASH_SELECT)
+#define BV_DCP_PACKET2_HASH_SELECT__SHA1 0x00
+#define BV_DCP_PACKET2_HASH_SELECT__CRC32 0x01
+#define BV_DCP_PACKET2_HASH_SELECT__SHA256 0x02
+#define BP_DCP_PACKET2_KEY_SELECT 8
+#define BM_DCP_PACKET2_KEY_SELECT 0x0000FF00
+#define BF_DCP_PACKET2_KEY_SELECT(v) \
+ (((v) << 8) & BM_DCP_PACKET2_KEY_SELECT)
+#define BP_DCP_PACKET2_CIPHER_MODE 4
+#define BM_DCP_PACKET2_CIPHER_MODE 0x000000F0
+#define BF_DCP_PACKET2_CIPHER_MODE(v) \
+ (((v) << 4) & BM_DCP_PACKET2_CIPHER_MODE)
+#define BV_DCP_PACKET2_CIPHER_MODE__ECB 0x00
+#define BV_DCP_PACKET2_CIPHER_MODE__CBC 0x01
+#define BP_DCP_PACKET2_CIPHER_SELECT 0
+#define BM_DCP_PACKET2_CIPHER_SELECT 0x0000000F
+#define BF_DCP_PACKET2_CIPHER_SELECT(v) \
+ (((v) << 0) & BM_DCP_PACKET2_CIPHER_SELECT)
+#define BV_DCP_PACKET2_CIPHER_SELECT__AES128 0x00
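+
+/*
+ * Illustrative sketch only (not part of this header): the PACKET2 fields
+ * above form the CONTROL1 word of a work packet.  Selecting AES-128 in
+ * CBC mode with key slot 0 might look like:
+ *
+ *	u32 ctrl1 = BF_DCP_PACKET2_CIPHER_SELECT(
+ *			BV_DCP_PACKET2_CIPHER_SELECT__AES128) |
+ *		    BF_DCP_PACKET2_CIPHER_MODE(
+ *			BV_DCP_PACKET2_CIPHER_MODE__CBC) |
+ *		    BF_DCP_PACKET2_KEY_SELECT(0);
+ */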
+
+#define HW_DCP_PACKET3 (0x000000b0)
+
+#define BP_DCP_PACKET3_ADDR 0
+#define BM_DCP_PACKET3_ADDR 0xFFFFFFFF
+#define BF_DCP_PACKET3_ADDR(v) (v)
+
+#define HW_DCP_PACKET4 (0x000000c0)
+
+#define BP_DCP_PACKET4_ADDR 0
+#define BM_DCP_PACKET4_ADDR 0xFFFFFFFF
+#define BF_DCP_PACKET4_ADDR(v) (v)
+
+#define HW_DCP_PACKET5 (0x000000d0)
+
+#define BP_DCP_PACKET5_COUNT 0
+#define BM_DCP_PACKET5_COUNT 0xFFFFFFFF
+#define BF_DCP_PACKET5_COUNT(v) (v)
+
+#define HW_DCP_PACKET6 (0x000000e0)
+
+#define BP_DCP_PACKET6_ADDR 0
+#define BM_DCP_PACKET6_ADDR 0xFFFFFFFF
+#define BF_DCP_PACKET6_ADDR(v) (v)
+
+/*
+ * multi-register-define name HW_DCP_CHnCMDPTR
+ * base 0x00000100
+ * count 4
+ * offset 0x40
+ */
+#define HW_DCP_CHnCMDPTR(n) (0x00000100 + (n) * 0x40)
+
+#define BP_DCP_CHnCMDPTR_ADDR 0
+#define BM_DCP_CHnCMDPTR_ADDR 0xFFFFFFFF
+#define BF_DCP_CHnCMDPTR_ADDR(v) (v)
+
+/*
+ * multi-register-define name HW_DCP_CHnSEMA
+ * base 0x00000110
+ * count 4
+ * offset 0x40
+ */
+#define HW_DCP_CHnSEMA(n) (0x00000110 + (n) * 0x40)
+
+#define BP_DCP_CHnSEMA_RSVD2 24
+#define BM_DCP_CHnSEMA_RSVD2 0xFF000000
+#define BF_DCP_CHnSEMA_RSVD2(v) \
+ (((v) << 24) & BM_DCP_CHnSEMA_RSVD2)
+#define BP_DCP_CHnSEMA_VALUE 16
+#define BM_DCP_CHnSEMA_VALUE 0x00FF0000
+#define BF_DCP_CHnSEMA_VALUE(v) \
+ (((v) << 16) & BM_DCP_CHnSEMA_VALUE)
+#define BP_DCP_CHnSEMA_RSVD1 8
+#define BM_DCP_CHnSEMA_RSVD1 0x0000FF00
+#define BF_DCP_CHnSEMA_RSVD1(v) \
+ (((v) << 8) & BM_DCP_CHnSEMA_RSVD1)
+#define BP_DCP_CHnSEMA_INCREMENT 0
+#define BM_DCP_CHnSEMA_INCREMENT 0x000000FF
+#define BF_DCP_CHnSEMA_INCREMENT(v) \
+ (((v) << 0) & BM_DCP_CHnSEMA_INCREMENT)
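+
+/*
+ * Illustrative sketch only (not part of this header): to start a channel,
+ * a driver typically writes the physical address of the first work packet
+ * to HW_DCP_CHnCMDPTR and then increments the channel semaphore once per
+ * queued packet.  Assuming "base" is the ioremapped DCP register base,
+ * "chan" the channel number and "pkt_phys" the DMA address of the packet:
+ *
+ *	__raw_writel(pkt_phys, base + HW_DCP_CHnCMDPTR(chan));
+ *	__raw_writel(BF_DCP_CHnSEMA_INCREMENT(1),
+ *		     base + HW_DCP_CHnSEMA(chan));
+ */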
+
+/*
+ * multi-register-define name HW_DCP_CHnSTAT
+ * base 0x00000120
+ * count 4
+ * offset 0x40
+ */
+#define HW_DCP_CHnSTAT(n) (0x00000120 + (n) * 0x40)
+#define HW_DCP_CHnSTAT_SET(n) (0x00000124 + (n) * 0x40)
+#define HW_DCP_CHnSTAT_CLR(n) (0x00000128 + (n) * 0x40)
+#define HW_DCP_CHnSTAT_TOG(n) (0x0000012c + (n) * 0x40)
+
+#define BP_DCP_CHnSTAT_TAG 24
+#define BM_DCP_CHnSTAT_TAG 0xFF000000
+#define BF_DCP_CHnSTAT_TAG(v) \
+ (((v) << 24) & BM_DCP_CHnSTAT_TAG)
+#define BP_DCP_CHnSTAT_ERROR_CODE 16
+#define BM_DCP_CHnSTAT_ERROR_CODE 0x00FF0000
+#define BF_DCP_CHnSTAT_ERROR_CODE(v) \
+ (((v) << 16) & BM_DCP_CHnSTAT_ERROR_CODE)
+#define BV_DCP_CHnSTAT_ERROR_CODE__NEXT_CHAIN_IS_0 0x01
+#define BV_DCP_CHnSTAT_ERROR_CODE__NO_CHAIN 0x02
+#define BV_DCP_CHnSTAT_ERROR_CODE__CONTEXT_ERROR 0x03
+#define BV_DCP_CHnSTAT_ERROR_CODE__PAYLOAD_ERROR 0x04
+#define BV_DCP_CHnSTAT_ERROR_CODE__INVALID_MODE 0x05
+#define BP_DCP_CHnSTAT_RSVD0 7
+#define BM_DCP_CHnSTAT_RSVD0 0x0000FF80
+#define BF_DCP_CHnSTAT_RSVD0(v) \
+ (((v) << 7) & BM_DCP_CHnSTAT_RSVD0)
+#define BM_DCP_CHnSTAT_ERROR_PAGEFAULT 0x00000040
+#define BM_DCP_CHnSTAT_ERROR_DST 0x00000020
+#define BM_DCP_CHnSTAT_ERROR_SRC 0x00000010
+#define BM_DCP_CHnSTAT_ERROR_PACKET 0x00000008
+#define BM_DCP_CHnSTAT_ERROR_SETUP 0x00000004
+#define BM_DCP_CHnSTAT_HASH_MISMATCH 0x00000002
+#define BM_DCP_CHnSTAT_RSVD_COMPLETE 0x00000001
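+
+/*
+ * Illustrative sketch only (not part of this header): after completion,
+ * the channel status can be checked for errors.  Assuming "base" and
+ * "chan" as above:
+ *
+ *	u32 stat = __raw_readl(base + HW_DCP_CHnSTAT(chan));
+ *
+ *	if (stat & BM_DCP_CHnSTAT_ERROR_CODE)
+ *		pr_err("dcp: channel error 0x%02x\n",
+ *		       (stat & BM_DCP_CHnSTAT_ERROR_CODE) >>
+ *		       BP_DCP_CHnSTAT_ERROR_CODE);
+ */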
+
+/*
+ * multi-register-define name HW_DCP_CHnOPTS
+ * base 0x00000130
+ * count 4
+ * offset 0x40
+ */
+#define HW_DCP_CHnOPTS(n) (0x00000130 + (n) * 0x40)
+#define HW_DCP_CHnOPTS_SET(n) (0x00000134 + (n) * 0x40)
+#define HW_DCP_CHnOPTS_CLR(n) (0x00000138 + (n) * 0x40)
+#define HW_DCP_CHnOPTS_TOG(n) (0x0000013c + (n) * 0x40)
+
+#define BP_DCP_CHnOPTS_RSVD 16
+#define BM_DCP_CHnOPTS_RSVD 0xFFFF0000
+#define BF_DCP_CHnOPTS_RSVD(v) \
+ (((v) << 16) & BM_DCP_CHnOPTS_RSVD)
+#define BP_DCP_CHnOPTS_RECOVERY_TIMER 0
+#define BM_DCP_CHnOPTS_RECOVERY_TIMER 0x0000FFFF
+#define BF_DCP_CHnOPTS_RECOVERY_TIMER(v) \
+ (((v) << 0) & BM_DCP_CHnOPTS_RECOVERY_TIMER)
+
+#define HW_DCP_CSCCTRL0 (0x00000300)
+#define HW_DCP_CSCCTRL0_SET (0x00000304)
+#define HW_DCP_CSCCTRL0_CLR (0x00000308)
+#define HW_DCP_CSCCTRL0_TOG (0x0000030c)
+
+#define BP_DCP_CSCCTRL0_RSVD1 16
+#define BM_DCP_CSCCTRL0_RSVD1 0xFFFF0000
+#define BF_DCP_CSCCTRL0_RSVD1(v) \
+ (((v) << 16) & BM_DCP_CSCCTRL0_RSVD1)
+#define BM_DCP_CSCCTRL0_CLIP 0x00008000
+#define BM_DCP_CSCCTRL0_UPSAMPLE 0x00004000
+#define BM_DCP_CSCCTRL0_SCALE 0x00002000
+#define BM_DCP_CSCCTRL0_ROTATE 0x00001000
+#define BM_DCP_CSCCTRL0_SUBSAMPLE 0x00000800
+#define BM_DCP_CSCCTRL0_DELTA 0x00000400
+#define BP_DCP_CSCCTRL0_RGB_FORMAT 8
+#define BM_DCP_CSCCTRL0_RGB_FORMAT 0x00000300
+#define BF_DCP_CSCCTRL0_RGB_FORMAT(v) \
+ (((v) << 8) & BM_DCP_CSCCTRL0_RGB_FORMAT)
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__RGB16_565 0x0
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__YCbCrI 0x1
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__RGB24 0x2
+#define BV_DCP_CSCCTRL0_RGB_FORMAT__YUV422I 0x3
+#define BP_DCP_CSCCTRL0_YUV_FORMAT 4
+#define BM_DCP_CSCCTRL0_YUV_FORMAT 0x000000F0
+#define BF_DCP_CSCCTRL0_YUV_FORMAT(v) \
+ (((v) << 4) & BM_DCP_CSCCTRL0_YUV_FORMAT)
+#define BV_DCP_CSCCTRL0_YUV_FORMAT__YUV420 0x0
+#define BV_DCP_CSCCTRL0_YUV_FORMAT__YUV422 0x2
+#define BP_DCP_CSCCTRL0_RSVD0 1
+#define BM_DCP_CSCCTRL0_RSVD0 0x0000000E
+#define BF_DCP_CSCCTRL0_RSVD0(v) \
+ (((v) << 1) & BM_DCP_CSCCTRL0_RSVD0)
+#define BM_DCP_CSCCTRL0_ENABLE 0x00000001
+
+#define HW_DCP_CSCSTAT (0x00000310)
+#define HW_DCP_CSCSTAT_SET (0x00000314)
+#define HW_DCP_CSCSTAT_CLR (0x00000318)
+#define HW_DCP_CSCSTAT_TOG (0x0000031c)
+
+#define BP_DCP_CSCSTAT_RSVD3 24
+#define BM_DCP_CSCSTAT_RSVD3 0xFF000000
+#define BF_DCP_CSCSTAT_RSVD3(v) \
+ (((v) << 24) & BM_DCP_CSCSTAT_RSVD3)
+#define BP_DCP_CSCSTAT_ERROR_CODE 16
+#define BM_DCP_CSCSTAT_ERROR_CODE 0x00FF0000
+#define BF_DCP_CSCSTAT_ERROR_CODE(v) \
+ (((v) << 16) & BM_DCP_CSCSTAT_ERROR_CODE)
+#define BV_DCP_CSCSTAT_ERROR_CODE__LUMA0_FETCH_ERROR_Y0 0x01
+#define BV_DCP_CSCSTAT_ERROR_CODE__LUMA1_FETCH_ERROR_Y1 0x02
+#define BV_DCP_CSCSTAT_ERROR_CODE__CHROMA_FETCH_ERROR_U 0x03
+#define BV_DCP_CSCSTAT_ERROR_CODE__CHROMA_FETCH_ERROR_V 0x04
+#define BP_DCP_CSCSTAT_RSVD2 7
+#define BM_DCP_CSCSTAT_RSVD2 0x0000FF80
+#define BF_DCP_CSCSTAT_RSVD2(v) \
+ (((v) << 7) & BM_DCP_CSCSTAT_RSVD2)
+#define BM_DCP_CSCSTAT_ERROR_PAGEFAULT 0x00000040
+#define BM_DCP_CSCSTAT_ERROR_DST 0x00000020
+#define BM_DCP_CSCSTAT_ERROR_SRC 0x00000010
+#define BM_DCP_CSCSTAT_RSVD1 0x00000008
+#define BM_DCP_CSCSTAT_ERROR_SETUP 0x00000004
+#define BM_DCP_CSCSTAT_RSVD0 0x00000002
+#define BM_DCP_CSCSTAT_COMPLETE 0x00000001
+
+#define HW_DCP_CSCOUTBUFPARAM (0x00000320)
+
+#define BP_DCP_CSCOUTBUFPARAM_RSVD1 24
+#define BM_DCP_CSCOUTBUFPARAM_RSVD1 0xFF000000
+#define BF_DCP_CSCOUTBUFPARAM_RSVD1(v) \
+ (((v) << 24) & BM_DCP_CSCOUTBUFPARAM_RSVD1)
+#define BP_DCP_CSCOUTBUFPARAM_FIELD_SIZE 12
+#define BM_DCP_CSCOUTBUFPARAM_FIELD_SIZE 0x00FFF000
+#define BF_DCP_CSCOUTBUFPARAM_FIELD_SIZE(v) \
+ (((v) << 12) & BM_DCP_CSCOUTBUFPARAM_FIELD_SIZE)
+#define BP_DCP_CSCOUTBUFPARAM_LINE_SIZE 0
+#define BM_DCP_CSCOUTBUFPARAM_LINE_SIZE 0x00000FFF
+#define BF_DCP_CSCOUTBUFPARAM_LINE_SIZE(v) \
+ (((v) << 0) & BM_DCP_CSCOUTBUFPARAM_LINE_SIZE)
+
+#define HW_DCP_CSCINBUFPARAM (0x00000330)
+
+#define BP_DCP_CSCINBUFPARAM_RSVD1 12
+#define BM_DCP_CSCINBUFPARAM_RSVD1 0xFFFFF000
+#define BF_DCP_CSCINBUFPARAM_RSVD1(v) \
+ (((v) << 12) & BM_DCP_CSCINBUFPARAM_RSVD1)
+#define BP_DCP_CSCINBUFPARAM_LINE_SIZE 0
+#define BM_DCP_CSCINBUFPARAM_LINE_SIZE 0x00000FFF
+#define BF_DCP_CSCINBUFPARAM_LINE_SIZE(v) \
+ (((v) << 0) & BM_DCP_CSCINBUFPARAM_LINE_SIZE)
+
+#define HW_DCP_CSCRGB (0x00000340)
+
+#define BP_DCP_CSCRGB_ADDR 0
+#define BM_DCP_CSCRGB_ADDR 0xFFFFFFFF
+#define BF_DCP_CSCRGB_ADDR(v) (v)
+
+#define HW_DCP_CSCLUMA (0x00000350)
+
+#define BP_DCP_CSCLUMA_ADDR 0
+#define BM_DCP_CSCLUMA_ADDR 0xFFFFFFFF
+#define BF_DCP_CSCLUMA_ADDR(v) (v)
+
+#define HW_DCP_CSCCHROMAU (0x00000360)
+
+#define BP_DCP_CSCCHROMAU_ADDR 0
+#define BM_DCP_CSCCHROMAU_ADDR 0xFFFFFFFF
+#define BF_DCP_CSCCHROMAU_ADDR(v) (v)
+
+#define HW_DCP_CSCCHROMAV (0x00000370)
+
+#define BP_DCP_CSCCHROMAV_ADDR 0
+#define BM_DCP_CSCCHROMAV_ADDR 0xFFFFFFFF
+#define BF_DCP_CSCCHROMAV_ADDR(v) (v)
+
+#define HW_DCP_CSCCOEFF0 (0x00000380)
+
+#define BP_DCP_CSCCOEFF0_RSVD1 26
+#define BM_DCP_CSCCOEFF0_RSVD1 0xFC000000
+#define BF_DCP_CSCCOEFF0_RSVD1(v) \
+ (((v) << 26) & BM_DCP_CSCCOEFF0_RSVD1)
+#define BP_DCP_CSCCOEFF0_C0 16
+#define BM_DCP_CSCCOEFF0_C0 0x03FF0000
+#define BF_DCP_CSCCOEFF0_C0(v) \
+ (((v) << 16) & BM_DCP_CSCCOEFF0_C0)
+#define BP_DCP_CSCCOEFF0_UV_OFFSET 8
+#define BM_DCP_CSCCOEFF0_UV_OFFSET 0x0000FF00
+#define BF_DCP_CSCCOEFF0_UV_OFFSET(v) \
+ (((v) << 8) & BM_DCP_CSCCOEFF0_UV_OFFSET)
+#define BP_DCP_CSCCOEFF0_Y_OFFSET 0
+#define BM_DCP_CSCCOEFF0_Y_OFFSET 0x000000FF
+#define BF_DCP_CSCCOEFF0_Y_OFFSET(v) \
+ (((v) << 0) & BM_DCP_CSCCOEFF0_Y_OFFSET)
+
+#define HW_DCP_CSCCOEFF1 (0x00000390)
+
+#define BP_DCP_CSCCOEFF1_RSVD1 26
+#define BM_DCP_CSCCOEFF1_RSVD1 0xFC000000
+#define BF_DCP_CSCCOEFF1_RSVD1(v) \
+ (((v) << 26) & BM_DCP_CSCCOEFF1_RSVD1)
+#define BP_DCP_CSCCOEFF1_C1 16
+#define BM_DCP_CSCCOEFF1_C1 0x03FF0000
+#define BF_DCP_CSCCOEFF1_C1(v) \
+ (((v) << 16) & BM_DCP_CSCCOEFF1_C1)
+#define BP_DCP_CSCCOEFF1_RSVD0 10
+#define BM_DCP_CSCCOEFF1_RSVD0 0x0000FC00
+#define BF_DCP_CSCCOEFF1_RSVD0(v) \
+ (((v) << 10) & BM_DCP_CSCCOEFF1_RSVD0)
+#define BP_DCP_CSCCOEFF1_C4 0
+#define BM_DCP_CSCCOEFF1_C4 0x000003FF
+#define BF_DCP_CSCCOEFF1_C4(v) \
+ (((v) << 0) & BM_DCP_CSCCOEFF1_C4)
+
+#define HW_DCP_CSCCOEFF2 (0x000003a0)
+
+#define BP_DCP_CSCCOEFF2_RSVD1 26
+#define BM_DCP_CSCCOEFF2_RSVD1 0xFC000000
+#define BF_DCP_CSCCOEFF2_RSVD1(v) \
+ (((v) << 26) & BM_DCP_CSCCOEFF2_RSVD1)
+#define BP_DCP_CSCCOEFF2_C2 16
+#define BM_DCP_CSCCOEFF2_C2 0x03FF0000
+#define BF_DCP_CSCCOEFF2_C2(v) \
+ (((v) << 16) & BM_DCP_CSCCOEFF2_C2)
+#define BP_DCP_CSCCOEFF2_RSVD0 10
+#define BM_DCP_CSCCOEFF2_RSVD0 0x0000FC00
+#define BF_DCP_CSCCOEFF2_RSVD0(v) \
+ (((v) << 10) & BM_DCP_CSCCOEFF2_RSVD0)
+#define BP_DCP_CSCCOEFF2_C3 0
+#define BM_DCP_CSCCOEFF2_C3 0x000003FF
+#define BF_DCP_CSCCOEFF2_C3(v) \
+ (((v) << 0) & BM_DCP_CSCCOEFF2_C3)
+
+#define HW_DCP_CSCCLIP (0x000003d0)
+
+#define BP_DCP_CSCCLIP_RSVD1 24
+#define BM_DCP_CSCCLIP_RSVD1 0xFF000000
+#define BF_DCP_CSCCLIP_RSVD1(v) \
+ (((v) << 24) & BM_DCP_CSCCLIP_RSVD1)
+#define BP_DCP_CSCCLIP_HEIGHT 12
+#define BM_DCP_CSCCLIP_HEIGHT 0x00FFF000
+#define BF_DCP_CSCCLIP_HEIGHT(v) \
+ (((v) << 12) & BM_DCP_CSCCLIP_HEIGHT)
+#define BP_DCP_CSCCLIP_WIDTH 0
+#define BM_DCP_CSCCLIP_WIDTH 0x00000FFF
+#define BF_DCP_CSCCLIP_WIDTH(v) \
+ (((v) << 0) & BM_DCP_CSCCLIP_WIDTH)
+
+#define HW_DCP_CSCXSCALE (0x000003e0)
+
+#define BP_DCP_CSCXSCALE_RSVD1 26
+#define BM_DCP_CSCXSCALE_RSVD1 0xFC000000
+#define BF_DCP_CSCXSCALE_RSVD1(v) \
+ (((v) << 26) & BM_DCP_CSCXSCALE_RSVD1)
+#define BP_DCP_CSCXSCALE_INT 24
+#define BM_DCP_CSCXSCALE_INT 0x03000000
+#define BF_DCP_CSCXSCALE_INT(v) \
+ (((v) << 24) & BM_DCP_CSCXSCALE_INT)
+#define BP_DCP_CSCXSCALE_FRAC 12
+#define BM_DCP_CSCXSCALE_FRAC 0x00FFF000
+#define BF_DCP_CSCXSCALE_FRAC(v) \
+ (((v) << 12) & BM_DCP_CSCXSCALE_FRAC)
+#define BP_DCP_CSCXSCALE_WIDTH 0
+#define BM_DCP_CSCXSCALE_WIDTH 0x00000FFF
+#define BF_DCP_CSCXSCALE_WIDTH(v) \
+ (((v) << 0) & BM_DCP_CSCXSCALE_WIDTH)
+
+#define HW_DCP_CSCYSCALE (0x000003f0)
+
+#define BP_DCP_CSCYSCALE_RSVD1 26
+#define BM_DCP_CSCYSCALE_RSVD1 0xFC000000
+#define BF_DCP_CSCYSCALE_RSVD1(v) \
+ (((v) << 26) & BM_DCP_CSCYSCALE_RSVD1)
+#define BP_DCP_CSCYSCALE_INT 24
+#define BM_DCP_CSCYSCALE_INT 0x03000000
+#define BF_DCP_CSCYSCALE_INT(v) \
+ (((v) << 24) & BM_DCP_CSCYSCALE_INT)
+#define BP_DCP_CSCYSCALE_FRAC 12
+#define BM_DCP_CSCYSCALE_FRAC 0x00FFF000
+#define BF_DCP_CSCYSCALE_FRAC(v) \
+ (((v) << 12) & BM_DCP_CSCYSCALE_FRAC)
+#define BP_DCP_CSCYSCALE_HEIGHT 0
+#define BM_DCP_CSCYSCALE_HEIGHT 0x00000FFF
+#define BF_DCP_CSCYSCALE_HEIGHT(v) \
+ (((v) << 0) & BM_DCP_CSCYSCALE_HEIGHT)
+
+#define HW_DCP_DBGSELECT (0x00000400)
+
+#define BP_DCP_DBGSELECT_RSVD 8
+#define BM_DCP_DBGSELECT_RSVD 0xFFFFFF00
+#define BF_DCP_DBGSELECT_RSVD(v) \
+ (((v) << 8) & BM_DCP_DBGSELECT_RSVD)
+#define BP_DCP_DBGSELECT_INDEX 0
+#define BM_DCP_DBGSELECT_INDEX 0x000000FF
+#define BF_DCP_DBGSELECT_INDEX(v) \
+ (((v) << 0) & BM_DCP_DBGSELECT_INDEX)
+#define BV_DCP_DBGSELECT_INDEX__CONTROL 0x01
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY0 0x10
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY1 0x11
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY2 0x12
+#define BV_DCP_DBGSELECT_INDEX__OTPKEY3 0x13
+
+#define HW_DCP_DBGDATA (0x00000410)
+
+#define BP_DCP_DBGDATA_DATA 0
+#define BM_DCP_DBGDATA_DATA 0xFFFFFFFF
+#define BF_DCP_DBGDATA_DATA(v) (v)
+
+#define HW_DCP_PAGETABLE (0x00000420)
+
+#define BP_DCP_PAGETABLE_BASE 2
+#define BM_DCP_PAGETABLE_BASE 0xFFFFFFFC
+#define BF_DCP_PAGETABLE_BASE(v) \
+ (((v) << 2) & BM_DCP_PAGETABLE_BASE)
+#define BM_DCP_PAGETABLE_FLUSH 0x00000002
+#define BM_DCP_PAGETABLE_ENABLE 0x00000001
+
+#define HW_DCP_VERSION (0x00000430)
+
+#define BP_DCP_VERSION_MAJOR 24
+#define BM_DCP_VERSION_MAJOR 0xFF000000
+#define BF_DCP_VERSION_MAJOR(v) \
+ (((v) << 24) & BM_DCP_VERSION_MAJOR)
+#define BP_DCP_VERSION_MINOR 16
+#define BM_DCP_VERSION_MINOR 0x00FF0000
+#define BF_DCP_VERSION_MINOR(v) \
+ (((v) << 16) & BM_DCP_VERSION_MINOR)
+#define BP_DCP_VERSION_STEP 0
+#define BM_DCP_VERSION_STEP 0x0000FFFF
+#define BF_DCP_VERSION_STEP(v) \
+ (((v) << 0) & BM_DCP_VERSION_STEP)
+
+#endif