Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                                          |   4
-rw-r--r--  crypto/Makefile                                         |   2
-rw-r--r--  crypto/ahash.c                                          | 147
-rw-r--r--  crypto/blkcipher.c                                      |  81
-rw-r--r--  crypto/crc32c_generic.c (renamed from crypto/crc32c.c)  |   2
-rw-r--r--  crypto/crypto_null.c                                    |   6
-rw-r--r--  crypto/crypto_wq.c                                      |   2
-rw-r--r--  crypto/tcrypt.c                                         |   8
-rw-r--r--  crypto/testmgr.c                                        |  32
-rw-r--r--  crypto/testmgr.h                                        | 180
10 files changed, 368 insertions, 96 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 7bcb70d216e1..ce4012a58781 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -491,14 +491,14 @@ config CRYPTO_SHA1
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
config CRYPTO_SHA1_SSSE3
- tristate "SHA1 digest algorithm (SSSE3/AVX)"
+ tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2)"
depends on X86 && 64BIT
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
- Extensions (AVX), when available.
+ Extensions (AVX/AVX2), when available.
config CRYPTO_SHA256_SSSE3
tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2)"
diff --git a/crypto/Makefile b/crypto/Makefile
index b29402a7b9b5..38e64231dcd3 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -81,7 +81,7 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
-obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
+obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a92dc382f781..6e7223392e80 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -190,6 +190,75 @@ static inline unsigned int ahash_align_buffer_size(unsigned len,
return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
+static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
+
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * WARNING: Voodoo programming below!
+ *
+ * The code below is obscure and hard to understand, thus explanation
+ * is necessary. See include/crypto/hash.h and include/linux/crypto.h
+ * to understand the layout of structures used here!
+ *
+ * The code here will replace portions of the ORIGINAL request with
+ * pointers to new code and buffers so the hashing operation can store
+ * the result in an aligned buffer. We will call the modified request
+ * an ADJUSTED request.
+ *
+ * The newly mangled request will look like this:
+ *
+ * req {
+ *   .result        = ADJUSTED[new aligned buffer]
+ *   .base.complete = ADJUSTED[pointer to completion function]
+ *   .base.data     = ADJUSTED[*req (pointer to self)]
+ *   .priv          = ADJUSTED[new priv] {
+ *     .result   = ORIGINAL(result)
+ *     .complete = ORIGINAL(base.complete)
+ *     .data     = ORIGINAL(base.data)
+ *   }
+ * }
+ */
+
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
+ /*
+ * WARNING: We do not back up req->priv here! The req->priv
+ * is for internal use of the Crypto API and the
+ * user must _NOT_ _EVER_ depend on its contents!
+ */
+
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = cplt;
+ req->base.data = req;
+ req->priv = priv;
+
+ return 0;
+}
+
+static void ahash_restore_req(struct ahash_request *req)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ /* Restore the original crypto request. */
+ req->result = priv->result;
+ req->base.complete = priv->complete;
+ req->base.data = priv->data;
+ req->priv = NULL;
+
+ /* Free the priv of the ADJUSTED request, allocated by ahash_save_req(). */
+ kzfree(priv);
+}
+
static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
@@ -201,47 +270,37 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
- kzfree(priv);
+ ahash_restore_req(req);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- struct ahash_request_priv *priv = areq->priv;
- crypto_completion_t complete = priv->complete;
- void *data = priv->data;
- ahash_op_unaligned_finish(areq, err);
+ /*
+ * Restore the original request, see ahash_op_unaligned() for what
+ * goes where.
+ *
+ * The "struct ahash_request *req" here is in fact the "req.base"
+ * from the ADJUSTED request from ahash_op_unaligned(), thus as it
+ * is a pointer to self, it is also the ADJUSTED "req" .
+ */
- areq->base.complete = complete;
- areq->base.data = data;
+ /* First copy req->result into req->priv.result */
+ ahash_op_unaligned_finish(areq, err);
- complete(&areq->base, err);
+ /* Complete the ORIGINAL request. */
+ areq->base.complete(&areq->base, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
int (*op)(struct ahash_request *))
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
int err;
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
- return -ENOMEM;
-
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
-
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = ahash_op_unaligned_done;
- req->base.data = req;
- req->priv = priv;
+ err = ahash_save_req(req, ahash_op_unaligned_done);
+ if (err)
+ return err;
err = op(req);
ahash_op_unaligned_finish(req, err);
@@ -290,19 +349,16 @@ static void ahash_def_finup_finish2(struct ahash_request *req, int err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
- kzfree(priv);
+ ahash_restore_req(req);
}
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- struct ahash_request_priv *priv = areq->priv;
- crypto_completion_t complete = priv->complete;
- void *data = priv->data;
ahash_def_finup_finish2(areq, err);
- complete(data, err);
+ areq->base.complete(&areq->base, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
@@ -322,38 +378,23 @@ out:
static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- struct ahash_request_priv *priv = areq->priv;
- crypto_completion_t complete = priv->complete;
- void *data = priv->data;
err = ahash_def_finup_finish1(areq, err);
- complete(data, err);
+ areq->base.complete(&areq->base, err);
}
static int ahash_def_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
-
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
- return -ENOMEM;
-
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
+ int err;
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = ahash_def_finup_done1;
- req->base.data = req;
- req->priv = priv;
+ err = ahash_save_req(req, ahash_def_finup_done1);
+ if (err)
+ return err;
- return ahash_def_finup_finish1(req, tfm->update(req));
+ err = tfm->update(req);
+ return ahash_def_finup_finish1(req, err);
}
static int ahash_no_export(struct ahash_request *req, void *out)
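To make the ORIGINAL/ADJUSTED bookkeeping above concrete, here is a minimal userland model of the ahash_save_req()/ahash_restore_req() pattern. The structures, the merged restore-then-complete flow, and the 16-byte digest size are simplified stand-ins for the demo, not the kernel's real ahash API:

/* save_restore_demo.c -- userland model of the request swizzle; illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request;
typedef void (*completion_t)(struct request *req, int err);

struct priv {                 /* plays the role of ahash_request_priv */
    unsigned char *result;    /* ORIGINAL result buffer */
    completion_t complete;    /* ORIGINAL completion callback */
    unsigned char ubuf[64];   /* bounce buffer for the digest */
};

struct request {              /* plays the role of ahash_request */
    unsigned char *result;
    completion_t complete;
    struct priv *priv;
};

static int save_req(struct request *req, completion_t cplt)
{
    struct priv *priv = malloc(sizeof(*priv));
    if (!priv)
        return -1;
    priv->result = req->result;      /* stash the ORIGINAL fields ... */
    priv->complete = req->complete;
    req->result = priv->ubuf;        /* ... and point the request at the */
    req->complete = cplt;            /* bounce buffer + internal callback */
    req->priv = priv;
    return 0;
}

static void restore_req(struct request *req)
{
    struct priv *priv = req->priv;
    memcpy(priv->result, req->result, 16);  /* copy the digest back out
                                             * (length hardcoded for demo) */
    req->result = priv->result;             /* undo the swizzle */
    req->complete = priv->complete;
    req->priv = NULL;
    free(priv);
}

static void user_done(struct request *req, int err)
{
    (void)req;
    printf("user completion called, err=%d\n", err);
}

static void internal_done(struct request *req, int err)
{
    restore_req(req);          /* first restore the ORIGINAL request ... */
    req->complete(req, err);   /* ... then hand off to its completion */
}

int main(void)
{
    unsigned char digest[16] = {0};
    struct request req = { .result = digest, .complete = user_done };

    if (save_req(&req, internal_done))
        return 1;
    memset(req.result, 0xab, 16);  /* the "hash" lands in the bounce buffer */
    internal_done(&req, 0);        /* the async completion fires */
    return 0;
}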
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index a79e7e9ab86e..0122bec38564 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,14 +70,12 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
- struct blkcipher_walk *walk,
+static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
unsigned int bsize)
{
u8 *addr;
- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
- addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
+ addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return bsize;
@@ -105,7 +103,6 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
- struct crypto_blkcipher *tfm = desc->tfm;
unsigned int nbytes = 0;
if (likely(err >= 0)) {
@@ -117,7 +114,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc,
err = -EINVAL;
goto err;
} else
- n = blkcipher_done_slow(tfm, walk, n);
+ n = blkcipher_done_slow(walk, n);
nbytes = walk->total - n;
err = 0;
@@ -136,7 +133,7 @@ err:
}
if (walk->iv != desc->info)
- memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
+ memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
@@ -226,22 +223,20 @@ static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
static int blkcipher_walk_next(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
- struct crypto_blkcipher *tfm = desc->tfm;
- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
unsigned int bsize;
unsigned int n;
int err;
n = walk->total;
- if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
+ if (unlikely(n < walk->cipher_blocksize)) {
desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return blkcipher_walk_done(desc, walk, -EINVAL);
}
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
- if (!scatterwalk_aligned(&walk->in, alignmask) ||
- !scatterwalk_aligned(&walk->out, alignmask)) {
+ if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+ !scatterwalk_aligned(&walk->out, walk->alignmask)) {
walk->flags |= BLKCIPHER_WALK_COPY;
if (!walk->page) {
walk->page = (void *)__get_free_page(GFP_ATOMIC);
@@ -250,12 +245,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
}
}
- bsize = min(walk->blocksize, n);
+ bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
if (unlikely(n < bsize)) {
- err = blkcipher_next_slow(desc, walk, bsize, alignmask);
+ err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
goto set_phys_lowmem;
}
@@ -277,28 +272,26 @@ set_phys_lowmem:
return err;
}
-static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
- struct crypto_blkcipher *tfm,
- unsigned int alignmask)
+static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
- unsigned bs = walk->blocksize;
- unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
- unsigned aligned_bs = ALIGN(bs, alignmask + 1);
- unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
- (alignmask + 1);
+ unsigned bs = walk->walk_blocksize;
+ unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
+ unsigned int size = aligned_bs * 2 +
+ walk->ivsize + max(aligned_bs, walk->ivsize) -
+ (walk->alignmask + 1);
u8 *iv;
- size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+ size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
walk->buffer = kmalloc(size, GFP_ATOMIC);
if (!walk->buffer)
return -ENOMEM;
- iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
+ iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
iv = blkcipher_get_spot(iv, bs) + aligned_bs;
iv = blkcipher_get_spot(iv, bs) + aligned_bs;
- iv = blkcipher_get_spot(iv, ivsize);
+ iv = blkcipher_get_spot(iv, walk->ivsize);
- walk->iv = memcpy(iv, walk->iv, ivsize);
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0;
}
@@ -306,7 +299,10 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
walk->flags &= ~BLKCIPHER_WALK_PHYS;
- walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
+ walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
+ walk->cipher_blocksize = walk->walk_blocksize;
+ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
+ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
@@ -315,7 +311,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
walk->flags |= BLKCIPHER_WALK_PHYS;
- walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
+ walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
+ walk->cipher_blocksize = walk->walk_blocksize;
+ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
+ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
@@ -323,9 +322,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
- struct crypto_blkcipher *tfm = desc->tfm;
- unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
-
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
@@ -335,8 +331,8 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
walk->buffer = NULL;
walk->iv = desc->info;
- if (unlikely(((unsigned long)walk->iv & alignmask))) {
- int err = blkcipher_copy_iv(walk, tfm, alignmask);
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = blkcipher_copy_iv(walk);
if (err)
return err;
}
@@ -353,11 +349,28 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
unsigned int blocksize)
{
walk->flags &= ~BLKCIPHER_WALK_PHYS;
- walk->blocksize = blocksize;
+ walk->walk_blocksize = blocksize;
+ walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
+ walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
+ walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
+int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_aead *tfm,
+ unsigned int blocksize)
+{
+ walk->flags &= ~BLKCIPHER_WALK_PHYS;
+ walk->walk_blocksize = blocksize;
+ walk->cipher_blocksize = crypto_aead_blocksize(tfm);
+ walk->ivsize = crypto_aead_ivsize(tfm);
+ walk->alignmask = crypto_aead_alignmask(tfm);
+ return blkcipher_walk_first(desc, walk);
+}
+EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
+
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
diff --git a/crypto/crc32c.c b/crypto/crc32c_generic.c
index 06f7018c9d95..d9c7beba8e50 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c_generic.c
@@ -170,3 +170,5 @@ module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("crc32c");
+MODULE_SOFTDEP("pre: crc32c");
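For reference, the checksum this renamed module wraps is CRC32C (Castagnoli). A bit-at-a-time userland reference implementation is handy for sanity-checking results; lib/crc32c itself uses faster table-driven or hardware-assisted paths:

/* crc32c_demo.c -- bitwise CRC32C reference, for checking only. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32c(uint32_t crc, const unsigned char *data, size_t len)
{
    crc = ~crc;
    while (len--) {
        crc ^= *data++;
        for (int k = 0; k < 8; k++)
            /* 0x82F63B78 is the reflected Castagnoli polynomial */
            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
    }
    return ~crc;
}

int main(void)
{
    const char *s = "123456789";
    /* The standard check value for CRC32C("123456789") is 0xE3069283. */
    printf("crc32c = 0x%08X\n",
           crc32c(0, (const unsigned char *)s, strlen(s)));
    return 0;
}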
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index fee7265cd35d..1dc54bb95a87 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -17,6 +17,7 @@
*
*/
+#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
@@ -24,11 +25,6 @@
#include <linux/mm.h>
#include <linux/string.h>
-#define NULL_KEY_SIZE 0
-#define NULL_BLOCK_SIZE 1
-#define NULL_DIGEST_SIZE 0
-#define NULL_IV_SIZE 0
-
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
index adad92a44ba2..2f1b8d12952a 100644
--- a/crypto/crypto_wq.c
+++ b/crypto/crypto_wq.c
@@ -33,7 +33,7 @@ static void __exit crypto_wq_exit(void)
destroy_workqueue(kcrypto_wq);
}
-module_init(crypto_wq_init);
+subsys_initcall(crypto_wq_init);
module_exit(crypto_wq_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 0d9003ae8c61..870be7b4dc05 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1511,6 +1511,14 @@ static int do_test(int m)
ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
break;
+ case 156:
+ ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
+ break;
+
+ case 157:
+ ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
+ break;
+
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
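The new modes are exercised like the existing ones, by loading the test harness with the matching mode number, e.g. modprobe tcrypt mode=156 (or mode=157). Note that tcrypt runs its tests from module init and then deliberately returns an error so the module never stays loaded; the test results land in the kernel log.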
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 77955507f6f1..dc3cf3535ef0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1809,6 +1809,22 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(md5),ecb(cipher_null))",
+ .test = alg_test_aead,
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+ .enc = {
+ .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
+ .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
+ .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
@@ -1821,6 +1837,22 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha1),ecb(cipher_null))",
+ .test = alg_test_aead,
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+ .enc = {
+ .vecs = hmac_sha1_ecb_cipher_null_enc_tv_template,
+ .count = HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = hmac_sha1_ecb_cipher_null_dec_tv_template,
+ .count = HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
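The .key blobs in the testmgr.h vectors below follow the authenc() key layout: a host-endian rtattr header (hence the #ifdef __LITTLE_ENDIAN), a big-endian encryption-key length, then the MAC key, then the cipher key. A hedged userland sketch of packing such a blob (the helper and its hardcoded constants are written out by hand for illustration, not a kernel API):

/* authenc_key_demo.c -- builds a key blob matching the vectors' .key/.klen. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static size_t build_authenc_key(unsigned char *out,
                                const unsigned char *authkey, size_t authlen,
                                const unsigned char *enckey, size_t enclen)
{
    uint16_t rta_len = 8;   /* rtattr header (4) + enckeylen param (4) */
    uint16_t rta_type = 1;  /* CRYPTO_AUTHENC_KEYA_PARAM */
    uint32_t n = (uint32_t)enclen;

    memcpy(out, &rta_len, 2);       /* host-endian: this is why the vectors */
    memcpy(out + 2, &rta_type, 2);  /* need the #ifdef __LITTLE_ENDIAN */
    out[4] = (unsigned char)(n >> 24);  /* enckeylen is always big-endian */
    out[5] = (unsigned char)(n >> 16);
    out[6] = (unsigned char)(n >> 8);
    out[7] = (unsigned char)n;
    memcpy(out + 8, authkey, authlen);
    if (enclen)
        memcpy(out + 8 + authlen, enckey, enclen);
    return 8 + authlen + enclen;    /* == .klen in the vectors */
}

int main(void)
{
    unsigned char key[64];
    unsigned char hmac_key[16] = { 0 };  /* 16 zero bytes, as in the vectors */

    /* cipher_null takes a zero-length key, so klen = 8 + 16 + 0 = 24. */
    size_t klen = build_authenc_key(key, hmac_key, sizeof(hmac_key), NULL, 0);

    printf("klen = %zu\n", klen);
    for (size_t i = 0; i < klen; i++)
        printf("\\x%02x%s", key[i], (i % 8 == 7) ? "\n" : "");
    return 0;
}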
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 7d44aa3d6b44..3db83dbba1d9 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -12821,6 +12821,10 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
#define AES_DEC_TEST_VECTORS 4
#define AES_CBC_ENC_TEST_VECTORS 5
#define AES_CBC_DEC_TEST_VECTORS 5
+#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
+#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
+#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
+#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
#define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7
#define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7
#define HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7
@@ -13627,6 +13631,90 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
},
};
+static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
+ { /* Input data from RFC 2410 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .ilen = 8,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a"
+ "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae",
+ .rlen = 8 + 16,
+ }, { /* Input data from RFC 2410 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor",
+ .ilen = 53,
+ .result = "Network Security People Have A Strange Sense Of Humor"
+ "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a"
+ "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23",
+ .rlen = 53 + 16,
+ },
+};
+
+static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
+ {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a"
+ "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae",
+ .ilen = 8 + 16,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .rlen = 8,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor"
+ "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a"
+ "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23",
+ .ilen = 53 + 16,
+ .result = "Network Security People Have A Strange Sense Of Humor",
+ .rlen = 53,
+ },
+};
+
static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -13876,6 +13964,98 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = {
},
};
+static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_template[] = {
+ { /* Input data from RFC 2410 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .ilen = 8,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab"
+ "\x99\x5e\x19\x04\xd1\x72\xef\xb8"
+ "\x8c\x5e\xe4\x08",
+ .rlen = 8 + 20,
+ }, { /* Input data from RFC 2410 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor",
+ .ilen = 53,
+ .result = "Network Security People Have A Strange Sense Of Humor"
+ "\x75\x6f\x42\x1e\xf8\x50\x21\xd2"
+ "\x65\x47\xee\x8e\x1a\xef\x16\xf6"
+ "\x91\x56\xe4\xd6",
+ .rlen = 53 + 20,
+ },
+};
+
+static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_template[] = {
+ {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab"
+ "\x99\x5e\x19\x04\xd1\x72\xef\xb8"
+ "\x8c\x5e\xe4\x08",
+ .ilen = 8 + 20,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .rlen = 8,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor"
+ "\x75\x6f\x42\x1e\xf8\x50\x21\xd2"
+ "\x65\x47\xee\x8e\x1a\xef\x16\xf6"
+ "\x91\x56\xe4\xd6",
+ .ilen = 53 + 20,
+ .result = "Network Security People Have A Strange Sense Of Humor",
+ .rlen = 53,
+ },
+};
+
static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN