From f04a40e1bddd04ad5ffcf198370210faf937c4be Mon Sep 17 00:00:00 2001 From: Franck Bui-Huu Date: Mon, 12 May 2008 21:21:05 +0200 Subject: rcu: split list.h and move rcu-protected lists into rculist.h Move rcu-protected lists from list.h into a new header file rculist.h. This is done because lists are a heavily used primitive structure all over the kernel and it's currently impossible to include other header files in list.h without creating circular dependencies. For example, list.h implements rcu-protected lists and uses rcu_dereference() without including rcupdate.h. It actually compiles because users of rcu_dereference() are macros. Other RCU functions could be used too, but probably aren't because of this. Therefore this patch creates rculist.h, which includes rcupdate.h, without too many changes/troubles. Signed-off-by: Franck Bui-Huu Acked-by: Paul E. McKenney Acked-by: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- crypto/async_tx/async_tx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index c6e772fc..095c798d 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -23,6 +23,7 @@ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ +#include <linux/rculist.h> #include <linux/kernel.h> #include <linux/async_tx.h> -- cgit v1.2.3 From 21b0ce9aa76d6846bca11771592cb17d2ddce5ec Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 5 Jun 2008 23:26:11 -0700 Subject: async_tx: fix async_memset compile error commit c7ea8ef0 'dmaengine: ack to flags: make use of the unused bits in the 'ack' field' missed an ->ack conversion in crypto/async_tx/async_memset.c Signed-off-by: Dan Williams --- crypto/async_tx/async_memset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index f5ff3906..27a97dc9 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -76,7 +76,7 @@ async_memset(struct page *dest, int val, unsigned int offset, /* if ack is already set then we cannot be sure * we are referring to the correct operation */ - BUG_ON(depend_tx->ack); + BUG_ON(async_tx_test_ack(depend_tx)); if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) panic("%s: DMA_ERROR waiting for depend_tx\n", __func__); -- cgit v1.2.3 From b7ed5cc934eae42ce60bccb1f65da25a4da38742 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 7 May 2008 21:10:13 +0800 Subject: [CRYPTO] cryptd: Fix EINPROGRESS notification context The EINPROGRESS notifications should be done just like the final callbacks, i.e., with BH off. This patch fixes the call in cryptd, which was previously made with BH on.
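For reference, this is the invariant being restored, in outline (identifiers as in the diff that follows; this is an illustration, not the complete function):

	/* -EINPROGRESS notifications and final completions now share one
	 * exit path, so both run with softirqs disabled: */
	if (unlikely(err == -EINPROGRESS))
		goto out;
	/* ... perform the actual blkcipher work ... */
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();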
Signed-off-by: Herbert Xu --- crypto/cryptd.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index b150de56..f38e1473 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -82,10 +82,8 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, rctx = ablkcipher_request_ctx(req); - if (unlikely(err == -EINPROGRESS)) { - rctx->complete(&req->base, err); - return; - } + if (unlikely(err == -EINPROGRESS)) + goto out; desc.tfm = child; desc.info = req->info; @@ -95,8 +93,9 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, req->base.complete = rctx->complete; +out: local_bh_disable(); - req->base.complete(&req->base, err); + rctx->complete(&req->base, err); local_bh_enable(); } -- cgit v1.2.3 From 32cb3a33b0f9c0d4ee8733ac6609daefff4da8ce Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 7 May 2008 22:14:10 +0800 Subject: [CRYPTO] ripemd: Add support for RIPEMD hash algorithms This patch adds support for RIPEMD-128 and RIPEMD-160 hash algorithms. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Makefile | 2 + crypto/ripemd.h | 26 ++++ crypto/rmd128.c | 344 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/rmd160.c | 388 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 760 insertions(+) create mode 100644 crypto/ripemd.h create mode 100644 crypto/rmd128.c create mode 100644 crypto/rmd160.c diff --git a/crypto/Makefile b/crypto/Makefile index ca024418..1efb5563 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -27,6 +27,8 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o +obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o +obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o diff --git a/crypto/ripemd.h b/crypto/ripemd.h new file mode 100644 index 00000000..2858e226 --- /dev/null +++ b/crypto/ripemd.h @@ -0,0 +1,26 @@ +/* + * Common values for RIPEMD algorithms + */ + +#ifndef _CRYPTO_RMD_H +#define _CRYPTO_RMD_H + +#define RMD128_DIGEST_SIZE 16 +#define RMD128_BLOCK_SIZE 64 + +#define RMD160_DIGEST_SIZE 20 +#define RMD160_BLOCK_SIZE 64 + +#define RMD256_DIGEST_SIZE 32 +#define RMD256_BLOCK_SIZE 64 + +#define RMD320_DIGEST_SIZE 40 +#define RMD320_BLOCK_SIZE 64 + +#define RMD_H0 0x67452301UL +#define RMD_H1 0xefcdab89UL +#define RMD_H2 0x98badcfeUL +#define RMD_H3 0x10325476UL +#define RMD_H4 0xc3d2e1f0UL + +#endif diff --git a/crypto/rmd128.c b/crypto/rmd128.c new file mode 100644 index 00000000..22cc13be --- /dev/null +++ b/crypto/rmd128.c @@ -0,0 +1,344 @@ +/* + * Cryptographic API. + * + * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest. + * + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC + * + * Copyright (c) 2008 Adrian-Ken Rueegsegger + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +#include "ripemd.h" + +struct rmd128_ctx { + u64 byte_count; + u32 state[4]; + u32 buffer[16]; +}; + +#define K1 0x00000000UL +#define K2 0x5a827999UL +#define K3 0x6ed9eba1UL +#define K4 0x8f1bbcdcUL +#define KK1 0x50a28be6UL +#define KK2 0x5c4dd124UL +#define KK3 0x6d703ef3UL +#define KK4 0x00000000UL + +#define F1(x, y, z) (x ^ y ^ z) /* XOR */ +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ +#define F3(x, y, z) ((x | ~y) ^ z) +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */ + +#define ROUND(a, b, c, d, f, k, x, s) { \ + (a) += f((b), (c), (d)) + (x) + (k); \ + (a) = rol32((a), (s)); \ +} + +static void rmd128_transform(u32 *state, u32 const *in) +{ + u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd; + + /* Initialize left lane */ + aa = state[0]; + bb = state[1]; + cc = state[2]; + dd = state[3]; + + /* Initialize right lane */ + aaa = state[0]; + bbb = state[1]; + ccc = state[2]; + ddd = state[3]; + + /* round 1: left lane */ + ROUND(aa, bb, cc, dd, F1, K1, in[0], 11); + ROUND(dd, aa, bb, cc, F1, K1, in[1], 14); + ROUND(cc, dd, aa, bb, F1, K1, in[2], 15); + ROUND(bb, cc, dd, aa, F1, K1, in[3], 12); + ROUND(aa, bb, cc, dd, F1, K1, in[4], 5); + ROUND(dd, aa, bb, cc, F1, K1, in[5], 8); + ROUND(cc, dd, aa, bb, F1, K1, in[6], 7); + ROUND(bb, cc, dd, aa, F1, K1, in[7], 9); + ROUND(aa, bb, cc, dd, F1, K1, in[8], 11); + ROUND(dd, aa, bb, cc, F1, K1, in[9], 13); + ROUND(cc, dd, aa, bb, F1, K1, in[10], 14); + ROUND(bb, cc, dd, aa, F1, K1, in[11], 15); + ROUND(aa, bb, cc, dd, F1, K1, in[12], 6); + ROUND(dd, aa, bb, cc, F1, K1, in[13], 7); + ROUND(cc, dd, aa, bb, F1, K1, in[14], 9); + ROUND(bb, cc, dd, aa, F1, K1, in[15], 8); + + /* round 2: left lane */ + ROUND(aa, bb, cc, dd, F2, K2, in[7], 7); + ROUND(dd, aa, bb, cc, F2, K2, in[4], 6); + ROUND(cc, dd, aa, bb, F2, K2, in[13], 8); + ROUND(bb, cc, dd, aa, F2, K2, in[1], 13); + ROUND(aa, bb, cc, dd, F2, K2, in[10], 11); + ROUND(dd, aa, bb, cc, F2, K2, in[6], 9); + ROUND(cc, dd, aa, bb, F2, K2, in[15], 7); + ROUND(bb, cc, dd, aa, F2, K2, in[3], 15); + ROUND(aa, bb, cc, dd, F2, K2, in[12], 7); + ROUND(dd, aa, bb, cc, F2, K2, in[0], 12); + ROUND(cc, dd, aa, bb, F2, K2, in[9], 15); + ROUND(bb, cc, dd, aa, F2, K2, in[5], 9); + ROUND(aa, bb, cc, dd, F2, K2, in[2], 11); + ROUND(dd, aa, bb, cc, F2, K2, in[14], 7); + ROUND(cc, dd, aa, bb, F2, K2, in[11], 13); + ROUND(bb, cc, dd, aa, F2, K2, in[8], 12); + + /* round 3: left lane */ + ROUND(aa, bb, cc, dd, F3, K3, in[3], 11); + ROUND(dd, aa, bb, cc, F3, K3, in[10], 13); + ROUND(cc, dd, aa, bb, F3, K3, in[14], 6); + ROUND(bb, cc, dd, aa, F3, K3, in[4], 7); + ROUND(aa, bb, cc, dd, F3, K3, in[9], 14); + ROUND(dd, aa, bb, cc, F3, K3, in[15], 9); + ROUND(cc, dd, aa, bb, F3, K3, in[8], 13); + ROUND(bb, cc, dd, aa, F3, K3, in[1], 15); + ROUND(aa, bb, cc, dd, F3, K3, in[2], 14); + ROUND(dd, aa, bb, cc, F3, K3, in[7], 8); + ROUND(cc, dd, aa, bb, F3, K3, in[0], 13); + ROUND(bb, cc, dd, aa, F3, K3, in[6], 6); + ROUND(aa, bb, cc, dd, F3, K3, in[13], 5); + ROUND(dd, aa, bb, cc, F3, K3, in[11], 12); + ROUND(cc, dd, aa, bb, F3, K3, in[5], 7); + ROUND(bb, cc, dd, aa, F3, K3, in[12], 5); + + /* round 4: left lane */ + ROUND(aa, bb, cc, dd, F4, K4, in[1], 11); + ROUND(dd, aa, bb, cc, F4, K4, in[9], 12); + ROUND(cc, dd, aa, bb, F4, K4, in[11], 14); + ROUND(bb, cc, dd, aa, F4, K4, in[10], 15); + ROUND(aa, bb, cc, dd, F4, K4, in[0], 14); + ROUND(dd, aa, bb, cc, F4, K4, in[8], 15); + ROUND(cc, dd, aa, bb, F4, K4, in[12], 9); + ROUND(bb, cc,
dd, aa, F4, K4, in[4], 8); + ROUND(aa, bb, cc, dd, F4, K4, in[13], 9); + ROUND(dd, aa, bb, cc, F4, K4, in[3], 14); + ROUND(cc, dd, aa, bb, F4, K4, in[7], 5); + ROUND(bb, cc, dd, aa, F4, K4, in[15], 6); + ROUND(aa, bb, cc, dd, F4, K4, in[14], 8); + ROUND(dd, aa, bb, cc, F4, K4, in[5], 6); + ROUND(cc, dd, aa, bb, F4, K4, in[6], 5); + ROUND(bb, cc, dd, aa, F4, K4, in[2], 12); + + /* round 1: right lane */ + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11); + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5); + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11); + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6); + + /* round 2: right lane */ + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7); + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11); + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7); + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11); + + /* round 3: right lane */ + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11); + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14); + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14); + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5); + + /* round 4: right lane */ + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11); + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14); + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9); + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5); + ROUND(ccc, ddd, aaa, bbb, 
F1, KK4, in[10], 15); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8); + + /* combine results */ + ddd += cc + state[1]; /* final result for state[0] */ + state[1] = state[2] + dd + aaa; + state[2] = state[3] + aa + bbb; + state[3] = state[0] + bb + ccc; + state[0] = ddd; + + return; +} + +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + cpu_to_le32s(buf); + buf++; + } +} + +static inline void rmd128_transform_helper(struct rmd128_ctx *ctx) +{ + le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); + rmd128_transform(ctx->state, ctx->buffer); +} + +static void rmd128_init(struct crypto_tfm *tfm) +{ + struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); + + rctx->byte_count = 0; + + rctx->state[0] = RMD_H0; + rctx->state[1] = RMD_H1; + rctx->state[2] = RMD_H2; + rctx->state[3] = RMD_H3; + + memset(rctx->buffer, 0, sizeof(rctx->buffer)); +} + +static void rmd128_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) +{ + struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); + + rctx->byte_count += len; + + /* Enough space in buffer? If so copy and we're done */ + if (avail > len) { + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, len); + return; + } + + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, avail); + + rmd128_transform_helper(rctx); + data += avail; + len -= avail; + + while (len >= sizeof(rctx->buffer)) { + memcpy(rctx->buffer, data, sizeof(rctx->buffer)); + rmd128_transform_helper(rctx); + data += sizeof(rctx->buffer); + len -= sizeof(rctx->buffer); + } + + memcpy(rctx->buffer, data, len); +} + +/* Add padding and return the message digest. */ +static void rmd128_final(struct crypto_tfm *tfm, u8 *out) +{ + struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); + u32 index, padlen; + u64 bits; + static const u8 padding[64] = { 0x80, }; + bits = rctx->byte_count << 3; + + /* Pad out to 56 mod 64 */ + index = rctx->byte_count & 0x3f; + padlen = (index < 56) ? (56 - index) : ((64+56) - index); + rmd128_update(tfm, padding, padlen); + + /* Append length */ + rmd128_update(tfm, (const u8 *)&bits, sizeof(bits)); + + /* Store state in digest */ + memcpy(out, rctx->state, sizeof(rctx->state)); + + /* Wipe context */ + memset(rctx, 0, sizeof(*rctx)); +} + +static struct crypto_alg alg = { + .cra_name = "rmd128", + .cra_driver_name = "rmd128", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = RMD128_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rmd128_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = RMD128_DIGEST_SIZE, + .dia_init = rmd128_init, + .dia_update = rmd128_update, + .dia_final = rmd128_final } } +}; + +static int __init rmd128_mod_init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit rmd128_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(rmd128_mod_init); +module_exit(rmd128_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); + +MODULE_ALIAS("rmd128"); diff --git a/crypto/rmd160.c b/crypto/rmd160.c new file mode 100644 index 00000000..a8a9d3d7 --- /dev/null +++ b/crypto/rmd160.c @@ -0,0 +1,388 @@ +/* + * Cryptographic API. + * + * RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest. 
+ * + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC + * + * Copyright (c) 2008 Adrian-Ken Rueegsegger + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +#include "ripemd.h" + +struct rmd160_ctx { + u64 byte_count; + u32 state[5]; + u32 buffer[16]; +}; + +#define K1 0x00000000UL +#define K2 0x5a827999UL +#define K3 0x6ed9eba1UL +#define K4 0x8f1bbcdcUL +#define K5 0xa953fd4eUL +#define KK1 0x50a28be6UL +#define KK2 0x5c4dd124UL +#define KK3 0x6d703ef3UL +#define KK4 0x7a6d76e9UL +#define KK5 0x00000000UL + +#define F1(x, y, z) (x ^ y ^ z) /* XOR */ +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ +#define F3(x, y, z) ((x | ~y) ^ z) +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */ +#define F5(x, y, z) (x ^ (y | ~z)) + +#define ROUND(a, b, c, d, e, f, k, x, s) { \ + (a) += f((b), (c), (d)) + (x) + (k); \ + (a) = rol32((a), (s)) + (e); \ + (c) = rol32((c), 10); \ +} + +static void rmd160_transform(u32 *state, u32 const *in) +{ + u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee; + + /* Initialize left lane */ + aa = state[0]; + bb = state[1]; + cc = state[2]; + dd = state[3]; + ee = state[4]; + + /* Initialize right lane */ + aaa = state[0]; + bbb = state[1]; + ccc = state[2]; + ddd = state[3]; + eee = state[4]; + + /* round 1: left lane */ + ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11); + ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14); + ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15); + ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12); + ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5); + ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8); + ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7); + ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9); + ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11); + ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13); + ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14); + ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15); + ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6); + ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7); + ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9); + ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8); + + /* round 2: left lane" */ + ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7); + ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6); + ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8); + ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13); + ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11); + ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9); + ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7); + ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15); + ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7); + ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12); + ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15); + ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9); + ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11); + ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7); + ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13); + ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12); + + /* round 3: left lane" */ + ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11); + ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13); + ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6); + ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7); + ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14); + ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9); + ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13); + ROUND(bb, cc, dd, ee, aa, F3, K3,
in[1], 15); + ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14); + ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8); + ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13); + ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6); + ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5); + ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12); + ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7); + ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5); + + /* round 4: left lane" */ + ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11); + ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12); + ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14); + ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15); + ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14); + ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15); + ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9); + ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8); + ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9); + ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14); + ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5); + ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6); + ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8); + ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6); + ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5); + ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12); + + /* round 5: left lane" */ + ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9); + ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15); + ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5); + ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11); + ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6); + ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8); + ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13); + ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12); + ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5); + ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12); + ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13); + ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14); + ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11); + ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8); + ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5); + ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6); + + /* round 1: right lane */ + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8); + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9); + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11); + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13); + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15); + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15); + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5); + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7); + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7); + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8); + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11); + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14); + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14); + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12); + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6); + + /* round 2: right lane */ + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9); + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13); + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15); + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7); + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12); + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8); + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11); + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7); + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7); + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12); + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7); 
+ ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6); + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15); + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13); + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11); + + /* round 3: right lane */ + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7); + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15); + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11); + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8); + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6); + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6); + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14); + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12); + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13); + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5); + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14); + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13); + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13); + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7); + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5); + + /* round 4: right lane */ + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15); + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5); + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8); + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11); + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14); + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14); + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6); + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14); + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6); + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12); + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9); + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12); + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5); + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15); + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8); + + /* round 5: right lane */ + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8); + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5); + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12); + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12); + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5); + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14); + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6); + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8); + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13); + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6); + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5); + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15); + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13); + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11); + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11); + + /* combine results */ + ddd += cc + state[1]; /* final result for state[0] */ + state[1] = state[2] + dd + eee; + state[2] = state[3] + ee + aaa; + state[3] = state[4] + aa + bbb; + state[4] = state[0] + bb + ccc; + state[0] = ddd; + + return; +} + +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + cpu_to_le32s(buf); + buf++; + } +} + +static inline void rmd160_transform_helper(struct rmd160_ctx *ctx) +{ + le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); + 
rmd160_transform(ctx->state, ctx->buffer); +} + +static void rmd160_init(struct crypto_tfm *tfm) +{ + struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); + + rctx->byte_count = 0; + + rctx->state[0] = RMD_H0; + rctx->state[1] = RMD_H1; + rctx->state[2] = RMD_H2; + rctx->state[3] = RMD_H3; + rctx->state[4] = RMD_H4; + + memset(rctx->buffer, 0, sizeof(rctx->buffer)); +} + +static void rmd160_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) +{ + struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); + + rctx->byte_count += len; + + /* Enough space in buffer? If so copy and we're done */ + if (avail > len) { + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, len); + return; + } + + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, avail); + + rmd160_transform_helper(rctx); + data += avail; + len -= avail; + + while (len >= sizeof(rctx->buffer)) { + memcpy(rctx->buffer, data, sizeof(rctx->buffer)); + rmd160_transform_helper(rctx); + data += sizeof(rctx->buffer); + len -= sizeof(rctx->buffer); + } + + memcpy(rctx->buffer, data, len); +} + +/* Add padding and return the message digest. */ +static void rmd160_final(struct crypto_tfm *tfm, u8 *out) +{ + struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); + u32 index, padlen; + u64 bits; + static const u8 padding[64] = { 0x80, }; + bits = rctx->byte_count << 3; + + /* Pad out to 56 mod 64 */ + index = rctx->byte_count & 0x3f; + padlen = (index < 56) ? (56 - index) : ((64+56) - index); + rmd160_update(tfm, padding, padlen); + + /* Append length */ + rmd160_update(tfm, (const u8 *)&bits, sizeof(bits)); + + /* Store state in digest */ + memcpy(out, rctx->state, sizeof(rctx->state)); + + /* Wipe context */ + memset(rctx, 0, sizeof(*rctx)); +} + +static struct crypto_alg alg = { + .cra_name = "rmd160", + .cra_driver_name = "rmd160", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = RMD160_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rmd160_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = RMD160_DIGEST_SIZE, + .dia_init = rmd160_init, + .dia_update = rmd160_update, + .dia_final = rmd160_final } } +}; + +static int __init rmd160_mod_init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit rmd160_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(rmd160_mod_init); +module_exit(rmd160_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); + +MODULE_ALIAS("rmd160"); -- cgit v1.2.3 From db1bd5027d71e46eb1c881492333effa03bcaaf6 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 7 May 2008 22:16:36 +0800 Subject: [CRYPTO] tcrypt: Add test vectors for RIPEMD-128 and RIPEMD-160 This patch adds test vectors for RIPEMD-128 and RIPEMD-160 hash algorithms and digests (HMAC). The test vectors are taken from ISO/IEC 10118-3 (2004) and RFC2286. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 29 +++++- crypto/tcrypt.h | 292 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 320 insertions(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e47f6e02..f6220b35 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -13,6 +13,7 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version.
* + * 2008-04-27 Added RIPEMD tests * 2007-11-13 Added GCM tests * 2007-11-13 Added AEAD support * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests @@ -83,7 +84,7 @@ static char *check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", "lzo", "cts", NULL + "camellia", "seed", "salsa20", "rmd128", "rmd160", "lzo", "cts", NULL }; static void hexdump(unsigned char *buf, unsigned int len) @@ -1615,6 +1616,14 @@ static void do_test(void) CTS_MODE_DEC_TEST_VECTORS); break; + case 39: + test_hash("rmd128", rmd128_tv_template, RMD128_TEST_VECTORS); + break; + + case 40: + test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS); + break; + case 100: test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); @@ -1650,6 +1659,16 @@ static void do_test(void) XCBC_AES_TEST_VECTORS); break; + case 107: + test_hash("hmac(rmd128)", hmac_rmd128_tv_template, + HMAC_RMD128_TEST_VECTORS); + break; + + case 108: + test_hash("hmac(rmd160)", hmac_rmd160_tv_template, + HMAC_RMD160_TEST_VECTORS); + break; + case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); @@ -1788,6 +1807,14 @@ static void do_test(void) test_hash_speed("sha224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 314: + test_hash_speed("rmd128", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + + case 315: + test_hash_speed("rmd160", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + case 399: break; diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 47bc0ecb..89068233 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -13,6 +13,7 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
* + * 2008-04-27 Added RIPEMD tests * 2007-11-13 Added GCM tests * 2007-11-13 Added AEAD support * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests @@ -168,6 +169,135 @@ static struct hash_testvec md5_tv_template[] = { .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55" "\xac\x49\xda\x2e\x21\x07\xb6\x7a", } + +}; + +/* + * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E) + */ +#define RMD128_TEST_VECTORS 10 + +static struct hash_testvec rmd128_tv_template[] = { + { + .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e" + "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7" + "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33", + }, { + .plaintext = "abc", + .psize = 3, + .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba" + "\x84\x63\x6b\x0f\x69\x14\x4c\x77", + }, { + .plaintext = "message digest", + .psize = 14, + .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62" + "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8", + }, { + .plaintext = "abcdefghijklmnopqrstuvwxyz", + .psize = 26, + .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5" + "\x10\x71\x49\x22\xb3\x71\x83\x4e", + }, { + .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" + "fghijklmnopqrstuvwxyz0123456789", + .psize = 62, + .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f" + "\xae\xa4\x62\x4c\x60\xc5\xc7\x02", + }, { + .plaintext = "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890", + .psize = 80, + .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb" + "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighij" + "hijkijkljklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d" + "\xdc\x22\xe8\x8b\x49\x13\x3a\x06", + .np = 2, + .tap = { 28, 28 }, + }, { + .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" + "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" + "lmnopqrsmnopqrstnopqrstu", + .psize = 112, + .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b" + "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighijhijk", + .psize = 32, + .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d" + "\xe1\x93\xff\x46\xdb\xac\xcf\xd4", + } +}; + +/* + * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E) + */ +#define RMD160_TEST_VECTORS 10 + +static struct hash_testvec rmd160_tv_template[] = { + { + .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28" + "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae" + "\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe", + }, { + .plaintext = "abc", + .psize = 3, + .digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04" + "\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc", + }, { + .plaintext = "message digest", + .psize = 14, + .digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8" + "\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36", + }, { + .plaintext = "abcdefghijklmnopqrstuvwxyz", + .psize = 26, + .digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb" + "\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc", + }, { + .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" + "fghijklmnopqrstuvwxyz0123456789", + .psize = 62, + .digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed" + "\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89", + }, { + .plaintext = "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890", + .psize = 80, + .digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb" + "\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighij" + "hijkijkljklmklmnlmnomnopnopq", + .psize = 56, + 
.digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05" + "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b", + .np = 2, + .tap = { 28, 28 }, + }, { + .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" + "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" + "lmnopqrsmnopqrstnopqrstu", + .psize = 112, + .digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91" + "\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighijhijk", + .psize = 32, + .digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d" + "\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f", + } }; /* @@ -816,6 +946,168 @@ static struct hash_testvec hmac_md5_tv_template[] = }, }; +/* + * HMAC-RIPEMD128 test vectors from RFC2286 + */ +#define HMAC_RMD128_TEST_VECTORS 7 + +static struct hash_testvec hmac_rmd128_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ksize = 16, + .plaintext = "Hi There", + .psize = 8, + .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf" + "\x81\xc1\x72\xe8\x4e\x07\x34\xdb", + }, { + .key = "Jefe", + .ksize = 4, + .plaintext = "what do ya want for nothing?", + .psize = 28, + .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34" + "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b", + .np = 2, + .tap = { 14, 14 }, + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", + .ksize = 16, + .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd", + .psize = 50, + .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d" + "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d", + }, { + .key = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" + "\x11\x12\x13\x14\x15\x16\x17\x18\x19", + .ksize = 25, + .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" + "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" + "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" + "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd", + .psize = 50, + .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a" + "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94", + }, { + .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c", + .ksize = 16, + .plaintext = "Test With Truncation", + .psize = 20, + .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03" + "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa", + .ksize = 80, + .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First", + .psize = 54, + .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a" + "\x1f\x59\xd3\x73\xc1\x50\xac\xbb", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa", + .ksize = 80, + .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One " + "Block-Size Data", + .psize = 73, + .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4" + "\x06\x90\xc2\x37\x63\x5f\x30\xc5", + }, +}; + +/* 
+ * HMAC-RIPEMD160 test vectors from RFC2286 + */ +#define HMAC_RMD160_TEST_VECTORS 7 + +static struct hash_testvec hmac_rmd160_tv_template[] = { + { + .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", + .ksize = 20, + .plaintext = "Hi There", + .psize = 8, + .digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e" + "\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68", + }, { + .key = "Jefe", + .ksize = 4, + .plaintext = "what do ya want for nothing?", + .psize = 28, + .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4" + "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69", + .np = 2, + .tap = { 14, 14 }, + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", + .ksize = 20, + .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd" + "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd", + .psize = 50, + .digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4" + "\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1", + }, { + .key = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" + "\x11\x12\x13\x14\x15\x16\x17\x18\x19", + .ksize = 25, + .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" + "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" + "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd" + "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd", + .psize = 50, + .digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1" + "\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4", + }, { + .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c", + .ksize = 20, + .plaintext = "Test With Truncation", + .psize = 20, + .digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a" + "\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa", + .ksize = 80, + .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First", + .psize = 54, + .digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd" + "\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b", + }, { + .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa", + .ksize = 80, + .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One " + "Block-Size Data", + .psize = 73, + .digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f" + "\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a", + }, +}; + /* * HMAC-SHA1 test vectors from RFC2202 */ -- cgit v1.2.3 From 470faeb74f871eeb868b0c378aa7cb1d48a7d8cb Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 7 May 2008 22:17:37 +0800 Subject: [CRYPTO] ripemd: Add Kconfig entries for RIPEMD hash algorithms This patch adds Kconfig entries for RIPEMD-128 and RIPEMD-160. 
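Once these options are enabled, the new digests are reachable through the kernel's synchronous hash API of this generation; a minimal sketch (hypothetical caller with its own data/len, error handling omitted):

	struct hash_desc desc;
	struct scatterlist sg;
	u8 out[20];	/* RMD160_DIGEST_SIZE */

	sg_init_one(&sg, data, len);
	desc.tfm = crypto_alloc_hash("rmd160", 0, CRYPTO_ALG_ASYNC);
	desc.flags = 0;
	crypto_hash_digest(&desc, &sg, len, out);	/* out now holds the digest */
	crypto_free_hash(desc.tfm);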
Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index 864456c1..cfc521a0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -241,6 +241,32 @@ config CRYPTO_MICHAEL_MIC should not be used for other purposes because of the weakness of the algorithm. +config CRYPTO_RMD128 + tristate "RIPEMD-128 digest algorithm" + select CRYPTO_ALGAPI + help + RIPEMD-128 (ISO/IEC 10118-3:2004). + + RIPEMD-128 is a 128-bit cryptographic hash function. It should only + be used as a secure replacement for RIPEMD. For other use cases + RIPEMD-160 should be used. + + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. + See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + +config CRYPTO_RMD160 + tristate "RIPEMD-160 digest algorithm" + select CRYPTO_ALGAPI + help + RIPEMD-160 (ISO/IEC 10118-3:2004). + + RIPEMD-160 is a 160-bit cryptographic hash function. It is intended + to be used as a secure replacement for the 128-bit hash functions + MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128). + + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. + See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + config CRYPTO_SHA1 tristate "SHA1 digest algorithm" select CRYPTO_ALGAPI -- cgit v1.2.3 From 538442d5d699131a28334aad8486d0fda1411fa8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 7 May 2008 22:19:38 +0800 Subject: [CRYPTO] tcrypt: Get rid of change log in source Change logs should be kept in source control systems, not the source. This patch removes the change log from tcrypt to stop people from extending it any more. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 8 -------- crypto/tcrypt.h | 7 ------- 2 files changed, 15 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index f6220b35..285c093a 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -13,14 +13,6 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * - * 2008-04-27 Added RIPEMD tests - * 2007-11-13 Added GCM tests - * 2007-11-13 Added AEAD support - * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests - * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests - * 2004-08-09 Added cipher speed tests (Reyk Floeter ) - * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt - * - */ #include <linux/err.h> diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 89068233..af91f0cd 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -13,13 +13,6 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * - * 2008-04-27 Added RIPEMD tests - * 2007-11-13 Added GCM tests - * 2007-11-13 Added AEAD support - * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests - * 2004-08-09 Cipher speed tests by Reyk Floeter - * 2003-09-14 Changes by Kartikey Mahendra Bhatt - * - */ #ifndef _CRYPTO_TCRYPT_H #define _CRYPTO_TCRYPT_H -- cgit v1.2.3 From 19bafd294addc4ea4af67b94c6962d3a8ee9b912 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Thu, 8 May 2008 19:27:47 +0800 Subject: [CRYPTO] tcrypt: Catch cipher destination memory corruption Check whether the destination buffer is written to beyond the last byte contained in the scatterlist. Also change IDX1 of the cross-page access offsets to a multiple of 4. This triggers a corruption in the HIFN driver and doesn't seem to negatively impact other testcases.
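The check works because the chunked test buffer is now zeroed before every test, so any nonzero byte past the expected output length marks an overrun; schematically (names as in the diff below):

	memset(xbuf, 0, XBUFSIZE);		/* per test, not once per algorithm */
	/* ... run the cipher into q = &xbuf[IDX[k]] ... */
	for (n = 0; q[template[i].tap[k] + n]; n++)
		;				/* count trailing nonzero bytes */
	if (n) {				/* n bytes written past the end */
		printk("Result buffer corruption %u bytes:\n", n);
		hexdump(&q[template[i].tap[k]], n);
	}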
Signed-off-by: Patrick McHardy Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 285c093a..69eb29e1 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -39,7 +39,7 @@ /* * Indexes into the xbuf to simulate cross-page access. */ -#define IDX1 37 +#define IDX1 32 #define IDX2 32400 #define IDX3 1 #define IDX4 8193 @@ -211,7 +211,7 @@ out: static void test_aead(char *algo, int enc, struct aead_testvec *template, unsigned int tcount) { - unsigned int ret, i, j, k, temp; + unsigned int ret, i, j, k, n, temp; char *q; struct crypto_aead *tfm; char *key; @@ -353,7 +353,6 @@ next_one: } printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e); - memset(xbuf, 0, XBUFSIZE); memset(axbuf, 0, XBUFSIZE); for (i = 0, j = 0; i < tcount; i++) { @@ -381,6 +380,7 @@ next_one: goto out; } + memset(xbuf, 0, XBUFSIZE); sg_init_table(sg, template[i].np); for (k = 0, temp = 0; k < template[i].np; k++) { memcpy(&xbuf[IDX[k]], @@ -452,6 +452,14 @@ next_one: 0 : authsize)) ? "fail" : "pass"); + for (n = 0; q[template[i].tap[k] + n]; n++) + ; + if (n) { + printk("Result buffer corruption %u " + "bytes:\n", n); + hexdump(&q[template[i].tap[k]], n); + } + temp += template[i].tap[k]; kunmap(sg_page(&sg[k])); } @@ -466,7 +474,7 @@ out: static void test_cipher(char *algo, int enc, struct cipher_testvec *template, unsigned int tcount) { - unsigned int ret, i, j, k, temp; + unsigned int ret, i, j, k, n, temp; char *q; struct crypto_ablkcipher *tfm; struct ablkcipher_request *req; @@ -574,7 +582,6 @@ static void test_cipher(char *algo, int enc, } printk("\ntesting %s %s across pages (chunking)\n", algo, e); - memset(xbuf, 0, XBUFSIZE); j = 0; for (i = 0; i < tcount; i++) { @@ -589,6 +596,7 @@ static void test_cipher(char *algo, int enc, printk("test %u (%d bit key):\n", j, template[i].klen * 8); + memset(xbuf, 0, XBUFSIZE); crypto_ablkcipher_clear_flags(tfm, ~0); if (template[i].wk) crypto_ablkcipher_set_flags( @@ -648,6 +656,14 @@ static void test_cipher(char *algo, int enc, memcmp(q, template[i].result + temp, template[i].tap[k]) ? "fail" : "pass"); + + for (n = 0; q[template[i].tap[k] + n]; n++) + ; + if (n) { + printk("Result buffer corruption %u " + "bytes:\n", n); + hexdump(&q[template[i].tap[k]], n); + } temp += template[i].tap[k]; kunmap(sg_page(&sg[k])); } -- cgit v1.2.3 From ef29a8de694cbf0b8792f508f68ceb0b20f6003d Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Fri, 9 May 2008 21:25:42 +0800 Subject: [CRYPTO] ripemd: Put all common RIPEMD values in header file This patch puts all common RIPEMD values in the appropriate header file. Initial values and constants are the same for all variants of RIPEMD. 
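To illustrate the sharing, this is how the two lanes of the wider RIPEMD-256 variant (added in a later patch) can be seeded purely from these common words; a sketch following the reference specification:

	/* left lane */
	rctx->state[0] = RMD_H0;
	rctx->state[1] = RMD_H1;
	rctx->state[2] = RMD_H2;
	rctx->state[3] = RMD_H3;
	/* right lane */
	rctx->state[4] = RMD_H5;
	rctx->state[5] = RMD_H6;
	rctx->state[6] = RMD_H7;
	rctx->state[7] = RMD_H8;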
Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/ripemd.h | 17 +++++++++++++++++ crypto/rmd128.c | 16 ++++++++-------- crypto/rmd160.c | 20 ++++++++++---------- 3 files changed, 35 insertions(+), 18 deletions(-) diff --git a/crypto/ripemd.h b/crypto/ripemd.h index 2858e226..c57a2d4c 100644 --- a/crypto/ripemd.h +++ b/crypto/ripemd.h @@ -17,10 +17,27 @@ #define RMD320_DIGEST_SIZE 40 #define RMD320_BLOCK_SIZE 64 +/* initial values */ #define RMD_H0 0x67452301UL #define RMD_H1 0xefcdab89UL #define RMD_H2 0x98badcfeUL #define RMD_H3 0x10325476UL #define RMD_H4 0xc3d2e1f0UL +#define RMD_H5 0x76543210UL +#define RMD_H6 0xfedcba98UL +#define RMD_H7 0x89abcdefUL +#define RMD_H8 0x01234567UL +#define RMD_H9 0x3c2d1e0fUL + +/* constants */ +#define RMD_K1 0x00000000UL +#define RMD_K2 0x5a827999UL +#define RMD_K3 0x6ed9eba1UL +#define RMD_K4 0x8f1bbcdcUL +#define RMD_K5 0xa953fd4eUL +#define RMD_K6 0x50a28be6UL +#define RMD_K7 0x5c4dd124UL +#define RMD_K8 0x6d703ef3UL +#define RMD_K9 0x7a6d76e9UL #endif diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 22cc13be..f72d2ce8 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c @@ -29,14 +29,14 @@ struct rmd128_ctx { u32 buffer[16]; }; -#define K1 0x00000000UL -#define K2 0x5a827999UL -#define K3 0x6ed9eba1UL -#define K4 0x8f1bbcdcUL -#define KK1 0x50a28be6UL -#define KK2 0x5c4dd124UL -#define KK3 0x6d703ef3UL -#define KK4 0x00000000UL +#define K1 RMD_K1 +#define K2 RMD_K2 +#define K3 RMD_K3 +#define K4 RMD_K4 +#define KK1 RMD_K6 +#define KK2 RMD_K7 +#define KK3 RMD_K8 +#define KK4 RMD_K1 #define F1(x, y, z) (x ^ y ^ z) /* XOR */ #define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ diff --git a/crypto/rmd160.c b/crypto/rmd160.c index a8a9d3d7..80d647aa 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -29,16 +29,16 @@ struct rmd160_ctx { u32 buffer[16]; }; -#define K1 0x00000000UL -#define K2 0x5a827999UL -#define K3 0x6ed9eba1UL -#define K4 0x8f1bbcdcUL -#define K5 0xa953fd4eUL -#define KK1 0x50a28be6UL -#define KK2 0x5c4dd124UL -#define KK3 0x6d703ef3UL -#define KK4 0x7a6d76e9UL -#define KK5 0x00000000UL +#define K1 RMD_K1 +#define K2 RMD_K2 +#define K3 RMD_K3 +#define K4 RMD_K4 +#define K5 RMD_K5 +#define KK1 RMD_K6 +#define KK2 RMD_K7 +#define KK3 RMD_K8 +#define KK4 RMD_K9 +#define KK5 RMD_K1 #define F1(x, y, z) (x ^ y ^ z) /* XOR */ #define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ -- cgit v1.2.3 From 3731ff5e77f3b07b43fecd331d06ef6261dac4c5 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Fri, 9 May 2008 21:27:02 +0800 Subject: [CRYPTO] ripemd: Add support for RIPEMD-256 and RIPEMD-320 This patch adds support for the extended RIPEMD hash algorithms RIPEMD-256 and RIPEMD-320. 
Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Makefile | 2 + crypto/rmd256.c | 363 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/rmd320.c | 412 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 777 insertions(+) create mode 100644 crypto/rmd256.c create mode 100644 crypto/rmd320.c diff --git a/crypto/Makefile b/crypto/Makefile index 1efb5563..807656b6 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o +obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o +obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o diff --git a/crypto/rmd256.c b/crypto/rmd256.c new file mode 100644 index 00000000..060ee81c --- /dev/null +++ b/crypto/rmd256.c @@ -0,0 +1,363 @@ +/* + * Cryptographic API. + * + * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest. + * + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC + * + * Copyright (c) 2008 Adrian-Ken Rueegsegger + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +#include "ripemd.h" + +struct rmd256_ctx { + u64 byte_count; + u32 state[8]; + u32 buffer[16]; +}; + +#define K1 RMD_K1 +#define K2 RMD_K2 +#define K3 RMD_K3 +#define K4 RMD_K4 +#define KK1 RMD_K6 +#define KK2 RMD_K7 +#define KK3 RMD_K8 +#define KK4 RMD_K1 + +#define F1(x, y, z) (x ^ y ^ z) /* XOR */ +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ +#define F3(x, y, z) ((x | ~y) ^ z) +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ?
x : y */ + +#define ROUND(a, b, c, d, f, k, x, s) { \ + (a) += f((b), (c), (d)) + (x) + (k); \ + (a) = rol32((a), (s)); \ +} + +static void rmd256_transform(u32 *state, u32 const *in) +{ + u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp; + + /* Initialize left lane */ + aa = state[0]; + bb = state[1]; + cc = state[2]; + dd = state[3]; + + /* Initialize right lane */ + aaa = state[4]; + bbb = state[5]; + ccc = state[6]; + ddd = state[7]; + + /* round 1: left lane */ + ROUND(aa, bb, cc, dd, F1, K1, in[0], 11); + ROUND(dd, aa, bb, cc, F1, K1, in[1], 14); + ROUND(cc, dd, aa, bb, F1, K1, in[2], 15); + ROUND(bb, cc, dd, aa, F1, K1, in[3], 12); + ROUND(aa, bb, cc, dd, F1, K1, in[4], 5); + ROUND(dd, aa, bb, cc, F1, K1, in[5], 8); + ROUND(cc, dd, aa, bb, F1, K1, in[6], 7); + ROUND(bb, cc, dd, aa, F1, K1, in[7], 9); + ROUND(aa, bb, cc, dd, F1, K1, in[8], 11); + ROUND(dd, aa, bb, cc, F1, K1, in[9], 13); + ROUND(cc, dd, aa, bb, F1, K1, in[10], 14); + ROUND(bb, cc, dd, aa, F1, K1, in[11], 15); + ROUND(aa, bb, cc, dd, F1, K1, in[12], 6); + ROUND(dd, aa, bb, cc, F1, K1, in[13], 7); + ROUND(cc, dd, aa, bb, F1, K1, in[14], 9); + ROUND(bb, cc, dd, aa, F1, K1, in[15], 8); + + /* round 1: right lane */ + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11); + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5); + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11); + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14); + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14); + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12); + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6); + + /* Swap contents of "a" registers */ + tmp = aa; aa = aaa; aaa = tmp; + + /* round 2: left lane */ + ROUND(aa, bb, cc, dd, F2, K2, in[7], 7); + ROUND(dd, aa, bb, cc, F2, K2, in[4], 6); + ROUND(cc, dd, aa, bb, F2, K2, in[13], 8); + ROUND(bb, cc, dd, aa, F2, K2, in[1], 13); + ROUND(aa, bb, cc, dd, F2, K2, in[10], 11); + ROUND(dd, aa, bb, cc, F2, K2, in[6], 9); + ROUND(cc, dd, aa, bb, F2, K2, in[15], 7); + ROUND(bb, cc, dd, aa, F2, K2, in[3], 15); + ROUND(aa, bb, cc, dd, F2, K2, in[12], 7); + ROUND(dd, aa, bb, cc, F2, K2, in[0], 12); + ROUND(cc, dd, aa, bb, F2, K2, in[9], 15); + ROUND(bb, cc, dd, aa, F2, K2, in[5], 9); + ROUND(aa, bb, cc, dd, F2, K2, in[2], 11); + ROUND(dd, aa, bb, cc, F2, K2, in[14], 7); + ROUND(cc, dd, aa, bb, F2, K2, in[11], 13); + ROUND(bb, cc, dd, aa, F2, K2, in[8], 12); + + /* round 2: right lane */ + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7); + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11); + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12); + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7); + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6); + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15); + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13); + 
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11); + + /* Swap contents of "b" registers */ + tmp = bb; bb = bbb; bbb = tmp; + + /* round 3: left lane */ + ROUND(aa, bb, cc, dd, F3, K3, in[3], 11); + ROUND(dd, aa, bb, cc, F3, K3, in[10], 13); + ROUND(cc, dd, aa, bb, F3, K3, in[14], 6); + ROUND(bb, cc, dd, aa, F3, K3, in[4], 7); + ROUND(aa, bb, cc, dd, F3, K3, in[9], 14); + ROUND(dd, aa, bb, cc, F3, K3, in[15], 9); + ROUND(cc, dd, aa, bb, F3, K3, in[8], 13); + ROUND(bb, cc, dd, aa, F3, K3, in[1], 15); + ROUND(aa, bb, cc, dd, F3, K3, in[2], 14); + ROUND(dd, aa, bb, cc, F3, K3, in[7], 8); + ROUND(cc, dd, aa, bb, F3, K3, in[0], 13); + ROUND(bb, cc, dd, aa, F3, K3, in[6], 6); + ROUND(aa, bb, cc, dd, F3, K3, in[13], 5); + ROUND(dd, aa, bb, cc, F3, K3, in[11], 12); + ROUND(cc, dd, aa, bb, F3, K3, in[5], 7); + ROUND(bb, cc, dd, aa, F3, K3, in[12], 5); + + /* round 3: right lane */ + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11); + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14); + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14); + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13); + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13); + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7); + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5); + + /* Swap contents of "c" registers */ + tmp = cc; cc = ccc; ccc = tmp; + + /* round 4: left lane */ + ROUND(aa, bb, cc, dd, F4, K4, in[1], 11); + ROUND(dd, aa, bb, cc, F4, K4, in[9], 12); + ROUND(cc, dd, aa, bb, F4, K4, in[11], 14); + ROUND(bb, cc, dd, aa, F4, K4, in[10], 15); + ROUND(aa, bb, cc, dd, F4, K4, in[0], 14); + ROUND(dd, aa, bb, cc, F4, K4, in[8], 15); + ROUND(cc, dd, aa, bb, F4, K4, in[12], 9); + ROUND(bb, cc, dd, aa, F4, K4, in[4], 8); + ROUND(aa, bb, cc, dd, F4, K4, in[13], 9); + ROUND(dd, aa, bb, cc, F4, K4, in[3], 14); + ROUND(cc, dd, aa, bb, F4, K4, in[7], 5); + ROUND(bb, cc, dd, aa, F4, K4, in[15], 6); + ROUND(aa, bb, cc, dd, F4, K4, in[14], 8); + ROUND(dd, aa, bb, cc, F4, K4, in[5], 6); + ROUND(cc, dd, aa, bb, F4, K4, in[6], 5); + ROUND(bb, cc, dd, aa, F4, K4, in[2], 12); + + /* round 4: right lane */ + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11); + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14); + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9); + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12); + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5); + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15); + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8); + + /* Swap contents of "d" registers */ + tmp = dd; dd = ddd; ddd = tmp; + + /* combine results */ + state[0] += aa; + state[1] += bb; + state[2] += cc; + state[3] += dd; + state[4] += aaa; + state[5] += bbb; + state[6] += ccc; + state[7] += ddd; + + return; +} + +static 
inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + cpu_to_le32s(buf); + buf++; + } +} + +static inline void rmd256_transform_helper(struct rmd256_ctx *ctx) +{ + le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); + rmd256_transform(ctx->state, ctx->buffer); +} + +static void rmd256_init(struct crypto_tfm *tfm) +{ + struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); + + rctx->byte_count = 0; + + rctx->state[0] = RMD_H0; + rctx->state[1] = RMD_H1; + rctx->state[2] = RMD_H2; + rctx->state[3] = RMD_H3; + rctx->state[4] = RMD_H5; + rctx->state[5] = RMD_H6; + rctx->state[6] = RMD_H7; + rctx->state[7] = RMD_H8; + + memset(rctx->buffer, 0, sizeof(rctx->buffer)); +} + +static void rmd256_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) +{ + struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); + + rctx->byte_count += len; + + /* Enough space in buffer? If so copy and we're done */ + if (avail > len) { + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, len); + return; + } + + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, avail); + + rmd256_transform_helper(rctx); + data += avail; + len -= avail; + + while (len >= sizeof(rctx->buffer)) { + memcpy(rctx->buffer, data, sizeof(rctx->buffer)); + rmd256_transform_helper(rctx); + data += sizeof(rctx->buffer); + len -= sizeof(rctx->buffer); + } + + memcpy(rctx->buffer, data, len); +} + +/* Add padding and return the message digest. */ +static void rmd256_final(struct crypto_tfm *tfm, u8 *out) +{ + struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); + u32 index, padlen; + u64 bits; + static const u8 padding[64] = { 0x80, }; + bits = rctx->byte_count << 3; + + /* Pad out to 56 mod 64 */ + index = rctx->byte_count & 0x3f; + padlen = (index < 56) ? (56 - index) : ((64+56) - index); + rmd256_update(tfm, padding, padlen); + + /* Append length */ + rmd256_update(tfm, (const u8 *)&bits, sizeof(bits)); + + /* Store state in digest */ + memcpy(out, rctx->state, sizeof(rctx->state)); + + /* Wipe context */ + memset(rctx, 0, sizeof(*rctx)); +} + +static struct crypto_alg alg = { + .cra_name = "rmd256", + .cra_driver_name = "rmd256", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = RMD256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rmd256_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = RMD256_DIGEST_SIZE, + .dia_init = rmd256_init, + .dia_update = rmd256_update, + .dia_final = rmd256_final } } +}; + +static int __init rmd256_mod_init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit rmd256_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(rmd256_mod_init); +module_exit(rmd256_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); + +MODULE_ALIAS("rmd256"); diff --git a/crypto/rmd320.c b/crypto/rmd320.c new file mode 100644 index 00000000..b39c0543 --- /dev/null +++ b/crypto/rmd320.c @@ -0,0 +1,412 @@ +/* + * Cryptographic API. + * + * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest. 
+ * + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC + * + * Copyright (c) 2008 Adrian-Ken Rueegsegger + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#include +#include +#include +#include +#include +#include +#include + +#include "ripemd.h" + +struct rmd320_ctx { + u64 byte_count; + u32 state[10]; + u32 buffer[16]; +}; + +#define K1 RMD_K1 +#define K2 RMD_K2 +#define K3 RMD_K3 +#define K4 RMD_K4 +#define K5 RMD_K5 +#define KK1 RMD_K6 +#define KK2 RMD_K7 +#define KK3 RMD_K8 +#define KK4 RMD_K9 +#define KK5 RMD_K1 + +#define F1(x, y, z) (x ^ y ^ z) /* XOR */ +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ +#define F3(x, y, z) ((x | ~y) ^ z) +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */ +#define F5(x, y, z) (x ^ (y | ~z)) + +#define ROUND(a, b, c, d, e, f, k, x, s) { \ + (a) += f((b), (c), (d)) + (x) + (k); \ + (a) = rol32((a), (s)) + (e); \ + (c) = rol32((c), 10); \ +} + +static void rmd320_transform(u32 *state, u32 const *in) +{ + u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp; + + /* Initialize left lane */ + aa = state[0]; + bb = state[1]; + cc = state[2]; + dd = state[3]; + ee = state[4]; + + /* Initialize right lane */ + aaa = state[5]; + bbb = state[6]; + ccc = state[7]; + ddd = state[8]; + eee = state[9]; + + /* round 1: left lane */ + ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11); + ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14); + ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15); + ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12); + ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5); + ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8); + ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7); + ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9); + ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11); + ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13); + ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14); + ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15); + ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6); + ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7); + ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9); + ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8); + + /* round 1: right lane */ + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8); + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9); + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11); + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13); + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15); + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15); + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5); + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7); + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7); + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8); + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11); + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14); + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14); + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12); + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6); + + /* Swap contents of "a" registers */ + tmp = aa; aa = aaa; aaa = tmp; + + /* round 2: left lane" */ + ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7); + ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6); + ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8); + ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13); + ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11); + ROUND(ee, aa, 
bb, cc, dd, F2, K2, in[6], 9); + ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7); + ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15); + ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7); + ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12); + ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15); + ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9); + ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11); + ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7); + ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13); + ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12); + + /* round 2: right lane */ + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9); + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13); + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15); + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7); + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12); + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8); + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11); + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7); + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7); + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12); + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7); + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6); + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15); + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13); + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11); + + /* Swap contents of "b" registers */ + tmp = bb; bb = bbb; bbb = tmp; + + /* round 3: left lane" */ + ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11); + ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13); + ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6); + ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7); + ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14); + ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9); + ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13); + ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15); + ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14); + ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8); + ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13); + ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6); + ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5); + ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12); + ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7); + ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5); + + /* round 3: right lane */ + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7); + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15); + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11); + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8); + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6); + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6); + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14); + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12); + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13); + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5); + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14); + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13); + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13); + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7); + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5); + + /* Swap contents of "c" registers */ + tmp = cc; cc = ccc; ccc = tmp; + + /* round 4: left lane" */ + ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11); + ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12); + ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14); + ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15); + ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14); + ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 
15); + ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9); + ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8); + ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9); + ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14); + ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5); + ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6); + ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8); + ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6); + ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5); + ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12); + + /* round 4: right lane */ + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15); + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5); + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8); + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11); + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14); + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14); + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6); + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14); + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6); + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12); + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9); + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12); + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5); + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15); + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8); + + /* Swap contents of "d" registers */ + tmp = dd; dd = ddd; ddd = tmp; + + /* round 5: left lane" */ + ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9); + ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15); + ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5); + ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11); + ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6); + ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8); + ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13); + ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12); + ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5); + ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12); + ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13); + ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14); + ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11); + ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8); + ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5); + ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6); + + /* round 5: right lane */ + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8); + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5); + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12); + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9); + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12); + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5); + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14); + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6); + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8); + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13); + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6); + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5); + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15); + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13); + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11); + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11); + + /* Swap contents of "e" registers */ + tmp = ee; ee = eee; eee = tmp; + + /* combine results */ + state[0] += aa; + state[1] += bb; + state[2] += cc; + state[3] += dd; + state[4] += ee; + state[5] += aaa; + state[6] += bbb; + state[7] += ccc; + state[8] += ddd; + state[9] += eee; + + return; +} + +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + le32_to_cpus(buf); + 
buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + cpu_to_le32s(buf); + buf++; + } +} + +static inline void rmd320_transform_helper(struct rmd320_ctx *ctx) +{ + le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); + rmd320_transform(ctx->state, ctx->buffer); +} + +static void rmd320_init(struct crypto_tfm *tfm) +{ + struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); + + rctx->byte_count = 0; + + rctx->state[0] = RMD_H0; + rctx->state[1] = RMD_H1; + rctx->state[2] = RMD_H2; + rctx->state[3] = RMD_H3; + rctx->state[4] = RMD_H4; + rctx->state[5] = RMD_H5; + rctx->state[6] = RMD_H6; + rctx->state[7] = RMD_H7; + rctx->state[8] = RMD_H8; + rctx->state[9] = RMD_H9; + + memset(rctx->buffer, 0, sizeof(rctx->buffer)); +} + +static void rmd320_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) +{ + struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); + + rctx->byte_count += len; + + /* Enough space in buffer? If so copy and we're done */ + if (avail > len) { + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, len); + return; + } + + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), + data, avail); + + rmd320_transform_helper(rctx); + data += avail; + len -= avail; + + while (len >= sizeof(rctx->buffer)) { + memcpy(rctx->buffer, data, sizeof(rctx->buffer)); + rmd320_transform_helper(rctx); + data += sizeof(rctx->buffer); + len -= sizeof(rctx->buffer); + } + + memcpy(rctx->buffer, data, len); +} + +/* Add padding and return the message digest. */ +static void rmd320_final(struct crypto_tfm *tfm, u8 *out) +{ + struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); + u32 index, padlen; + u64 bits; + static const u8 padding[64] = { 0x80, }; + bits = rctx->byte_count << 3; + + /* Pad out to 56 mod 64 */ + index = rctx->byte_count & 0x3f; + padlen = (index < 56) ? (56 - index) : ((64+56) - index); + rmd320_update(tfm, padding, padlen); + + /* Append length */ + rmd320_update(tfm, (const u8 *)&bits, sizeof(bits)); + + /* Store state in digest */ + memcpy(out, rctx->state, sizeof(rctx->state)); + + /* Wipe context */ + memset(rctx, 0, sizeof(*rctx)); +} + +static struct crypto_alg alg = { + .cra_name = "rmd320", + .cra_driver_name = "rmd320", + .cra_flags = CRYPTO_ALG_TYPE_DIGEST, + .cra_blocksize = RMD320_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rmd320_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { .digest = { + .dia_digestsize = RMD320_DIGEST_SIZE, + .dia_init = rmd320_init, + .dia_update = rmd320_update, + .dia_final = rmd320_final } } +}; + +static int __init rmd320_mod_init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit rmd320_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(rmd320_mod_init); +module_exit(rmd320_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); + +MODULE_ALIAS("rmd320"); -- cgit v1.2.3 From 6cc27cbb08b5b826e774585dd650f6785b4166c7 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Fri, 9 May 2008 21:29:35 +0800 Subject: [CRYPTO] tcrypt: Add test vectors for RIPEMD-256 and RIPEMD-320 This patch adds test vectors for RIPEMD-256 and RIPEMD-320 hash algorithms. 
The test vectors are taken from Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 21 ++++++++- crypto/tcrypt.h | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 155 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 69eb29e1..e0ea4d53 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -76,7 +76,8 @@ static char *check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", "seed", "salsa20", "rmd128", "rmd160", "lzo", "cts", NULL + "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", + "lzo", "cts", NULL }; static void hexdump(unsigned char *buf, unsigned int len) @@ -1559,7 +1560,7 @@ static void do_test(void) case 29: test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); break; - + case 30: test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); @@ -1632,6 +1633,14 @@ static void do_test(void) test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS); break; + case 41: + test_hash("rmd256", rmd256_tv_template, RMD256_TEST_VECTORS); + break; + + case 42: + test_hash("rmd320", rmd320_tv_template, RMD320_TEST_VECTORS); + break; + case 100: test_hash("hmac(md5)", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); @@ -1823,6 +1832,14 @@ static void do_test(void) test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; + case 316: + test_hash_speed("rmd256", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + + case 317: + test_hash_speed("rmd320", sec, generic_hash_speed_template); + if (mode > 300 && mode < 400) break; + case 399: break; diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index af91f0cd..20bd5fef 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -293,6 +293,142 @@ static struct hash_testvec rmd160_tv_template[] = { } }; +/* + * RIPEMD-256 test vectors + */ +#define RMD256_TEST_VECTORS 8 + +static struct hash_testvec rmd256_tv_template[] = { + { + .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18" + "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a" + "\x2d\x97\x74\xfb\x1e\x5d\x02\x63" + "\x80\xae\x01\x68\xe3\xc5\x52\x2d", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9" + "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c" + "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf" + "\xcd\x88\x3a\x91\x34\x69\x29\x25", + }, { + .plaintext = "abc", + .psize = 3, + .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb" + "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1" + "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e" + "\x1e\x42\xd2\xe9\x75\x45\x9b\x65", + }, { + .plaintext = "message digest", + .psize = 14, + .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a" + "\x51\x4d\x5c\x91\x4c\x39\x2c\x90" + "\x18\xc7\xc4\x6b\xc1\x44\x65\x55" + "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e", + }, { + .plaintext = "abcdefghijklmnopqrstuvwxyz", + .psize = 26, + .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16" + "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc" + "\x78\x96\x11\x8a\x51\x97\x96\x87" + "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33", + }, { + .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" + "fghijklmnopqrstuvwxyz0123456789", + .psize = 62, + .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20" + "\xb8\x44\x24\xae\x93\x1c\xbb\x1f" + "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1" + "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8", + }, { + .plaintext = "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890", + 
.psize = 80, + .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa" + "\xf9\x13\x68\xc0\x6a\x62\x75\xb5" + "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed" + "\xfd\x67\x78\xdf\x89\xa8\x90\xdd", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighij" + "hijkijkljklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8" + "\xc8\xd9\x12\x85\x73\xe7\xa9\x80" + "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e" + "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f", + .np = 2, + .tap = { 28, 28 }, + } +}; + +/* + * RIPEMD-320 test vectors + */ +#define RMD320_TEST_VECTORS 8 + +static struct hash_testvec rmd320_tv_template[] = { + { + .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1" + "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25" + "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e" + "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8", + }, { + .plaintext = "a", + .psize = 1, + .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5" + "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57" + "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54" + "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d", + }, { + .plaintext = "abc", + .psize = 3, + .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d" + "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08" + "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74" + "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d", + }, { + .plaintext = "message digest", + .psize = 14, + .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68" + "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa" + "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d" + "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97", + }, { + .plaintext = "abcdefghijklmnopqrstuvwxyz", + .psize = 26, + .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93" + "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4" + "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed" + "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09", + }, { + .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" + "fghijklmnopqrstuvwxyz0123456789", + .psize = 62, + .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2" + "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c" + "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9" + "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4", + }, { + .plaintext = "1234567890123456789012345678901234567890" + "1234567890123456789012345678901234567890", + .psize = 80, + .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6" + "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41" + "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f" + "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42", + }, { + .plaintext = "abcdbcdecdefdefgefghfghighij" + "hijkijkljklmklmnlmnomnopnopq", + .psize = 56, + .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4" + "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59" + "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b" + "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac", + .np = 2, + .tap = { 28, 28 }, + } +}; + /* * SHA1 test vectors from from FIPS PUB 180-1 */ -- cgit v1.2.3 From 12ba5b383139c21e04da614e7b3bc5fd2f75c175 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Fri, 9 May 2008 21:30:27 +0800 Subject: [CRYPTO] ripemd: Add Kconfig entries for extended RIPEMD hash algorithms This patch adds Kconfig entries for RIPEMD-256 and RIPEMD-320. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index cfc521a0..5963a956 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -264,6 +264,31 @@ config CRYPTO_RMD160 to be used as a secure replacement for the 128-bit hash functions MD4, MD5 and it's predecessor RIPEMD (not to be confused with RIPEMD-128). 
+ Its speed is comparable to SHA1 and there are no known attacks against + RIPEMD-160. + + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. + See + +config CRYPTO_RMD256 + tristate "RIPEMD-256 digest algorithm" + select CRYPTO_ALGAPI + help + RIPEMD-256 is an optional extension of RIPEMD-128 with a 256 bit hash. + It is intended for applications that require longer hash-results, without + needing a larger security level (than RIPEMD-128). + + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. + See + +config CRYPTO_RMD320 + tristate "RIPEMD-320 digest algorithm" + select CRYPTO_ALGAPI + help + RIPEMD-320 is an optional extension of RIPEMD-160 with a 320 bit hash. + It is intended for applications that require longer hash-results, without + needing a larger security level (than RIPEMD-160). + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. See -- cgit v1.2.3 From 9cd10badbf647f89dc0b9921af097101e8dd78a1 Mon Sep 17 00:00:00 2001 From: Loc Ho Date: Wed, 14 May 2008 20:41:47 +0800 Subject: [CRYPTO] hash: Add asynchronous hash support This patch adds asynchronous hash and digest support. Signed-off-by: Loc Ho Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/ahash.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/api.c | 8 +++-- crypto/digest.c | 81 +++++++++++++++++++++++++++++++++++++++++ crypto/hash.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++----- crypto/internal.h | 1 + 6 files changed, 288 insertions(+), 11 deletions(-) create mode 100644 crypto/ahash.c diff --git a/crypto/Makefile b/crypto/Makefile index 807656b6..d4f3ed85 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o crypto_hash-objs := hash.o +crypto_hash-objs += ahash.o obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o diff --git a/crypto/ahash.c b/crypto/ahash.c new file mode 100644 index 00000000..a83e035d --- /dev/null +++ b/crypto/ahash.c @@ -0,0 +1,106 @@ +/* + * Asynchronous Cryptographic Hash operations. + * + * This is the asynchronous version of hash.c with notification of + * completion via a callback. + * + * Copyright (c) 2008 Loc Ho + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct ahash_alg *ahash = crypto_ahash_alg(tfm); + unsigned long alignmask = crypto_ahash_alignmask(tfm); + int ret; + u8 *buffer, *alignbuffer; + unsigned long absize; + + absize = keylen + alignmask; + buffer = kmalloc(absize, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + memcpy(alignbuffer, key, keylen); + ret = ahash->setkey(tfm, alignbuffer, keylen); + memset(alignbuffer, 0, keylen); + kfree(buffer); + return ret; +} + +static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct ahash_alg *ahash = crypto_ahash_alg(tfm); + unsigned long alignmask = crypto_ahash_alignmask(tfm); + + if ((unsigned long)key & alignmask) + return ahash_setkey_unaligned(tfm, key, keylen); + + return ahash->setkey(tfm, key, keylen); +} + +static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return alg->cra_ctxsize; +} + +static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash; + struct ahash_tfm *crt = &tfm->crt_ahash; + + if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) + return -EINVAL; + + crt->init = alg->init; + crt->update = alg->update; + crt->final = alg->final; + crt->digest = alg->digest; + crt->setkey = ahash_setkey; + crt->base = __crypto_ahash_cast(tfm); + crt->digestsize = alg->digestsize; + + return 0; +} + +static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_printf(m, "type : ahash\n"); + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
+ "yes" : "no"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); +} + +const struct crypto_type crypto_ahash_type = { + .ctxsize = crypto_ahash_ctxsize, + .init = crypto_init_ahash_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_ahash_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_ahash_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/api.c b/crypto/api.c index 0a0f41ef..d06e3327 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) return crypto_init_cipher_ops(tfm); case CRYPTO_ALG_TYPE_DIGEST: - return crypto_init_digest_ops(tfm); - + if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != + CRYPTO_ALG_TYPE_HASH_MASK) + return crypto_init_digest_ops_async(tfm); + else + return crypto_init_digest_ops(tfm); + case CRYPTO_ALG_TYPE_COMPRESS: return crypto_init_compress_ops(tfm); diff --git a/crypto/digest.c b/crypto/digest.c index b526cc34..025c9aea 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -157,3 +157,84 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm) void crypto_exit_digest_ops(struct crypto_tfm *tfm) { } + +static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key, + unsigned int keylen) +{ + crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK); + return -ENOSYS; +} + +static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async); + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; + + crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK); + return dalg->dia_setkey(tfm, key, keylen); +} + +static int digest_async_init(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; + + dalg->dia_init(tfm); + return 0; +} + +static int digest_async_update(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + update(&desc, req->src, req->nbytes); + return 0; +} + +static int digest_async_final(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + final(&desc, req->result); + return 0; +} + +static int digest_async_digest(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + return digest(&desc, req->src, req->nbytes, req->result); +} + +int crypto_init_digest_ops_async(struct crypto_tfm *tfm) +{ + struct ahash_tfm *crt = &tfm->crt_ahash; + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; + + if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) + return -EINVAL; + + crt->init = digest_async_init; + crt->update = digest_async_update; + crt->final = digest_async_final; + crt->digest = digest_async_digest; + crt->setkey = dalg->dia_setkey ? 
digest_async_setkey : + digest_async_nosetkey; + crt->digestsize = dalg->dia_digestsize; + crt->base = __crypto_ahash_cast(tfm); + + return 0; +} diff --git a/crypto/hash.c b/crypto/hash.c index 7dcff671..f9400a01 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -59,24 +59,108 @@ static int hash_setkey(struct crypto_hash *crt, const u8 *key, return alg->setkey(crt, key, keylen); } -static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async); + struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm); + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + + return alg->setkey(tfm_hash, key, keylen); +} + +static int hash_async_init(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + return alg->init(&desc); +} + +static int hash_async_update(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + return alg->update(&desc, req->src, req->nbytes); +} + +static int hash_async_final(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + return alg->final(&desc, req->result); +} + +static int hash_async_digest(struct ahash_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + struct hash_desc desc = { + .tfm = __crypto_hash_cast(tfm), + .flags = req->base.flags, + }; + + return alg->digest(&desc, req->src, req->nbytes, req->result); +} + +static int crypto_init_hash_ops_async(struct crypto_tfm *tfm) +{ + struct ahash_tfm *crt = &tfm->crt_ahash; + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + + crt->init = hash_async_init; + crt->update = hash_async_update; + crt->final = hash_async_final; + crt->digest = hash_async_digest; + crt->setkey = hash_async_setkey; + crt->digestsize = alg->digestsize; + crt->base = __crypto_ahash_cast(tfm); + + return 0; +} + +static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm) { struct hash_tfm *crt = &tfm->crt_hash; struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) - return -EINVAL; - - crt->init = alg->init; - crt->update = alg->update; - crt->final = alg->final; - crt->digest = alg->digest; - crt->setkey = hash_setkey; + crt->init = alg->init; + crt->update = alg->update; + crt->final = alg->final; + crt->digest = alg->digest; + crt->setkey = hash_setkey; crt->digestsize = alg->digestsize; return 0; } +static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + + if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) + return -EINVAL; + + if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK) + return crypto_init_hash_ops_async(tfm); + else + return crypto_init_hash_ops_sync(tfm); +} + static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) __attribute__ ((unused)); static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) diff --git a/crypto/internal.h 
b/crypto/internal.h index 32f4c214..683fcb2d 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); int crypto_init_digest_ops(struct crypto_tfm *tfm); +int crypto_init_digest_ops_async(struct crypto_tfm *tfm); int crypto_init_cipher_ops(struct crypto_tfm *tfm); int crypto_init_compress_ops(struct crypto_tfm *tfm); -- cgit v1.2.3 From 66add3aad4c701180a1d48096765da022a107cdc Mon Sep 17 00:00:00 2001 From: Loc Ho Date: Wed, 14 May 2008 21:23:00 +0800 Subject: [CRYPTO] cryptd: Add asynchronous hash support This patch adds asynchronous hash support to crypto daemon. Signed-off-by: Loc Ho Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/cryptd.c | 243 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 244 insertions(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index 5963a956..795e31c8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -65,6 +65,7 @@ config CRYPTO_NULL config CRYPTO_CRYPTD tristate "Software async crypto daemon" select CRYPTO_BLKCIPHER + select CRYPTO_HASH select CRYPTO_MANAGER help This is a generic software asynchronous crypto daemon that diff --git a/crypto/cryptd.c b/crypto/cryptd.c index f38e1473..d3ecd7e7 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -45,6 +45,13 @@ struct cryptd_blkcipher_request_ctx { crypto_completion_t complete; }; +struct cryptd_hash_ctx { + struct crypto_hash *child; +}; + +struct cryptd_hash_request_ctx { + crypto_completion_t complete; +}; static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm) { @@ -260,6 +267,240 @@ out_put_alg: return inst; } +static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_spawn *spawn = &ictx->spawn; + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_hash *cipher; + + cipher = crypto_spawn_hash(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + tfm->crt_ahash.reqsize = + sizeof(struct cryptd_hash_request_ctx); + return 0; +} + +static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) +{ + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_state *state = cryptd_get_state(tfm); + int active; + + mutex_lock(&state->mutex); + active = ahash_tfm_in_queue(&state->queue, + __crypto_ahash_cast(tfm)); + mutex_unlock(&state->mutex); + + BUG_ON(active); + + crypto_free_hash(ctx->child); +} + +static int cryptd_hash_setkey(struct crypto_ahash *parent, + const u8 *key, unsigned int keylen) +{ + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); + struct crypto_hash *child = ctx->child; + int err; + + crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_hash_setkey(child, key, keylen); + crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int cryptd_hash_enqueue(struct ahash_request *req, + crypto_completion_t complete) +{ + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cryptd_state *state = + cryptd_get_state(crypto_ahash_tfm(tfm)); + int err; + + rctx->complete = req->base.complete; + req->base.complete = complete; + + 
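+ /* Editorial note, not part of the original patch: the caller's + * completion handler is stashed in the request context and replaced + * with the daemon's own handler, so the cryptd worker thread can run + * the hash and then hand the result back through the caller's callback. */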
spin_lock_bh(&state->lock); + err = ahash_enqueue_request(&state->queue, req); + spin_unlock_bh(&state->lock); + + wake_up_process(state->task); + return err; +} + +static void cryptd_hash_init(struct crypto_async_request *req_async, int err) +{ + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct crypto_hash *child = ctx->child; + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx; + struct hash_desc desc; + + rctx = ahash_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + desc.tfm = child; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_crt(child)->init(&desc); + + req->base.complete = rctx->complete; + +out: + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); +} + +static int cryptd_hash_init_enqueue(struct ahash_request *req) +{ + return cryptd_hash_enqueue(req, cryptd_hash_init); +} + +static void cryptd_hash_update(struct crypto_async_request *req_async, int err) +{ + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct crypto_hash *child = ctx->child; + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx; + struct hash_desc desc; + + rctx = ahash_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + desc.tfm = child; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_crt(child)->update(&desc, + req->src, + req->nbytes); + + req->base.complete = rctx->complete; + +out: + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); +} + +static int cryptd_hash_update_enqueue(struct ahash_request *req) +{ + return cryptd_hash_enqueue(req, cryptd_hash_update); +} + +static void cryptd_hash_final(struct crypto_async_request *req_async, int err) +{ + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct crypto_hash *child = ctx->child; + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx; + struct hash_desc desc; + + rctx = ahash_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + desc.tfm = child; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_crt(child)->final(&desc, req->result); + + req->base.complete = rctx->complete; + +out: + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); +} + +static int cryptd_hash_final_enqueue(struct ahash_request *req) +{ + return cryptd_hash_enqueue(req, cryptd_hash_final); +} + +static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) +{ + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct crypto_hash *child = ctx->child; + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx; + struct hash_desc desc; + + rctx = ahash_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + desc.tfm = child; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_crt(child)->digest(&desc, + req->src, + req->nbytes, + req->result); + + req->base.complete = rctx->complete; + +out: + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); +} + +static int cryptd_hash_digest_enqueue(struct ahash_request *req) +{ + return cryptd_hash_enqueue(req, cryptd_hash_digest); +} + +static struct crypto_instance *cryptd_alloc_hash( + struct rtattr **tb, struct cryptd_state *state) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, + 
CRYPTO_ALG_TYPE_HASH_MASK); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = cryptd_alloc_instance(alg, state); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; + inst->alg.cra_type = &crypto_ahash_type; + + inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; + inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); + + inst->alg.cra_init = cryptd_hash_init_tfm; + inst->alg.cra_exit = cryptd_hash_exit_tfm; + + inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; + inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; + inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; + inst->alg.cra_ahash.setkey = cryptd_hash_setkey; + inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + static struct cryptd_state state; static struct crypto_instance *cryptd_alloc(struct rtattr **tb) @@ -273,6 +514,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_BLKCIPHER: return cryptd_alloc_blkcipher(tb, &state); + case CRYPTO_ALG_TYPE_DIGEST: + return cryptd_alloc_hash(tb, &state); } return ERR_PTR(-EINVAL); -- cgit v1.2.3 From 0b265ebd6aeb13dba1684b1c46fac7d2f444009c Mon Sep 17 00:00:00 2001 From: Loc Ho Date: Wed, 14 May 2008 21:24:51 +0800 Subject: [CRYPTO] tcrypt: Use asynchronous hash interface This patch changes tcrypt to use the new asynchronous hash interface for testing hash algorithm correctness. The speed tests will continue to use the existing interface for now. Signed-off-by: Loc Ho Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 76 ++++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 57 insertions(+), 19 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e0ea4d53..e99cb4bc 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -104,22 +104,30 @@ static void test_hash(char *algo, struct hash_testvec *template, unsigned int i, j, k, temp; struct scatterlist sg[8]; char result[64]; - struct crypto_hash *tfm; - struct hash_desc desc; + struct crypto_ahash *tfm; + struct ahash_request *req; + struct tcrypt_result tresult; int ret; void *hash_buff; printk("\ntesting %s\n", algo); - tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); + init_completion(&tresult.completion); + + tfm = crypto_alloc_ahash(algo, 0, 0); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } - desc.tfm = tfm; - desc.flags = 0; + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + printk(KERN_ERR "failed to allocate request for %s\n", algo); + goto out_noreq; + } + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &tresult); for (i = 0; i < tcount; i++) { printk("test %u:\n", i + 1); @@ -133,8 +141,9 @@ static void test_hash(char *algo, struct hash_testvec *template, sg_init_one(&sg[0], hash_buff, template[i].psize); if (template[i].ksize) { - ret = crypto_hash_setkey(tfm, template[i].key, - template[i].ksize); + crypto_ahash_clear_flags(tfm, ~0); + ret = crypto_ahash_setkey(tfm, template[i].key, + template[i].ksize); if (ret) { printk("setkey() failed ret=%d\n", ret); kfree(hash_buff); @@ -142,17 +151,30 @@ static void test_hash(char *algo, struct hash_testvec *template, } } - ret = crypto_hash_digest(&desc, sg, template[i].psize, result); - if (ret) { + ahash_request_set_crypt(req, sg, result, template[i].psize); + ret = crypto_ahash_digest(req); + switch (ret) { + case 0: + 
break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &tresult.completion); + if (!ret && !(ret = tresult.err)) { + INIT_COMPLETION(tresult.completion); + break; + } + /* fall through */ + default: printk("digest () failed ret=%d\n", ret); kfree(hash_buff); goto out; } - hexdump(result, crypto_hash_digestsize(tfm)); + hexdump(result, crypto_ahash_digestsize(tfm)); printk("%s\n", memcmp(result, template[i].digest, - crypto_hash_digestsize(tfm)) ? + crypto_ahash_digestsize(tfm)) ? "fail" : "pass"); kfree(hash_buff); } @@ -181,8 +203,9 @@ static void test_hash(char *algo, struct hash_testvec *template, } if (template[i].ksize) { - ret = crypto_hash_setkey(tfm, template[i].key, - template[i].ksize); + crypto_ahash_clear_flags(tfm, ~0); + ret = crypto_ahash_setkey(tfm, template[i].key, + template[i].ksize); if (ret) { printk("setkey() failed ret=%d\n", ret); @@ -190,23 +213,38 @@ static void test_hash(char *algo, struct hash_testvec *template, } } - ret = crypto_hash_digest(&desc, sg, template[i].psize, - result); - if (ret) { + ahash_request_set_crypt(req, sg, result, + template[i].psize); + ret = crypto_ahash_digest(req); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &tresult.completion); + if (!ret && !(ret = tresult.err)) { + INIT_COMPLETION(tresult.completion); + break; + } + /* fall through */ + default: printk("digest () failed ret=%d\n", ret); goto out; } - hexdump(result, crypto_hash_digestsize(tfm)); + hexdump(result, crypto_ahash_digestsize(tfm)); printk("%s\n", memcmp(result, template[i].digest, - crypto_hash_digestsize(tfm)) ? + crypto_ahash_digestsize(tfm)) ? "fail" : "pass"); } } out: - crypto_free_hash(tfm); + ahash_request_free(req); +out_noreq: + crypto_free_ahash(tfm); } static void test_aead(char *algo, int enc, struct aead_testvec *template, -- cgit v1.2.3 From 9c088156e9c3490bdba0e1c4e6f9229eedf17b3d Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Tue, 20 May 2008 11:41:48 +0800 Subject: [CRYPTO] rmd128: Fix endian problems This patch is based on Sebastian Siewior's patch and fixes endian issues making rmd128 work properly on big-endian machines. Signed-off-by: Adrian-Ken Rueegsegger Acked-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/rmd128.c | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/crypto/rmd128.c b/crypto/rmd128.c index f72d2ce8..89a535aa 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c @@ -44,7 +44,7 @@ struct rmd128_ctx { #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ #define ROUND(a, b, c, d, f, k, x, s) { \ - (a) += f((b), (c), (d)) + (x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ (a) = rol32((a), (s)); \ } @@ -218,28 +218,6 @@ static void rmd128_transform(u32 *state, u32 const *in) return; } -static inline void le32_to_cpu_array(u32 *buf, unsigned int words) -{ - while (words--) { - le32_to_cpus(buf); - buf++; - } -} - -static inline void cpu_to_le32_array(u32 *buf, unsigned int words) -{ - while (words--) { - cpu_to_le32s(buf); - buf++; - } -} - -static inline void rmd128_transform_helper(struct rmd128_ctx *ctx) -{ - le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); - rmd128_transform(ctx->state, ctx->buffer); -} - static void rmd128_init(struct crypto_tfm *tfm) { struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); @@ -272,13 +250,13 @@ static void rmd128_update(struct crypto_tfm *tfm, const u8 *data, memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); - rmd128_transform_helper(rctx); + rmd128_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd128_transform_helper(rctx); + rmd128_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } @@ -290,10 +268,12 @@ static void rmd128_update(struct crypto_tfm *tfm, const u8 *data, static void rmd128_final(struct crypto_tfm *tfm, u8 *out) { struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); - u32 index, padlen; + u32 i, index, padlen; u64 bits; + u32 *dst = (u32 *)out; static const u8 padding[64] = { 0x80, }; - bits = rctx->byte_count << 3; + + bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; @@ -304,7 +284,8 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out) rmd128_update(tfm, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ - memcpy(out, rctx->state, sizeof(rctx->state)); + for (i = 0; i < 4; i++) + dst[i] = cpu_to_le32(rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); -- cgit v1.2.3 From 59b3bcebc6de4c3fc1fc4d54b289ee30bb382b64 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Mon, 26 May 2008 20:32:52 +1000 Subject: [CRYPTO] rmd160: Fix endian issues This patch fixes endian issues making rmd160 work properly on big-endian machines. 
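[Editorial illustration, not part of the patch series.] The load-side convention behind these endian fixes, sketched as stand-alone C: message words are read as little-endian regardless of host byte order, which is what the le32_to_cpu() call added to the ROUND macro achieves. The helper name load_le32 below is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: a portable little-endian 32-bit load. Assembling
     * the word byte by byte yields the same value on little- and big-endian
     * hosts, which is why the digests now match across architectures. */
    static uint32_t load_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            const uint8_t block[4] = { 0x01, 0x02, 0x03, 0x04 };

            /* Prints 0x04030201 on any host. */
            printf("0x%08x\n", (unsigned)load_le32(block));
            return 0;
    }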
Signed-off-by: Adrian-Ken Rueegsegger Acked-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/rmd160.c | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/crypto/rmd160.c b/crypto/rmd160.c index 80d647aa..136e31f5 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -47,7 +47,7 @@ struct rmd160_ctx { #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ - (a) += f((b), (c), (d)) + (x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } @@ -261,28 +261,6 @@ static void rmd160_transform(u32 *state, u32 const *in) return; } -static inline void le32_to_cpu_array(u32 *buf, unsigned int words) -{ - while (words--) { - le32_to_cpus(buf); - buf++; - } -} - -static inline void cpu_to_le32_array(u32 *buf, unsigned int words) -{ - while (words--) { - cpu_to_le32s(buf); - buf++; - } -} - -static inline void rmd160_transform_helper(struct rmd160_ctx *ctx) -{ - le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); - rmd160_transform(ctx->state, ctx->buffer); -} - static void rmd160_init(struct crypto_tfm *tfm) { struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); @@ -316,13 +294,13 @@ static void rmd160_update(struct crypto_tfm *tfm, const u8 *data, memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); - rmd160_transform_helper(rctx); + rmd160_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd160_transform_helper(rctx); + rmd160_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } @@ -334,10 +312,12 @@ static void rmd160_update(struct crypto_tfm *tfm, const u8 *data, static void rmd160_final(struct crypto_tfm *tfm, u8 *out) { struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); - u32 index, padlen; + u32 i, index, padlen; u64 bits; + u32 *dst = (u32 *)out; static const u8 padding[64] = { 0x80, }; - bits = rctx->byte_count << 3; + + bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; @@ -348,7 +328,8 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out) rmd160_update(tfm, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ - memcpy(out, rctx->state, sizeof(rctx->state)); + for (i = 0; i < 5; i++) + dst[i] = cpu_to_le32(rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); -- cgit v1.2.3 From c36dddeb2890c58c41f9b9899bff02b136f75237 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Mon, 26 May 2008 20:33:44 +1000 Subject: [CRYPTO] rmd256: Fix endian issues This patch fixes endian issues making rmd256 work properly on big-endian machines. Signed-off-by: Adrian-Ken Rueegsegger Acked-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/rmd256.c | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/crypto/rmd256.c b/crypto/rmd256.c index 060ee81c..88f22037 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c @@ -44,7 +44,7 @@ struct rmd256_ctx { #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ #define ROUND(a, b, c, d, f, k, x, s) { \ - (a) += f((b), (c), (d)) + (x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ (a) = rol32((a), (s)); \ } @@ -233,28 +233,6 @@ static void rmd256_transform(u32 *state, u32 const *in) return; } -static inline void le32_to_cpu_array(u32 *buf, unsigned int words) -{ - while (words--) { - le32_to_cpus(buf); - buf++; - } -} - -static inline void cpu_to_le32_array(u32 *buf, unsigned int words) -{ - while (words--) { - cpu_to_le32s(buf); - buf++; - } -} - -static inline void rmd256_transform_helper(struct rmd256_ctx *ctx) -{ - le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); - rmd256_transform(ctx->state, ctx->buffer); -} - static void rmd256_init(struct crypto_tfm *tfm) { struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); @@ -291,13 +269,13 @@ static void rmd256_update(struct crypto_tfm *tfm, const u8 *data, memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); - rmd256_transform_helper(rctx); + rmd256_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd256_transform_helper(rctx); + rmd256_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } @@ -309,10 +287,12 @@ static void rmd256_update(struct crypto_tfm *tfm, const u8 *data, static void rmd256_final(struct crypto_tfm *tfm, u8 *out) { struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); - u32 index, padlen; + u32 i, index, padlen; u64 bits; + u32 *dst = (u32 *)out; static const u8 padding[64] = { 0x80, }; - bits = rctx->byte_count << 3; + + bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; @@ -323,7 +303,8 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out) rmd256_update(tfm, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ - memcpy(out, rctx->state, sizeof(rctx->state)); + for (i = 0; i < 8; i++) + dst[i] = cpu_to_le32(rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); -- cgit v1.2.3 From c681195f7bdfd25b90c3e088f3c9d0ef9c2a2276 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Mon, 26 May 2008 20:54:34 +1000 Subject: [CRYPTO] rmd320: Fix endian issues This patch fixes endian issues making rmd320 work properly on big-endian machines. 
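The output side mirrors the input fix (again a sketch, not part of the patch): the digest must leave the machine as little-endian bytes, which the per-word cpu_to_le32() stores in the rmd*_final() routines guarantee, whereas the old memcpy() of raw state words was only correct on little-endian hosts. A portable user-space equivalent:

	/* Hypothetical illustration: emit one state word in little-endian
	 * byte order, matching what storing a cpu_to_le32() value does. */
	#include <stdint.h>

	static void store_le32(unsigned char *p, uint32_t w)
	{
		p[0] = (unsigned char)(w & 0xff);
		p[1] = (unsigned char)((w >> 8) & 0xff);
		p[2] = (unsigned char)((w >> 16) & 0xff);
		p[3] = (unsigned char)((w >> 24) & 0xff);
	}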
Signed-off-by: Adrian-Ken Rueegsegger Acked-by: Sebastian Siewior Signed-off-by: Herbert Xu --- crypto/rmd320.c | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/crypto/rmd320.c b/crypto/rmd320.c index b39c0543..5b172f89 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c @@ -47,7 +47,7 @@ struct rmd320_ctx { #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ - (a) += f((b), (c), (d)) + (x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } @@ -280,28 +280,6 @@ static void rmd320_transform(u32 *state, u32 const *in) return; } -static inline void le32_to_cpu_array(u32 *buf, unsigned int words) -{ - while (words--) { - le32_to_cpus(buf); - buf++; - } -} - -static inline void cpu_to_le32_array(u32 *buf, unsigned int words) -{ - while (words--) { - cpu_to_le32s(buf); - buf++; - } -} - -static inline void rmd320_transform_helper(struct rmd320_ctx *ctx) -{ - le32_to_cpu_array(ctx->buffer, sizeof(ctx->buffer) / sizeof(u32)); - rmd320_transform(ctx->state, ctx->buffer); -} - static void rmd320_init(struct crypto_tfm *tfm) { struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); @@ -340,13 +318,13 @@ static void rmd320_update(struct crypto_tfm *tfm, const u8 *data, memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); - rmd320_transform_helper(rctx); + rmd320_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); - rmd320_transform_helper(rctx); + rmd320_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } @@ -358,10 +336,12 @@ static void rmd320_update(struct crypto_tfm *tfm, const u8 *data, static void rmd320_final(struct crypto_tfm *tfm, u8 *out) { struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); - u32 index, padlen; + u32 i, index, padlen; u64 bits; + u32 *dst = (u32 *)out; static const u8 padding[64] = { 0x80, }; - bits = rctx->byte_count << 3; + + bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; @@ -372,7 +352,8 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out) rmd320_update(tfm, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ - memcpy(out, rctx->state, sizeof(rctx->state)); + for (i = 0; i < 10; i++) + dst[i] = cpu_to_le32(rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); -- cgit v1.2.3 From 42cff71eb7d645836454585589bbc36e772c903d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 2 Jun 2008 21:30:38 +1000 Subject: [CRYPTO] rmd: Use pointer form of endian swapping operations This patch converts the relevant code in the rmd implementations to use the pointer form of the endian swapping operations. This allows certain architectures to generate more optimised code. For example, on sparc64 this more than halves the CPU cycles on a typical hashing operation. Based on a patch by David Miller. Signed-off-by: Herbert Xu --- crypto/rmd128.c | 4 ++-- crypto/rmd160.c | 4 ++-- crypto/rmd256.c | 4 ++-- crypto/rmd320.c | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 89a535aa..1a481df6 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c @@ -44,7 +44,7 @@ struct rmd128_ctx { #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? 
x : y */ #define ROUND(a, b, c, d, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)); \ } @@ -285,7 +285,7 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out) /* Store state in digest */ for (i = 0; i < 4; i++) - dst[i] = cpu_to_le32(rctx->state[i]); + dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); diff --git a/crypto/rmd160.c b/crypto/rmd160.c index 136e31f5..e9fd5f6a 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -47,7 +47,7 @@ struct rmd160_ctx { #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } @@ -329,7 +329,7 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out) /* Store state in digest */ for (i = 0; i < 5; i++) - dst[i] = cpu_to_le32(rctx->state[i]); + dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); diff --git a/crypto/rmd256.c b/crypto/rmd256.c index 88f22037..b0885269 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c @@ -44,7 +44,7 @@ struct rmd256_ctx { #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */ #define ROUND(a, b, c, d, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)); \ } @@ -304,7 +304,7 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out) /* Store state in digest */ for (i = 0; i < 8; i++) - dst[i] = cpu_to_le32(rctx->state[i]); + dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); diff --git a/crypto/rmd320.c b/crypto/rmd320.c index 5b172f89..dba03ecf 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c @@ -47,7 +47,7 @@ struct rmd320_ctx { #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ - (a) += f((b), (c), (d)) + le32_to_cpu(x) + (k); \ + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } @@ -353,7 +353,7 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out) /* Store state in digest */ for (i = 0; i < 10; i++) - dst[i] = cpu_to_le32(rctx->state[i]); + dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); -- cgit v1.2.3 From 461b9327bf815013f1fd28433f99a9778b85cf4c Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Tue, 3 Jun 2008 20:00:16 +1000 Subject: [CRYPTO] tcrypt: Add self test for des3_ede cipher operating in cbc mode Patch to add checking of DES3 test vectors using CBC mode. FIPS-140-2 compliance mandates that any supported mode of operation must include a self test. This satisfies that requirement for cbc(des3_ede). The included test vector was generated by me using openssl. Key/IV was generated with the following command: openssl enc -des_ede_cbc -P. Input and output values were generated by repeating the string "Too many secrets" a few times over, truncating it to 128 bytes, and encrypting it with openssl using the aforementioned key. 
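For reference, the relation these vectors exercise is the standard CBC chaining rule, C_i = E_K(P_i xor C_(i-1)) with C_0 = IV. A schematic sketch of that rule follows, assuming a hypothetical des3_encrypt_block() primitive rather than the kernel cipher API; it documents what the vectors test, not how tcrypt implements it:

	#include <string.h>

	/* Hypothetical single-block 3DES primitive, for illustration only. */
	void des3_encrypt_block(const unsigned char *key,
				const unsigned char in[8], unsigned char out[8]);

	/* Schematic CBC encryption over 8-byte DES3 blocks. */
	static void cbc_encrypt(const unsigned char *key, unsigned char iv[8],
				const unsigned char *pt, unsigned char *ct,
				unsigned int nblocks)
	{
		unsigned int i, j;
		unsigned char tmp[8];

		for (i = 0; i < nblocks; i++) {
			for (j = 0; j < 8; j++)
				tmp[j] = pt[8 * i + j] ^ iv[j]; /* P_i xor C_(i-1) */
			des3_encrypt_block(key, tmp, ct + 8 * i);
			memcpy(iv, ct + 8 * i, 8); /* C_i chains into the next block */
		}
	}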
Tested successfully by myself Signed-off-by: Neil Horman Acked-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 16 ++++++++++ crypto/tcrypt.h | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 106 insertions(+), 3 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e99cb4bc..ffc1ec6d 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1220,6 +1220,14 @@ static void do_test(void) test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); + test_cipher("cbc(des3_ede)", ENCRYPT, + des3_ede_cbc_enc_tv_template, + DES3_EDE_CBC_ENC_TEST_VECTORS); + + test_cipher("cbc(des3_ede)", DECRYPT, + des3_ede_cbc_dec_tv_template, + DES3_EDE_CBC_DEC_TEST_VECTORS); + test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); @@ -1430,6 +1438,14 @@ static void do_test(void) DES3_EDE_ENC_TEST_VECTORS); test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); + + test_cipher("cbc(des3_ede)", ENCRYPT, + des3_ede_cbc_enc_tv_template, + DES3_EDE_CBC_ENC_TEST_VECTORS); + + test_cipher("cbc(des3_ede)", DECRYPT, + des3_ede_cbc_dec_tv_template, + DES3_EDE_CBC_DEC_TEST_VECTORS); break; case 5: diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 20bd5fef..801e0c28 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -1863,6 +1863,8 @@ static struct hash_testvec hmac_sha512_tv_template[] = { #define DES_CBC_DEC_TEST_VECTORS 4 #define DES3_EDE_ENC_TEST_VECTORS 3 #define DES3_EDE_DEC_TEST_VECTORS 3 +#define DES3_EDE_CBC_ENC_TEST_VECTORS 1 +#define DES3_EDE_CBC_DEC_TEST_VECTORS 1 static struct cipher_testvec des_enc_tv_template[] = { { /* From Applied Cryptography */ @@ -2101,9 +2103,6 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = { }, }; -/* - * We really need some more test vectors, especially for DES3 CBC. 
- */ static struct cipher_testvec des3_ede_enc_tv_template[] = { { /* These are from openssl */ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef" @@ -2166,6 +2165,94 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = { }, }; +static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = { + { /* Generated from openssl */ + .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" + "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" + "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", + .klen = 24, + .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", + .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + "\x53\x20\x63\x65\x65\x72\x73\x74" + "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" + "\x20\x79\x65\x53\x72\x63\x74\x65" + "\x20\x73\x6f\x54\x20\x6f\x61\x4d" + "\x79\x6e\x53\x20\x63\x65\x65\x72" + "\x73\x74\x54\x20\x6f\x6f\x4d\x20" + "\x6e\x61\x20\x79\x65\x53\x72\x63" + "\x74\x65\x20\x73\x6f\x54\x20\x6f" + "\x61\x4d\x79\x6e\x53\x20\x63\x65" + "\x65\x72\x73\x74\x54\x20\x6f\x6f" + "\x4d\x20\x6e\x61\x20\x79\x65\x53" + "\x72\x63\x74\x65\x20\x73\x6f\x54" + "\x20\x6f\x61\x4d\x79\x6e\x53\x20" + "\x63\x65\x65\x72\x73\x74\x54\x20" + "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", + .ilen = 128, + .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" + "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" + "\x12\x56\x5c\x53\x96\xb6\x00\x7d" + "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" + "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" + "\x76\xd1\xda\x0c\x94\x67\xbb\x04" + "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" + "\x22\x64\x47\xaa\x8f\x75\x13\xbf" + "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" + "\x71\x63\x2e\x89\x7b\x1e\x12\xca" + "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" + "\xd6\xf9\x21\x31\x62\x44\x45\xa6" + "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" + "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" + "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19", + .rlen = 128, + }, +}; + +static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = { + { /* Generated from openssl */ + .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24" + "\x44\x4D\x99\x5A\x12\xD6\x40\xC0" + "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", + .klen = 24, + .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", + .input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" + "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" + "\x12\x56\x5c\x53\x96\xb6\x00\x7d" + "\x90\x48\xfc\xf5\x8d\x29\x39\xcc" + "\x8a\xd5\x35\x18\x36\x23\x4e\xd7" + "\x76\xd1\xda\x0c\x94\x67\xbb\x04" + "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea" + "\x22\x64\x47\xaa\x8f\x75\x13\xbf" + "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a" + "\x71\x63\x2e\x89\x7b\x1e\x12\xca" + "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a" + "\xd6\xf9\x21\x31\x62\x44\x45\xa6" + "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc" + "\x9d\xde\xa5\x70\xe9\x42\x45\x8a" + "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19", + .ilen = 128, + .result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + "\x53\x20\x63\x65\x65\x72\x73\x74" + "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" + "\x20\x79\x65\x53\x72\x63\x74\x65" + "\x20\x73\x6f\x54\x20\x6f\x61\x4d" + "\x79\x6e\x53\x20\x63\x65\x65\x72" + "\x73\x74\x54\x20\x6f\x6f\x4d\x20" + "\x6e\x61\x20\x79\x65\x53\x72\x63" + "\x74\x65\x20\x73\x6f\x54\x20\x6f" + "\x61\x4d\x79\x6e\x53\x20\x63\x65" + "\x65\x72\x73\x74\x54\x20\x6f\x6f" + "\x4d\x20\x6e\x61\x20\x79\x65\x53" + "\x72\x63\x74\x65\x20\x73\x6f\x54" + "\x20\x6f\x61\x4d\x79\x6e\x53\x20" + "\x63\x65\x65\x72\x73\x74\x54\x20" + "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", + .rlen = 128, + }, +}; + /* * Blowfish test vectors. 
*/ -- cgit v1.2.3 From 2a5b3098e64ce75b01559789b542038b5ae8f945 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 3 Jul 2008 14:57:30 +0800 Subject: crypto: tcrypt - Remove unnecessary kmap/kunmap calls Noticed by Neil Horman: we are doing unnecessary kmap/kunmap calls on kmalloced memory. This patch removes them. For the purposes of testing SG construction, the underlying crypto code already does plenty of kmap/kunmap calls anyway. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index ffc1ec6d..87f08f9b 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -31,7 +30,7 @@ #include "tcrypt.h" /* - * Need to kmalloc() memory for testing kmap(). + * Need to kmalloc() memory for testing. */ #define TVMEMSIZE 16384 #define XBUFSIZE 32768 @@ -376,13 +375,12 @@ static void test_aead(char *algo, int enc, struct aead_testvec *template, goto next_one; } - q = kmap(sg_page(&sg[0])) + sg[0].offset; + q = input; hexdump(q, template[i].rlen); printk(KERN_INFO "enc/dec: %s\n", memcmp(q, template[i].result, template[i].rlen) ? "fail" : "pass"); - kunmap(sg_page(&sg[0])); next_one: if (!template[i].key) kfree(key); @@ -482,7 +480,7 @@ next_one: for (k = 0, temp = 0; k < template[i].np; k++) { printk(KERN_INFO "page %u\n", k); - q = kmap(sg_page(&sg[k])) + sg[k].offset; + q = &axbuf[IDX[k]]; hexdump(q, template[i].tap[k]); printk(KERN_INFO "%s\n", memcmp(q, template[i].result + temp, @@ -500,7 +498,6 @@ next_one: } temp += template[i].tap[k]; - kunmap(sg_page(&sg[k])); } } } @@ -609,13 +606,12 @@ static void test_cipher(char *algo, int enc, goto out; } - q = kmap(sg_page(&sg[0])) + sg[0].offset; + q = data; hexdump(q, template[i].rlen); printk("%s\n", memcmp(q, template[i].result, template[i].rlen) ? "fail" : "pass"); - kunmap(sg_page(&sg[0])); } kfree(data); } @@ -689,7 +685,7 @@ static void test_cipher(char *algo, int enc, temp = 0; for (k = 0; k < template[i].np; k++) { printk("page %u\n", k); - q = kmap(sg_page(&sg[k])) + sg[k].offset; + q = &xbuf[IDX[k]]; hexdump(q, template[i].tap[k]); printk("%s\n", memcmp(q, template[i].result + temp, @@ -704,7 +700,6 @@ static void test_cipher(char *algo, int enc, hexdump(&q[template[i].tap[k]], n); } temp += template[i].tap[k]; - kunmap(sg_page(&sg[k])); } } } -- cgit v1.2.3 From 9c770d77eb33f78452c9fad79fd84df279cad27d Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 4 Jul 2008 19:42:24 +0800 Subject: crypto: camellia - Use kernel-provided bitops, unaligned access helpers Remove the private implementation of 32-bit rotation and unaligned access with byteswapping. 
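The replacements are drop-in: ROL1(x) is rol32(x, 1), ROL8(x) is rol32(x, 8), ROR8(x) is ror32(x, 8), and GETU32(v, pt) becomes v = get_unaligned_be32(pt). A user-space sketch (not part of the patch) of the rotate semantics the <linux/bitops.h> helpers provide, assuming only standard C:

	#include <stdint.h>

	/* Equivalents of rol32()/ror32(); assumes 0 < s < 32, since a shift
	 * by the full word width is undefined behaviour in C. */
	static uint32_t my_rol32(uint32_t x, unsigned int s)
	{
		return (x << s) | (x >> (32 - s));
	}

	static uint32_t my_ror32(uint32_t x, unsigned int s)
	{
		return (x >> s) | (x << (32 - s));
	}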
As a bonus, fixes sparse warnings: crypto/camellia.c:602:2: warning: cast to restricted __be32 crypto/camellia.c:603:2: warning: cast to restricted __be32 crypto/camellia.c:604:2: warning: cast to restricted __be32 crypto/camellia.c:605:2: warning: cast to restricted __be32 crypto/camellia.c:710:2: warning: cast to restricted __be32 crypto/camellia.c:711:2: warning: cast to restricted __be32 crypto/camellia.c:712:2: warning: cast to restricted __be32 crypto/camellia.c:713:2: warning: cast to restricted __be32 crypto/camellia.c:714:2: warning: cast to restricted __be32 crypto/camellia.c:715:2: warning: cast to restricted __be32 crypto/camellia.c:716:2: warning: cast to restricted __be32 crypto/camellia.c:717:2: warning: cast to restricted __be32 Signed-off-by: Harvey Harrison Signed-off-by: Herbert Xu --- crypto/camellia.c | 84 ++++++++++++++++++++++++------------------------------- 1 file changed, 36 insertions(+), 48 deletions(-) diff --git a/crypto/camellia.c b/crypto/camellia.c index 493fee7e..b1cc4de6 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -35,6 +35,8 @@ #include #include #include +#include +#include static const u32 camellia_sp1110[256] = { 0x70707000,0x82828200,0x2c2c2c00,0xececec00, @@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = { /* * macros */ -#define GETU32(v, pt) \ - do { \ - /* latest breed of gcc is clever enough to use move */ \ - memcpy(&(v), (pt), 4); \ - (v) = be32_to_cpu(v); \ - } while(0) - -/* rotation right shift 1byte */ -#define ROR8(x) (((x) >> 8) + ((x) << 24)) -/* rotation left shift 1bit */ -#define ROL1(x) (((x) << 1) + ((x) >> 31)) -/* rotation left shift 1byte */ -#define ROL8(x) (((x) << 8) + ((x) >> 24)) - #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ do { \ w0 = ll; \ @@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = { ^ camellia_sp3033[(u8)(il >> 8)] \ ^ camellia_sp4404[(u8)(il )]; \ yl ^= yr; \ - yr = ROR8(yr); \ + yr = ror32(yr, 8); \ yr ^= yl; \ } while(0) @@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[7] ^= subL[1]; subR[7] ^= subR[1]; subL[1] ^= subR[1] & ~subR[9]; dw = subL[1] & subL[9], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ /* round 8 */ subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ @@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[15] ^= subL[1]; subR[15] ^= subR[1]; subL[1] ^= subR[1] & ~subR[17]; dw = subL[1] & subL[17], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ /* round 14 */ subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ @@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) } else { subL[1] ^= subR[1] & ~subR[25]; dw = subL[1] & subL[25], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ /* round 20 */ subL[27] ^= subL[1]; subR[27] ^= subR[1]; /* round 22 */ @@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[26] ^= kw4l; subR[26] ^= kw4r; kw4l ^= kw4r & ~subR[24]; dw = kw4l & subL[24], - kw4r ^= ROL1(dw); /* modified for FL(kl5) */ + kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ } /* round 17 */ subL[22] ^= kw4l; subR[22] ^= kw4r; @@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[18] ^= kw4l; subR[18] ^= kw4r; kw4l ^= kw4r & ~subR[16]; dw 
= kw4l & subL[16], - kw4r ^= ROL1(dw); /* modified for FL(kl3) */ + kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ /* round 11 */ subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ @@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[10] ^= kw4l; subR[10] ^= kw4r; kw4l ^= kw4r & ~subR[8]; dw = kw4l & subL[8], - kw4r ^= ROL1(dw); /* modified for FL(kl1) */ + kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ /* round 5 */ subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ @@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(6) = subR[5] ^ subR[7]; tl = subL[10] ^ (subR[10] & ~subR[8]); dw = tl & subL[8], /* FL(kl1) */ - tr = subR[10] ^ ROL1(dw); + tr = subR[10] ^ rol32(dw, 1); SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ SUBKEY_R(7) = subR[6] ^ tr; SUBKEY_L(8) = subL[8]; /* FL(kl1) */ @@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(9) = subR[9]; tl = subL[7] ^ (subR[7] & ~subR[9]); dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ ROL1(dw); + tr = subR[7] ^ rol32(dw, 1); SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ SUBKEY_R(10) = tr ^ subR[11]; SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ @@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(14) = subR[13] ^ subR[15]; tl = subL[18] ^ (subR[18] & ~subR[16]); dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ ROL1(dw); + tr = subR[18] ^ rol32(dw, 1); SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ SUBKEY_R(15) = subR[14] ^ tr; SUBKEY_L(16) = subL[16]; /* FL(kl3) */ @@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(17) = subR[17]; tl = subL[15] ^ (subR[15] & ~subR[17]); dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ ROL1(dw); + tr = subR[15] ^ rol32(dw, 1); SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ SUBKEY_R(18) = tr ^ subR[19]; SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ @@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) } else { tl = subL[26] ^ (subR[26] & ~subR[24]); dw = tl & subL[24], /* FL(kl5) */ - tr = subR[26] ^ ROL1(dw); + tr = subR[26] ^ rol32(dw, 1); SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ SUBKEY_R(23) = subR[22] ^ tr; SUBKEY_L(24) = subL[24]; /* FL(kl5) */ @@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(25) = subR[25]; tl = subL[23] ^ (subR[23] & ~subR[25]); dw = tl & subL[25], /* FLinv(kl6) */ - tr = subR[23] ^ ROL1(dw); + tr = subR[23] ^ rol32(dw, 1); SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ SUBKEY_R(26) = tr ^ subR[27]; SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ @@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* apply the inverse of the last half of P-function */ i = 2; do { - dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */ + dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */ SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; - dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */ + dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */ SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; - dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */ + dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */ SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; - dw = 
SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */ + dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */ SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; - dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */ + dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */ SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; - dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */ + dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */ SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; i += 8; } while (i < max); @@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey) /** * k == kll || klr || krl || krr (|| is concatenation) */ - GETU32(kll, key ); - GETU32(klr, key + 4); - GETU32(krl, key + 8); - GETU32(krr, key + 12); + kll = get_unaligned_be32(key); + klr = get_unaligned_be32(key + 4); + krl = get_unaligned_be32(key + 8); + krr = get_unaligned_be32(key + 12); /* generate KL dependent subkeys */ /* kw1 */ @@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) * (|| is concatenation) */ - GETU32(kll, key ); - GETU32(klr, key + 4); - GETU32(krl, key + 8); - GETU32(krr, key + 12); - GETU32(krll, key + 16); - GETU32(krlr, key + 20); - GETU32(krrl, key + 24); - GETU32(krrr, key + 28); + kll = get_unaligned_be32(key); + klr = get_unaligned_be32(key + 4); + krl = get_unaligned_be32(key + 8); + krr = get_unaligned_be32(key + 12); + krll = get_unaligned_be32(key + 16); + krlr = get_unaligned_be32(key + 20); + krrl = get_unaligned_be32(key + 24); + krrr = get_unaligned_be32(key + 28); /* generate KL dependent subkeys */ /* kw1 */ @@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) t0 &= ll; \ t2 |= rr; \ rl ^= t2; \ - lr ^= ROL1(t0); \ + lr ^= rol32(t0, 1); \ t3 = krl; \ t1 = klr; \ t3 &= rl; \ t1 |= lr; \ ll ^= t1; \ - rr ^= ROL1(t3); \ + rr ^= rol32(t3, 1); \ } while(0) #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ @@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) il ^= kl; \ ir ^= il ^ kr; \ yl ^= ir; \ - yr ^= ROR8(il) ^ ir; \ + yr ^= ror32(il, 8) ^ ir; \ } while(0) /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ -- cgit v1.2.3 From 83ad73957df54f7f5db8e6f50a4b23d2bf476a39 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 4 Jul 2008 19:45:57 +0800 Subject: crypto: rmd128 - sparse annotations Signed-off-by: Harvey Harrison Signed-off-by: Herbert Xu --- crypto/rmd128.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 1a481df6..5de6fa2a 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c @@ -26,7 +26,7 @@ struct rmd128_ctx { u64 byte_count; u32 state[4]; - u32 buffer[16]; + __le32 buffer[16]; }; #define K1 RMD_K1 @@ -48,7 +48,7 @@ struct rmd128_ctx { (a) = rol32((a), (s)); \ } -static void rmd128_transform(u32 *state, u32 const *in) +static void rmd128_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd; @@ -269,8 +269,8 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out) { struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); u32 i, index, padlen; - u64 bits; - u32 *dst = (u32 *)out; + __le64 bits; + __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); -- cgit v1.2.3 From 
f09d5a6a216ee08d8c3f10a1e1e13f5cda9793a8 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 4 Jul 2008 19:48:58 +0800 Subject: crypto: rmd - sparse annotations Similar to the rmd128.c annotations, significantly cuts down on the noise. Signed-off-by: Harvey Harrison Signed-off-by: Herbert Xu --- crypto/rmd160.c | 8 ++++---- crypto/rmd256.c | 8 ++++---- crypto/rmd320.c | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crypto/rmd160.c b/crypto/rmd160.c index 136e31f5..f001ec77 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -26,7 +26,7 @@ struct rmd160_ctx { u64 byte_count; u32 state[5]; - u32 buffer[16]; + __le32 buffer[16]; }; #define K1 RMD_K1 @@ -52,7 +52,7 @@ struct rmd160_ctx { (c) = rol32((c), 10); \ } -static void rmd160_transform(u32 *state, u32 const *in) +static void rmd160_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee; @@ -313,8 +313,8 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out) { struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); u32 i, index, padlen; - u64 bits; - u32 *dst = (u32 *)out; + __le64 bits; + __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); diff --git a/crypto/rmd256.c b/crypto/rmd256.c index b0885269..e3de5b4c 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c @@ -26,7 +26,7 @@ struct rmd256_ctx { u64 byte_count; u32 state[8]; - u32 buffer[16]; + __le32 buffer[16]; }; #define K1 RMD_K1 @@ -48,7 +48,7 @@ struct rmd256_ctx { (a) = rol32((a), (s)); \ } -static void rmd256_transform(u32 *state, u32 const *in) +static void rmd256_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp; @@ -288,8 +288,8 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out) { struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); u32 i, index, padlen; - u64 bits; - u32 *dst = (u32 *)out; + __le64 bits; + __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); diff --git a/crypto/rmd320.c b/crypto/rmd320.c index dba03ecf..b143d66e 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c @@ -26,7 +26,7 @@ struct rmd320_ctx { u64 byte_count; u32 state[10]; - u32 buffer[16]; + __le32 buffer[16]; }; #define K1 RMD_K1 @@ -52,7 +52,7 @@ struct rmd320_ctx { (c) = rol32((c), 10); \ } -static void rmd320_transform(u32 *state, u32 const *in) +static void rmd320_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp; @@ -337,8 +337,8 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out) { struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); u32 i, index, padlen; - u64 bits; - u32 *dst = (u32 *)out; + __le64 bits; + __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); -- cgit v1.2.3 From a8f41298907a53f66b8169977d520bcd2d7d26e1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 7 Jul 2008 20:23:56 +0800 Subject: crypto: hash - Fixed digest size check The digest size check on hash algorithms is incorrect. It's perfectly valid for hash algorithms to have a digest length longer than their block size. For example, crc32c has a block size of 1 and a digest size of 4. Rather than having it lie about its block size, this patch fixes the checks to do what they really should, which is to bound the digest size so that code placing the digest on the stack continues to work. HMAC, however, still needs to check this, as it's only defined for such algorithms. 
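To see what the new bound protects, consider the common caller pattern of staging a digest in an on-stack buffer. A hedged sketch of a hypothetical caller, not from the patch:

	#include <linux/crypto.h>

	/* Hypothetical caller: the on-stack buffer is why the core bounds
	 * digestsize at PAGE_SIZE / 8 instead of tying it to the block size. */
	static int digest_to_stack(struct hash_desc *desc, struct scatterlist *sg,
				   unsigned int nbytes)
	{
		u8 buf[PAGE_SIZE / 8]; /* always large enough: larger digests
					* are rejected at init time by the
					* patched checks */

		return crypto_hash_digest(desc, sg, nbytes, buf);
	}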
Signed-off-by: Herbert Xu --- crypto/ahash.c | 2 +- crypto/digest.c | 2 +- crypto/hash.c | 2 +- crypto/hmac.c | 16 ++++++++++------ 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index a83e035d..8c1f918a 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -68,7 +68,7 @@ static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash; struct ahash_tfm *crt = &tfm->crt_ahash; - if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) + if (alg->digestsize > PAGE_SIZE / 8) return -EINVAL; crt->init = alg->init; diff --git a/crypto/digest.c b/crypto/digest.c index 025c9aea..d63d5d96 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -141,7 +141,7 @@ int crypto_init_digest_ops(struct crypto_tfm *tfm) struct hash_tfm *ops = &tfm->crt_hash; struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; - if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) + if (dalg->dia_digestsize > PAGE_SIZE / 8) return -EINVAL; ops->init = init; diff --git a/crypto/hash.c b/crypto/hash.c index f9400a01..0d7caa9a 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -152,7 +152,7 @@ static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { struct hash_alg *alg = &tfm->__crt_alg->cra_hash; - if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) + if (alg->digestsize > PAGE_SIZE / 8) return -EINVAL; if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK) diff --git a/crypto/hmac.c b/crypto/hmac.c index 14c6351e..7ff2d6a8 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -226,6 +226,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb) struct crypto_instance *inst; struct crypto_alg *alg; int err; + int ds; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); if (err) @@ -236,6 +237,13 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb) if (IS_ERR(alg)) return ERR_CAST(alg); + inst = ERR_PTR(-EINVAL); + ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize : + alg->cra_digest.dia_digestsize; + if (ds > alg->cra_blocksize) + goto out_put_alg; + inst = crypto_alloc_instance("hmac", alg); if (IS_ERR(inst)) goto out_put_alg; @@ -246,14 +254,10 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb) inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_hash_type; - inst->alg.cra_hash.digestsize = - (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize : - alg->cra_digest.dia_digestsize; + inst->alg.cra_hash.digestsize = ds; inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + - ALIGN(inst->alg.cra_blocksize * 2 + - inst->alg.cra_hash.digestsize, + ALIGN(inst->alg.cra_blocksize * 2 + ds, sizeof(void *)); inst->alg.cra_init = hmac_init_tfm; -- cgit v1.2.3 From 1731228ac6792c28eab91c054501b4876a2f580e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 7 Jul 2008 20:54:35 +0800 Subject: crypto: hash - Removed vestigial ahash fields The base field in ahash_tfm appears to have been cut-n-pasted from ablkcipher. It isn't needed here at all. Similarly, the info field in ahash_request also appears to have originated from its cipher counter-part and is vestigial. 
Signed-off-by: Herbert Xu --- crypto/ahash.c | 1 - crypto/digest.c | 1 - crypto/hash.c | 1 - 3 files changed, 3 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 8c1f918a..e6e5906c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -76,7 +76,6 @@ static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) crt->final = alg->final; crt->digest = alg->digest; crt->setkey = ahash_setkey; - crt->base = __crypto_ahash_cast(tfm); crt->digestsize = alg->digestsize; return 0; diff --git a/crypto/digest.c b/crypto/digest.c index d63d5d96..bf332982 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -234,7 +234,6 @@ int crypto_init_digest_ops_async(struct crypto_tfm *tfm) crt->setkey = dalg->dia_setkey ? digest_async_setkey : digest_async_nosetkey; crt->digestsize = dalg->dia_digestsize; - crt->base = __crypto_ahash_cast(tfm); return 0; } diff --git a/crypto/hash.c b/crypto/hash.c index 0d7caa9a..140a7556 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -128,7 +128,6 @@ static int crypto_init_hash_ops_async(struct crypto_tfm *tfm) crt->digest = hash_async_digest; crt->setkey = hash_async_setkey; crt->digestsize = alg->digestsize; - crt->base = __crypto_ahash_cast(tfm); return 0; } -- cgit v1.2.3 From 0548e8a92c3eece837209ec1ba6df1784c0e2eea Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Mon, 7 Jul 2008 22:41:31 +0800 Subject: crypto: prng - Deterministic CPRNG This patch adds a cryptographic pseudo-random number generator based on CTR(AES-128). It is meant to be used in cases where a deterministic CPRNG is required. One of the first applications will be as an input in the IPsec IV generation process. Signed-off-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 ++ crypto/Makefile | 2 +- crypto/prng.c | 410 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/prng.h | 27 ++++ 4 files changed, 447 insertions(+), 1 deletion(-) create mode 100644 crypto/prng.c create mode 100644 crypto/prng.h diff --git a/crypto/Kconfig b/crypto/Kconfig index 795e31c8..43b7473f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -666,6 +666,15 @@ config CRYPTO_LZO help This is the LZO algorithm. +comment "Random Number Generation" + +config CRYPTO_PRNG + tristate "Pseudo Random Number Generation for Cryptographic modules" + help + This option enables the generic pseudo random number generator + for cryptographic modules. Uses the Algorithm specified in + ANSI X9.31 A.2.4 + source "drivers/crypto/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index d4f3ed85..ef61b3b6 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -69,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o - +obj-$(CONFIG_CRYPTO_PRNG) += prng.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o # diff --git a/crypto/prng.c b/crypto/prng.c new file mode 100644 index 00000000..24e4f328 --- /dev/null +++ b/crypto/prng.c @@ -0,0 +1,410 @@ +/* + * PRNG: Pseudo Random Number Generator + * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using + * AES 128 cipher in RFC3686 ctr mode + * + * (C) Neil Horman + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * any later version. 
+ * + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "prng.h" + +#define TEST_PRNG_ON_START 0 + +#define DEFAULT_PRNG_KEY "0123456789abcdef1011" +#define DEFAULT_PRNG_KSZ 20 +#define DEFAULT_PRNG_IV "defaultv" +#define DEFAULT_PRNG_IVSZ 8 +#define DEFAULT_BLK_SZ 16 +#define DEFAULT_V_SEED "zaybxcwdveuftgsh" + +/* + * Flags for the prng_context flags field + */ + +#define PRNG_FIXED_SIZE 0x1 +#define PRNG_NEED_RESET 0x2 + +/* + * Note: DT is our counter value + * I is our intermediate value + * V is our seed vector + * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf + * for implementation details + */ + + +struct prng_context { + char *prng_key; + char *prng_iv; + spinlock_t prng_lock; + unsigned char rand_data[DEFAULT_BLK_SZ]; + unsigned char last_rand_data[DEFAULT_BLK_SZ]; + unsigned char DT[DEFAULT_BLK_SZ]; + unsigned char I[DEFAULT_BLK_SZ]; + unsigned char V[DEFAULT_BLK_SZ]; + u32 rand_data_valid; + struct crypto_blkcipher *tfm; + u32 flags; +}; + +static int dbg; + +static void hexdump(char *note, unsigned char *buf, unsigned int len) +{ + if (dbg) { + printk(KERN_CRIT "%s", note); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, + 16, 1, + buf, len, false); + } +} + +#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0) + +static void xor_vectors(unsigned char *in1, unsigned char *in2, + unsigned char *out, unsigned int size) +{ + int i; + + for (i=0;i<size;i++) + out[i] = in1[i] ^ in2[i]; +} + +/* + * Returns DEFAULT_BLK_SZ bytes of random data per call + * returns 0 if generation succeeded, <0 if something went wrong + */ +static int _get_more_prng_bytes(struct prng_context *ctx) +{ + int i; + unsigned char tmp[DEFAULT_BLK_SZ]; + int ret; + + struct blkcipher_desc desc; + struct scatterlist sg_in, sg_out; + + desc.tfm = ctx->tfm; + desc.flags = 0; + + + dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx); + + hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ); + hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ); + hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ); + + /* + * This algorithm is a 3 stage state machine + */ + for (i=0;i<3;i++) { + + desc.tfm = ctx->tfm; + desc.flags = 0; + switch (i) { + case 0: + /* + * Start by encrypting the counter value + * This gives us an intermediate value I + */ + memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ); + sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ); + hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ); + break; + case 1: + + /* + * Next xor I with our secret vector V + * encrypt that result to obtain our + * pseudo random data which we output + */ + xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ); + sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ); + hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ); + break; + case 2: + /* + * First check that we didn't produce the same random data + * that we did last time around through this + */ + if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) { + printk(KERN_ERR "ctx %p Failed repetition check!\n", + ctx); + ctx->flags |= PRNG_NEED_RESET; + return -1; + } + memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ); + + /* + * Lastly xor the random data with I + * and encrypt that to obtain a new secret vector V + */ + xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ); + sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ); + hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ); + break; + } + + /* Initialize our input buffer */ + sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ); + + /* do the encryption */ + ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ); + + /* And check the result */ + if (ret) { + dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx); + ctx->rand_data_valid = DEFAULT_BLK_SZ; + return -1; + } + + } + + /* + * Now update our DT 
value + */ + for (i=DEFAULT_BLK_SZ-1;i>0;i--) { + ctx->DT[i] = ctx->DT[i-1]; + } + ctx->DT[0] += 1; + + dbgprint("Returning new block for context %p\n",ctx); + ctx->rand_data_valid = 0; + + hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ); + hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ); + hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ); + hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ); + + return 0; +} + +/* Our exported functions */ +int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx) +{ + unsigned long flags; + unsigned char *ptr = buf; + unsigned int byte_count = (unsigned int)nbytes; + int err; + + + if (nbytes < 0) + return -EINVAL; + + spin_lock_irqsave(&ctx->prng_lock, flags); + + err = -EFAULT; + if (ctx->flags & PRNG_NEED_RESET) + goto done; + + /* + * If the FIXED_SIZE flag is on, only return whole blocks of + * pseudo random data + */ + err = -EINVAL; + if (ctx->flags & PRNG_FIXED_SIZE) { + if (nbytes < DEFAULT_BLK_SZ) + goto done; + byte_count = DEFAULT_BLK_SZ; + } + + err = byte_count; + + dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx); + + +remainder: + if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { + if (_get_more_prng_bytes(ctx) < 0) { + memset(buf, 0, nbytes); + err = -EFAULT; + goto done; + } + } + + /* + * Copy up to the next whole block size + */ + if (byte_count < DEFAULT_BLK_SZ) { + for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) { + *ptr = ctx->rand_data[ctx->rand_data_valid]; + ptr++; + byte_count--; + if (byte_count == 0) + goto done; + } + } + + /* + * Now copy whole blocks + */ + for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { + if (_get_more_prng_bytes(ctx) < 0) { + memset(buf, 0, nbytes); + err = -1; + goto done; + } + memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); + ctx->rand_data_valid += DEFAULT_BLK_SZ; + ptr += DEFAULT_BLK_SZ; + } + + /* + * Now copy any extra partial data + */ + if (byte_count) + goto remainder; + +done: + spin_unlock_irqrestore(&ctx->prng_lock, flags); + dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx); + return err; +} +EXPORT_SYMBOL_GPL(get_prng_bytes); + +struct prng_context *alloc_prng_context(void) +{ + struct prng_context *ctx=kzalloc(sizeof(struct prng_context), GFP_KERNEL); + + spin_lock_init(&ctx->prng_lock); + + if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) { + kfree(ctx); + ctx = NULL; + } + + dbgprint(KERN_CRIT "returning context %p\n",ctx); + return ctx; +} + +EXPORT_SYMBOL_GPL(alloc_prng_context); + +void free_prng_context(struct prng_context *ctx) +{ + crypto_free_blkcipher(ctx->tfm); + kfree(ctx); +} +EXPORT_SYMBOL_GPL(free_prng_context); + +int reset_prng_context(struct prng_context *ctx, + unsigned char *key, unsigned char *iv, + unsigned char *V, unsigned char *DT) +{ + int ret; + int iv_len; + int rc = -EFAULT; + + spin_lock(&ctx->prng_lock); + ctx->flags |= PRNG_NEED_RESET; + + if (key) + memcpy(ctx->prng_key,key,strlen(ctx->prng_key)); + else + ctx->prng_key = DEFAULT_PRNG_KEY; + + if (iv) + memcpy(ctx->prng_iv,iv, strlen(ctx->prng_iv)); + else + ctx->prng_iv = DEFAULT_PRNG_IV; + + if (V) + memcpy(ctx->V,V,DEFAULT_BLK_SZ); + else + memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ); + + if (DT) + memcpy(ctx->DT, DT, DEFAULT_BLK_SZ); + else + memset(ctx->DT, 0, DEFAULT_BLK_SZ); + + memset(ctx->rand_data,0,DEFAULT_BLK_SZ); + memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ); + + if (ctx->tfm) + crypto_free_blkcipher(ctx->tfm); + + ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))",0,0); + if 
(!ctx->tfm) { + dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n",ctx->tfm); + goto out; + } + + ctx->rand_data_valid = DEFAULT_BLK_SZ; + + ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key)); + if (ret) { + dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", + crypto_blkcipher_get_flags(ctx->tfm)); + crypto_free_blkcipher(ctx->tfm); + goto out; + } + + iv_len = crypto_blkcipher_ivsize(ctx->tfm); + if (iv_len) { + crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len); + } + rc = 0; + ctx->flags &= ~PRNG_NEED_RESET; +out: + spin_unlock(&ctx->prng_lock); + + return rc; + +} +EXPORT_SYMBOL_GPL(reset_prng_context); + +/* Module initialization */ +static int __init prng_mod_init(void) +{ + +#ifdef TEST_PRNG_ON_START + int i; + unsigned char tmpbuf[DEFAULT_BLK_SZ]; + + struct prng_context *ctx = alloc_prng_context(); + if (ctx == NULL) + return -EFAULT; + for (i=0;i<16;i++) { + if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) { + free_prng_context(ctx); + return -EFAULT; + } + } + free_prng_context(ctx); +#endif + + return 0; +} + +static void __exit prng_mod_fini(void) +{ + return; +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software Pseudo Random Number Generator"); +MODULE_AUTHOR("Neil Horman "); +module_param(dbg, int, 0); +MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); +module_init(prng_mod_init); +module_exit(prng_mod_fini); diff --git a/crypto/prng.h b/crypto/prng.h new file mode 100644 index 00000000..1ac9be50 --- /dev/null +++ b/crypto/prng.h @@ -0,0 +1,27 @@ +/* + * PRNG: Pseudo Random Number Generator + * + * (C) Neil Horman + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * any later version. + * + * + */ + +#ifndef _PRNG_H_ +#define _PRNG_H_ +struct prng_context; + +int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx); +struct prng_context *alloc_prng_context(void); +int reset_prng_context(struct prng_context *ctx, + unsigned char *key, unsigned char *iv, + unsigned char *V, + unsigned char *DT); +void free_prng_context(struct prng_context *ctx); + +#endif + -- cgit v1.2.3 From c41403f4f90c773e27d1f7c79324ce9531993c3f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 7 Jul 2008 22:19:53 +0800 Subject: crypto: hash - Added scatter list walking helper This patch adds the walking helpers for hash algorithms akin to those of block ciphers. This is a necessary step before we can reimplement existing hash algorithms using the new ahash interface. 
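As a usage sketch (not part of the patch), an ahash ->update() built on these helpers loops over mapped chunks roughly as follows; process_chunk() is a hypothetical per-chunk compression step, the declarations are assumed to live in crypto/internal/hash.h as a later patch in this series arranges, and the crc32c conversion that follows uses exactly this shape:

	#include <crypto/internal/hash.h>

	/* Hypothetical per-chunk step, for illustration only. */
	void process_chunk(const u8 *data, unsigned int len);

	static int example_update(struct ahash_request *req)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		/* crypto_hash_walk_first() maps the first scatterlist chunk;
		 * crypto_hash_walk_done() unmaps it and maps the next,
		 * returning 0 when the input is consumed or a negative
		 * errno on failure. */
		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
		     nbytes = crypto_hash_walk_done(&walk, 0))
			process_chunk(walk.data, nbytes);

		return 0;
	}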
Signed-off-by: Herbert Xu --- crypto/ahash.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 1 deletion(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index e6e5906c..27128f2c 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -13,7 +13,8 @@ * */ -#include +#include +#include #include #include #include @@ -23,6 +24,94 @@ #include "internal.h" +static int hash_walk_next(struct crypto_hash_walk *walk) +{ + unsigned int alignmask = walk->alignmask; + unsigned int offset = walk->offset; + unsigned int nbytes = min(walk->entrylen, + ((unsigned int)(PAGE_SIZE)) - offset); + + walk->data = crypto_kmap(walk->pg, 0); + walk->data += offset; + + if (offset & alignmask) + nbytes = alignmask + 1 - (offset & alignmask); + + walk->entrylen -= nbytes; + return nbytes; +} + +static int hash_walk_new_entry(struct crypto_hash_walk *walk) +{ + struct scatterlist *sg; + + sg = walk->sg; + walk->pg = sg_page(sg); + walk->offset = sg->offset; + walk->entrylen = sg->length; + + if (walk->entrylen > walk->total) + walk->entrylen = walk->total; + walk->total -= walk->entrylen; + + return hash_walk_next(walk); +} + +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) +{ + unsigned int alignmask = walk->alignmask; + unsigned int nbytes = walk->entrylen; + + walk->data -= walk->offset; + + if (nbytes && walk->offset & alignmask && !err) { + walk->offset += alignmask - 1; + walk->offset = ALIGN(walk->offset, alignmask + 1); + walk->data += walk->offset; + + nbytes = min(nbytes, + ((unsigned int)(PAGE_SIZE)) - walk->offset); + walk->entrylen -= nbytes; + + return nbytes; + } + + crypto_kunmap(walk->data, 0); + crypto_yield(walk->flags); + + if (err) + return err; + + walk->offset = 0; + + if (nbytes) + return hash_walk_next(walk); + + if (!walk->total) + return 0; + + walk->sg = scatterwalk_sg_next(walk->sg); + + return hash_walk_new_entry(walk); +} +EXPORT_SYMBOL_GPL(crypto_hash_walk_done); + +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk) +{ + walk->total = req->nbytes; + + if (!walk->total) + return 0; + + walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); + walk->sg = req->src; + walk->flags = req->base.flags; + + return hash_walk_new_entry(walk); +} +EXPORT_SYMBOL_GPL(crypto_hash_walk_first); + static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { -- cgit v1.2.3 From 636cbb5c38a3c4a7afcad19aae8221e59f8f5783 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 8 Jul 2008 20:54:28 +0800 Subject: crypto: crc32c - Add ahash implementation This patch reimplements crc32c using the ahash interface. This allows one tfm to be used by an unlimited number of users provided that they all use the same key (which all current crc32c users do). Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/crc32c.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 122 insertions(+), 8 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 43b7473f..ea503572 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -213,7 +213,7 @@ comment "Digest" config CRYPTO_CRC32C tristate "CRC32c CRC algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH select LIBCRC32C help Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used diff --git a/crypto/crc32c.c b/crypto/crc32c.c index 0dcf64a7..a882d9e4 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -5,20 +5,23 @@ * * This module file is a wrapper to invoke the lib/crc32c routines. 
* + * Copyright (c) 2008 Herbert Xu + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ + +#include #include #include #include -#include #include #include -#define CHKSUM_BLOCK_SIZE 32 +#define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 struct chksum_ctx { @@ -71,7 +74,7 @@ static void chksum_final(struct crypto_tfm *tfm, u8 *out) *(__le32 *)out = ~cpu_to_le32(mctx->crc); } -static int crc32c_cra_init(struct crypto_tfm *tfm) +static int crc32c_cra_init_old(struct crypto_tfm *tfm) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); @@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto_tfm *tfm) return 0; } -static struct crypto_alg alg = { +static struct crypto_alg old_alg = { .cra_name = "crc32c", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_init = crc32c_cra_init, + .cra_list = LIST_HEAD_INIT(old_alg.cra_list), + .cra_init = crc32c_cra_init_old, .cra_u = { .digest = { .dia_digestsize= CHKSUM_DIGEST_SIZE, @@ -98,14 +101,125 @@ static struct crypto_alg alg = { } }; +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. + */ +static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key, + unsigned int keylen) +{ + u32 *mctx = crypto_ahash_ctx(hash); + + if (keylen != sizeof(u32)) { + crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + *mctx = le32_to_cpup((__le32 *)key); + return 0; +} + +static int crc32c_init(struct ahash_request *req) +{ + u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + u32 *crcp = ahash_request_ctx(req); + + *crcp = *mctx; + return 0; +} + +static int crc32c_update(struct ahash_request *req) +{ + struct crypto_hash_walk walk; + u32 *crcp = ahash_request_ctx(req); + u32 crc = *crcp; + int nbytes; + + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes; + nbytes = crypto_hash_walk_done(&walk, 0)) + crc = crc32c(crc, walk.data, nbytes); + + *crcp = crc; + return 0; +} + +static int crc32c_final(struct ahash_request *req) +{ + u32 *crcp = ahash_request_ctx(req); + + *(__le32 *)req->result = ~cpu_to_le32p(crcp); + return 0; +} + +static int crc32c_digest(struct ahash_request *req) +{ + struct crypto_hash_walk walk; + u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + u32 crc = *mctx; + int nbytes; + + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes; + nbytes = crypto_hash_walk_done(&walk, 0)) + crc = crc32c(crc, walk.data, nbytes); + + *(__le32 *)req->result = ~cpu_to_le32(crc); + return 0; +} + +static int crc32c_cra_init(struct crypto_tfm *tfm) +{ + u32 *key = crypto_tfm_ctx(tfm); + + *key = ~0; + + tfm->crt_ahash.reqsize = sizeof(u32); + + return 0; +} + +static struct crypto_alg alg = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_AHASH, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_alignmask = 3, + .cra_ctxsize = sizeof(u32), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_init = crc32c_cra_init, + .cra_type = &crypto_ahash_type, + .cra_u = { + .ahash = { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = crc32c_setkey, + .init = crc32c_init, + 
.update = crc32c_update, + .final = crc32c_final, + .digest = crc32c_digest, + } + } +}; + static int __init crc32c_mod_init(void) { - return crypto_register_alg(&alg); + int err; + + err = crypto_register_alg(&old_alg); + if (err) + return err; + + err = crypto_register_alg(&alg); + if (err) + crypto_unregister_alg(&old_alg); + + return err; } static void __exit crc32c_mod_fini(void) { crypto_unregister_alg(&alg); + crypto_unregister_alg(&old_alg); } module_init(crc32c_mod_init); -- cgit v1.2.3 From d5c19f54da7b2b26a958b6392546900b1399fc8e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 10 Jul 2008 16:01:22 +0800 Subject: crypto: hash - Move ahash functions into crypto/hash.h All new crypto interfaces should go into individual files as much as possible in order to ensure that crypto.h does not collapse under its own weight. This patch moves the ahash code into crypto/hash.h and crypto/internal/hash.h respectively. Signed-off-by: Herbert Xu --- crypto/cryptd.c | 1 + crypto/digest.c | 1 + crypto/hash.c | 1 + crypto/tcrypt.c | 1 + 4 files changed, 4 insertions(+) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index d3ecd7e7..d29e06b3 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include diff --git a/crypto/digest.c b/crypto/digest.c index bf332982..ac091946 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -12,6 +12,7 @@ * */ +#include #include #include #include diff --git a/crypto/hash.c b/crypto/hash.c index 140a7556..cb86b19f 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -9,6 +9,7 @@ * any later version. */ +#include #include #include #include diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 87f08f9b..59821a22 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -15,6 +15,7 @@ * */ +#include #include #include #include -- cgit v1.2.3 From 0f4918a48efbb743bb3b6dd3dacd7609fdd43327 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 15 Jul 2008 23:46:24 +0800 Subject: Revert crypto: prng - Deterministic CPRNG This patch is clearly not ready yet for prime time. Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 -- crypto/Makefile | 2 +- crypto/prng.c | 410 -------------------------------------------------------- crypto/prng.h | 27 ---- 4 files changed, 1 insertion(+), 447 deletions(-) delete mode 100644 crypto/prng.c delete mode 100644 crypto/prng.h diff --git a/crypto/Kconfig b/crypto/Kconfig index ea503572..d8318591 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -666,15 +666,6 @@ config CRYPTO_LZO help This is the LZO algorithm. -comment "Random Number Generation" - -config CRYPTO_PRNG - tristate "Pseudo Random Number Generation for Cryptographic modules" - help - This option enables the generic pseudo random number generator - for cryptographic modules. 
Uses the Algorithm specified in - ANSI X9.31 A.2.4 - source "drivers/crypto/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index ef61b3b6..d4f3ed85 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -69,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o -obj-$(CONFIG_CRYPTO_PRNG) += prng.o + obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o # diff --git a/crypto/prng.c b/crypto/prng.c deleted file mode 100644 index 24e4f328..00000000 --- a/crypto/prng.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * PRNG: Pseudo Random Number Generator - * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using - * AES 128 cipher in RFC3686 ctr mode - * - * (C) Neil Horman - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * any later version. - * - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "prng.h" - -#define TEST_PRNG_ON_START 0 - -#define DEFAULT_PRNG_KEY "0123456789abcdef1011" -#define DEFAULT_PRNG_KSZ 20 -#define DEFAULT_PRNG_IV "defaultv" -#define DEFAULT_PRNG_IVSZ 8 -#define DEFAULT_BLK_SZ 16 -#define DEFAULT_V_SEED "zaybxcwdveuftgsh" - -/* - * Flags for the prng_context flags field - */ - -#define PRNG_FIXED_SIZE 0x1 -#define PRNG_NEED_RESET 0x2 - -/* - * Note: DT is our counter value - * I is our intermediate value - * V is our seed vector - * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf - * for implementation details - */ - - -struct prng_context { - char *prng_key; - char *prng_iv; - spinlock_t prng_lock; - unsigned char rand_data[DEFAULT_BLK_SZ]; - unsigned char last_rand_data[DEFAULT_BLK_SZ]; - unsigned char DT[DEFAULT_BLK_SZ]; - unsigned char I[DEFAULT_BLK_SZ]; - unsigned char V[DEFAULT_BLK_SZ]; - u32 rand_data_valid; - struct crypto_blkcipher *tfm; - u32 flags; -}; - -static int dbg; - -static void hexdump(char *note, unsigned char *buf, unsigned int len) -{ - if (dbg) { - printk(KERN_CRIT "%s", note); - print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, - 16, 1, - buf, len, false); - } -} - -#define dbgprint(format, args...) 
do {if(dbg) printk(format, ##args);} while(0) - -static void xor_vectors(unsigned char *in1, unsigned char *in2, - unsigned char *out, unsigned int size) -{ - int i; - - for (i=0;i<size;i++) - out[i] = in1[i] ^ in2[i]; -} - -/* - * Returns DEFAULT_BLK_SZ bytes of random data per call - * returns 0 if generation succeeded, <0 if something went wrong - */ -static int _get_more_prng_bytes(struct prng_context *ctx) -{ - int i; - unsigned char tmp[DEFAULT_BLK_SZ]; - int ret; - struct blkcipher_desc desc; - struct scatterlist sg_in, sg_out; - - desc.tfm = ctx->tfm; - desc.flags = 0; - - - dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx); - - hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ); - hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ); - hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ); - - /* - * This algorithm is a 3 stage state machine - */ - for (i=0;i<3;i++) { - - desc.tfm = ctx->tfm; - desc.flags = 0; - switch (i) { - case 0: - /* - * Start by encrypting the counter value - * This gives us an intermediate value I - */ - memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ); - sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ); - hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ); - break; - case 1: - - /* - * Next xor I with our secret vector V - * encrypt that result to obtain our - * pseudo random data which we output - */ - xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ); - sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ); - hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ); - break; - case 2: - /* - * First check that we didn't produce the same random data - * that we did last time around through this - */ - if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) { - printk(KERN_ERR "ctx %p Failed repetition check!\n", - ctx); - ctx->flags |= PRNG_NEED_RESET; - return -1; - } - memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ); - - /* - * Lastly xor the random data with I - * and encrypt that to obtain a new secret vector V - */ - xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ); - sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ); - hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ); - break; - } - - /* Initialize our input buffer */ - sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ); - - /* do the encryption */ - ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ); - - /* And check the result */ - if (ret) { - dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx); - ctx->rand_data_valid = DEFAULT_BLK_SZ; - return -1; - } - - } - - /* - * Now update our DT value - */ - for (i=DEFAULT_BLK_SZ-1;i>0;i--) { - ctx->DT[i] = ctx->DT[i-1]; - } - ctx->DT[0] += 1; - - dbgprint("Returning new block for context %p\n",ctx); - ctx->rand_data_valid = 0; - - hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ); - hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ); - hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ); - hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ); - - return 0; -} - -/* Our exported functions */ -int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx) -{ - unsigned long flags; - unsigned char *ptr = buf; - unsigned int byte_count = (unsigned int)nbytes; - int err; - - - if (nbytes < 0) - return -EINVAL; - - spin_lock_irqsave(&ctx->prng_lock, flags); - - err = -EFAULT; - if (ctx->flags & PRNG_NEED_RESET) - goto done; - - /* - * If the FIXED_SIZE flag is on, only return whole blocks of - * pseudo random data - */ - err = -EINVAL; - if (ctx->flags & PRNG_FIXED_SIZE) { - if (nbytes < DEFAULT_BLK_SZ) - goto done; - byte_count = DEFAULT_BLK_SZ; - } - - err = byte_count; - - dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx); - - -remainder: - if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx) < 0) { - memset(buf, 0, nbytes); - err = -EFAULT; - goto done; - } - } - - /* - * Copy up to the next whole block size - */ - if (byte_count <
DEFAULT_BLK_SZ) { - for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) { - *ptr = ctx->rand_data[ctx->rand_data_valid]; - ptr++; - byte_count--; - if (byte_count == 0) - goto done; - } - } - - /* - * Now copy whole blocks - */ - for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx) < 0) { - memset(buf, 0, nbytes); - err = -1; - goto done; - } - memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); - ctx->rand_data_valid += DEFAULT_BLK_SZ; - ptr += DEFAULT_BLK_SZ; - } - - /* - * Now copy any extra partial data - */ - if (byte_count) - goto remainder; - -done: - spin_unlock_irqrestore(&ctx->prng_lock, flags); - dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx); - return err; -} -EXPORT_SYMBOL_GPL(get_prng_bytes); - -struct prng_context *alloc_prng_context(void) -{ - struct prng_context *ctx=kzalloc(sizeof(struct prng_context), GFP_KERNEL); - - spin_lock_init(&ctx->prng_lock); - - if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) { - kfree(ctx); - ctx = NULL; - } - - dbgprint(KERN_CRIT "returning context %p\n",ctx); - return ctx; -} - -EXPORT_SYMBOL_GPL(alloc_prng_context); - -void free_prng_context(struct prng_context *ctx) -{ - crypto_free_blkcipher(ctx->tfm); - kfree(ctx); -} -EXPORT_SYMBOL_GPL(free_prng_context); - -int reset_prng_context(struct prng_context *ctx, - unsigned char *key, unsigned char *iv, - unsigned char *V, unsigned char *DT) -{ - int ret; - int iv_len; - int rc = -EFAULT; - - spin_lock(&ctx->prng_lock); - ctx->flags |= PRNG_NEED_RESET; - - if (key) - memcpy(ctx->prng_key,key,strlen(ctx->prng_key)); - else - ctx->prng_key = DEFAULT_PRNG_KEY; - - if (iv) - memcpy(ctx->prng_iv,iv, strlen(ctx->prng_iv)); - else - ctx->prng_iv = DEFAULT_PRNG_IV; - - if (V) - memcpy(ctx->V,V,DEFAULT_BLK_SZ); - else - memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ); - - if (DT) - memcpy(ctx->DT, DT, DEFAULT_BLK_SZ); - else - memset(ctx->DT, 0, DEFAULT_BLK_SZ); - - memset(ctx->rand_data,0,DEFAULT_BLK_SZ); - memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ); - - if (ctx->tfm) - crypto_free_blkcipher(ctx->tfm); - - ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))",0,0); - if (!ctx->tfm) { - dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n",ctx->tfm); - goto out; - } - - ctx->rand_data_valid = DEFAULT_BLK_SZ; - - ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key)); - if (ret) { - dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", - crypto_blkcipher_get_flags(ctx->tfm)); - crypto_free_blkcipher(ctx->tfm); - goto out; - } - - iv_len = crypto_blkcipher_ivsize(ctx->tfm); - if (iv_len) { - crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len); - } - rc = 0; - ctx->flags &= ~PRNG_NEED_RESET; -out: - spin_unlock(&ctx->prng_lock); - - return rc; - -} -EXPORT_SYMBOL_GPL(reset_prng_context); - -/* Module initalization */ -static int __init prng_mod_init(void) -{ - -#ifdef TEST_PRNG_ON_START - int i; - unsigned char tmpbuf[DEFAULT_BLK_SZ]; - - struct prng_context *ctx = alloc_prng_context(); - if (ctx == NULL) - return -EFAULT; - for (i=0;i<16;i++) { - if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) { - free_prng_context(ctx); - return -EFAULT; - } - } - free_prng_context(ctx); -#endif - - return 0; -} - -static void __exit prng_mod_fini(void) -{ - return; -} - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Software Pseudo Random Number Generator"); -MODULE_AUTHOR("Neil Horman "); -module_param(dbg, int, 0); -MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); 
-module_init(prng_mod_init); -module_exit(prng_mod_fini); diff --git a/crypto/prng.h b/crypto/prng.h deleted file mode 100644 index 1ac9be50..00000000 --- a/crypto/prng.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * PRNG: Pseudo Random Number Generator - * - * (C) Neil Horman - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * any later version. - * - * - */ - -#ifndef _PRNG_H_ -#define _PRNG_H_ -struct prng_context; - -int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx); -struct prng_context *alloc_prng_context(void); -int reset_prng_context(struct prng_context *ctx, - unsigned char *key, unsigned char *iv, - unsigned char *V, - unsigned char *DT); -void free_prng_context(struct prng_context *ctx); - -#endif - -- cgit v1.2.3 From 29402ff17529a3df6a3cc062eb1024fa65231e9c Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 17 Jul 2008 17:59:47 -0700 Subject: async_tx: list_for_each_entry_rcu() cleanup On the rcu update side, don't use list_for_each_entry_rcu(). Signed-off-by: Li Zefan Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index c6e772fc..9325c612 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -294,7 +294,7 @@ dma_channel_add_remove(struct dma_client *client, case DMA_RESOURCE_REMOVED: found = 0; spin_lock_irqsave(&async_tx_lock, flags); - list_for_each_entry_rcu(ref, &async_tx_master_list, node) + list_for_each_entry(ref, &async_tx_master_list, node) if (ref->chan == chan) { /* permit backing devices to go away */ dma_chan_put(ref->chan); -- cgit v1.2.3 From 7672bf08f2fc86033d160c63be2fff0998869481 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 16 Jul 2008 19:44:56 -0700 Subject: async_tx: ensure the xor destination buffer remains dma-mapped When the number of source buffers for an xor operation exceeds the hardware channel maximum, async_xor creates a chain of dependent operations. The result of one operation is reused as an input to the next to continue the xor calculation. The destination buffer should remain mapped for the duration of the entire chain. To provide this guarantee the code must no longer be allowed to fall back to the synchronous path, as this will preclude the buffer from being unmapped, i.e. the dma-driver will potentially miss the descriptor with !DMA_COMPL_SKIP_DEST_UNMAP.
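The chaining policy described above can be modelled in ordinary C. The sketch below is illustrative only and is not the kernel implementation: chained_xor, xor_batch and MAX_HW_SRC are hypothetical stand-ins for do_async_xor, a single hardware xor descriptor and dma->max_xor. It shows why the destination must stay valid (dma-mapped, in the driver case) for the entire chain: after every batch except the last, the destination is re-inserted into the source list of the next batch, just as do_async_xor feeds dma_dest back into dma_src[].

#include <stddef.h>
#include <string.h>

#define MAX_HW_SRC 4	/* assumed per-descriptor source limit */

/* compute dest = XOR of src_cnt sources, as one descriptor would */
static void xor_batch(unsigned char *dest, unsigned char **srcs,
		      int src_cnt, size_t len)
{
	size_t j;
	int i;

	if (dest != srcs[0])
		memcpy(dest, srcs[0], len);
	for (i = 1; i < src_cnt; i++)
		for (j = 0; j < len; j++)
			dest[j] ^= srcs[i][j];
}

/* split an oversized source list into a chain of batches; note that
 * the srcs array is clobbered, as async_xor clobbers src_list
 */
static void chained_xor(unsigned char *dest, unsigned char **srcs,
			int src_cnt, size_t len)
{
	int src_off = 0;

	while (src_cnt) {
		int batch = src_cnt < MAX_HW_SRC ? src_cnt : MAX_HW_SRC;

		xor_batch(dest, &srcs[src_off], batch, len);
		src_cnt -= batch;
		src_off += batch;
		if (src_cnt) {
			/* feed the intermediate result back in as a
			 * source of the next batch; dest is therefore
			 * still in use until the last batch completes
			 */
			srcs[--src_off] = dest;
			src_cnt++;
		}
	}
}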
Cc: Neil Brown Signed-off-by: Dan Williams --- crypto/async_tx/async_xor.c | 244 ++++++++++++++++++++------------------------ 1 file changed, 113 insertions(+), 131 deletions(-) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 3a0dddca..1fcf45ac 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -35,72 +35,118 @@ * when CONFIG_DMA_ENGINE=n */ static __always_inline struct dma_async_tx_descriptor * -do_async_xor(struct dma_device *device, - struct dma_chan *chan, struct page *dest, struct page **src_list, - unsigned int offset, unsigned int src_cnt, size_t len, - enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) +do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, + unsigned int offset, int src_cnt, size_t len, + enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_param) { - dma_addr_t dma_dest; + struct dma_device *dma = chan->device; dma_addr_t *dma_src = (dma_addr_t *) src_list; - struct dma_async_tx_descriptor *tx; + struct dma_async_tx_descriptor *tx = NULL; + int src_off = 0; int i; - unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; - - pr_debug("%s: len: %zu\n", __func__, len); - - dma_dest = dma_map_page(device->dev, dest, offset, len, - DMA_FROM_DEVICE); + dma_async_tx_callback _cb_fn; + void *_cb_param; + enum async_tx_flags async_flags; + enum dma_ctrl_flags dma_flags; + int xor_src_cnt; + dma_addr_t dma_dest; + dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE); for (i = 0; i < src_cnt; i++) - dma_src[i] = dma_map_page(device->dev, src_list[i], offset, + dma_src[i] = dma_map_page(dma->dev, src_list[i], offset, len, DMA_TO_DEVICE); - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. Drivers force forward progress - * in case they can not provide a descriptor - */ - tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len, - dma_prep_flags); - if (!tx) { - if (depend_tx) + while (src_cnt) { + async_flags = flags; + dma_flags = 0; + xor_src_cnt = min(src_cnt, dma->max_xor); + /* if we are submitting additional xors, leave the chain open, + * clear the callback parameters, and leave the destination + * buffer mapped + */ + if (src_cnt > xor_src_cnt) { + async_flags &= ~ASYNC_TX_ACK; + dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; + _cb_fn = NULL; + _cb_param = NULL; + } else { + _cb_fn = cb_fn; + _cb_param = cb_param; + } + if (_cb_fn) + dma_flags |= DMA_PREP_INTERRUPT; + + /* Since we have clobbered the src_list we are committed + * to doing this asynchronously. 
Drivers force forward progress + * in case they can not provide a descriptor + */ + tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], + xor_src_cnt, len, dma_flags); + + if (unlikely(!tx && depend_tx)) dma_wait_for_async_tx(depend_tx); - while (!tx) - tx = device->device_prep_dma_xor(chan, dma_dest, - dma_src, src_cnt, len, - dma_prep_flags); - } + /* spin wait for the preceeding transactions to complete */ + while (unlikely(!tx)) + tx = dma->device_prep_dma_xor(chan, dma_dest, + &dma_src[src_off], + xor_src_cnt, len, + dma_flags); + + async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, + _cb_param); + + depend_tx = tx; + flags |= ASYNC_TX_DEP_ACK; + + if (src_cnt > xor_src_cnt) { + /* drop completed sources */ + src_cnt -= xor_src_cnt; + src_off += xor_src_cnt; - async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); + /* use the intermediate result a source */ + dma_src[--src_off] = dma_dest; + src_cnt++; + } else + break; + } return tx; } static void do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, - unsigned int src_cnt, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_param) + int src_cnt, size_t len, enum async_tx_flags flags, + struct dma_async_tx_descriptor *depend_tx, + dma_async_tx_callback cb_fn, void *cb_param) { - void *_dest; int i; - - pr_debug("%s: len: %zu\n", __func__, len); + int xor_src_cnt; + int src_off = 0; + void *dest_buf; + void **srcs = (void **) src_list; /* reuse the 'src_list' array to convert to buffer pointers */ for (i = 0; i < src_cnt; i++) - src_list[i] = (struct page *) - (page_address(src_list[i]) + offset); + srcs[i] = page_address(src_list[i]) + offset; /* set destination address */ - _dest = page_address(dest) + offset; + dest_buf = page_address(dest) + offset; if (flags & ASYNC_TX_XOR_ZERO_DST) - memset(_dest, 0, len); + memset(dest_buf, 0, len); + + while (src_cnt > 0) { + /* process up to 'MAX_XOR_BLOCKS' sources */ + xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); + xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]); - xor_blocks(src_cnt, len, _dest, - (void **) src_list); + /* drop completed sources */ + src_cnt -= xor_src_cnt; + src_off += xor_src_cnt; + } async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); } @@ -132,106 +178,42 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR, &dest, 1, src_list, src_cnt, len); - struct dma_device *device = chan ? 
chan->device : NULL; - struct dma_async_tx_descriptor *tx = NULL; - dma_async_tx_callback _cb_fn; - void *_cb_param; - unsigned long local_flags; - int xor_src_cnt; - int i = 0, src_off = 0; - BUG_ON(src_cnt <= 1); - while (src_cnt) { - local_flags = flags; - if (device) { /* run the xor asynchronously */ - xor_src_cnt = min(src_cnt, device->max_xor); - /* if we are submitting additional xors - * only set the callback on the last transaction - */ - if (src_cnt > xor_src_cnt) { - local_flags &= ~ASYNC_TX_ACK; - _cb_fn = NULL; - _cb_param = NULL; - } else { - _cb_fn = cb_fn; - _cb_param = cb_param; - } - - tx = do_async_xor(device, chan, dest, - &src_list[src_off], offset, - xor_src_cnt, len, local_flags, - depend_tx, _cb_fn, _cb_param); - } else { /* run the xor synchronously */ - /* in the sync case the dest is an implied source - * (assumes the dest is at the src_off index) - */ - if (flags & ASYNC_TX_XOR_DROP_DST) { - src_cnt--; - src_off++; - } - - /* process up to 'MAX_XOR_BLOCKS' sources */ - xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS); + if (chan) { + /* run the xor asynchronously */ + pr_debug("%s (async): len: %zu\n", __func__, len); - /* if we are submitting additional xors - * only set the callback on the last transaction - */ - if (src_cnt > xor_src_cnt) { - local_flags &= ~ASYNC_TX_ACK; - _cb_fn = NULL; - _cb_param = NULL; - } else { - _cb_fn = cb_fn; - _cb_param = cb_param; - } - - /* wait for any prerequisite operations */ - if (depend_tx) { - /* if ack is already set then we cannot be sure - * we are referring to the correct operation - */ - BUG_ON(async_tx_test_ack(depend_tx)); - if (dma_wait_for_async_tx(depend_tx) == - DMA_ERROR) - panic("%s: DMA_ERROR waiting for " - "depend_tx\n", - __func__); - } - - do_sync_xor(dest, &src_list[src_off], offset, - xor_src_cnt, len, local_flags, depend_tx, - _cb_fn, _cb_param); - } + return do_async_xor(chan, dest, src_list, offset, src_cnt, len, + flags, depend_tx, cb_fn, cb_param); + } else { + /* run the xor synchronously */ + pr_debug("%s (sync): len: %zu\n", __func__, len); - /* the previous tx is hidden from the client, - * so ack it + /* in the sync case the dest is an implied source + * (assumes the dest is the first source) */ - if (i && depend_tx) - async_tx_ack(depend_tx); - - depend_tx = tx; + if (flags & ASYNC_TX_XOR_DROP_DST) { + src_cnt--; + src_list++; + } - if (src_cnt > xor_src_cnt) { - /* drop completed sources */ - src_cnt -= xor_src_cnt; - src_off += xor_src_cnt; + /* wait for any prerequisite operations */ + if (depend_tx) { + /* if ack is already set then we cannot be sure + * we are referring to the correct operation + */ + BUG_ON(async_tx_test_ack(depend_tx)); + if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) + panic("%s: DMA_ERROR waiting for depend_tx\n", + __func__); + } - /* unconditionally preserve the destination */ - flags &= ~ASYNC_TX_XOR_ZERO_DST; + do_sync_xor(dest, src_list, offset, src_cnt, len, + flags, depend_tx, cb_fn, cb_param); - /* use the intermediate result a source, but remember - * it's dropped, because it's implied, in the sync case - */ - src_list[--src_off] = dest; - src_cnt++; - flags |= ASYNC_TX_XOR_DROP_DST; - } else - src_cnt = 0; - i++; + return NULL; } - - return tx; } EXPORT_SYMBOL_GPL(async_xor); -- cgit v1.2.3 From 4af1fc66eb19dadf73950d7b903a3886bc41f7b4 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 17 Jul 2008 17:59:55 -0700 Subject: async_tx: fix handling of the "out of descriptor" condition in async_xor Ensure forward progress is made when a dmaengine driver is 
unable to allocate an xor descriptor by breaking the dependency chain with async_tx_quiesce() and issuing any pending descriptors. Tested with iop-adma by setting device->max_xor = 2 to force multiple calls to device_prep_dma_xor for each call to async_xor and limiting the descriptor slot pool to 5. Discovered that the minimum descriptor pool size for iop-adma is 2 * iop_chan_xor_slot_cnt(device->max_xor) + 1. Signed-off-by: Dan Williams --- crypto/async_tx/async_xor.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 1fcf45ac..19d16e45 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -30,6 +30,24 @@ #include #include +/** + * async_tx_quiesce - ensure tx is complete and freeable upon return + * @tx - transaction to quiesce + */ +static void async_tx_quiesce(struct dma_async_tx_descriptor **tx) +{ + if (*tx) { + /* if ack is already set then we cannot be sure + * we are referring to the correct operation + */ + BUG_ON(async_tx_test_ack(*tx)); + if (dma_wait_for_async_tx(*tx) == DMA_ERROR) + panic("DMA_ERROR waiting for transaction\n"); + async_tx_ack(*tx); + *tx = NULL; + } +} + /* do_async_xor - dma map the pages and perform the xor with an engine. * This routine is marked __always_inline so it can be compiled away * when CONFIG_DMA_ENGINE=n @@ -85,15 +103,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], xor_src_cnt, len, dma_flags); - if (unlikely(!tx && depend_tx)) - dma_wait_for_async_tx(depend_tx); + if (unlikely(!tx)) + async_tx_quiesce(&depend_tx); /* spin wait for the preceeding transactions to complete */ - while (unlikely(!tx)) + while (unlikely(!tx)) { + dma_async_issue_pending(chan); tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], xor_src_cnt, len, dma_flags); + } async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn, _cb_param); @@ -267,11 +287,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list, tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, len, result, dma_prep_flags); - if (!tx) { - if (depend_tx) - dma_wait_for_async_tx(depend_tx); + if (unlikely(!tx)) { + async_tx_quiesce(&depend_tx); - while (!tx) + while (!tx) { + dma_async_issue_pending(chan); tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt, len, result, dma_prep_flags); + } -- cgit v1.2.3 From 8f310c0ebdb7c748899f702bff3fef966906628b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 17 Jul 2008 17:59:55 -0700 Subject: async_tx: export async_tx_quiesce Replace open-coded "wait and acknowledge" instances with async_tx_quiesce.
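Each call site in this patch is converted the same mechanical way; a minimal before/after sketch of the pattern, using only functions that already appear in the hunks of this series:

/* before: open-coded wait and acknowledge at every call site */
if (depend_tx) {
	/* if ack is already set then we cannot be sure
	 * we are referring to the correct operation
	 */
	BUG_ON(async_tx_test_ack(depend_tx));
	if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
		panic("%s: DMA_ERROR waiting for depend_tx\n", __func__);
}

/* after: one helper waits for completion, checks for DMA_ERROR,
 * acks the descriptor and NULLs the pointer so a stale reference
 * cannot be reused by mistake
 */
async_tx_quiesce(&depend_tx);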
Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 10 +--------- crypto/async_tx/async_memset.c | 10 +--------- crypto/async_tx/async_tx.c | 29 ++++++++++++++++++++--------- crypto/async_tx/async_xor.c | 37 ++----------------------------------- 4 files changed, 24 insertions(+), 62 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index a5eda80e..06a7f4be 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -73,15 +73,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, pr_debug("%s: (sync) len: %zu\n", __func__, len); /* wait for any prerequisite operations */ - if (depend_tx) { - /* if ack is already set then we cannot be sure - * we are referring to the correct operation - */ - BUG_ON(async_tx_test_ack(depend_tx)); - if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) - panic("%s: DMA_ERROR waiting for depend_tx\n", - __func__); - } + async_tx_quiesce(&depend_tx); dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; src_buf = kmap_atomic(src, KM_USER1) + src_offset; diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index 27a97dc9..d48ed22e 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -72,15 +72,7 @@ async_memset(struct page *dest, int val, unsigned int offset, dest_buf = (void *) (((char *) page_address(dest)) + offset); /* wait for any prerequisite operations */ - if (depend_tx) { - /* if ack is already set then we cannot be sure - * we are referring to the correct operation - */ - BUG_ON(async_tx_test_ack(depend_tx)); - if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) - panic("%s: DMA_ERROR waiting for depend_tx\n", - __func__); - } + async_tx_quiesce(&depend_tx); memset(dest_buf, val, len); diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 9325c612..78a61e7f 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -607,15 +607,7 @@ async_trigger_callback(enum async_tx_flags flags, pr_debug("%s: (sync)\n", __func__); /* wait for any prerequisite operations */ - if (depend_tx) { - /* if ack is already set then we cannot be sure - * we are referring to the correct operation - */ - BUG_ON(async_tx_test_ack(depend_tx)); - if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) - panic("%s: DMA_ERROR waiting for depend_tx\n", - __func__); - } + async_tx_quiesce(&depend_tx); async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); } @@ -624,6 +616,25 @@ async_trigger_callback(enum async_tx_flags flags, } EXPORT_SYMBOL_GPL(async_trigger_callback); +/** + * async_tx_quiesce - ensure tx is complete and freeable upon return + * @tx - transaction to quiesce + */ +void async_tx_quiesce(struct dma_async_tx_descriptor **tx) +{ + if (*tx) { + /* if ack is already set then we cannot be sure + * we are referring to the correct operation + */ + BUG_ON(async_tx_test_ack(*tx)); + if (dma_wait_for_async_tx(*tx) == DMA_ERROR) + panic("DMA_ERROR waiting for transaction\n"); + async_tx_ack(*tx); + *tx = NULL; + } +} +EXPORT_SYMBOL_GPL(async_tx_quiesce); + module_init(async_tx_init); module_exit(async_tx_exit); diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 19d16e45..689ecce7 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -30,24 +30,6 @@ #include #include -/** - * async_tx_quiesce - ensure tx is complete and freeable upon return - * @tx - transaction to quiesce - */ -static void async_tx_quiesce(struct dma_async_tx_descriptor **tx) 
-{ - if (*tx) { - /* if ack is already set then we cannot be sure - * we are referring to the correct operation - */ - BUG_ON(async_tx_test_ack(*tx)); - if (dma_wait_for_async_tx(*tx) == DMA_ERROR) - panic("DMA_ERROR waiting for transaction\n"); - async_tx_ack(*tx); - *tx = NULL; - } -} - /* do_async_xor - dma map the pages and perform the xor with an engine. * This routine is marked __always_inline so it can be compiled away * when CONFIG_DMA_ENGINE=n @@ -219,15 +201,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, } /* wait for any prerequisite operations */ - if (depend_tx) { - /* if ack is already set then we cannot be sure - * we are referring to the correct operation - */ - BUG_ON(async_tx_test_ack(depend_tx)); - if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) - panic("%s: DMA_ERROR waiting for depend_tx\n", - __func__); - } + async_tx_quiesce(&depend_tx); do_sync_xor(dest, src_list, offset, src_cnt, len, flags, depend_tx, cb_fn, cb_param); @@ -309,17 +283,10 @@ async_xor_zero_sum(struct page *dest, struct page **src_list, tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags, depend_tx, NULL, NULL); - if (tx) { - if (dma_wait_for_async_tx(tx) == DMA_ERROR) - panic("%s: DMA_ERROR waiting for tx\n", - __func__); - async_tx_ack(tx); - } + async_tx_quiesce(&tx); *result = page_is_zero(dest, offset, len) ? 0 : 1; - tx = NULL; - async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); } -- cgit v1.2.3 From a240d7600f74660d27dedd6d033d2867e790957d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 17 Jul 2008 17:59:55 -0700 Subject: async_tx: remove depend_tx from async_tx_sync_epilog All callers of async_tx_sync_epilog have called async_tx_quiesce on the depend_tx, so async_tx_sync_epilog need only call the callback to complete the operation. 
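With the depend_tx argument gone, the epilog reduces to invoking the completion callback. Its definition lives in include/linux/async_tx.h, which this excerpt does not show, so the body below is an assumption about the resulting shape rather than a quotation:

static inline void
async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_param)
{
	/* depend_tx was already quiesced by the caller, so completing
	 * a synchronous operation is nothing more than the callback
	 */
	if (cb_fn)
		cb_fn(cb_param);
}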
Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 2 +- crypto/async_tx/async_memset.c | 2 +- crypto/async_tx/async_tx.c | 2 +- crypto/async_tx/async_xor.c | 7 +++---- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 06a7f4be..ddccfb01 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -83,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, kunmap_atomic(dest_buf, KM_USER0); kunmap_atomic(src_buf, KM_USER1); - async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); + async_tx_sync_epilog(cb_fn, cb_param); } return tx; diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index d48ed22e..5b5eb99b 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -76,7 +76,7 @@ async_memset(struct page *dest, int val, unsigned int offset, memset(dest_buf, val, len); - async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); + async_tx_sync_epilog(cb_fn, cb_param); } return tx; diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 78a61e7f..35869a37 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -609,7 +609,7 @@ async_trigger_callback(enum async_tx_flags flags, /* wait for any prerequisite operations */ async_tx_quiesce(&depend_tx); - async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); + async_tx_sync_epilog(cb_fn, cb_param); } return tx; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 689ecce7..65974c6d 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -121,7 +121,6 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, static void do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, dma_async_tx_callback cb_fn, void *cb_param) { int i; @@ -150,7 +149,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset, src_off += xor_src_cnt; } - async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); + async_tx_sync_epilog(cb_fn, cb_param); } /** @@ -204,7 +203,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, async_tx_quiesce(&depend_tx); do_sync_xor(dest, src_list, offset, src_cnt, len, - flags, depend_tx, cb_fn, cb_param); + flags, cb_fn, cb_param); return NULL; } @@ -287,7 +286,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list, *result = page_is_zero(dest, offset, len) ? 0 : 1; - async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param); + async_tx_sync_epilog(cb_fn, cb_param); } return tx; -- cgit v1.2.3