From 4192cdfff75b89e4097ed2c0b822c3f9acf346ce Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Thu, 23 Jan 2014 03:25:47 -0800 Subject: CRC32C: Add soft module dependency to load other accelerated crc32c modules We added the soft module dependency of crc32c module alias to generic crc32c module so other hardware accelerated crc32c modules could get loaded and used before the generic version. We also renamed the crypto/crc32c.c containing the generic crc32c crypto computation to crypto/crc32c_generic.c according to convention. Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/Makefile | 2 +- crypto/crc32c.c | 172 ----------------------------------------------- crypto/crc32c_generic.c | 174 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 173 deletions(-) delete mode 100644 crypto/crc32c.c create mode 100644 crypto/crc32c_generic.c diff --git a/crypto/Makefile b/crypto/Makefile index b29402a7..38e64231 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -81,7 +81,7 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o -obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o +obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o obj-$(CONFIG_CRYPTO_CRC32) += crc32.o obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o diff --git a/crypto/crc32c.c b/crypto/crc32c.c deleted file mode 100644 index 06f7018c..00000000 --- a/crypto/crc32c.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Cryptographic API. - * - * CRC32C chksum - * - *@Article{castagnoli-crc, - * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, - * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 - * and 32 Parity Bits}}, - * journal = IEEE Transactions on Communication, - * year = {1993}, - * volume = {41}, - * number = {6}, - * pages = {}, - * month = {June}, - *} - * Used by the iSCSI driver, possibly others, and derived from the - * the iscsi-crc.c module of the linux-iscsi driver at - * http://linux-iscsi.sourceforge.net. - * - * Following the example of lib/crc32, this function is intended to be - * flexible and useful for all users. Modules that currently have their - * own crc32c, but hopefully may be able to use this one are: - * net/sctp (please add all your doco to here if you change to - * use this one!) - * - * - * Copyright (c) 2004 Cisco Systems, Inc. - * Copyright (c) 2008 Herbert Xu - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - */ - -#include -#include -#include -#include -#include -#include - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -struct chksum_ctx { - u32 key; -}; - -struct chksum_desc_ctx { - u32 crc; -}; - -/* - * Steps through buffer one byte at at time, calculates reflected - * crc using table. - */ - -static int chksum_init(struct shash_desc *desc) -{ - struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = mctx->key; - - return 0; -} - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. 
- */ -static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - struct chksum_ctx *mctx = crypto_shash_ctx(tfm); - - if (keylen != sizeof(mctx->key)) { - crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - mctx->key = le32_to_cpu(*(__le32 *)key); - return 0; -} - -static int chksum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - ctx->crc = __crc32c_le(ctx->crc, data, length); - return 0; -} - -static int chksum_final(struct shash_desc *desc, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - *(__le32 *)out = ~cpu_to_le32p(&ctx->crc); - return 0; -} - -static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) -{ - *(__le32 *)out = ~cpu_to_le32(__crc32c_le(*crcp, data, len)); - return 0; -} - -static int chksum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, len, out); -} - -static int chksum_digest(struct shash_desc *desc, const u8 *data, - unsigned int length, u8 *out) -{ - struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); - - return __chksum_finup(&mctx->key, data, length, out); -} - -static int crc32c_cra_init(struct crypto_tfm *tfm) -{ - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - - mctx->key = ~0; - return 0; -} - -static struct shash_alg alg = { - .digestsize = CHKSUM_DIGEST_SIZE, - .setkey = chksum_setkey, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-generic", - .cra_priority = 100, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 3, - .cra_ctxsize = sizeof(struct chksum_ctx), - .cra_module = THIS_MODULE, - .cra_init = crc32c_cra_init, - } -}; - -static int __init crc32c_mod_init(void) -{ - return crypto_register_shash(&alg); -} - -static void __exit crc32c_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_init(crc32c_mod_init); -module_exit(crc32c_mod_fini); - -MODULE_AUTHOR("Clay Haapala "); -MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); -MODULE_LICENSE("GPL"); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c new file mode 100644 index 00000000..d9c7beba --- /dev/null +++ b/crypto/crc32c_generic.c @@ -0,0 +1,174 @@ +/* + * Cryptographic API. + * + * CRC32C chksum + * + *@Article{castagnoli-crc, + * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, + * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 + * and 32 Parity Bits}}, + * journal = IEEE Transactions on Communication, + * year = {1993}, + * volume = {41}, + * number = {6}, + * pages = {}, + * month = {June}, + *} + * Used by the iSCSI driver, possibly others, and derived from the + * the iscsi-crc.c module of the linux-iscsi driver at + * http://linux-iscsi.sourceforge.net. + * + * Following the example of lib/crc32, this function is intended to be + * flexible and useful for all users. Modules that currently have their + * own crc32c, but hopefully may be able to use this one are: + * net/sctp (please add all your doco to here if you change to + * use this one!) + * + * + * Copyright (c) 2004 Cisco Systems, Inc. 
+ * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +struct chksum_ctx { + u32 key; +}; + +struct chksum_desc_ctx { + u32 crc; +}; + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. + */ + +static int chksum_init(struct shash_desc *desc) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = mctx->key; + + return 0; +} + +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. + */ +static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(tfm); + + if (keylen != sizeof(mctx->key)) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + mctx->key = le32_to_cpu(*(__le32 *)key); + return 0; +} + +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = __crc32c_le(ctx->crc, data, length); + return 0; +} + +static int chksum_final(struct shash_desc *desc, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + *(__le32 *)out = ~cpu_to_le32p(&ctx->crc); + return 0; +} + +static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) +{ + *(__le32 *)out = ~cpu_to_le32(__crc32c_le(*crcp, data, len)); + return 0; +} + +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup(&ctx->crc, data, len, out); +} + +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + + return __chksum_finup(&mctx->key, data, length, out); +} + +static int crc32c_cra_init(struct crypto_tfm *tfm) +{ + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->key = ~0; + return 0; +} + +static struct shash_alg alg = { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-generic", + .cra_priority = 100, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_alignmask = 3, + .cra_ctxsize = sizeof(struct chksum_ctx), + .cra_module = THIS_MODULE, + .cra_init = crc32c_cra_init, + } +}; + +static int __init crc32c_mod_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit crc32c_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crc32c_mod_init); +module_exit(crc32c_mod_fini); + +MODULE_AUTHOR("Clay Haapala "); +MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("crc32c"); +MODULE_SOFTDEP("pre: crc32c"); -- cgit v1.2.3 From d8a580d52eb2d210d214107d2acc96b83c7bc3c9 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 4 Mar 2014 13:28:38 
+0800 Subject: crypto: remove direct blkcipher_walk dependency on transform In order to allow other uses of the blkcipher walk API than the blkcipher algos themselves, this patch copies some of the transform data members to the walk struct so the transform is only accessed at walk init time. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/blkcipher.c | 67 +++++++++++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index a79e7e9a..46fdab5e 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -70,14 +70,12 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, - struct blkcipher_walk *walk, +static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk, unsigned int bsize) { u8 *addr; - unsigned int alignmask = crypto_blkcipher_alignmask(tfm); - addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); + addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); addr = blkcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, 1); return bsize; @@ -105,7 +103,6 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err) { - struct crypto_blkcipher *tfm = desc->tfm; unsigned int nbytes = 0; if (likely(err >= 0)) { @@ -117,7 +114,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc, err = -EINVAL; goto err; } else - n = blkcipher_done_slow(tfm, walk, n); + n = blkcipher_done_slow(walk, n); nbytes = walk->total - n; err = 0; @@ -136,7 +133,7 @@ err: } if (walk->iv != desc->info) - memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); + memcpy(desc->info, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) @@ -226,22 +223,20 @@ static inline int blkcipher_next_fast(struct blkcipher_desc *desc, static int blkcipher_walk_next(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { - struct crypto_blkcipher *tfm = desc->tfm; - unsigned int alignmask = crypto_blkcipher_alignmask(tfm); unsigned int bsize; unsigned int n; int err; n = walk->total; - if (unlikely(n < crypto_blkcipher_blocksize(tfm))) { + if (unlikely(n < walk->cipher_blocksize)) { desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; return blkcipher_walk_done(desc, walk, -EINVAL); } walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | BLKCIPHER_WALK_DIFF); - if (!scatterwalk_aligned(&walk->in, alignmask) || - !scatterwalk_aligned(&walk->out, alignmask)) { + if (!scatterwalk_aligned(&walk->in, walk->alignmask) || + !scatterwalk_aligned(&walk->out, walk->alignmask)) { walk->flags |= BLKCIPHER_WALK_COPY; if (!walk->page) { walk->page = (void *)__get_free_page(GFP_ATOMIC); @@ -250,12 +245,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, } } - bsize = min(walk->blocksize, n); + bsize = min(walk->walk_blocksize, n); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); if (unlikely(n < bsize)) { - err = blkcipher_next_slow(desc, walk, bsize, alignmask); + err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask); goto set_phys_lowmem; } @@ -277,28 +272,26 @@ set_phys_lowmem: return err; } -static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, - struct crypto_blkcipher *tfm, - unsigned int alignmask) +static inline int 
blkcipher_copy_iv(struct blkcipher_walk *walk) { - unsigned bs = walk->blocksize; - unsigned int ivsize = crypto_blkcipher_ivsize(tfm); - unsigned aligned_bs = ALIGN(bs, alignmask + 1); - unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - - (alignmask + 1); + unsigned bs = walk->walk_blocksize; + unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1); + unsigned int size = aligned_bs * 2 + + walk->ivsize + max(aligned_bs, walk->ivsize) - + (walk->alignmask + 1); u8 *iv; - size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); + size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1); walk->buffer = kmalloc(size, GFP_ATOMIC); if (!walk->buffer) return -ENOMEM; - iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); + iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); iv = blkcipher_get_spot(iv, bs) + aligned_bs; iv = blkcipher_get_spot(iv, bs) + aligned_bs; - iv = blkcipher_get_spot(iv, ivsize); + iv = blkcipher_get_spot(iv, walk->ivsize); - walk->iv = memcpy(iv, walk->iv, ivsize); + walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; } @@ -306,7 +299,10 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { walk->flags &= ~BLKCIPHER_WALK_PHYS; - walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); + walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); + walk->cipher_blocksize = walk->walk_blocksize; + walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); + walk->alignmask = crypto_blkcipher_alignmask(desc->tfm); return blkcipher_walk_first(desc, walk); } EXPORT_SYMBOL_GPL(blkcipher_walk_virt); @@ -315,7 +311,10 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { walk->flags |= BLKCIPHER_WALK_PHYS; - walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); + walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); + walk->cipher_blocksize = walk->walk_blocksize; + walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); + walk->alignmask = crypto_blkcipher_alignmask(desc->tfm); return blkcipher_walk_first(desc, walk); } EXPORT_SYMBOL_GPL(blkcipher_walk_phys); @@ -323,9 +322,6 @@ EXPORT_SYMBOL_GPL(blkcipher_walk_phys); static int blkcipher_walk_first(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { - struct crypto_blkcipher *tfm = desc->tfm; - unsigned int alignmask = crypto_blkcipher_alignmask(tfm); - if (WARN_ON_ONCE(in_irq())) return -EDEADLK; @@ -335,8 +331,8 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc, walk->buffer = NULL; walk->iv = desc->info; - if (unlikely(((unsigned long)walk->iv & alignmask))) { - int err = blkcipher_copy_iv(walk, tfm, alignmask); + if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { + int err = blkcipher_copy_iv(walk); if (err) return err; } @@ -353,7 +349,10 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc, unsigned int blocksize) { walk->flags &= ~BLKCIPHER_WALK_PHYS; - walk->blocksize = blocksize; + walk->walk_blocksize = blocksize; + walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm); + walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); + walk->alignmask = crypto_blkcipher_alignmask(desc->tfm); return blkcipher_walk_first(desc, walk); } EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); -- cgit v1.2.3 From ac15546efedb59a56882fa6a415c554d10a0c32a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 4 Mar 2014 13:28:39 +0800 Subject: crypto: allow blkcipher walks over AEAD data This adds the function blkcipher_aead_walk_virt_block, which allows the caller to use the 
blkcipher walk API to handle the input and output scatterlists. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/blkcipher.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 46fdab5e..0122bec3 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -357,6 +357,20 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc, } EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); +int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_aead *tfm, + unsigned int blocksize) +{ + walk->flags &= ~BLKCIPHER_WALK_PHYS; + walk->walk_blocksize = blocksize; + walk->cipher_blocksize = crypto_aead_blocksize(tfm); + walk->ivsize = crypto_aead_ivsize(tfm); + walk->alignmask = crypto_aead_alignmask(tfm); + return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block); + static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { -- cgit v1.2.3 From cbe9f2132e48a3d83d7c84e2c2ddf5b0f7b3e086 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Fri, 14 Mar 2014 02:37:04 +0100 Subject: crypto: hash - Fix the pointer voodoo in unaligned ahash Add documentation for the pointer voodoo that is happening in crypto/ahash.c in ahash_op_unaligned(). This code is quite confusing, so add a beefy chunk of documentation. Moreover, make sure the mangled request is completely restored after finishing this unaligned operation. This means restoring all of .result, .base.data and .base.complete . Also, remove the crypto_completion_t complete = ... line present in the ahash_op_unaligned_done() function. This type actually declares a function pointer, which is very confusing. Finally, yet very important nonetheless, make sure the req->priv is free()'d only after the original request is restored in ahash_op_unaligned_done(). The req->priv data must not be free()'d before that in ahash_op_unaligned_finish(), since we would be accessing previously free()'d data in ahash_op_unaligned_done() and cause corruption. Signed-off-by: Marek Vasut Cc: David S. Miller Cc: Fabio Estevam Cc: Herbert Xu Cc: Shawn Guo Cc: Tom Lendacky Signed-off-by: Herbert Xu --- crypto/ahash.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index a92dc382..4fdb4d3f 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -201,22 +201,34 @@ static void ahash_op_unaligned_finish(struct ahash_request *req, int err) memcpy(priv->result, req->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); + /* Restore the original crypto request. */ + req->result = priv->result; + req->base.complete = priv->complete; + req->base.data = priv->data; + req->priv = NULL; + + /* Free the req->priv.priv from the ADJUSTED request. */ kzfree(priv); } static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; - struct ahash_request_priv *priv = areq->priv; - crypto_completion_t complete = priv->complete; - void *data = priv->data; - ahash_op_unaligned_finish(areq, err); + /* + * Restore the original request, see ahash_op_unaligned() for what + * goes where. + * + * The "struct ahash_request *req" here is in fact the "req.base" + * from the ADJUSTED request from ahash_op_unaligned(), thus as it + * is a pointer to self, it is also the ADJUSTED "req" . 
+ */ - areq->base.complete = complete; - areq->base.data = data; + /* First copy areq->result into areq->priv.result */ + ahash_op_unaligned_finish(areq, err); - complete(&areq->base, err); + /* Complete the ORIGINAL request. */ + areq->base.complete(&areq->base, err); } static int ahash_op_unaligned(struct ahash_request *req, @@ -234,9 +246,39 @@ static int ahash_op_unaligned(struct ahash_request *req, if (!priv) return -ENOMEM; + /* + * WARNING: Voodoo programming below! + * + * The code below is obscure and hard to understand, thus explanation + * is necessary. See include/crypto/hash.h and include/linux/crypto.h + * to understand the layout of structures used here! + * + * The code here will replace portions of the ORIGINAL request with + * pointers to new code and buffers so the hashing operation can store + * the result in aligned buffer. We will call the modified request + * an ADJUSTED request. + * + * The newly mangled request will look as such: + * + * req { + * .result = ADJUSTED[new aligned buffer] + * .base.complete = ADJUSTED[pointer to completion function] + * .base.data = ADJUSTED[*req (pointer to self)] + * .priv = ADJUSTED[new priv] { + * .result = ORIGINAL(result) + * .complete = ORIGINAL(base.complete) + * .data = ORIGINAL(base.data) + * } + */ + priv->result = req->result; priv->complete = req->base.complete; priv->data = req->base.data; + /* + * WARNING: We do not backup req->priv here! The req->priv + * is for internal use of the Crypto API and the + * user must _NOT_ _EVER_ depend on it's content! + */ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); req->base.complete = ahash_op_unaligned_done; -- cgit v1.2.3 From b1e099df78cee8f7789a2808ac055f34d8723e42 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Fri, 14 Mar 2014 02:37:05 +0100 Subject: crypto: hash - Pull out the functions to save/restore request The functions to save original request within a newly adjusted request and it's counterpart to restore the original request can be re-used by more code in the crypto/ahash.c file. Pull these functions out from the code so they're available. Signed-off-by: Marek Vasut Cc: David S. Miller Cc: Fabio Estevam Cc: Herbert Xu Cc: Shawn Guo Cc: Tom Lendacky Signed-off-by: Herbert Xu --- crypto/ahash.c | 107 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 62 insertions(+), 45 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 4fdb4d3f..fcc6077b 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -190,55 +190,12 @@ static inline unsigned int ahash_align_buffer_size(unsigned len, return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); } -static void ahash_op_unaligned_finish(struct ahash_request *req, int err) -{ - struct ahash_request_priv *priv = req->priv; - - if (err == -EINPROGRESS) - return; - - if (!err) - memcpy(priv->result, req->result, - crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); - - /* Restore the original crypto request. */ - req->result = priv->result; - req->base.complete = priv->complete; - req->base.data = priv->data; - req->priv = NULL; - - /* Free the req->priv.priv from the ADJUSTED request. */ - kzfree(priv); -} - -static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) -{ - struct ahash_request *areq = req->data; - - /* - * Restore the original request, see ahash_op_unaligned() for what - * goes where. 
- * - * The "struct ahash_request *req" here is in fact the "req.base" - * from the ADJUSTED request from ahash_op_unaligned(), thus as it - * is a pointer to self, it is also the ADJUSTED "req" . - */ - - /* First copy areq->result into areq->priv.result */ - ahash_op_unaligned_finish(areq, err); - - /* Complete the ORIGINAL request. */ - areq->base.complete(&areq->base, err); -} - -static int ahash_op_unaligned(struct ahash_request *req, - int (*op)(struct ahash_request *)) +static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); unsigned long alignmask = crypto_ahash_alignmask(tfm); unsigned int ds = crypto_ahash_digestsize(tfm); struct ahash_request_priv *priv; - int err; priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? @@ -281,10 +238,70 @@ static int ahash_op_unaligned(struct ahash_request *req, */ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); - req->base.complete = ahash_op_unaligned_done; + req->base.complete = cplt; req->base.data = req; req->priv = priv; + return 0; +} + +static void ahash_restore_req(struct ahash_request *req) +{ + struct ahash_request_priv *priv = req->priv; + + /* Restore the original crypto request. */ + req->result = priv->result; + req->base.complete = priv->complete; + req->base.data = priv->data; + req->priv = NULL; + + /* Free the req->priv.priv from the ADJUSTED request. */ + kzfree(priv); +} + +static void ahash_op_unaligned_finish(struct ahash_request *req, int err) +{ + struct ahash_request_priv *priv = req->priv; + + if (err == -EINPROGRESS) + return; + + if (!err) + memcpy(priv->result, req->result, + crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); + + ahash_restore_req(req); +} + +static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) +{ + struct ahash_request *areq = req->data; + + /* + * Restore the original request, see ahash_op_unaligned() for what + * goes where. + * + * The "struct ahash_request *req" here is in fact the "req.base" + * from the ADJUSTED request from ahash_op_unaligned(), thus as it + * is a pointer to self, it is also the ADJUSTED "req" . + */ + + /* First copy req->result into req->priv.result */ + ahash_op_unaligned_finish(areq, err); + + /* Complete the ORIGINAL request. */ + areq->base.complete(&areq->base, err); +} + +static int ahash_op_unaligned(struct ahash_request *req, + int (*op)(struct ahash_request *)) +{ + int err; + + err = ahash_save_req(req, ahash_op_unaligned_done); + if (err) + return err; + err = op(req); ahash_op_unaligned_finish(req, err); -- cgit v1.2.3 From 5acd1dd75912763db76eecfa14ce563ea6ec310d Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Fri, 14 Mar 2014 02:37:06 +0100 Subject: crypto: hash - Simplify the ahash_finup implementation The ahash_def_finup() can make use of the request save/restore functions, thus make it so. This simplifies the code a little and unifies the code paths. Note that the same remark about free()ing the req->priv applies here, the req->priv can only be free()'d after the original request was restored. Finally, squash a bug in the invocation of completion in the ASYNC path. In both ahash_def_finup_done{1,2}, the function areq->base.complete(X, err); was called with X=areq->base.data . This is incorrect , as X=&areq->base is the correct value. 
By analysis of the data structures, we see the areq is of type 'struct ahash_request' , areq->base is of type 'struct crypto_async_request' and areq->base.completion is of type crypto_completion_t, which is defined in include/linux/crypto.h as: typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); This is one lead that the X should be &areq->base . Next up, we can inspect other code which calls the completion callback to give us kind-of statistical idea of how this callback is used. We can try: $ git grep base\.complete\( drivers/crypto/ Finally, by inspecting ahash_request_set_callback() implementation defined in include/crypto/hash.h , we observe that the .data entry of 'struct crypto_async_request' is intended for arbitrary data, not for completion argument. Signed-off-by: Marek Vasut Cc: David S. Miller Cc: Fabio Estevam Cc: Herbert Xu Cc: Shawn Guo Cc: Tom Lendacky Signed-off-by: Herbert Xu --- crypto/ahash.c | 36 +++++++++--------------------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index fcc6077b..6e722339 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -349,19 +349,16 @@ static void ahash_def_finup_finish2(struct ahash_request *req, int err) memcpy(priv->result, req->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); - kzfree(priv); + ahash_restore_req(req); } static void ahash_def_finup_done2(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; - struct ahash_request_priv *priv = areq->priv; - crypto_completion_t complete = priv->complete; - void *data = priv->data; ahash_def_finup_finish2(areq, err); - complete(data, err); + areq->base.complete(&areq->base, err); } static int ahash_def_finup_finish1(struct ahash_request *req, int err) @@ -381,38 +378,23 @@ out: static void ahash_def_finup_done1(struct crypto_async_request *req, int err) { struct ahash_request *areq = req->data; - struct ahash_request_priv *priv = areq->priv; - crypto_completion_t complete = priv->complete; - void *data = priv->data; err = ahash_def_finup_finish1(areq, err); - complete(data, err); + areq->base.complete(&areq->base, err); } static int ahash_def_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); - unsigned int ds = crypto_ahash_digestsize(tfm); - struct ahash_request_priv *priv; - - priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); - if (!priv) - return -ENOMEM; - - priv->result = req->result; - priv->complete = req->base.complete; - priv->data = req->base.data; + int err; - req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); - req->base.complete = ahash_def_finup_done1; - req->base.data = req; - req->priv = priv; + err = ahash_save_req(req, ahash_def_finup_done1); + if (err) + return err; - return ahash_def_finup_finish1(req, tfm->update(req)); + err = tfm->update(req); + return ahash_def_finup_finish1(req, err); } static int ahash_no_export(struct ahash_request *req, void *out) -- cgit v1.2.3 From a11d31eb29343fcc2d5919d06ebabb92a9920993 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 14 Mar 2014 17:46:50 +0200 Subject: crypto: export NULL algorithms defines These defines might be needed by crypto drivers. 
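For reference, these are the values being moved out of crypto_null.c (they are deleted in the hunk below); the shared header that now carries them is not shown in this patch, so its path is an assumption here:

	/* Assumed location: include/crypto/null.h */
	#define NULL_KEY_SIZE		0
	#define NULL_BLOCK_SIZE		1
	#define NULL_DIGEST_SIZE	0
	#define NULL_IV_SIZE		0
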
Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- crypto/crypto_null.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index fee7265c..1dc54bb9 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -17,6 +17,7 @@ * */ +#include #include #include #include @@ -24,11 +25,6 @@ #include #include -#define NULL_KEY_SIZE 0 -#define NULL_BLOCK_SIZE 1 -#define NULL_DIGEST_SIZE 0 -#define NULL_IV_SIZE 0 - static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { -- cgit v1.2.3 From 2311b3526eccbbd1c6a880a8436e752527537ec6 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Fri, 14 Mar 2014 17:46:51 +0200 Subject: crypto: testmgr - add aead null encryption test vectors Add test vectors for aead with null encryption and md5, respectively sha1 authentication. Input data is taken from test vectors listed in RFC2410. Signed-off-by: Horia Geanta Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 8 +++ crypto/testmgr.c | 32 ++++++++++ crypto/testmgr.h | 180 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 220 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0d9003ae..870be7b4 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1511,6 +1511,14 @@ static int do_test(int m) ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))"); break; + case 156: + ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"); + break; + + case 157: + ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"); + break; + case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 77955507..dc3cf353 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1808,6 +1808,22 @@ static const struct alg_test_desc alg_test_descs[] = { .count = ANSI_CPRNG_AES_TEST_VECTORS } } + }, { + .alg = "authenc(hmac(md5),ecb(cipher_null))", + .test = alg_test_aead, + .fips_allowed = 1, + .suite = { + .aead = { + .enc = { + .vecs = hmac_md5_ecb_cipher_null_enc_tv_template, + .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS + }, + .dec = { + .vecs = hmac_md5_ecb_cipher_null_dec_tv_template, + .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS + } + } + } }, { .alg = "authenc(hmac(sha1),cbc(aes))", .test = alg_test_aead, @@ -1820,6 +1836,22 @@ static const struct alg_test_desc alg_test_descs[] = { } } } + }, { + .alg = "authenc(hmac(sha1),ecb(cipher_null))", + .test = alg_test_aead, + .fips_allowed = 1, + .suite = { + .aead = { + .enc = { + .vecs = hmac_sha1_ecb_cipher_null_enc_tv_template, + .count = HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS + }, + .dec = { + .vecs = hmac_sha1_ecb_cipher_null_dec_tv_template, + .count = HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS + } + } + } }, { .alg = "authenc(hmac(sha256),cbc(aes))", .test = alg_test_aead, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 7d44aa3d..3db83dbb 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -12821,6 +12821,10 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { #define AES_DEC_TEST_VECTORS 4 #define AES_CBC_ENC_TEST_VECTORS 5 #define AES_CBC_DEC_TEST_VECTORS 5 +#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 +#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 +#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 +#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 #define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7 #define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7 #define 
HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7 @@ -13627,6 +13631,90 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = { }, }; +static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { + { /* Input data from RFC 2410 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8 + 16 + 0, + .iv = "", + .input = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .ilen = 8, + .result = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" + "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", + .rlen = 8 + 16, + }, { /* Input data from RFC 2410 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8 + 16 + 0, + .iv = "", + .input = "Network Security People Have A Strange Sense Of Humor", + .ilen = 53, + .result = "Network Security People Have A Strange Sense Of Humor" + "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" + "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", + .rlen = 53 + 16, + }, +}; + +static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = { + { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8 + 16 + 0, + .iv = "", + .input = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" + "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", + .ilen = 8 + 16, + .result = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .rlen = 8, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 8 + 16 + 0, + .iv = "", + .input = "Network Security People Have A Strange Sense Of Humor" + "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" + "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", + .ilen = 53 + 16, + .result = "Network Security People Have A Strange Sense Of Humor", + .rlen = 53, + }, +}; + static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN @@ -13876,6 +13964,98 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = { }, }; +static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_template[] = { + { /* Input data from RFC 2410 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00", + .klen = 8 + 20 + 0, + .iv = "", + .input = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .ilen = 8, + .result = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" + "\x99\x5e\x19\x04\xd1\x72\xef\xb8" + "\x8c\x5e\xe4\x08", + .rlen = 8 + 20, + }, { /* Input data from RFC 2410 Case 2 */ +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta 
length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00", + .klen = 8 + 20 + 0, + .iv = "", + .input = "Network Security People Have A Strange Sense Of Humor", + .ilen = 53, + .result = "Network Security People Have A Strange Sense Of Humor" + "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" + "\x65\x47\xee\x8e\x1a\xef\x16\xf6" + "\x91\x56\xe4\xd6", + .rlen = 53 + 20, + }, +}; + +static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_template[] = { + { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00", + .klen = 8 + 20 + 0, + .iv = "", + .input = "\x01\x23\x45\x67\x89\xab\xcd\xef" + "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" + "\x99\x5e\x19\x04\xd1\x72\xef\xb8" + "\x8c\x5e\xe4\x08", + .ilen = 8 + 20, + .result = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .rlen = 8, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x00" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00", + .klen = 8 + 20 + 0, + .iv = "", + .input = "Network Security People Have A Strange Sense Of Humor" + "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" + "\x65\x47\xee\x8e\x1a\xef\x16\xf6" + "\x91\x56\xe4\xd6", + .ilen = 53 + 20, + .result = "Network Security People Have A Strange Sense Of Humor", + .rlen = 53, + }, +}; + static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN -- cgit v1.2.3 From 6bfa7c6cee76cc788719452c92b13c8dcd3beb5d Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Mon, 17 Mar 2014 16:52:26 -0700 Subject: crypto: crypto_wq - Fix late crypto work queue initialization The crypto algorithm modules utilizing the crypto daemon could be used early when the system start up. Using module_init does not guarantee that the daemon's work queue is initialized when the cypto alorithm depending on crypto_wq starts. It is necessary to initialize the crypto work queue earlier at the subsystem init time to make sure that it is initialized when used. Signed-off-by: Tim Chen Signed-off-by: Herbert Xu --- crypto/crypto_wq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c index adad92a4..2f1b8d12 100644 --- a/crypto/crypto_wq.c +++ b/crypto/crypto_wq.c @@ -33,7 +33,7 @@ static void __exit crypto_wq_exit(void) destroy_workqueue(kcrypto_wq); } -module_init(crypto_wq_init); +subsys_initcall(crypto_wq_init); module_exit(crypto_wq_exit); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 6ebc73cd0ab4ff5094258b5057ba4b678d664619 Mon Sep 17 00:00:00 2001 From: chandramouli narayanan Date: Thu, 20 Mar 2014 15:14:00 -0700 Subject: crypto: sha - SHA1 transform x86_64 AVX2 This git patch adds x86_64 AVX2 optimization of SHA1 transform to crypto support. The patch has been tested with 3.14.0-rc1 kernel. On a Haswell desktop, with turbo disabled and all cpus running at maximum frequency, tcrypt shows AVX2 performance improvement from 3% for 256 bytes update to 16% for 1024 bytes update over AVX implementation. 
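As a sketch of the run-time selection described below, the glue might dispatch per call based on how much data is being hashed. Only sha1_avx2_transform() is named by this message; the cutover constant and the AVX routine's name are illustrative assumptions:

	/*
	 * Hypothetical dispatch: AVX2 only pays off on larger updates,
	 * so pick the transform per call from the number of SHA1 blocks.
	 * SHA1_AVX2_CUTOVER_BLOCKS and sha1_avx_transform() are assumed
	 * names for this sketch.
	 */
	static void sha1_apply_transform(u32 *digest, const char *data,
					 unsigned int blocks)
	{
		if (blocks >= SHA1_AVX2_CUTOVER_BLOCKS)
			sha1_avx2_transform(digest, data, blocks);
		else
			sha1_avx_transform(digest, data, blocks);
	}
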
This patch adds sha1_avx2_transform() and the glue, build, and
configuration changes needed to bring the AVX2 optimization of the
SHA1 transform into the crypto subsystem. sha1-ssse3 is a single
module that provides the optimized low-level SHA1 transform functions
(SSSE3/AVX/AVX2); when better optimization support is available, the
transform function is overridden accordingly. In the AVX2 case,
because performance varies with data block size, either the AVX or
the AVX2 transform function is selected at run time, whichever suits
best. The Makefile change therefore appends the necessary objects to
the linkage. Because of this, the patch merely adds the AVX2
transform to the existing build mix and Kconfig support, leaving the
configuration structure as is.

Signed-off-by: Chandramouli Narayanan
Reviewed-by: Marek Vasut
Acked-by: H. Peter Anvin
Signed-off-by: Herbert Xu
---
 crypto/Kconfig | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 7bcb70d2..ce4012a5 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -491,14 +491,14 @@ config CRYPTO_SHA1
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
 config CRYPTO_SHA1_SSSE3
-	tristate "SHA1 digest algorithm (SSSE3/AVX)"
+	tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2)"
 	depends on X86 && 64BIT
 	select CRYPTO_SHA1
 	select CRYPTO_HASH
 	help
 	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
 	  using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
-	  Extensions (AVX), when available.
+	  Extensions (AVX/AVX2), when available.
 
 config CRYPTO_SHA256_SSSE3
 	tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2)"
-- cgit v1.2.3