From 6cf9495b172f2fa30adc1f8dcd4886fdba665dca Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Fri, 28 Dec 2012 12:04:58 +0200 Subject: crypto: ctr - make rfc3686 asynchronous block cipher Some hardware crypto drivers register asynchronous ctr(aes), which is left unused in IPSEC because rfc3686 template only supports synchronous block ciphers. Some other drivers register rfc3686(ctr(aes)) to workaround this limitation but not all. This patch changes rfc3686 to use asynchronous block ciphers, to allow async ctr(aes) algorithms to be utilized automatically by IPSEC. Signed-off-by: Jussi Kivilinna Acked-by: Herbert Xu Signed-off-by: Steffen Klassert --- crypto/ctr.c | 173 +++++++++++++++++++++++++++++++++++--------------------- crypto/tcrypt.c | 4 ++ crypto/tcrypt.h | 1 + 3 files changed, 115 insertions(+), 63 deletions(-) diff --git a/crypto/ctr.c b/crypto/ctr.c index 4ca7222c..1f2997cb 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -25,10 +26,15 @@ struct crypto_ctr_ctx { }; struct crypto_rfc3686_ctx { - struct crypto_blkcipher *child; + struct crypto_ablkcipher *child; u8 nonce[CTR_RFC3686_NONCE_SIZE]; }; +struct crypto_rfc3686_req_ctx { + u8 iv[CTR_RFC3686_BLOCK_SIZE]; + struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR; +}; + static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { @@ -243,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = { .module = THIS_MODULE, }; -static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key, - unsigned int keylen) +static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, + const u8 *key, unsigned int keylen) { - struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(parent); - struct crypto_blkcipher *child = ctx->child; + struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent); + struct crypto_ablkcipher *child = ctx->child; int err; /* the nonce is stored in bytes at end of key */ @@ -259,59 +265,64 @@ static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key, keylen -= CTR_RFC3686_NONCE_SIZE; - crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_blkcipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); + crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(child, key, keylen); + crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); return err; } -static int crypto_rfc3686_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static int crypto_rfc3686_crypt(struct ablkcipher_request *req) { - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_blkcipher *child = ctx->child; - unsigned long alignmask = crypto_blkcipher_alignmask(tfm); - u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask]; - u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1); - u8 *info = desc->info; - int err; + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_ablkcipher *child = ctx->child; + unsigned long align = crypto_ablkcipher_alignmask(tfm); + struct 
crypto_rfc3686_req_ctx *rctx = + (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1); + struct ablkcipher_request *subreq = &rctx->subreq; + u8 *iv = rctx->iv; /* set up counter block */ memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); - memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE); + memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE); /* initialize counter portion of counter block */ *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); - desc->tfm = child; - desc->info = iv; - err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); - desc->tfm = tfm; - desc->info = info; + ablkcipher_request_set_tfm(subreq, child); + ablkcipher_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes, + iv); - return err; + return crypto_ablkcipher_encrypt(subreq); } static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_blkcipher *cipher; + struct crypto_ablkcipher *cipher; + unsigned long align; - cipher = crypto_spawn_blkcipher(spawn); + cipher = crypto_spawn_skcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; + align = crypto_tfm_alg_alignmask(tfm); + align &= ~(crypto_tfm_ctx_alignment() - 1); + tfm->crt_ablkcipher.reqsize = align + + sizeof(struct crypto_rfc3686_req_ctx) + + crypto_ablkcipher_reqsize(cipher); + return 0; } @@ -319,74 +330,110 @@ static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm) { struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_blkcipher(ctx->child); + crypto_free_ablkcipher(ctx->child); } static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) { + struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *alg; + struct crypto_skcipher_spawn *spawn; + const char *cipher_name; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); - if (err) + algt = crypto_get_attr_type(tb); + err = PTR_ERR(algt); + if (IS_ERR(algt)) return ERR_PTR(err); - alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, - CRYPTO_ALG_TYPE_MASK); - err = PTR_ERR(alg); - if (IS_ERR(alg)) + if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask) + return ERR_PTR(-EINVAL); + + cipher_name = crypto_attr_alg_name(tb[1]); + err = PTR_ERR(cipher_name); + if (IS_ERR(cipher_name)) return ERR_PTR(err); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = crypto_instance_ctx(inst); + + crypto_set_skcipher_spawn(spawn, inst); + err = crypto_grab_skcipher(spawn, cipher_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_free_inst; + + alg = crypto_skcipher_spawn_alg(spawn); + /* We only support 16-byte blocks. */ err = -EINVAL; - if (alg->cra_blkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) - goto out_put_alg; + if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) + goto err_drop_spawn; /* Not a stream cipher? 
*/ if (alg->cra_blocksize != 1) - goto out_put_alg; + goto err_drop_spawn; - inst = crypto_alloc_instance("rfc3686", alg); - if (IS_ERR(inst)) - goto out; + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", + alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_drop_spawn; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "rfc3686(%s)", alg->cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_spawn; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; - inst->alg.cra_blkcipher.ivsize = CTR_RFC3686_IV_SIZE; - inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize - + CTR_RFC3686_NONCE_SIZE; - inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize - + CTR_RFC3686_NONCE_SIZE; + inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + (alg->cra_flags & CRYPTO_ALG_ASYNC); + inst->alg.cra_type = &crypto_ablkcipher_type; + + inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE; + inst->alg.cra_ablkcipher.min_keysize = + alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE; + inst->alg.cra_ablkcipher.max_keysize = + alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE; - inst->alg.cra_blkcipher.geniv = "seqiv"; + inst->alg.cra_ablkcipher.geniv = "seqiv"; + + inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey; + inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt; + inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt; inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); inst->alg.cra_init = crypto_rfc3686_init_tfm; inst->alg.cra_exit = crypto_rfc3686_exit_tfm; - inst->alg.cra_blkcipher.setkey = crypto_rfc3686_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_rfc3686_crypt; - inst->alg.cra_blkcipher.decrypt = crypto_rfc3686_crypt; - -out: - crypto_mod_put(alg); return inst; -out_put_alg: - inst = ERR_PTR(err); - goto out; +err_drop_spawn: + crypto_drop_skcipher(spawn); +err_free_inst: + kfree(inst); + return ERR_PTR(err); +} + +static void crypto_rfc3686_free(struct crypto_instance *inst) +{ + struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); + + crypto_drop_skcipher(spawn); + kfree(inst); } static struct crypto_template crypto_rfc3686_tmpl = { .name = "rfc3686", .alloc = crypto_rfc3686_alloc, - .free = crypto_ctr_free, + .free = crypto_rfc3686_free, .module = THIS_MODULE, }; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 7ae2130e..87ef7d66 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1591,6 +1591,10 @@ static int do_test(int m) speed_template_16_24_32); test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, speed_template_16_24_32); + test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0, + speed_template_20_28_36); + test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0, + speed_template_20_28_36); break; case 501: diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index cd206852..ecdeeb1a 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -51,6 +51,7 @@ static u8 speed_template_8_16[] = {8, 16, 0}; static u8 speed_template_8_32[] = {8, 32, 0}; static u8 speed_template_16_32[] = {16, 32, 0}; static u8 speed_template_16_24_32[] = {16, 24, 32, 0}; +static u8 speed_template_20_28_36[] = {20, 28, 36, 0}; static u8 speed_template_32_40_48[] = {32, 40, 48, 0}; static u8 speed_template_32_48[] = {32, 48, 0}; static u8 speed_template_32_48_64[] = {32, 48, 64, 0}; -- cgit v1.2.3 From 
725e579c9a0cc9cb5d2fafc5e517c2de7b3f24b7 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 28 Oct 2012 00:49:33 +0900 Subject: async_tx: use memchr_inv Use memchr_inv() to check the specified page is filled with zero. Signed-off-by: Akinobu Mita Cc: Vinod Koul Cc: Dan Williams Signed-off-by: Vinod Koul --- crypto/async_tx/async_xor.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 154cc843..8ade0a04 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -230,9 +230,7 @@ EXPORT_SYMBOL_GPL(async_xor); static int page_is_zero(struct page *p, unsigned int offset, size_t len) { - char *a = page_address(p) + offset; - return ((*(u32 *) a) == 0 && - memcmp(a, a + 4, len - 4) == 0); + return !memchr_inv(page_address(p) + offset, 0, len); } static inline struct dma_chan * -- cgit v1.2.3 From f474b6ce30e670498582a6b853c03047bec8009d Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 5 Nov 2012 10:00:12 +0000 Subject: async_tx: add missing DMA unmap to async_memcpy() Do DMA unmap on ->device_prep_dma_memcpy failure. Cc: Dan Williams Cc: Tomasz Figa Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 361b5e82..9e62feff 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -67,6 +67,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, dma_prep_flags); + if (!tx) { + dma_unmap_page(device->dev, dma_dest, len, + DMA_FROM_DEVICE); + dma_unmap_page(device->dev, dma_src, len, + DMA_TO_DEVICE); + } } if (tx) { -- cgit v1.2.3 From 72928b1eecee4fb2f91ce03bcb44c52b1682de18 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 5 Nov 2012 10:00:20 +0000 Subject: async_tx: fix build for async_memset Add missing include. Cc: Dan Williams Cc: Tomasz Figa Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_memset.c | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index 58e4a875..05a4d1e0 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -25,6 +25,7 @@ */ #include #include +#include #include #include #include -- cgit v1.2.3 From bad66658ad759444c12f8ba62ac8f190b0832687 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 8 Nov 2012 10:03:16 +0000 Subject: async_tx: fix checking of dma_wait_for_async_tx() return value dma_wait_for_async_tx() can also return DMA_PAUSED (which should be considered as error). 
Cc: Vinod Koul Cc: Dan Williams Cc: Tomasz Figa Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 84212097..7be34248 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -128,8 +128,8 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, } device->device_issue_pending(chan); } else { - if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) - panic("%s: DMA_ERROR waiting for depend_tx\n", + if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) + panic("%s: DMA error waiting for depend_tx\n", __func__); tx->tx_submit(tx); } @@ -280,8 +280,9 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) * we are referring to the correct operation */ BUG_ON(async_tx_test_ack(*tx)); - if (dma_wait_for_async_tx(*tx) == DMA_ERROR) - panic("DMA_ERROR waiting for transaction\n"); + if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) + panic("%s: DMA error waiting for transaction\n", + __func__); async_tx_ack(*tx); *tx = NULL; } -- cgit v1.2.3 From 28621f6ebe80e7dcd9cabd05b507babf713a2071 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 13 Sep 2012 23:00:49 +0000 Subject: powerpc: Add a powerpc implementation of SHA-1 This patch adds a crypto driver which provides a powerpc accelerated implementation of SHA-1, accelerated in that it is written in asm. Original patch by Paul, minor fixups for upstream by moi. Lightly tested on 64-bit with the test program here: http://michael.ellerman.id.au/files/junkcode/sha1test.c Seems to work, and is "not slower" than the generic version. Needs testing on 32-bit. Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- crypto/Kconfig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index 4641d956..8e6ae5ed 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -479,6 +479,13 @@ config CRYPTO_SHA1_ARM SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented using optimized ARM assembler. +config CRYPTO_SHA1_PPC + tristate "SHA1 digest algorithm (powerpc)" + depends on PPC + help + This is the powerpc hardware accelerated implementation of the + SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). + config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_HASH -- cgit v1.2.3 From 04a2abffe684b5f7308cfc4f9a6aee01d1d2b1dc Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 2 Oct 2012 11:16:49 -0700 Subject: crypto: remove depends on CONFIG_EXPERIMENTAL The CONFIG_EXPERIMENTAL config item has not carried much meaning for a while now and is almost always enabled by default. As agreed during the Linux kernel summit, remove it from any "depends on" lines in Kconfigs. CC: Herbert Xu CC: "David S. Miller" Signed-off-by: Kees Cook Acked-by: David S. Miller --- crypto/Kconfig | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 4641d956..3f375200 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -134,8 +134,8 @@ config CRYPTO_NULL These are 'Null' algorithms, used by IPsec, which do nothing. 
config CRYPTO_PCRYPT - tristate "Parallel crypto engine (EXPERIMENTAL)" - depends on SMP && EXPERIMENTAL + tristate "Parallel crypto engine" + depends on SMP select PADATA select CRYPTO_MANAGER select CRYPTO_AEAD @@ -292,7 +292,6 @@ config CRYPTO_HMAC config CRYPTO_XCBC tristate "XCBC support" - depends on EXPERIMENTAL select CRYPTO_HASH select CRYPTO_MANAGER help @@ -303,7 +302,6 @@ config CRYPTO_XCBC config CRYPTO_VMAC tristate "VMAC support" - depends on EXPERIMENTAL select CRYPTO_HASH select CRYPTO_MANAGER help @@ -932,8 +930,7 @@ config CRYPTO_KHAZAD config CRYPTO_SALSA20 - tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)" - depends on EXPERIMENTAL + tristate "Salsa20 stream cipher algorithm" select CRYPTO_BLKCIPHER help Salsa20 stream cipher algorithm. @@ -945,9 +942,8 @@ config CRYPTO_SALSA20 Bernstein . See config CRYPTO_SALSA20_586 - tristate "Salsa20 stream cipher algorithm (i586) (EXPERIMENTAL)" + tristate "Salsa20 stream cipher algorithm (i586)" depends on (X86 || UML_X86) && !64BIT - depends on EXPERIMENTAL select CRYPTO_BLKCIPHER help Salsa20 stream cipher algorithm. @@ -959,9 +955,8 @@ config CRYPTO_SALSA20_586 Bernstein . See config CRYPTO_SALSA20_X86_64 - tristate "Salsa20 stream cipher algorithm (x86_64) (EXPERIMENTAL)" + tristate "Salsa20 stream cipher algorithm (x86_64)" depends on (X86 || UML_X86) && 64BIT - depends on EXPERIMENTAL select CRYPTO_BLKCIPHER help Salsa20 stream cipher algorithm. -- cgit v1.2.3 From 56b20012d77d20bbf8eb42974468738bc3a6b29f Mon Sep 17 00:00:00 2001 From: Alexander Boyko Date: Thu, 10 Jan 2013 18:54:59 +0400 Subject: crypto: crc32 - add crc32 pclmulqdq implementation and wrappers for table implementation This patch adds crc32 algorithms to shash crypto api. One is wrapper to gerneric crc32_le function. Second is crc32 pclmulqdq implementation. It use hardware provided PCLMULQDQ instruction to accelerate the CRC32 disposal. This instruction present from Intel Westmere and AMD Bulldozer CPUs. For intel core i5 I got 450MB/s for table implementation and 2100MB/s for pclmulqdq implementation. Signed-off-by: Alexander Boyko Signed-off-by: Herbert Xu --- crypto/Kconfig | 21 ++++++++ crypto/Makefile | 1 + crypto/crc32.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 180 insertions(+) create mode 100644 crypto/crc32.c diff --git a/crypto/Kconfig b/crypto/Kconfig index 4641d956..e8b51e06 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -355,6 +355,27 @@ config CRYPTO_CRC32C_SPARC64 CRC32c CRC algorithm implemented using sparc64 crypto instructions, when available. +config CRYPTO_CRC32 + tristate "CRC32 CRC algorithm" + select CRYPTO_HASH + select CRC32 + help + CRC-32-IEEE 802.3 cyclic redundancy-check algorithm. + Shash crypto api wrappers to crc32_le function. + +config CRYPTO_CRC32_PCLMUL + tristate "CRC32 PCLMULQDQ hardware acceleration" + depends on X86 + select CRYPTO_HASH + select CRC32 + help + From Intel Westmere and AMD Bulldozer processor with SSE4.2 + and PCLMULQDQ supported, the processor will support + CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ + instruction. This option will create 'crc32-plcmul' module, + which will enable any routine to use the CRC-32-IEEE 802.3 checksum + and gain better performance as compared with the table implementation. 
+ config CRYPTO_GHASH tristate "GHASH digest algorithm" select CRYPTO_GF128MUL diff --git a/crypto/Makefile b/crypto/Makefile index d59dec74..be1a1beb 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -81,6 +81,7 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o +obj-$(CONFIG_CRYPTO_CRC32) += crc32.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o obj-$(CONFIG_CRYPTO_842) += 842.o diff --git a/crypto/crc32.c b/crypto/crc32.c new file mode 100644 index 00000000..9d1c4156 --- /dev/null +++ b/crypto/crc32.c @@ -0,0 +1,158 @@ +/* GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see http://www.gnu.org/licenses + * + * Please visit http://www.xyratex.com/contact if you need additional + * information or have any questions. + * + * GPL HEADER END + */ + +/* + * Copyright 2012 Xyratex Technology Limited + */ + +/* + * This is crypto api shash wrappers to crc32_le. + */ + +#include +#include +#include +#include +#include +#include + +#define CHKSUM_BLOCK_SIZE 1 +#define CHKSUM_DIGEST_SIZE 4 + +static u32 __crc32_le(u32 crc, unsigned char const *p, size_t len) +{ + return crc32_le(crc, p, len); +} + +/** No default init with ~0 */ +static int crc32_cra_init(struct crypto_tfm *tfm) +{ + u32 *key = crypto_tfm_ctx(tfm); + + *key = 0; + + return 0; +} + + +/* + * Setting the seed allows arbitrary accumulators and flexible XOR policy + * If your algorithm starts with ~0, then XOR with ~0 before you set + * the seed. 
+ */ +static int crc32_setkey(struct crypto_shash *hash, const u8 *key, + unsigned int keylen) +{ + u32 *mctx = crypto_shash_ctx(hash); + + if (keylen != sizeof(u32)) { + crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + *mctx = le32_to_cpup((__le32 *)key); + return 0; +} + +static int crc32_init(struct shash_desc *desc) +{ + u32 *mctx = crypto_shash_ctx(desc->tfm); + u32 *crcp = shash_desc_ctx(desc); + + *crcp = *mctx; + + return 0; +} + +static int crc32_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + u32 *crcp = shash_desc_ctx(desc); + + *crcp = __crc32_le(*crcp, data, len); + return 0; +} + +/* No final XOR 0xFFFFFFFF, like crc32_le */ +static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len, + u8 *out) +{ + *(__le32 *)out = cpu_to_le32(__crc32_le(*crcp, data, len)); + return 0; +} + +static int crc32_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32_finup(shash_desc_ctx(desc), data, len, out); +} + +static int crc32_final(struct shash_desc *desc, u8 *out) +{ + u32 *crcp = shash_desc_ctx(desc); + + *(__le32 *)out = cpu_to_le32p(crcp); + return 0; +} + +static int crc32_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, + out); +} +static struct shash_alg alg = { + .setkey = crc32_setkey, + .init = crc32_init, + .update = crc32_update, + .final = crc32_final, + .finup = crc32_finup, + .digest = crc32_digest, + .descsize = sizeof(u32), + .digestsize = CHKSUM_DIGEST_SIZE, + .base = { + .cra_name = "crc32", + .cra_driver_name = "crc32-table", + .cra_priority = 100, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_ctxsize = sizeof(u32), + .cra_module = THIS_MODULE, + .cra_init = crc32_cra_init, + } +}; + +static int __init crc32_mod_init(void) +{ + return crypto_register_shash(&alg); +} + +static void __exit crc32_mod_fini(void) +{ + crypto_unregister_shash(&alg); +} + +module_init(crc32_mod_init); +module_exit(crc32_mod_fini); + +MODULE_AUTHOR("Alexander Boyko "); +MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 90df61c376d68903c14f0c5e74254f493f8861a2 Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Sat, 19 Jan 2013 13:31:36 +0200 Subject: crypto: testmgr - add test vector for fcrypt fcrypt is used only as pcbc(fcrypt), but testmgr does not know this. Use the zero key, zero plaintext pcbc(fcrypt) test vector for testing plain 'fcrypt' to hide "no test for fcrypt" warnings. Signed-off-by: Jussi Kivilinna Acked-by: David S. Miller Signed-off-by: Herbert Xu --- crypto/testmgr.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index edf4a081..efd8b20e 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2268,6 +2268,21 @@ static const struct alg_test_desc alg_test_descs[] = { } } } + }, { + .alg = "ecb(fcrypt)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = { + .vecs = fcrypt_pcbc_enc_tv_template, + .count = 1 + }, + .dec = { + .vecs = fcrypt_pcbc_dec_tv_template, + .count = 1 + } + } + } }, { .alg = "ecb(khazad)", .test = alg_test_skcipher, -- cgit v1.2.3 From ea0d1aff21b1bd3252424f200c7de510403edfa5 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 22 Jan 2013 12:29:26 +0100 Subject: crypto: use ERR_CAST Replace PTR_ERR followed by ERR_PTR by ERR_CAST, to be more concise. 
The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ expression err,x; @@ - err = PTR_ERR(x); if (IS_ERR(x)) - return ERR_PTR(err); + return ERR_CAST(x); // Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- crypto/aead.c | 6 ++---- crypto/algapi.c | 4 +--- crypto/authenc.c | 3 +-- crypto/authencesn.c | 3 +-- crypto/blkcipher.c | 6 ++---- crypto/ccm.c | 23 +++++++---------------- crypto/chainiv.c | 3 +-- crypto/ctr.c | 3 +-- crypto/cts.c | 3 +-- crypto/gcm.c | 29 +++++++++-------------------- crypto/seqiv.c | 3 +-- 11 files changed, 27 insertions(+), 59 deletions(-) diff --git a/crypto/aead.c b/crypto/aead.c index 0b8121eb..4d04e12f 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -282,18 +282,16 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) & algt->mask) return ERR_PTR(-EINVAL); name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(name); if (IS_ERR(name)) - return ERR_PTR(err); + return ERR_CAST(name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/algapi.c b/crypto/algapi.c index c3b9bfee..08c57c8a 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -749,12 +749,10 @@ struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, u32 type, u32 mask) { const char *name; - int err; name = crypto_attr_alg_name(rta); - err = PTR_ERR(name); if (IS_ERR(name)) - return ERR_PTR(err); + return ERR_CAST(name); return crypto_find_alg(name, frontend, type, mask); } diff --git a/crypto/authenc.c b/crypto/authenc.c index d0583a44..ffce19de 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -592,9 +592,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 136b68b9..ab53762f 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -715,9 +715,8 @@ static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index a8d85a1d..e9e7244d 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -588,18 +588,16 @@ struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) & algt->mask) return ERR_PTR(-EINVAL); name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(name); if (IS_ERR(name)) - return ERR_PTR(err); + return ERR_CAST(name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/ccm.c b/crypto/ccm.c index 32fe1bb5..499c9171 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -484,18 +484,16 @@ static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return 
ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); - err = PTR_ERR(cipher); if (IS_ERR(cipher)) - return ERR_PTR(err); + return ERR_CAST(cipher); err = -EINVAL; if (cipher->cra_blocksize != 16) @@ -573,15 +571,13 @@ out_put_cipher: static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb) { - int err; const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(cipher_name); if (IS_ERR(cipher_name)) - return ERR_PTR(err); + return ERR_CAST(cipher_name); if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -612,20 +608,17 @@ static struct crypto_template crypto_ccm_tmpl = { static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb) { - int err; const char *ctr_name; const char *cipher_name; char full_name[CRYPTO_MAX_ALG_NAME]; ctr_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(ctr_name); if (IS_ERR(ctr_name)) - return ERR_PTR(err); + return ERR_CAST(ctr_name); cipher_name = crypto_attr_alg_name(tb[2]); - err = PTR_ERR(cipher_name); if (IS_ERR(cipher_name)) - return ERR_PTR(err); + return ERR_CAST(cipher_name); if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -760,17 +753,15 @@ static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); ccm_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(ccm_name); if (IS_ERR(ccm_name)) - return ERR_PTR(err); + return ERR_CAST(ccm_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/chainiv.c b/crypto/chainiv.c index ba200b07..834d8dd3 100644 --- a/crypto/chainiv.c +++ b/crypto/chainiv.c @@ -291,9 +291,8 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); err = crypto_get_default_rng(); if (err) diff --git a/crypto/ctr.c b/crypto/ctr.c index 4ca7222c..095dcb66 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -334,9 +334,8 @@ static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); - err = PTR_ERR(alg); if (IS_ERR(alg)) - return ERR_PTR(err); + return ERR_CAST(alg); /* We only support 16-byte blocks. 
*/ err = -EINVAL; diff --git a/crypto/cts.c b/crypto/cts.c index ccf9c5de..042223f8 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -282,9 +282,8 @@ static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); - err = PTR_ERR(alg); if (IS_ERR(alg)) - return ERR_PTR(err); + return ERR_CAST(alg); inst = ERR_PTR(-EINVAL); if (!is_power_of_2(alg->cra_blocksize)) diff --git a/crypto/gcm.c b/crypto/gcm.c index 1a252639..137ad1ec 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -701,9 +701,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); @@ -711,9 +710,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_AHASH_MASK); - err = PTR_ERR(ghash_alg); if (IS_ERR(ghash_alg)) - return ERR_PTR(err); + return ERR_CAST(ghash_alg); err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); @@ -787,15 +785,13 @@ out_put_ghash: static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) { - int err; const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; char full_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(cipher_name); if (IS_ERR(cipher_name)) - return ERR_PTR(err); + return ERR_CAST(cipher_name); if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) @@ -826,20 +822,17 @@ static struct crypto_template crypto_gcm_tmpl = { static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) { - int err; const char *ctr_name; const char *ghash_name; char full_name[CRYPTO_MAX_ALG_NAME]; ctr_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(ctr_name); if (IS_ERR(ctr_name)) - return ERR_PTR(err); + return ERR_CAST(ctr_name); ghash_name = crypto_attr_alg_name(tb[2]); - err = PTR_ERR(ghash_name); if (IS_ERR(ghash_name)) - return ERR_PTR(err); + return ERR_CAST(ghash_name); if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) @@ -971,17 +964,15 @@ static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); ccm_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(ccm_name); if (IS_ERR(ccm_name)) - return ERR_PTR(err); + return ERR_CAST(ccm_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -1222,17 +1213,15 @@ static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); ccm_name = crypto_attr_alg_name(tb[1]); - err = PTR_ERR(ccm_name); if (IS_ERR(ccm_name)) - return ERR_PTR(err); + return ERR_CAST(ccm_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 4c449122..f2cba4ed 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -305,9 +305,8 @@ static struct crypto_instance 
*seqiv_alloc(struct rtattr **tb) int err; algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); if (IS_ERR(algt)) - return ERR_PTR(err); + return ERR_CAST(algt); err = crypto_get_default_rng(); if (err) -- cgit v1.2.3 From f9db44b2ad38f1e6f9ff88359da550b5ec0e5d74 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Tue, 5 Feb 2013 18:19:13 +0100 Subject: crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all field of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause Cc: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 12 ++++++------ crypto/aead.c | 9 ++++----- crypto/ahash.c | 2 +- crypto/blkcipher.c | 6 +++--- crypto/crypto_user.c | 22 +++++++++++----------- crypto/pcompress.c | 3 +-- crypto/rng.c | 2 +- crypto/shash.c | 3 ++- 8 files changed, 29 insertions(+), 30 deletions(-) diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 533de955..7d4a8d28 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; - snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher"); - snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", - alg->cra_ablkcipher.geniv ?: ""); + strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", + sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; @@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; - snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher"); - snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", - alg->cra_ablkcipher.geniv ?: ""); + strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", + sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; diff --git a/crypto/aead.c b/crypto/aead.c index 4d04e12f..547491e3 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) struct crypto_report_aead raead; struct aead_alg *aead = &alg->cra_aead; - snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead"); - snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", - aead->geniv ?: ""); + strncpy(raead.type, "aead", sizeof(raead.type)); + strncpy(raead.geniv, aead->geniv ?: "", sizeof(raead.geniv)); raead.blocksize = alg->cra_blocksize; raead.maxauthsize = aead->maxauthsize; @@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, 
struct crypto_alg *alg) struct crypto_report_aead raead; struct aead_alg *aead = &alg->cra_aead; - snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead"); - snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv); + strncpy(raead.type, "nivaead", sizeof(raead.type)); + strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv)); raead.blocksize = alg->cra_blocksize; raead.maxauthsize = aead->maxauthsize; diff --git a/crypto/ahash.c b/crypto/ahash.c index 3887856c..793a27f2 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_hash rhash; - snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash"); + strncpy(rhash.type, "ahash", sizeof(rhash.type)); rhash.blocksize = alg->cra_blocksize; rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index e9e7244d..a79e7e9a 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; - snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher"); - snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s", - alg->cra_blkcipher.geniv ?: ""); + strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type)); + strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "", + sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 35d700a9..f6d9baf7 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -75,7 +75,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_cipher rcipher; - snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher"); + strncpy(rcipher.type, "cipher", sizeof(rcipher.type)); rcipher.blocksize = alg->cra_blocksize; rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; @@ -94,8 +94,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rcomp; - snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression"); - + strncpy(rcomp.type, "compression", sizeof(rcomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rcomp)) goto nla_put_failure; @@ -108,12 +107,14 @@ nla_put_failure: static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { - memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name)); - memcpy(&ualg->cru_driver_name, &alg->cra_driver_name, - sizeof(ualg->cru_driver_name)); - memcpy(&ualg->cru_module_name, module_name(alg->cra_module), - CRYPTO_MAX_ALG_NAME); - + strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); + strncpy(ualg->cru_driver_name, alg->cra_driver_name, + sizeof(ualg->cru_driver_name)); + strncpy(ualg->cru_module_name, module_name(alg->cra_module), + sizeof(ualg->cru_module_name)); + + ualg->cru_type = 0; + ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); @@ -122,8 +123,7 @@ static int crypto_report_one(struct crypto_alg *alg, if (alg->cra_flags & CRYPTO_ALG_LARVAL) { struct crypto_report_larval rl; - snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval"); - + strncpy(rl.type, "larval", sizeof(rl.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(struct crypto_report_larval), &rl)) goto 
nla_put_failure; diff --git a/crypto/pcompress.c b/crypto/pcompress.c index 04e083ff..7140fe70 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c @@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_comp rpcomp; - snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp"); - + strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type)); if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(struct crypto_report_comp), &rpcomp)) goto nla_put_failure; diff --git a/crypto/rng.c b/crypto/rng.c index f3b7894d..e0a25c24 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_rng rrng; - snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng"); + strncpy(rrng.type, "rng", sizeof(rrng.type)); rrng.seedsize = alg->cra_rng.seedsize; diff --git a/crypto/shash.c b/crypto/shash.c index f426330f..929058a6 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) struct crypto_report_hash rhash; struct shash_alg *salg = __crypto_shash_alg(alg); - snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash"); + strncpy(rhash.type, "shash", sizeof(rhash.type)); + rhash.blocksize = alg->cra_blocksize; rhash.digestsize = salg->digestsize; -- cgit v1.2.3 From 50540e6eba0e59c3a5e4551b9aa7868df4f7bd8d Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Tue, 5 Feb 2013 18:19:14 +0100 Subject: crypto: user - fix empty string test in report API The current test for empty strings fails because it is testing the address of a field, not a pointer. So the test will always be true. Test the first character in the string to not be null instead. Signed-off-by: Mathias Krause Cc: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/crypto_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index f6d9baf7..423a2670 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -196,7 +196,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct crypto_dump_info info; int err; - if (!p->cru_driver_name) + if (!p->cru_driver_name[0]) return -EINVAL; alg = crypto_alg_match(p, 1); -- cgit v1.2.3 From 25633c1e43f8d2a9343a14bd019a05fad5615af5 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Tue, 5 Feb 2013 18:19:15 +0100 Subject: crypto: user - ensure user supplied strings are nul-terminated To avoid misuse, ensure cru_name and cru_driver_name are always nul-terminated strings. 
Signed-off-by: Mathias Krause Signed-off-by: Herbert Xu --- crypto/crypto_user.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 423a2670..dfd511fb 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -30,6 +30,8 @@ #include "internal.h" +#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) + static DEFINE_MUTEX(crypto_cfg_mutex); /* The crypto netlink socket */ @@ -196,6 +198,9 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct crypto_dump_info info; int err; + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) + return -EINVAL; + if (!p->cru_driver_name[0]) return -EINVAL; @@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; LIST_HEAD(list); + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) + return -EINVAL; + if (priority && !strlen(p->cru_driver_name)) return -EINVAL; @@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct crypto_alg *alg; struct crypto_user_alg *p = nlmsg_data(nlh); + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) + return -EINVAL; + alg = crypto_alg_match(p, 1); if (!alg) return -ENOENT; @@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, struct crypto_user_alg *p = nlmsg_data(nlh); struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL]; + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) + return -EINVAL; + if (strlen(p->cru_driver_name)) exact = 1; -- cgit v1.2.3 From 70aeda630631489ad2825be5271f6ac88d33d850 Mon Sep 17 00:00:00 2001 From: "Markus F.X.J. Oberhumer" Date: Sun, 14 Oct 2012 15:39:04 +0200 Subject: crypto: testmgr - update LZO compression test vectors Update the LZO compression test vectors according to the latest compressor version. Signed-off-by: Markus F.X.J. Oberhumer --- crypto/testmgr.h | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index b5721e0b..3db1b759 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -25084,38 +25084,40 @@ static struct pcomp_testvec zlib_decomp_tv_template[] = { static struct comp_testvec lzo_comp_tv_template[] = { { .inlen = 70, - .outlen = 46, + .outlen = 57, .input = "Join us now and share the software " "Join us now and share the software ", .output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75" - "\x73\x20\x6e\x6f\x77\x20\x61\x6e" - "\x64\x20\x73\x68\x61\x72\x65\x20" - "\x74\x68\x65\x20\x73\x6f\x66\x74" - "\x77\x70\x01\x01\x4a\x6f\x69\x6e" - "\x3d\x88\x00\x11\x00\x00", + "\x73\x20\x6e\x6f\x77\x20\x61\x6e" + "\x64\x20\x73\x68\x61\x72\x65\x20" + "\x74\x68\x65\x20\x73\x6f\x66\x74" + "\x77\x70\x01\x32\x88\x00\x0c\x65" + "\x20\x74\x68\x65\x20\x73\x6f\x66" + "\x74\x77\x61\x72\x65\x20\x11\x00" + "\x00", }, { .inlen = 159, - .outlen = 133, + .outlen = 131, .input = "This document describes a compression method based on the LZO " "compression algorithm. 
This document defines the application of " "the LZO algorithm used in UBIFS.", - .output = "\x00\x2b\x54\x68\x69\x73\x20\x64" + .output = "\x00\x2c\x54\x68\x69\x73\x20\x64" "\x6f\x63\x75\x6d\x65\x6e\x74\x20" "\x64\x65\x73\x63\x72\x69\x62\x65" "\x73\x20\x61\x20\x63\x6f\x6d\x70" "\x72\x65\x73\x73\x69\x6f\x6e\x20" "\x6d\x65\x74\x68\x6f\x64\x20\x62" "\x61\x73\x65\x64\x20\x6f\x6e\x20" - "\x74\x68\x65\x20\x4c\x5a\x4f\x2b" - "\x8c\x00\x0d\x61\x6c\x67\x6f\x72" - "\x69\x74\x68\x6d\x2e\x20\x20\x54" - "\x68\x69\x73\x2a\x54\x01\x02\x66" - "\x69\x6e\x65\x73\x94\x06\x05\x61" - "\x70\x70\x6c\x69\x63\x61\x74\x76" - "\x0a\x6f\x66\x88\x02\x60\x09\x27" - "\xf0\x00\x0c\x20\x75\x73\x65\x64" - "\x20\x69\x6e\x20\x55\x42\x49\x46" - "\x53\x2e\x11\x00\x00", + "\x74\x68\x65\x20\x4c\x5a\x4f\x20" + "\x2a\x8c\x00\x09\x61\x6c\x67\x6f" + "\x72\x69\x74\x68\x6d\x2e\x20\x20" + "\x2e\x54\x01\x03\x66\x69\x6e\x65" + "\x73\x20\x74\x06\x05\x61\x70\x70" + "\x6c\x69\x63\x61\x74\x76\x0a\x6f" + "\x66\x88\x02\x60\x09\x27\xf0\x00" + "\x0c\x20\x75\x73\x65\x64\x20\x69" + "\x6e\x20\x55\x42\x49\x46\x53\x2e" + "\x11\x00\x00", }, }; -- cgit v1.2.3 From c90ee7766deeafba378a030407e718a3f69b9426 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Wed, 27 Feb 2013 17:06:00 -0800 Subject: hlist: drop the node parameter from iterators I'm not sure why, but the hlist for each entry iterators were conceived list_for_each_entry(pos, head, member) The hlist ones were greedy and wanted an extra parameter: hlist_for_each_entry(tpos, pos, head, member) Why did they need an extra pos parameter? I'm not quite sure. Not only they don't really need it, it also prevents the iterator from looking exactly like the list iterator, which is unfortunate. Besides the semantic patch, there was some manual work required: - Fix up the actual hlist iterators in linux/list.h - Fix up the declaration of other iterators based on the hlist ones. - A very small amount of places were using the 'node' parameter, this was modified to use 'obj->member' instead. - Coccinelle didn't handle the hlist_for_each_entry_safe iterator properly, so those had to be fixed up manually. The semantic patch which is mostly the work of Peter Senna Tschudin is here: @@ iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host; type T; expression a,c,d,e; identifier b; statement S; @@ -T b; <+... 
when != b ( hlist_for_each_entry(a, - b, c, d) S | hlist_for_each_entry_continue(a, - b, c) S | hlist_for_each_entry_from(a, - b, c) S | hlist_for_each_entry_rcu(a, - b, c, d) S | hlist_for_each_entry_rcu_bh(a, - b, c, d) S | hlist_for_each_entry_continue_rcu_bh(a, - b, c) S | for_each_busy_worker(a, c, - b, d) S | ax25_uid_for_each(a, - b, c) S | ax25_for_each(a, - b, c) S | inet_bind_bucket_for_each(a, - b, c) S | sctp_for_each_hentry(a, - b, c) S | sk_for_each(a, - b, c) S | sk_for_each_rcu(a, - b, c) S | sk_for_each_from -(a, b) +(a) S + sk_for_each_from(a) S | sk_for_each_safe(a, - b, c, d) S | sk_for_each_bound(a, - b, c) S | hlist_for_each_entry_safe(a, - b, c, d, e) S | hlist_for_each_entry_continue_rcu(a, - b, c) S | nr_neigh_for_each(a, - b, c) S | nr_neigh_for_each_safe(a, - b, c, d) S | nr_node_for_each(a, - b, c) S | nr_node_for_each_safe(a, - b, c, d) S | - for_each_gfn_sp(a, c, d, b) S + for_each_gfn_sp(a, c, d) S | - for_each_gfn_indirect_valid_sp(a, c, d, b) S + for_each_gfn_indirect_valid_sp(a, c, d) S | for_each_host(a, - b, c) S | for_each_host_safe(a, - b, c, d) S | for_each_mesh_entry(a, - b, c, d) S ) ...+> [akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c] [akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c] [akpm@linux-foundation.org: checkpatch fixes] [akpm@linux-foundation.org: fix warnings] [akpm@linux-foudnation.org: redo intrusive kvm changes] Tested-by: Peter Senna Tschudin Acked-by: Paul E. McKenney Signed-off-by: Sasha Levin Cc: Wu Fengguang Cc: Marcelo Tosatti Cc: Gleb Natapov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- crypto/algapi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 08c57c8a..6149a6e0 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(crypto_register_template); void crypto_unregister_template(struct crypto_template *tmpl) { struct crypto_instance *inst; - struct hlist_node *p, *n; + struct hlist_node *n; struct hlist_head *list; LIST_HEAD(users); @@ -457,7 +457,7 @@ void crypto_unregister_template(struct crypto_template *tmpl) list_del_init(&tmpl->list); list = &tmpl->instances; - hlist_for_each_entry(inst, p, list, list) { + hlist_for_each_entry(inst, list, list) { int err = crypto_remove_alg(&inst->alg, &users); BUG_ON(err); } @@ -466,7 +466,7 @@ void crypto_unregister_template(struct crypto_template *tmpl) up_write(&crypto_alg_sem); - hlist_for_each_entry_safe(inst, p, n, list, list) { + hlist_for_each_entry_safe(inst, n, list, list) { BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); tmpl->free(inst); } -- cgit v1.2.3
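
The last patch above converts the hlist iterators tree-wide by dropping the scratch node parameter. As a purely illustrative sketch, separate from the patches in this log, the before/after usage looks roughly like this; the struct item type, the find_item() helper, and the bucket layout are made-up names for illustration, only the hlist_for_each_entry() signatures come from the commit message itself:

    #include <linux/list.h>

    struct item {
            int key;
            struct hlist_node node;
    };

    static struct item *find_item(struct hlist_head *bucket, int key)
    {
            struct item *it;

            /*
             * Old-style iteration needed an extra scratch pointer:
             *
             *      struct hlist_node *pos;
             *      hlist_for_each_entry(it, pos, bucket, node)
             *              if (it->key == key)
             *                      return it;
             */

            /*
             * After this patch the node parameter is gone and the call
             * mirrors list_for_each_entry(pos, head, member).
             */
            hlist_for_each_entry(it, bucket, node)
                    if (it->key == key)
                            return it;

            return NULL;
    }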