From a49015cc37749055b7fd3415c322be7c63b71422 Mon Sep 17 00:00:00 2001 From: Robert Baronescu Date: Tue, 10 Oct 2017 13:21:59 +0300 Subject: crypto: tcrypt - fix S/G table for test_aead_speed() In case buffer length is a multiple of PAGE_SIZE, the S/G table is incorrectly generated. Fix this by handling buflen = k * PAGE_SIZE separately. Signed-off-by: Robert Baronescu Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 9267cbdb..3ced1ba1 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -198,11 +198,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], } sg_init_table(sg, np + 1); - np--; + if (rem) + np--; for (k = 0; k < np; k++) sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); - sg_set_buf(&sg[k + 1], xbuf[k], rem); + if (rem) + sg_set_buf(&sg[k + 1], xbuf[k], rem); } static void test_aead_speed(const char *algo, int enc, unsigned int secs, -- cgit v1.2.3 From 9211b88a8396b7272dff653486894ab8c890b963 Mon Sep 17 00:00:00 2001 From: Pierre Date: Sun, 12 Nov 2017 15:24:32 +0100 Subject: crypto: ecc - Fix NULL pointer deref. on no default_rng If crypto_get_default_rng returns an error, the function ecc_gen_privkey should return an error. Instead, it currently tries to use the default_rng nevertheless, thus creating a kernel panic with a NULL pointer dereference. Returning the error directly, as was supposedly intended when looking at the code, fixes this. Signed-off-by: Pierre Ducroquet Reviewed-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- crypto/ecc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/ecc.c b/crypto/ecc.c index 633a9bcd..18f32f2a 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) * DRBG with a security strength of 256. */ if (crypto_get_default_rng()) - err = -EFAULT; + return -EFAULT; err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); crypto_put_default_rng(); -- cgit v1.2.3 From 3093535d27c318640a0faa8b90b5b13f3adc8380 Mon Sep 17 00:00:00 2001 From: Martin Kepplinger Date: Tue, 14 Nov 2017 10:25:15 +0100 Subject: crypto: replace FSF address with web source in license notices A few years ago the FSF moved and "59 Temple Place" is wrong. Having this still in our source files feels old and unmaintained. Let's take the license statement serious and not confuse users. As https://www.gnu.org/licenses/gpl-howto.html suggests, we replace the postal address with "". Signed-off-by: Martin Kepplinger Signed-off-by: Herbert Xu --- crypto/ablk_helper.c | 4 +--- crypto/camellia_generic.c | 3 +-- crypto/cast5_generic.c | 3 +-- crypto/cast6_generic.c | 3 +-- crypto/simd.c | 4 +--- crypto/twofish_common.c | 5 ++--- crypto/twofish_generic.c | 5 ++--- crypto/xcbc.c | 3 +-- 8 files changed, 10 insertions(+), 20 deletions(-) diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c index 1441f07d..6e5d2f14 100644 --- a/crypto/ablk_helper.c +++ b/crypto/ablk_helper.c @@ -18,9 +18,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA + * along with this program. If not, see . 
* */ diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index a02286bf..32ddd483 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -13,8 +13,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * along with this program. If not, see . */ /* diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index df5c7262..66169c17 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -16,8 +16,7 @@ * any later version. * * You should have received a copy of the GNU General Public License -* along with this program; if not, write to the Free Software -* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA +* along with this program. If not, see . */ diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index 058c8d75..c8e5ec69 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -13,8 +13,7 @@ * any later version. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * along with this program. If not, see . */ diff --git a/crypto/simd.c b/crypto/simd.c index 88203370..208226d7 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -19,9 +19,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA + * along with this program. If not, see . * */ diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index 5f62c4f9..f3a0dd25 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c @@ -24,9 +24,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA + * along with this program. If not, see . + * * * This code is a "clean room" implementation, written from the paper * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index ebf7a3ef..07e62433 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -23,9 +23,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA + * along with this program. If not, see . + * * * This code is a "clean room" implementation, written from the paper * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, diff --git a/crypto/xcbc.c b/crypto/xcbc.c index df90b332..25c75af5 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -12,8 +12,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see . 
* * Author: * Kazunori Miyazawa -- cgit v1.2.3

From 440ba8e9a63324ae5ea339a40b0c3e6dcee595f4 Mon Sep 17 00:00:00 2001 From: Tudor-Dan Ambarus Date: Tue, 14 Nov 2017 16:59:15 +0200 Subject: crypto: tcrypt - set assoc in sg_init_aead() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

This results in better code readability.

Signed-off-by: Tudor Ambarus Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 3ced1ba1..28b4882f 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -185,7 +185,8 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) } static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], - unsigned int buflen) + unsigned int buflen, const void *assoc, + unsigned int aad_size) { int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; int k, rem; @@ -198,6 +199,9 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], } sg_init_table(sg, np + 1); + + sg_set_buf(&sg[0], assoc, aad_size); + if (rem) np--; for (k = 0; k < np; k++) @@ -318,14 +322,12 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, goto out; } - sg_init_aead(sg, xbuf, - *b_size + (enc ? 0 : authsize)); + sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize), + assoc, aad_size); sg_init_aead(sgout, xoutbuf, - *b_size + (enc ? authsize : 0)); - - sg_set_buf(&sg[0], assoc, aad_size); - sg_set_buf(&sgout[0], assoc, aad_size); + *b_size + (enc ? authsize : 0), assoc, + aad_size); aead_request_set_crypt(req, sg, sgout, *b_size + (enc ? 0 : authsize), -- cgit v1.2.3

From 1610c7ee810e278537c82977867e8a9a854baba0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 15 Nov 2017 11:44:28 +0100 Subject: crypto: keywrap - Add missing ULL suffixes for 64-bit constants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

On 32-bit (e.g. with m68k-linux-gnu-gcc-4.1): crypto/keywrap.c: In function ‘crypto_kw_decrypt’: crypto/keywrap.c:191: warning: integer constant is too large for ‘long’ type crypto/keywrap.c: In function ‘crypto_kw_encrypt’: crypto/keywrap.c:224: warning: integer constant is too large for ‘long’ type

Fixes: c0bf056e88a1e1e5 ("crypto: keywrap - simplify code") Signed-off-by: Geert Uytterhoeven Reviewed-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/keywrap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crypto/keywrap.c b/crypto/keywrap.c index 744e3513..ec5c6a08 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c @@ -188,7 +188,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, } /* Perform authentication check */ - if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6)) + if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL)) ret = -EBADMSG; memzero_explicit(&block, sizeof(struct crypto_kw_block)); @@ -221,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, * Place the predefined IV into block A -- for encrypt, the caller * does not need to provide an IV, but he needs to fetch the final IV. */ - block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6); + block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL); /* * src scatterlist is read-only. dst scatterlist is r/w.
During the -- cgit v1.2.3

From fb5a74bc3110de22a09b6a2c9220006d71bcd60a Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Sat, 18 Nov 2017 07:02:18 +0800 Subject: crypto: remove unused hardirq.h

Preempt counter APIs have been split out; currently, hardirq.h just includes the irq_enter/exit APIs, which are not used by crypto at all. So, remove the unused hardirq.h.

Signed-off-by: Yang Shi Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/ablk_helper.c | 1 - crypto/blkcipher.c | 1 - crypto/mcryptd.c | 1 - 3 files changed, 3 deletions(-)

diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c index 6e5d2f14..09776bb1 100644 --- a/crypto/ablk_helper.c +++ b/crypto/ablk_helper.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 6c43a0a1..01c0d4aa 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include

diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 4e647265..9fa362c1 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -26,7 +26,6 @@ #include #include #include -#include #define MCRYPTD_MAX_CPU_QLEN 100 #define MCRYPTD_BATCH 9 -- cgit v1.2.3

From 8733a5f8aa2031841b3b82246db980a952c3a7c8 Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Wed, 22 Nov 2017 16:08:17 +1100 Subject: crypto: cryptd - Add cryptd_max_cpu_qlen module parameter

Make the cryptd queue length configurable. We recently had a customer case where this needed to be tuned to accommodate the aesni_intel module and prevent packet drops.

Signed-off-by: Jon Maxwell Signed-off-by: Herbert Xu --- crypto/cryptd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/crypto/cryptd.c b/crypto/cryptd.c index bd43cf5b..b1eb131c 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -32,7 +32,9 @@ #include #include -#define CRYPTD_MAX_CPU_QLEN 1000 +unsigned int cryptd_max_cpu_qlen = 1000; +module_param(cryptd_max_cpu_qlen, uint, 0); +MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); struct cryptd_cpu_queue { struct crypto_queue queue; @@ -116,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue, crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, cryptd_queue_worker); } + pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); return 0; } @@ -1372,7 +1375,7 @@ static int __init cryptd_init(void) { int err; - err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); + err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); if (err) return err; -- cgit v1.2.3

From b71bca3c84a81228f89a07a2fc0295cab7361b00 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 22 Nov 2017 11:51:35 -0800 Subject: crypto: chacha20 - Fix unaligned access when loading constants

The four 32-bit constants for the initial state of ChaCha20 were loaded from a char array which is not guaranteed to have the needed alignment. Fix it by just assigning the constants directly instead.
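For reference, a minimal user-space sketch (not part of the patch) of where those four constants come from: they are just the bytes of the string "expand 32-byte k" read as little-endian 32-bit words, assembled bytewise here so alignment can never matter.

#include <stdint.h>
#include <stdio.h>

/* Build a little-endian u32 one byte at a time; valid at any alignment. */
static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	static const uint8_t c[16] = "expand 32-byte k";
	int i;

	/* prints 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 */
	for (i = 0; i < 4; i++)
		printf("state[%d] = 0x%08x\n", i, le32(c + 4 * i));
	return 0;
}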
Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/chacha20_generic.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index 4a45fa48..ec84e783 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -41,12 +41,10 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) { - static const char constant[16] = "expand 32-byte k"; - - state[0] = le32_to_cpuvp(constant + 0); - state[1] = le32_to_cpuvp(constant + 4); - state[2] = le32_to_cpuvp(constant + 8); - state[3] = le32_to_cpuvp(constant + 12); + state[0] = 0x61707865; /* "expa" */ + state[1] = 0x3320646e; /* "nd 3" */ + state[2] = 0x79622d32; /* "2-by" */ + state[3] = 0x6b206574; /* "te k" */ state[4] = ctx->key[0]; state[5] = ctx->key[1]; state[6] = ctx->key[2]; -- cgit v1.2.3 From 20ac3c0c1439ffc5f324b8a45b9b627fa60c9ad4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 22 Nov 2017 11:51:36 -0800 Subject: crypto: chacha20 - Use unaligned access macros when loading key and IV The generic ChaCha20 implementation has a cra_alignmask of 3, which ensures that the key passed into crypto_chacha20_setkey() and the IV passed into crypto_chacha20_init() are 4-byte aligned. However, these functions are also called from the ARM and ARM64 implementations of ChaCha20, which intentionally do not have a cra_alignmask set. This is broken because 32-bit words are being loaded from potentially-unaligned buffers without the unaligned access macros. Fix it by using the unaligned access macros when loading the key and IV. Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/chacha20_generic.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index ec84e783..b5a10ebf 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -9,16 +9,12 @@ * (at your option) any later version. */ +#include #include #include #include #include -static inline u32 le32_to_cpuvp(const void *p) -{ - return le32_to_cpup(p); -} - static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, unsigned int bytes) { @@ -53,10 +49,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) state[9] = ctx->key[5]; state[10] = ctx->key[6]; state[11] = ctx->key[7]; - state[12] = le32_to_cpuvp(iv + 0); - state[13] = le32_to_cpuvp(iv + 4); - state[14] = le32_to_cpuvp(iv + 8); - state[15] = le32_to_cpuvp(iv + 12); + state[12] = get_unaligned_le32(iv + 0); + state[13] = get_unaligned_le32(iv + 4); + state[14] = get_unaligned_le32(iv + 8); + state[15] = get_unaligned_le32(iv + 12); } EXPORT_SYMBOL_GPL(crypto_chacha20_init); @@ -70,7 +66,7 @@ int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, return -EINVAL; for (i = 0; i < ARRAY_SIZE(ctx->key); i++) - ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32)); + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); return 0; } -- cgit v1.2.3 From 6c76177611511a8be879eb44cd801c994eb3a717 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 22 Nov 2017 11:51:37 -0800 Subject: crypto: chacha20 - Remove cra_alignmask Now that crypto_chacha20_setkey() and crypto_chacha20_init() use the unaligned access macros and crypto_xor() also accepts unaligned buffers, there is no need to have a cra_alignmask set for chacha20-generic. 
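To make the fix concrete, here is a portable user-space analogue of get_unaligned_le32() (a sketch only; the helper name is invented). Dereferencing a cast u32 pointer at an odd address is undefined behaviour and faults on some architectures, whereas memcpy() is well-defined at any alignment and compiles down to the right access on each machine.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load_le32_any_alignment(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* legal at any address */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	v = __builtin_bswap32(v);	/* normalize to little-endian */
#endif
	return v;
}

int main(void)
{
	const uint8_t buf[5] = { 0x00, 0x74, 0x65, 0x20, 0x6b };

	/* buf + 1 is deliberately misaligned; this is still well-defined */
	printf("0x%08x\n", load_le32_any_alignment(buf + 1)); /* 0x6b206574 */
	return 0;
}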
Signed-off-by: Eric Biggers Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/chacha20_generic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index b5a10ebf..bb4affbd 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -105,7 +105,6 @@ static struct skcipher_alg alg = { .base.cra_priority = 100, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha20_ctx), - .base.cra_alignmask = sizeof(u32) - 1, .base.cra_module = THIS_MODULE, .min_keysize = CHACHA20_KEY_SIZE, -- cgit v1.2.3 From 49b79ef83e36c996759ef5ca4df9eda80d8d549e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 22 Nov 2017 11:51:39 -0800 Subject: crypto: chacha20 - Fix keystream alignment for chacha20_block() When chacha20_block() outputs the keystream block, it uses 'u32' stores directly. However, the callers (crypto/chacha20_generic.c and drivers/char/random.c) declare the keystream buffer as a 'u8' array, which is not guaranteed to have the needed alignment. Fix it by having both callers declare the keystream as a 'u32' array. For now this is preferable to switching over to the unaligned access macros because chacha20_block() is only being used in cases where we can easily control the alignment (stack buffers). Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/chacha20_generic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index bb4affbd..e451c3cb 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -18,20 +18,20 @@ static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, unsigned int bytes) { - u8 stream[CHACHA20_BLOCK_SIZE]; + u32 stream[CHACHA20_BLOCK_WORDS]; if (dst != src) memcpy(dst, src, bytes); while (bytes >= CHACHA20_BLOCK_SIZE) { chacha20_block(state, stream); - crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE); + crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE); bytes -= CHACHA20_BLOCK_SIZE; dst += CHACHA20_BLOCK_SIZE; } if (bytes) { chacha20_block(state, stream); - crypto_xor(dst, stream, bytes); + crypto_xor(dst, (const u8 *)stream, bytes); } } -- cgit v1.2.3 From bedfbcf55b6411b115d4234ed48075e617916965 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sun, 26 Nov 2017 00:16:46 +0100 Subject: crypto: ecdh - fix typo in KPP dependency of CRYPTO_ECDH This fixes a typo in the CRYPTO_KPP dependency of CRYPTO_ECDH. Fixes: e8c7f3061131 ("crypto: ecdh - Add ECDH software support") Cc: # v4.8+ Signed-off-by: Hauke Mehrtens Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index f7911963..9327fbfc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -130,7 +130,7 @@ config CRYPTO_DH config CRYPTO_ECDH tristate "ECDH algorithm" - select CRYTPO_KPP + select CRYPTO_KPP select CRYPTO_RNG_DEFAULT help Generic implementation of the ECDH algorithm -- cgit v1.2.3 From 724174d99fd51ae560ab56f33286c4945041f47c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 30 Nov 2017 11:26:14 +0000 Subject: crypto: cryptd - make cryptd_max_cpu_qlen module parameter static The cryptd_max_cpu_qlen module parameter is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warning: crypto/cryptd.c:35:14: warning: symbol 'cryptd_max_cpu_qlen' was not declared. Should it be static? 
Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- crypto/cryptd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index b1eb131c..552e3a86 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -32,7 +32,7 @@ #include #include -unsigned int cryptd_max_cpu_qlen = 1000; +static unsigned int cryptd_max_cpu_qlen = 1000; module_param(cryptd_max_cpu_qlen, uint, 0); MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); -- cgit v1.2.3 From b5677d415a609cfeb0052fdf1694eb1c384e098a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 7 Dec 2017 10:55:59 -0800 Subject: crypto: api - Unexport crypto_larval_lookup() crypto_larval_lookup() is not used outside of crypto/api.c, so unexport it and mark it 'static'. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/api.c | 4 ++-- crypto/internal.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/crypto/api.c b/crypto/api.c index 2a2479d1..6da802d7 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -205,7 +205,8 @@ struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alg_lookup); -struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) +static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, + u32 mask) { struct crypto_alg *alg; @@ -231,7 +232,6 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) return crypto_larval_add(name, type, mask); } -EXPORT_SYMBOL_GPL(crypto_larval_lookup); int crypto_probing_notify(unsigned long val, void *v) { diff --git a/crypto/internal.h b/crypto/internal.h index f0732042..ae65e5fc 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -78,7 +78,6 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm); struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); void crypto_larval_kill(struct crypto_alg *alg); -struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, -- cgit v1.2.3 From ffb15a03dbf4998b10ff22afd5937ae63a5c181f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 7 Dec 2017 10:56:34 -0800 Subject: crypto: null - Get rid of crypto_{get,put}_default_null_skcipher2() Since commit 12c88739b745 ("crypto: null - Remove default null blkcipher"), crypto_get_default_null_skcipher2() and crypto_put_default_null_skcipher2() are the same as their non-2 equivalents. So switch callers of the "2" versions over to the original versions and remove the "2" versions. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/aead.c | 6 +++--- crypto/algif_aead.c | 4 ++-- crypto/authenc.c | 4 ++-- crypto/authencesn.c | 4 ++-- crypto/gcm.c | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crypto/aead.c b/crypto/aead.c index f794b30a..fe00cbd7 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -295,7 +295,7 @@ int aead_init_geniv(struct crypto_aead *aead) if (err) goto out; - ctx->sknull = crypto_get_default_null_skcipher2(); + ctx->sknull = crypto_get_default_null_skcipher(); err = PTR_ERR(ctx->sknull); if (IS_ERR(ctx->sknull)) goto out; @@ -315,7 +315,7 @@ out: return err; drop_null: - crypto_put_default_null_skcipher2(); + crypto_put_default_null_skcipher(); goto out; } EXPORT_SYMBOL_GPL(aead_init_geniv); @@ -325,7 +325,7 @@ void aead_exit_geniv(struct crypto_aead *tfm) struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher2(); + crypto_put_default_null_skcipher(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 9d73be28..87a27eb1 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -469,7 +469,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask) return ERR_CAST(aead); } - null_tfm = crypto_get_default_null_skcipher2(); + null_tfm = crypto_get_default_null_skcipher(); if (IS_ERR(null_tfm)) { crypto_free_aead(aead); kfree(tfm); @@ -487,7 +487,7 @@ static void aead_release(void *private) struct aead_tfm *tfm = private; crypto_free_aead(tfm->aead); - crypto_put_default_null_skcipher2(); + crypto_put_default_null_skcipher(); kfree(tfm); } diff --git a/crypto/authenc.c b/crypto/authenc.c index 875470b0..d3d6d72f 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -329,7 +329,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher2(); + null = crypto_get_default_null_skcipher(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_skcipher; @@ -363,7 +363,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher2(); + crypto_put_default_null_skcipher(); } static void crypto_authenc_free(struct aead_instance *inst) diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0cf5fefd..15f91ddd 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -352,7 +352,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) if (IS_ERR(enc)) goto err_free_ahash; - null = crypto_get_default_null_skcipher2(); + null = crypto_get_default_null_skcipher(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_skcipher; @@ -389,7 +389,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm) crypto_free_ahash(ctx->auth); crypto_free_skcipher(ctx->enc); - crypto_put_default_null_skcipher2(); + crypto_put_default_null_skcipher(); } static void crypto_authenc_esn_free(struct aead_instance *inst) diff --git a/crypto/gcm.c b/crypto/gcm.c index 8589681f..0ad879e1 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -1101,7 +1101,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) if (IS_ERR(aead)) return PTR_ERR(aead); - null = crypto_get_default_null_skcipher2(); + null = crypto_get_default_null_skcipher(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_aead; @@ -1129,7 +1129,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) struct crypto_rfc4543_ctx *ctx = 
crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); - crypto_put_default_null_skcipher2(); + crypto_put_default_null_skcipher(); } static void crypto_rfc4543_free(struct aead_instance *inst) -- cgit v1.2.3

From c978bed923f63894981d038e014e273a60a6b8c5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 11 Dec 2017 13:58:26 -0800 Subject: crypto: gf128mul - remove incorrect comment

The comment in gf128mul_x8_ble() was copy-and-pasted from gf128mul.h and makes no sense in the new context. Remove it.

Cc: Harsh Jain Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/gf128mul.c | 2 -- 1 file changed, 2 deletions(-)

diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 24e60195..a4b1c026 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x) { u64 a = le64_to_cpu(x->a); u64 b = le64_to_cpu(x->b); - - /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ u64 _tt = gf128mul_table_be[a >> 56]; r->a = cpu_to_le64((a << 8) | (b >> 56)); -- cgit v1.2.3

From f597ef6fc8a700949cb97ec18047ec574c5e37fb Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Tue, 12 Dec 2017 19:30:13 +0000 Subject: crypto: echainiv - Remove unused alg/spawn variable

This patch removes two unused variables and some dead code that used them.

Fixes: 52a08691eb96 ("crypto: echainiv - Remove AEAD compatibility code") Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/echainiv.c | 5 ----- 1 file changed, 5 deletions(-)

diff --git a/crypto/echainiv.c b/crypto/echainiv.c index e3d889b1..45819e60 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -118,8 +118,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) { struct aead_instance *inst; - struct crypto_aead_spawn *spawn; - struct aead_alg *alg; int err; inst = aead_geniv_alloc(tmpl, tb, 0, 0); @@ -127,9 +125,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, if (IS_ERR(inst)) return PTR_ERR(inst); - spawn = aead_instance_ctx(inst); - alg = crypto_spawn_aead_alg(spawn); - err = -EINVAL; if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize) goto free_inst; -- cgit v1.2.3

From 85d35da5374413041093f5bcb0c352ef9bbe7a5b Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Tue, 12 Dec 2017 19:30:14 +0000 Subject: crypto: seqiv - Remove unused alg/spawn variable

This patch removes two unused variables and some dead code that used them.
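Both this and the echainiv twin patch delete the same class of dead code: values that are computed and stored but never read afterwards. A stand-alone illustration (names invented); gcc points at such variables with its unused-variable warnings, and when the initializers have no side effects the deletion cannot change behaviour.

#include <stdio.h>

static int lookup(int x)
{
	return x * 2;			/* pure: callers are safe to delete */
}

int main(void)
{
	int inst = 5;
	int spawn = lookup(inst);	/* only ever feeds 'alg' */
	int alg = lookup(spawn);	/* never read: a dead store */

	/* removing 'alg' makes 'spawn' dead too; the patch drops both */
	printf("%d\n", inst);
	return 0;
}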
Fixes: 404f1f40cac0 ("crypto: seqiv - Remove AEAD compatibility code") Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/seqiv.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 570b7d1a..39dbf2f7 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -144,8 +144,6 @@ static int seqiv_aead_decrypt(struct aead_request *req) static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) { struct aead_instance *inst; - struct crypto_aead_spawn *spawn; - struct aead_alg *alg; int err; inst = aead_geniv_alloc(tmpl, tb, 0, 0); @@ -153,9 +151,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) if (IS_ERR(inst)) return PTR_ERR(inst); - spawn = aead_instance_ctx(inst); - alg = crypto_spawn_aead_alg(spawn); - err = -EINVAL; if (inst->alg.ivsize != sizeof(u64)) goto free_inst; -- cgit v1.2.3 From f696ab85aa28632425bebed97bcbe7018c892841 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 17 Dec 2017 08:29:00 +0000 Subject: crypto: tcrypt - use multi buf for ahash mb test The multi buffer ahash speed test was allocating multiple buffers for use with the multiple outstanding requests it was starting but never actually using them (except to free them), instead using a different single statically allocated buffer for all requests. Fix this by actually using the allocated buffers for the test. It is noted that it may seem tempting to instead remove the allocation and free of the multiple buffers and leave things as they are since this is a hash test where the input is read only. However, after consideration I believe that multiple buffers better reflect real life scenario with regard to data cache and TLB behaviours etc. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 28b4882f..a0c4e0db 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -385,7 +385,7 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) } struct test_mb_ahash_data { - struct scatterlist sg[TVMEMSIZE]; + struct scatterlist sg[XBUFSIZE]; char result[64]; struct ahash_request *req; struct crypto_wait wait; @@ -428,7 +428,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, ahash_request_set_callback(data[i].req, 0, crypto_req_done, &data[i].wait); - test_hash_sg_init(data[i].sg); + + sg_init_table(data[i].sg, XBUFSIZE); + for (j = 0; j < XBUFSIZE; j++) { + sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE); + memset(data[i].xbuf[j], 0xff, PAGE_SIZE); + } } pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, @@ -439,9 +444,9 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (speed[i].blen != speed[i].plen) continue; - if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { + if (speed[i].blen > XBUFSIZE * PAGE_SIZE) { pr_err("template (%u) too big for tvmem (%lu)\n", - speed[i].blen, TVMEMSIZE * PAGE_SIZE); + speed[i].blen, XBUFSIZE * PAGE_SIZE); goto out; } -- cgit v1.2.3 From afb87203b36b8d120a0d01e729313c3d483feb59 Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 17 Dec 2017 08:29:01 +0000 Subject: crypto: tcrypt - fix AEAD decryption speed test The AEAD speed test pretended to support decryption, however that support was broken as decryption requires a valid auth field which the test did not provide. 
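(A stubbed sketch, with invented names, of why the old test could not work: an AEAD decrypt verifies the authentication tag before anything else, so timing it over a buffer that never held a valid tag only measures the -EBADMSG failure path.)

#include <stdio.h>
#include <string.h>

#define BADMSG 74	/* stand-in for EBADMSG */

/* stand-in tag check: a real AEAD compares a MAC over the whole message */
static int decrypt_stub(const unsigned char *buf, unsigned char tag)
{
	return buf[15] == tag ? 0 : -BADMSG;
}

int main(void)
{
	unsigned char buf[16];

	memset(buf, 0xff, sizeof(buf));			/* no valid tag present */
	printf("%d\n", decrypt_stub(buf, 0x42));	/* -74: useless for timing */

	buf[15] = 0x42;			/* one encrypt pass deposits the tag */
	printf("%d\n", decrypt_stub(buf, 0x42));	/* 0: now measurable */
	return 0;
}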
Fix this by running the encryption path once with input/output sgls switched to calculate the auth field prior to performing decryption speed tests.

Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a0c4e0db..69c8e639 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -329,10 +329,30 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, *b_size + (enc ? authsize : 0), assoc, aad_size); + aead_request_set_ad(req, aad_size); + + if (!enc) { + + /* + * For decryption we need a proper auth so + * we do the encryption path once with buffers + * reversed (input <-> output) to calculate it + */ + aead_request_set_crypt(req, sgout, sg, + *b_size, iv); + ret = do_one_aead_op(req, + crypto_aead_encrypt(req)); + + if (ret) { + pr_err("calculating auth failed failed (%d)\n", + ret); + break; + } + } + aead_request_set_crypt(req, sg, sgout, *b_size + (enc ? 0 : authsize), iv); - aead_request_set_ad(req, aad_size); if (secs) ret = test_aead_jiffies(req, enc, *b_size, @@ -1566,16 +1586,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) NULL, 0, 16, 16, aead_speed_template_20); test_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8, speed_template_16_24_32); + test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, + NULL, 0, 16, 16, aead_speed_template_20); + test_aead_speed("gcm(aes)", DECRYPT, sec, + NULL, 0, 16, 8, speed_template_16_24_32); break; case 212: test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0, 16, 16, aead_speed_template_19); + test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, + NULL, 0, 16, 16, aead_speed_template_19); break; case 213: test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec, NULL, 0, 16, 8, aead_speed_template_36); + test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec, + NULL, 0, 16, 8, aead_speed_template_36); break; case 214: -- cgit v1.2.3

From 8f8a22d73abe3a35ccd4cc40ef96b72ba19609fc Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 17 Dec 2017 08:29:02 +0000 Subject: crypto: tcrypt - allow setting num of bufs

For multiple-buffer speed tests, the number of buffers, or requests, used actually sets the level of parallelism a tfm provider may utilize to hide latency. The existing number (of 8) is good for some software-based providers but not enough for many HW providers with deep FIFOs.

Add a module parameter that allows setting the number of multiple buffers/requests used, leaving the default at 8.
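The new knob composes with the existing tcrypt parameters; a hypothetical invocation (using mode 600, the multibuffer skcipher test added later in this series, with a deeper queue) would be:

	modprobe tcrypt mode=600 sec=1 num_mb=32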
Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 69c8e639..26043600 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -67,6 +67,7 @@ static char *alg = NULL; static u32 type; static u32 mask; static int mode; +static u32 num_mb = 8; static char *tvmem[TVMEMSIZE]; static char *check[] = { @@ -413,7 +414,7 @@ struct test_mb_ahash_data { }; static void test_mb_ahash_speed(const char *algo, unsigned int sec, - struct hash_speed *speed) + struct hash_speed *speed, u32 num_mb) { struct test_mb_ahash_data *data; struct crypto_ahash *tfm; @@ -422,7 +423,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, unsigned int i, j, k; int ret; - data = kzalloc(sizeof(*data) * 8, GFP_KERNEL); + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); if (!data) return; @@ -433,7 +434,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, goto free_data; } - for (i = 0; i < 8; ++i) { + for (i = 0; i < num_mb; ++i) { if (testmgr_alloc_buf(data[i].xbuf)) goto out; @@ -473,7 +474,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, if (speed[i].klen) crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); - for (k = 0; k < 8; k++) + for (k = 0; k < num_mb; k++) ahash_request_set_crypt(data[k].req, data[k].sg, data[k].result, speed[i].blen); @@ -484,7 +485,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, start = get_cycles(); - for (k = 0; k < 8; k++) { + for (k = 0; k < num_mb; k++) { ret = crypto_ahash_digest(data[k].req); if (ret == -EINPROGRESS) { ret = 0; @@ -509,7 +510,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, end = get_cycles(); cycles = end - start; pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", - cycles, cycles / (8 * speed[i].blen)); + cycles, cycles / (num_mb * speed[i].blen)); if (ret) { pr_err("At least one hashing failed ret=%d\n", ret); @@ -518,10 +519,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, } out: - for (k = 0; k < 8; ++k) + for (k = 0; k < num_mb; ++k) ahash_request_free(data[k].req); - for (k = 0; k < 8; ++k) + for (k = 0; k < num_mb; ++k) testmgr_free_buf(data[k].xbuf); crypto_free_ahash(tfm); @@ -1815,19 +1816,23 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) if (mode > 400 && mode < 500) break; /* fall through */ case 422: - test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); + test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, + num_mb); if (mode > 400 && mode < 500) break; /* fall through */ case 423: - test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); + test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, + num_mb); if (mode > 400 && mode < 500) break; /* fall through */ case 424: - test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); + test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, + num_mb); if (mode > 400 && mode < 500) break; /* fall through */ case 425: - test_mb_ahash_speed("sm3", sec, generic_hash_speed_template); + test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, + num_mb); if (mode > 400 && mode < 500) break; /* fall through */ case 499: @@ -2106,6 +2111,8 @@ module_param(mode, int, 0); module_param(sec, uint, 0); MODULE_PARM_DESC(sec, "Length in seconds of speed tests " "(defaults to zero which uses CPU cycles instead)"); +module_param(num_mb, uint, 0000); 
+MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Quick & dirty crypto testing module"); -- cgit v1.2.3 From 4b88cf59d55533e08876c90fefd05a8c30310eff Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 17 Dec 2017 08:29:03 +0000 Subject: crypto: tcrypt - add multi buf ahash jiffies test The multi buffer concurrent requests ahash speed test only supported the cycles mode. Add support for the so called jiffies mode that test performance of bytes/sec. We only add support for digest mode at the moment. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 112 +++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 82 insertions(+), 30 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 26043600..e406b00d 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -413,13 +413,87 @@ struct test_mb_ahash_data { char *xbuf[XBUFSIZE]; }; -static void test_mb_ahash_speed(const char *algo, unsigned int sec, +static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb) +{ + int i, rc[num_mb], err = 0; + + /* Fire up a bunch of concurrent requests */ + for (i = 0; i < num_mb; i++) + rc[i] = crypto_ahash_digest(data[i].req); + + /* Wait for all requests to finish */ + for (i = 0; i < num_mb; i++) { + rc[i] = crypto_wait_req(rc[i], &data[i].wait); + + if (rc[i]) { + pr_info("concurrent request %d error %d\n", i, rc[i]); + err = rc[i]; + } + } + + return err; +} + +static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, + int secs, u32 num_mb) +{ + unsigned long start, end; + int bcount; + int ret; + + for (start = jiffies, end = start + secs * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = do_mult_ahash_op(data, num_mb); + if (ret) + return ret; + } + + pr_cont("%d operations in %d seconds (%ld bytes)\n", + bcount * num_mb, secs, (long)bcount * blen * num_mb); + return 0; +} + +static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, + u32 num_mb) +{ + unsigned long cycles = 0; + int ret = 0; + int i; + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + ret = do_mult_ahash_op(data, num_mb); + if (ret) + goto out; + } + + /* The real thing. 
*/ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + ret = do_mult_ahash_op(data, num_mb); + end = get_cycles(); + + if (ret) + goto out; + + cycles += end - start; + } + +out: + if (ret == 0) + pr_cont("1 operation in %lu cycles (%d bytes)\n", + (cycles + 4) / (8 * num_mb), blen); + + return ret; +} + +static void test_mb_ahash_speed(const char *algo, unsigned int secs, struct hash_speed *speed, u32 num_mb) { struct test_mb_ahash_data *data; struct crypto_ahash *tfm; - unsigned long start, end; - unsigned long cycles; unsigned int i, j, k; int ret; @@ -483,34 +557,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec, i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); - start = get_cycles(); - - for (k = 0; k < num_mb; k++) { - ret = crypto_ahash_digest(data[k].req); - if (ret == -EINPROGRESS) { - ret = 0; - continue; - } - - if (ret) - break; - - crypto_req_done(&data[k].req->base, 0); - } - - for (j = 0; j < k; j++) { - struct crypto_wait *wait = &data[j].wait; - int wait_ret; - - wait_ret = crypto_wait_req(-EINPROGRESS, wait); - if (wait_ret) - ret = wait_ret; - } + if (secs) + ret = test_mb_ahash_jiffies(data, speed[i].blen, secs, + num_mb); + else + ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb); - end = get_cycles(); - cycles = end - start; - pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", - cycles, cycles / (num_mb * speed[i].blen)); if (ret) { pr_err("At least one hashing failed ret=%d\n", ret); -- cgit v1.2.3 From 6f0e4a555377d9f675949a681b423f5e58cdac4b Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 17 Dec 2017 08:29:04 +0000 Subject: crypto: tcrypt - add multibuf skcipher speed test The performance of some skcipher tfm providers is affected by the amount of parallelism possible with the processing. Introduce an async skcipher concurrent multiple buffer processing speed test to be able to test performance of such tfm providers. 
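The core of the new test is the fire-everything-then-wait-for-everything loop in do_mult_acipher_op(). As a rough user-space analogue (a sketch only; pthreads stand in for async crypto completions), the pattern looks like this; the point is that total latency is one round trip rather than NUM_MB of them, which is what deep-FIFO hardware needs to stay busy:

#include <pthread.h>
#include <stdio.h>

#define NUM_MB 8	/* mirrors the num_mb default */

static void *one_request(void *arg)
{
	(void)arg;	/* stand-in for one async cipher request */
	return NULL;
}

int main(void)
{
	pthread_t tid[NUM_MB];
	int i;

	/* fire up a bunch of concurrent requests... */
	for (i = 0; i < NUM_MB; i++)
		if (pthread_create(&tid[i], NULL, one_request, NULL))
			return 1;

	/* ...then collect them all */
	for (i = 0; i < NUM_MB; i++)
		pthread_join(tid[i], NULL);

	puts("done");
	return 0;
}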
Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 460 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 460 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e406b00d..d617c195 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -818,6 +818,254 @@ static void test_hash_speed(const char *algo, unsigned int secs, return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); } +struct test_mb_skcipher_data { + struct scatterlist sg[XBUFSIZE]; + struct skcipher_request *req; + struct crypto_wait wait; + char *xbuf[XBUFSIZE]; +}; + +static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc, + u32 num_mb) +{ + int i, rc[num_mb], err = 0; + + /* Fire up a bunch of concurrent requests */ + for (i = 0; i < num_mb; i++) { + if (enc == ENCRYPT) + rc[i] = crypto_skcipher_encrypt(data[i].req); + else + rc[i] = crypto_skcipher_decrypt(data[i].req); + } + + /* Wait for all requests to finish */ + for (i = 0; i < num_mb; i++) { + rc[i] = crypto_wait_req(rc[i], &data[i].wait); + + if (rc[i]) { + pr_info("concurrent request %d error %d\n", i, rc[i]); + err = rc[i]; + } + } + + return err; +} + +static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc, + int blen, int secs, u32 num_mb) +{ + unsigned long start, end; + int bcount; + int ret; + + for (start = jiffies, end = start + secs * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = do_mult_acipher_op(data, enc, num_mb); + if (ret) + return ret; + } + + pr_cont("%d operations in %d seconds (%ld bytes)\n", + bcount * num_mb, secs, (long)bcount * blen * num_mb); + return 0; +} + +static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc, + int blen, u32 num_mb) +{ + unsigned long cycles = 0; + int ret = 0; + int i; + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + ret = do_mult_acipher_op(data, enc, num_mb); + if (ret) + goto out; + } + + /* The real thing. 
*/ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + ret = do_mult_acipher_op(data, enc, num_mb); + end = get_cycles(); + + if (ret) + goto out; + + cycles += end - start; + } + +out: + if (ret == 0) + pr_cont("1 operation in %lu cycles (%d bytes)\n", + (cycles + 4) / (8 * num_mb), blen); + + return ret; +} + +static void test_mb_skcipher_speed(const char *algo, int enc, int secs, + struct cipher_speed_template *template, + unsigned int tcount, u8 *keysize, u32 num_mb) +{ + struct test_mb_skcipher_data *data; + struct crypto_skcipher *tfm; + unsigned int i, j, iv_len; + const char *key; + const char *e; + u32 *b_size; + char iv[128]; + int ret; + + if (enc == ENCRYPT) + e = "encryption"; + else + e = "decryption"; + + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); + if (!data) + return; + + tfm = crypto_alloc_skcipher(algo, 0, 0); + if (IS_ERR(tfm)) { + pr_err("failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + goto out_free_data; + } + + for (i = 0; i < num_mb; ++i) + if (testmgr_alloc_buf(data[i].xbuf)) { + while (i--) + testmgr_free_buf(data[i].xbuf); + goto out_free_tfm; + } + + + for (i = 0; i < num_mb; ++i) + if (testmgr_alloc_buf(data[i].xbuf)) { + while (i--) + testmgr_free_buf(data[i].xbuf); + goto out_free_tfm; + } + + + for (i = 0; i < num_mb; ++i) { + data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (!data[i].req) { + pr_err("alg: skcipher: Failed to allocate request for %s\n", + algo); + while (i--) + skcipher_request_free(data[i].req); + goto out_free_xbuf; + } + } + + for (i = 0; i < num_mb; ++i) { + skcipher_request_set_callback(data[i].req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &data[i].wait); + crypto_init_wait(&data[i].wait); + } + + pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, + get_driver_name(crypto_skcipher, tfm), e); + + i = 0; + do { + b_size = block_sizes; + do { + if (*b_size > XBUFSIZE * PAGE_SIZE) { + pr_err("template (%u) too big for bufufer (%lu)\n", + *b_size, XBUFSIZE * PAGE_SIZE); + goto out; + } + + pr_info("test %u (%d bit key, %d byte blocks): ", i, + *keysize * 8, *b_size); + + /* Set up tfm global state, i.e. the key */ + + memset(tvmem[0], 0xff, PAGE_SIZE); + key = tvmem[0]; + for (j = 0; j < tcount; j++) { + if (template[j].klen == *keysize) { + key = template[j].key; + break; + } + } + + crypto_skcipher_clear_flags(tfm, ~0); + + ret = crypto_skcipher_setkey(tfm, key, *keysize); + if (ret) { + pr_err("setkey() failed flags=%x\n", + crypto_skcipher_get_flags(tfm)); + goto out; + } + + iv_len = crypto_skcipher_ivsize(tfm); + if (iv_len) + memset(&iv, 0xff, iv_len); + + /* Now setup per request stuff, i.e. 
buffers */ + + for (j = 0; j < num_mb; ++j) { + struct test_mb_skcipher_data *cur = &data[j]; + unsigned int k = *b_size; + unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE); + unsigned int p = 0; + + sg_init_table(cur->sg, pages); + + while (k > PAGE_SIZE) { + sg_set_buf(cur->sg + p, cur->xbuf[p], + PAGE_SIZE); + memset(cur->xbuf[p], 0xff, PAGE_SIZE); + p++; + k -= PAGE_SIZE; + } + + sg_set_buf(cur->sg + p, cur->xbuf[p], k); + memset(cur->xbuf[p], 0xff, k); + + skcipher_request_set_crypt(cur->req, cur->sg, + cur->sg, *b_size, + iv); + } + + if (secs) + ret = test_mb_acipher_jiffies(data, enc, + *b_size, secs, + num_mb); + else + ret = test_mb_acipher_cycles(data, enc, + *b_size, num_mb); + + if (ret) { + pr_err("%s() failed flags=%x\n", e, + crypto_skcipher_get_flags(tfm)); + break; + } + b_size++; + i++; + } while (*b_size); + keysize++; + } while (*keysize); + +out: + for (i = 0; i < num_mb; ++i) + skcipher_request_free(data[i].req); +out_free_xbuf: + for (i = 0; i < num_mb; ++i) + testmgr_free_buf(data[i].xbuf); +out_free_tfm: + crypto_free_skcipher(tfm); +out_free_data: + kfree(data); +} + static inline int do_one_acipher_op(struct skcipher_request *req, int ret) { struct crypto_wait *wait = req->base.data; @@ -2102,6 +2350,218 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) speed_template_8_32); break; + case 600: + test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0, + speed_template_32_40_48, num_mb); + test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, + speed_template_32_40_48, num_mb); + test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, + 0, speed_template_20_28_36, num_mb); + test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, + 0, speed_template_20_28_36, num_mb); + break; + + case 601: + test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + 
test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec, + des3_speed_template, DES3_SPEED_VECTORS, + speed_template_24, num_mb); + break; + + case 602: + test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0, + speed_template_8, num_mb); + test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0, + speed_template_8, num_mb); + break; + + case 603: + test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0, + speed_template_32_48, num_mb); + test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0, + speed_template_32_48, num_mb); + test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + break; + + case 604: + test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32, num_mb); + test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0, + speed_template_32_40_48, num_mb); + test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0, + speed_template_32_40_48, num_mb); + test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0, + speed_template_32_48_64, num_mb); + test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0, + speed_template_32_48_64, num_mb); + break; + + case 605: + test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, + speed_template_8, num_mb); + break; + + case 606: + 
test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0, + speed_template_8_16, num_mb); + test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0, + speed_template_8_16, num_mb); + test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0, + speed_template_8_16, num_mb); + test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0, + speed_template_8_16, num_mb); + test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0, + speed_template_8_16, num_mb); + test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0, + speed_template_8_16, num_mb); + break; + + case 607: + test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0, + speed_template_32_48, num_mb); + test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0, + speed_template_32_48, num_mb); + test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + break; + + case 608: + test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0, + speed_template_16_32, num_mb); + test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0, + speed_template_32_48, num_mb); + test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0, + speed_template_32_48, num_mb); + test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0, + speed_template_32_64, num_mb); + break; + + case 609: + test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0, + speed_template_8_32, num_mb); + test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0, + speed_template_8_32, num_mb); + test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0, + speed_template_8_32, num_mb); + test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, + speed_template_8_32, num_mb); + test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0, + speed_template_8_32, num_mb); + test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0, + speed_template_8_32, num_mb); + break; + case 1000: test_available(); break; -- cgit v1.2.3 From aa6bade9aa65fa4e09010f6c744f4727fce612de Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 17 Dec 2017 08:29:05 +0000 Subject: crypto: tcrypt - add multibuf aead speed test The performance of some aead tfm providers is affected by the amount of parallelism possible with the processing. 
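(Aside: these AEAD tests reuse the sg_init_aead() layout shown in the diff below: slot 0 carries the associated data, then whole pages of payload, then an optional partial page. A stand-alone sketch of that arithmetic, with PAGE_SZ and XBUF as stand-ins for PAGE_SIZE and XBUFSIZE, including the exact-multiple case the first patch in this series fixed:)

#include <stdio.h>

#define PAGE_SZ 4096U	/* stand-in for PAGE_SIZE */
#define XBUF 8		/* stand-in for XBUFSIZE */

static void layout(unsigned int buflen)
{
	int np = (buflen + PAGE_SZ - 1) / PAGE_SZ;
	int rem = buflen % PAGE_SZ;

	if (np > XBUF) {	/* clamp oversized requests, as the code does */
		rem = PAGE_SZ;
		np = XBUF;
	}
	if (rem)
		np--;		/* last payload entry is a partial page */

	printf("buflen %5u: 1 assoc + %d full page(s) + %s\n",
	       buflen, np, rem ? "1 partial" : "no partial entry");
}

int main(void)
{
	layout(PAGE_SZ / 2);	/* 1 assoc + 0 full + 1 partial */
	layout(2 * PAGE_SZ);	/* exact multiple: no partial entry */
	layout(2 * PAGE_SZ + 3);
	return 0;
}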
Introduce an async aead concurrent multiple buffer processing speed test to be able to test performance of such tfm providers. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 437 ++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 378 insertions(+), 59 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index d617c195..58e3344d 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -80,6 +80,66 @@ static char *check[] = { NULL }; +static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; +static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; + +#define XBUFSIZE 8 +#define MAX_IVLEN 32 + +static int testmgr_alloc_buf(char *buf[XBUFSIZE]) +{ + int i; + + for (i = 0; i < XBUFSIZE; i++) { + buf[i] = (void *)__get_free_page(GFP_KERNEL); + if (!buf[i]) + goto err_free_buf; + } + + return 0; + +err_free_buf: + while (i-- > 0) + free_page((unsigned long)buf[i]); + + return -ENOMEM; +} + +static void testmgr_free_buf(char *buf[XBUFSIZE]) +{ + int i; + + for (i = 0; i < XBUFSIZE; i++) + free_page((unsigned long)buf[i]); +} + +static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], + unsigned int buflen, const void *assoc, + unsigned int aad_size) +{ + int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; + int k, rem; + + if (np > XBUFSIZE) { + rem = PAGE_SIZE; + np = XBUFSIZE; + } else { + rem = buflen % PAGE_SIZE; + } + + sg_init_table(sg, np + 1); + + sg_set_buf(&sg[0], assoc, aad_size); + + if (rem) + np--; + for (k = 0; k < np; k++) + sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); + + if (rem) + sg_set_buf(&sg[k + 1], xbuf[k], rem); +} + static inline int do_one_aead_op(struct aead_request *req, int ret) { struct crypto_wait *wait = req->base.data; @@ -87,8 +147,44 @@ static inline int do_one_aead_op(struct aead_request *req, int ret) return crypto_wait_req(ret, wait); } -static int test_aead_jiffies(struct aead_request *req, int enc, - int blen, int secs) +struct test_mb_aead_data { + struct scatterlist sg[XBUFSIZE]; + struct scatterlist sgout[XBUFSIZE]; + struct aead_request *req; + struct crypto_wait wait; + char *xbuf[XBUFSIZE]; + char *xoutbuf[XBUFSIZE]; + char *axbuf[XBUFSIZE]; +}; + +static int do_mult_aead_op(struct test_mb_aead_data *data, int enc, + u32 num_mb) +{ + int i, rc[num_mb], err = 0; + + /* Fire up a bunch of concurrent requests */ + for (i = 0; i < num_mb; i++) { + if (enc == ENCRYPT) + rc[i] = crypto_aead_encrypt(data[i].req); + else + rc[i] = crypto_aead_decrypt(data[i].req); + } + + /* Wait for all requests to finish */ + for (i = 0; i < num_mb; i++) { + rc[i] = crypto_wait_req(rc[i], &data[i].wait); + + if (rc[i]) { + pr_info("concurrent request %d error %d\n", i, rc[i]); + err = rc[i]; + } + } + + return err; +} + +static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc, + int blen, int secs, u32 num_mb) { unsigned long start, end; int bcount; @@ -96,21 +192,18 @@ static int test_aead_jiffies(struct aead_request *req, int enc, for (start = jiffies, end = start + secs * HZ, bcount = 0; time_before(jiffies, end); bcount++) { - if (enc) - ret = do_one_aead_op(req, crypto_aead_encrypt(req)); - else - ret = do_one_aead_op(req, crypto_aead_decrypt(req)); - + ret = do_mult_aead_op(data, enc, num_mb); if (ret) return ret; } - printk("%d operations in %d seconds (%ld bytes)\n", - bcount, secs, (long)bcount * blen); + pr_cont("%d operations in %d seconds (%ld bytes)\n", + bcount * num_mb, secs, (long)bcount * blen * num_mb); return 0; } -static int test_aead_cycles(struct 
aead_request *req, int enc, int blen) +static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc, + int blen, u32 num_mb) { unsigned long cycles = 0; int ret = 0; @@ -118,11 +211,7 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) /* Warm-up run. */ for (i = 0; i < 4; i++) { - if (enc) - ret = do_one_aead_op(req, crypto_aead_encrypt(req)); - else - ret = do_one_aead_op(req, crypto_aead_decrypt(req)); - + ret = do_mult_aead_op(data, enc, num_mb); if (ret) goto out; } @@ -132,10 +221,7 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) cycles_t start, end; start = get_cycles(); - if (enc) - ret = do_one_aead_op(req, crypto_aead_encrypt(req)); - else - ret = do_one_aead_op(req, crypto_aead_decrypt(req)); + ret = do_mult_aead_op(data, enc, num_mb); end = get_cycles(); if (ret) @@ -146,70 +232,276 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) out: if (ret == 0) - printk("1 operation in %lu cycles (%d bytes)\n", - (cycles + 4) / 8, blen); + pr_cont("1 operation in %lu cycles (%d bytes)\n", + (cycles + 4) / (8 * num_mb), blen); return ret; } -static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; -static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; +static void test_mb_aead_speed(const char *algo, int enc, int secs, + struct aead_speed_template *template, + unsigned int tcount, u8 authsize, + unsigned int aad_size, u8 *keysize, u32 num_mb) +{ + struct test_mb_aead_data *data; + struct crypto_aead *tfm; + unsigned int i, j, iv_len; + const char *key; + const char *e; + void *assoc; + u32 *b_size; + char *iv; + int ret; -#define XBUFSIZE 8 -#define MAX_IVLEN 32 -static int testmgr_alloc_buf(char *buf[XBUFSIZE]) -{ - int i; + if (aad_size >= PAGE_SIZE) { + pr_err("associate data length (%u) too big\n", aad_size); + return; + } - for (i = 0; i < XBUFSIZE; i++) { - buf[i] = (void *)__get_free_page(GFP_KERNEL); - if (!buf[i]) - goto err_free_buf; + iv = kzalloc(MAX_IVLEN, GFP_KERNEL); + if (!iv) + return; + + if (enc == ENCRYPT) + e = "encryption"; + else + e = "decryption"; + + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); + if (!data) + goto out_free_iv; + + tfm = crypto_alloc_aead(algo, 0, 0); + if (IS_ERR(tfm)) { + pr_err("failed to load transform for %s: %ld\n", + algo, PTR_ERR(tfm)); + goto out_free_data; } - return 0; + ret = crypto_aead_setauthsize(tfm, authsize); -err_free_buf: - while (i-- > 0) - free_page((unsigned long)buf[i]); + for (i = 0; i < num_mb; ++i) + if (testmgr_alloc_buf(data[i].xbuf)) { + while (i--) + testmgr_free_buf(data[i].xbuf); + goto out_free_tfm; + } - return -ENOMEM; + for (i = 0; i < num_mb; ++i) + if (testmgr_alloc_buf(data[i].axbuf)) { + while (i--) + testmgr_free_buf(data[i].axbuf); + goto out_free_xbuf; + } + + for (i = 0; i < num_mb; ++i) + if (testmgr_alloc_buf(data[i].xoutbuf)) { + while (i--) + testmgr_free_buf(data[i].axbuf); + goto out_free_axbuf; + } + + for (i = 0; i < num_mb; ++i) { + data[i].req = aead_request_alloc(tfm, GFP_KERNEL); + if (!data[i].req) { + pr_err("alg: skcipher: Failed to allocate request for %s\n", + algo); + while (i--) + aead_request_free(data[i].req); + goto out_free_xoutbuf; + } + } + + for (i = 0; i < num_mb; ++i) { + crypto_init_wait(&data[i].wait); + aead_request_set_callback(data[i].req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &data[i].wait); + } + + pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, + get_driver_name(crypto_aead, tfm), e); + + i = 0; + do { + b_size = aead_sizes; + do 
{ + if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) { + pr_err("template (%u) too big for bufufer (%lu)\n", + authsize + *b_size, + XBUFSIZE * PAGE_SIZE); + goto out; + } + + pr_info("test %u (%d bit key, %d byte blocks): ", i, + *keysize * 8, *b_size); + + /* Set up tfm global state, i.e. the key */ + + memset(tvmem[0], 0xff, PAGE_SIZE); + key = tvmem[0]; + for (j = 0; j < tcount; j++) { + if (template[j].klen == *keysize) { + key = template[j].key; + break; + } + } + + crypto_aead_clear_flags(tfm, ~0); + + ret = crypto_aead_setkey(tfm, key, *keysize); + if (ret) { + pr_err("setkey() failed flags=%x\n", + crypto_aead_get_flags(tfm)); + goto out; + } + + iv_len = crypto_aead_ivsize(tfm); + if (iv_len) + memset(iv, 0xff, iv_len); + + /* Now setup per request stuff, i.e. buffers */ + + for (j = 0; j < num_mb; ++j) { + struct test_mb_aead_data *cur = &data[j]; + + assoc = cur->axbuf[0]; + memset(assoc, 0xff, aad_size); + + sg_init_aead(cur->sg, cur->xbuf, + *b_size + (enc ? 0 : authsize), + assoc, aad_size); + + sg_init_aead(cur->sgout, cur->xoutbuf, + *b_size + (enc ? authsize : 0), + assoc, aad_size); + + aead_request_set_ad(cur->req, aad_size); + + if (!enc) { + + aead_request_set_crypt(cur->req, + cur->sgout, + cur->sg, + *b_size, iv); + ret = crypto_aead_encrypt(cur->req); + ret = do_one_aead_op(cur->req, ret); + + if (ret) { + pr_err("calculating auth failed failed (%d)\n", + ret); + break; + } + } + + aead_request_set_crypt(cur->req, cur->sg, + cur->sgout, *b_size + + (enc ? 0 : authsize), + iv); + + } + + if (secs) + ret = test_mb_aead_jiffies(data, enc, *b_size, + secs, num_mb); + else + ret = test_mb_aead_cycles(data, enc, *b_size, + num_mb); + + if (ret) { + pr_err("%s() failed return code=%d\n", e, ret); + break; + } + b_size++; + i++; + } while (*b_size); + keysize++; + } while (*keysize); + +out: + for (i = 0; i < num_mb; ++i) + aead_request_free(data[i].req); +out_free_xoutbuf: + for (i = 0; i < num_mb; ++i) + testmgr_free_buf(data[i].xoutbuf); +out_free_axbuf: + for (i = 0; i < num_mb; ++i) + testmgr_free_buf(data[i].axbuf); +out_free_xbuf: + for (i = 0; i < num_mb; ++i) + testmgr_free_buf(data[i].xbuf); +out_free_tfm: + crypto_free_aead(tfm); +out_free_data: + kfree(data); +out_free_iv: + kfree(iv); } -static void testmgr_free_buf(char *buf[XBUFSIZE]) +static int test_aead_jiffies(struct aead_request *req, int enc, + int blen, int secs) { - int i; + unsigned long start, end; + int bcount; + int ret; - for (i = 0; i < XBUFSIZE; i++) - free_page((unsigned long)buf[i]); + for (start = jiffies, end = start + secs * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + if (enc) + ret = do_one_aead_op(req, crypto_aead_encrypt(req)); + else + ret = do_one_aead_op(req, crypto_aead_decrypt(req)); + + if (ret) + return ret; + } + + printk("%d operations in %d seconds (%ld bytes)\n", + bcount, secs, (long)bcount * blen); + return 0; } -static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], - unsigned int buflen, const void *assoc, - unsigned int aad_size) +static int test_aead_cycles(struct aead_request *req, int enc, int blen) { - int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; - int k, rem; + unsigned long cycles = 0; + int ret = 0; + int i; - if (np > XBUFSIZE) { - rem = PAGE_SIZE; - np = XBUFSIZE; - } else { - rem = buflen % PAGE_SIZE; + /* Warm-up run. 
*/ + for (i = 0; i < 4; i++) { + if (enc) + ret = do_one_aead_op(req, crypto_aead_encrypt(req)); + else + ret = do_one_aead_op(req, crypto_aead_decrypt(req)); + + if (ret) + goto out; } - sg_init_table(sg, np + 1); + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; - sg_set_buf(&sg[0], assoc, aad_size); + start = get_cycles(); + if (enc) + ret = do_one_aead_op(req, crypto_aead_encrypt(req)); + else + ret = do_one_aead_op(req, crypto_aead_decrypt(req)); + end = get_cycles(); - if (rem) - np--; - for (k = 0; k < np; k++) - sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); + if (ret) + goto out; - if (rem) - sg_set_buf(&sg[k + 1], xbuf[k], rem); + cycles += end - start; + } + +out: + if (ret == 0) + printk("1 operation in %lu cycles (%d bytes)\n", + (cycles + 4) / 8, blen); + + return ret; } static void test_aead_speed(const char *algo, int enc, unsigned int secs, @@ -1912,6 +2204,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) speed_template_32); break; + case 215: + test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, + 0, 16, 16, aead_speed_template_20, num_mb); + test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8, + speed_template_16_24_32, num_mb); + test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL, + 0, 16, 16, aead_speed_template_20, num_mb); + test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8, + speed_template_16_24_32, num_mb); + break; + + case 216: + test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0, + 16, 16, aead_speed_template_19, num_mb); + test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0, + 16, 16, aead_speed_template_19, num_mb); + break; + + case 217: + test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, + sec, NULL, 0, 16, 8, aead_speed_template_36, + num_mb); + test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, + sec, NULL, 0, 16, 8, aead_speed_template_36, + num_mb); + break; + case 300: if (alg) { test_hash_speed(alg, sec, generic_hash_speed_template); -- cgit v1.2.3 From 2431f0da543e8c93333ed3b3c8ab9a1158404959 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 10:00:46 -0600 Subject: crypto: algapi - convert cra_refcnt to refcount_t Reference counters should use refcount_t rather than atomic_t, since the refcount_t implementation can prevent overflows, reducing the exploitability of reference leak bugs. crypto_alg.cra_refcnt is a reference counter with the usual semantics, so switch it over to refcount_t.
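As a minimal sketch of the conversion pattern for readers less familiar with the refcount API (the structure and helpers below are hypothetical illustrations, not code from this patch): unlike atomic_t, refcount_t is designed to saturate and warn on overflow or underflow rather than silently wrap.

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t refs;                /* was: atomic_t refs; */
};

static struct obj *obj_get(struct obj *o)
{
        refcount_inc(&o->refs);         /* was: atomic_inc(&o->refs); */
        return o;
}

static void obj_put(struct obj *o)
{
        /* was: atomic_dec_and_test(); the refcount variant warns on
         * underflow and saturates instead of wrapping past zero */
        if (refcount_dec_and_test(&o->refs))
                kfree(o);
}

The crypto_alg_get()/crypto_alg_put() helpers in crypto/internal.h, visible in the diff below, follow exactly this shape.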
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algapi.c | 8 ++++---- crypto/api.c | 2 +- crypto/crypto_user.c | 4 ++-- crypto/internal.h | 4 ++-- crypto/proc.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 60d7366e..8084a76e 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -62,7 +62,7 @@ static int crypto_check_alg(struct crypto_alg *alg) if (alg->cra_priority < 0) return -EINVAL; - atomic_set(&alg->cra_refcnt, 1); + refcount_set(&alg->cra_refcnt, 1); return crypto_set_driver_name(alg); } @@ -224,7 +224,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) if (!larval->adult) goto free_larval; - atomic_set(&larval->alg.cra_refcnt, 1); + refcount_set(&larval->alg.cra_refcnt, 1); memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME); larval->alg.cra_priority = alg->cra_priority; @@ -399,7 +399,7 @@ int crypto_unregister_alg(struct crypto_alg *alg) if (ret) return ret; - BUG_ON(atomic_read(&alg->cra_refcnt) != 1); + BUG_ON(refcount_read(&alg->cra_refcnt) != 1); if (alg->cra_destroy) alg->cra_destroy(alg); @@ -490,7 +490,7 @@ void crypto_unregister_template(struct crypto_template *tmpl) up_write(&crypto_alg_sem); hlist_for_each_entry_safe(inst, n, list, list) { - BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); + BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1); crypto_free_instance(inst); } crypto_remove_final(&users); diff --git a/crypto/api.c b/crypto/api.c index 6da802d7..70a894e5 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, if (IS_ERR(larval)) return ERR_CAST(larval); - atomic_set(&larval->alg.cra_refcnt, 2); + refcount_set(&larval->alg.cra_refcnt, 2); down_write(&crypto_alg_sem); alg = __crypto_alg_lookup(name, type, mask); diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 0dbe2be7..5c291eed 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -169,7 +169,7 @@ static int crypto_report_one(struct crypto_alg *alg, ualg->cru_type = 0; ualg->cru_mask = 0; ualg->cru_flags = alg->cra_flags; - ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); + ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) goto nla_put_failure; @@ -387,7 +387,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh, goto drop_alg; err = -EBUSY; - if (atomic_read(&alg->cra_refcnt) > 2) + if (refcount_read(&alg->cra_refcnt) > 2) goto drop_alg; err = crypto_unregister_instance((struct crypto_instance *)alg); diff --git a/crypto/internal.h b/crypto/internal.h index ae65e5fc..1388af6d 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -105,13 +105,13 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) { - atomic_inc(&alg->cra_refcnt); + refcount_inc(&alg->cra_refcnt); return alg; } static inline void crypto_alg_put(struct crypto_alg *alg) { - if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) + if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) alg->cra_destroy(alg); } diff --git a/crypto/proc.c b/crypto/proc.c index 2cc10c96..822fcef6 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -46,7 +46,7 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "driver : %s\n", alg->cra_driver_name); seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 
seq_printf(m, "priority : %d\n", alg->cra_priority); - seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt)); + seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt)); seq_printf(m, "selftest : %s\n", (alg->cra_flags & CRYPTO_ALG_TESTED) ? "passed" : "unknown"); -- cgit v1.2.3 From e9d99c44f900c6ec4c9fc78c96e6e7a34e3fd898 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 10:06:46 -0600 Subject: crypto: algapi - remove unused notifications There is a message posted to the crypto notifier chain when an algorithm is unregistered, and when a template is registered or unregistered. But nothing is listening for those messages; currently there are only listeners for the algorithm request and registration messages. Get rid of these unused notifications for now. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algapi.c | 5 ----- crypto/internal.h | 3 --- 2 files changed, 8 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 8084a76e..9895cafc 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -123,7 +123,6 @@ static void crypto_remove_instance(struct crypto_instance *inst, if (!tmpl || !crypto_tmpl_get(tmpl)) return; - crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); list_move(&inst->alg.cra_list, list); hlist_del(&inst->list); inst->alg.cra_destroy = crypto_destroy_instance; @@ -380,7 +379,6 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) alg->cra_flags |= CRYPTO_ALG_DEAD; - crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); list_del_init(&alg->cra_list); crypto_remove_spawns(alg, list, NULL); @@ -458,7 +456,6 @@ int crypto_register_template(struct crypto_template *tmpl) } list_add(&tmpl->list, &crypto_template_list); - crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl); err = 0; out: up_write(&crypto_alg_sem); @@ -485,8 +482,6 @@ void crypto_unregister_template(struct crypto_template *tmpl) BUG_ON(err); } - crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl); - up_write(&crypto_alg_sem); hlist_for_each_entry_safe(inst, n, list, list) { diff --git a/crypto/internal.h b/crypto/internal.h index 1388af6d..5ac27fba 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -30,9 +30,6 @@ enum { CRYPTO_MSG_ALG_REQUEST, CRYPTO_MSG_ALG_REGISTER, - CRYPTO_MSG_ALG_UNREGISTER, - CRYPTO_MSG_TMPL_REGISTER, - CRYPTO_MSG_TMPL_UNREGISTER, }; struct crypto_instance; -- cgit v1.2.3 From 040b6d0af003596fb42f282e37372b1dcd9ff0d9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 10:10:24 -0600 Subject: crypto: poly1305 - use unaligned access macros to output digest Currently the only part of poly1305-generic which is assuming special alignment is the part where the final digest is written. Switch this over to the unaligned access macros so that we'll be able to remove the cra_alignmask. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/poly1305_generic.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index b1c2d57d..d752901b 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -210,7 +210,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update); int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) { struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - __le32 *mac = (__le32 *)dst; u32 h0, h1, h2, h3, h4; u32 g0, g1, g2, g3, g4; u32 mask; @@ -267,10 +266,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) h3 = (h3 >> 18) | (h4 << 8); /* mac = (h + s) % (2^128) */ - f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f); - f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f); - f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f); - f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f); + f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0); + f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4); + f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8); + f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12); return 0; } -- cgit v1.2.3 From 9ccd67a5d3370f458d7a90e38da9450e2ec7bf77 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 29 Dec 2017 10:10:25 -0600 Subject: crypto: poly1305 - remove cra_alignmask Now that nothing in poly1305-generic assumes any special alignment, remove the cra_alignmask so that the crypto API does not have to unnecessarily align the buffers. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/poly1305_generic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index d752901b..d92617ae 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -287,7 +287,6 @@ static struct shash_alg poly1305_alg = { .cra_driver_name = "poly1305-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_alignmask = sizeof(u32) - 1, .cra_blocksize = POLY1305_BLOCK_SIZE, .cra_module = THIS_MODULE, }, -- cgit v1.2.3 From 40b6b8ae2a3d759238651c27393035b794ea7b90 Mon Sep 17 00:00:00 2001 From: Joey Pabalinas Date: Mon, 1 Jan 2018 10:40:14 -1000 Subject: crypto: testmgr - change `guard` to unsigned char When char is signed, storing the values 0xba (186) and 0xad (173) in the `guard` array produces signed overflow. Change the type of `guard` to static unsigned char to correct undefined behavior and reduce function stack usage. Signed-off-by: Joey Pabalinas Signed-off-by: Herbert Xu --- crypto/testmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 29d7020b..44a85d4b 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -185,7 +185,7 @@ static int ahash_partial_update(struct ahash_request **preq, char *state; struct ahash_request *req; int statesize, ret = -EINVAL; - const char guard[] = { 0x00, 0xba, 0xad, 0x00 }; + static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 }; req = *preq; statesize = crypto_ahash_statesize( -- cgit v1.2.3 From 5bbed7cae7b557883d1b01cc6a7890a7d2c40ff8 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Tue, 2 Jan 2018 08:55:25 +0100 Subject: crypto: af_alg - whitelist mask and type The user space interface allows specifying the type and mask field used to allocate the cipher. Only a subset of the possible flags are intended for user space. Therefore, white-list the allowed flags. 
In case the user space caller uses at least one non-allowed flag, EINVAL is returned. Reported-by: syzbot Cc: Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/af_alg.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 35d4dcea..5231f421 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - const u32 forbidden = CRYPTO_ALG_INTERNAL; + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct sockaddr_alg *sa = (void *)uaddr; @@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) void *private; int err; + /* If caller uses non-allowed flag, return error. */ + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) + return -EINVAL; + if (sock->state == SS_CONNECTED) return -EINVAL; @@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (IS_ERR(type)) return PTR_ERR(type); - private = type->bind(sa->salg_name, - sa->salg_feat & ~forbidden, - sa->salg_mask & ~forbidden); + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); if (IS_ERR(private)) { module_put(type->owner); return PTR_ERR(private); -- cgit v1.2.3 From f3193325d890c69341e38e40f571d8f6eb9aa329 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 2 Jan 2018 09:21:06 +0000 Subject: crypto: tcrypt - fix spelling mistake: "bufufer"-> "buffer" Trivial fix to spelling mistakes in pr_err error message text. Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 58e3344d..f61d2f40 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -328,7 +328,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs, b_size = aead_sizes; do { if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) { - pr_err("template (%u) too big for bufufer (%lu)\n", + pr_err("template (%u) too big for buffer (%lu)\n", authsize + *b_size, XBUFSIZE * PAGE_SIZE); goto out; @@ -1269,7 +1269,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, b_size = block_sizes; do { if (*b_size > XBUFSIZE * PAGE_SIZE) { - pr_err("template (%u) too big for bufufer (%lu)\n", + pr_err("template (%u) too big for buffer (%lu)\n", *b_size, XBUFSIZE * PAGE_SIZE); goto out; } -- cgit v1.2.3 From 5dfe655511fbf64c805dd6a0bde3b66d4f5d4f58 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 2 Jan 2018 15:43:04 +0000 Subject: crypto: tcrypt - free xoutbuf instead of axbuf There seems to be a cut-n-paste bug with the name of the buffer being free'd, xoutbuf should be used instead of axbuf. 
Detected by CoverityScan, CID#1463420 ("Copy-paste error") Fixes: aa6bade9aa65 ("crypto: tcrypt - add multibuf aead speed test") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index f61d2f40..14213a09 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -298,7 +298,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs, for (i = 0; i < num_mb; ++i) if (testmgr_alloc_buf(data[i].xoutbuf)) { while (i--) - testmgr_free_buf(data[i].axbuf); + testmgr_free_buf(data[i].xoutbuf); goto out_free_axbuf; } -- cgit v1.2.3 From f810a26d418173436aba9e6ed61b1f5f77016b20 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:22 -0800 Subject: crypto: hash - introduce crypto_hash_alg_has_setkey() Templates that use an shash spawn can use crypto_shash_alg_has_setkey() to determine whether the underlying algorithm requires a key or not. But there was no corresponding function for ahash spawns. Add it. Note that the new function actually has to support both shash and ahash algorithms, since the ahash API can be used with either. Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crypto/ahash.c b/crypto/ahash.c index 3a35d67d..d2c8895b 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -649,5 +649,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(ahash_attr_alg); +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +{ + struct crypto_alg *alg = &halg->base; + + if (alg->cra_type != &crypto_ahash_type) + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + + return __crypto_ahash_alg(alg)->setkey != NULL; +} +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); -- cgit v1.2.3 From 48420a717b6a7d05a8efcd2744aaaa9760d911e9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:23 -0800 Subject: crypto: cryptd - pass through absence of ->setkey() When the cryptd template is used to wrap an unkeyed hash algorithm, don't install a ->setkey() method to the cryptd instance. This change is necessary for cryptd to keep working with unkeyed hash algorithms once we start enforcing that ->setkey() is called when present. Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cryptd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 552e3a86..457ae3e6 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -914,7 +914,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = cryptd_hash_finup_enqueue; inst->alg.export = cryptd_hash_export; inst->alg.import = cryptd_hash_import; - inst->alg.setkey = cryptd_hash_setkey; + if (crypto_shash_alg_has_setkey(salg)) + inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); -- cgit v1.2.3 From f461f04ef38939af2cf23efadea2820f4edcdc6e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:24 -0800 Subject: crypto: mcryptd - pass through absence of ->setkey() When the mcryptd template is used to wrap an unkeyed hash algorithm, don't install a ->setkey() method to the mcryptd instance. 
This change is necessary for mcryptd to keep working with unkeyed hash algorithms once we start enforcing that ->setkey() is called when present. Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/mcryptd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 29083828..ace346b9 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -534,7 +534,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = mcryptd_hash_finup_enqueue; inst->alg.export = mcryptd_hash_export; inst->alg.import = mcryptd_hash_import; - inst->alg.setkey = mcryptd_hash_setkey; + if (crypto_hash_alg_has_setkey(halg)) + inst->alg.setkey = mcryptd_hash_setkey; inst->alg.digest = mcryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); -- cgit v1.2.3 From d4a4e0378f21584341e77be1a6f6ba2b640f5780 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:25 -0800 Subject: crypto: poly1305 - remove ->setkey() method Since Poly1305 requires a nonce per invocation, the Linux kernel implementations of Poly1305 don't use the crypto API's keying mechanism and instead expect the key and nonce as the first 32 bytes of the data. But ->setkey() is still defined as a stub returning an error code. This prevents Poly1305 from being used through AF_ALG and will also break it completely once we start enforcing that all crypto API users (not just AF_ALG) call ->setkey() if present. Fix it by removing crypto_poly1305_setkey(), leaving ->setkey as NULL. Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/poly1305_generic.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index d92617ae..b7a3a061 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc) } EXPORT_SYMBOL_GPL(crypto_poly1305_init); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - /* Poly1305 requires a unique key for each tag, which implies that - * we can't set it on the tfm that gets accessed by multiple users - * simultaneously. Instead we expect the key as the first 32 bytes in - * the update() call. */ - return -ENOTSUPP; -} -EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); - static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ @@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) dctx->s[3] = get_unaligned_le32(key + 12); } +/* + * Poly1305 requires a unique key for each tag, which implies that we can't set + * it on the tfm that gets accessed by multiple users simultaneously. Instead we + * expect the key as the first 32 bytes in the update() call. 
+ */ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -280,7 +274,6 @@ static struct shash_alg poly1305_alg = { .init = crypto_poly1305_init, .update = crypto_poly1305_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_desc_ctx), .base = { .cra_name = "poly1305", -- cgit v1.2.3 From c293080398aa33d491ebe6f8b4cd6baf1497cc1a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:26 -0800 Subject: crypto: hash - annotate algorithms taking optional key We need to consistently enforce that keyed hashes cannot be used without setting the key. To do this we need a reliable way to determine whether a given hash algorithm is keyed or not. AF_ALG currently does this by checking for the presence of a ->setkey() method. However, this is actually slightly broken because the CRC-32 algorithms implement ->setkey() but can also be used without a key. (The CRC-32 "key" is not actually a cryptographic key but rather represents the initial state. If not overridden, then a default initial state is used.) Prepare to fix this by introducing a flag CRYPTO_ALG_OPTIONAL_KEY which indicates that the algorithm has a ->setkey() method, but it is not required to be called. Then set it on all the CRC-32 algorithms. The same also applies to the Adler-32 implementation in Lustre. Also, the cryptd and mcryptd templates have to pass through the flag from their underlying algorithm. Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/crc32_generic.c | 1 + crypto/crc32c_generic.c | 1 + crypto/cryptd.c | 7 +++---- crypto/mcryptd.c | 7 +++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index aa2a25fc..718cbce8 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 4c0a0e27..37232039 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -146,6 +146,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct chksum_ctx), diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 457ae3e6..addca7ba 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -896,10 +896,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = salg->digestsize; inst->alg.halg.statesize = salg->statesize; diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index ace346b9..fe5129d6 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -516,10 +516,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= 
CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = halg->digestsize; inst->alg.halg.statesize = halg->statesize; -- cgit v1.2.3 From 40c60e1610b63c7499b2df6df79547d691a0c72f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:27 -0800 Subject: crypto: hash - prevent using keyed hashes without setting key Currently, almost none of the keyed hash algorithms check whether a key has been set before proceeding. Some algorithms are okay with this and will effectively just use a key of all 0's or some other bogus default. However, others will severely break, as demonstrated using "hmac(sha3-512-generic)", the unkeyed use of which causes a kernel crash via a (potentially exploitable) stack buffer overflow. A while ago, this problem was solved for AF_ALG by pairing each hash transform with a 'has_key' bool. However, there are still other places in the kernel where userspace can specify an arbitrary hash algorithm by name, and the kernel uses it as unkeyed hash without checking whether it is really unkeyed. Examples of this include: - KEYCTL_DH_COMPUTE, via the KDF extension - dm-verity - dm-crypt, via the ESSIV support - dm-integrity, via the "internal hash" mode with no key given - drbd (Distributed Replicated Block Device) This bug is especially bad for KEYCTL_DH_COMPUTE as that requires no privileges to call. Fix the bug for all users by adding a flag CRYPTO_TFM_NEED_KEY to the ->crt_flags of each hash transform that indicates whether the transform still needs to be keyed or not. Then, make the hash init, import, and digest functions return -ENOKEY if the key is still needed. The new flag also replaces the 'has_key' bool which algif_hash was previously using, thereby simplifying the algif_hash implementation. 
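From an in-kernel caller's point of view, the new behaviour looks roughly like the following sketch (a hypothetical helper around a keyed hash such as "hmac(sha256)"; the tfm is assumed to come from crypto_alloc_shash(), and error handling is trimmed):

#include <crypto/hash.h>
#include <linux/errno.h>

static int demo_digest(struct crypto_shash *tfm, const u8 *key,
                       unsigned int keylen, const u8 *data,
                       unsigned int len, u8 *out)
{
        SHASH_DESC_ON_STACK(desc, tfm);
        int err;

        desc->tfm = tfm;
        desc->flags = 0;

        /* no key set yet: CRYPTO_TFM_NEED_KEY is still set, so this
         * now fails cleanly with -ENOKEY instead of misbehaving */
        err = crypto_shash_digest(desc, data, len, out);
        if (err != -ENOKEY)
                return err;

        err = crypto_shash_setkey(tfm, key, keylen); /* clears NEED_KEY */
        if (err)
                return err;

        return crypto_shash_digest(desc, data, len, out);
}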
Reported-by: syzbot Cc: stable@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 22 +++++++++++++++++----- crypto/algif_hash.c | 52 +++++++++++----------------------------------------- crypto/shash.c | 25 +++++++++++++++++++++---- 3 files changed, 49 insertions(+), 50 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index d2c8895b..266fc1d6 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned long alignmask = crypto_ahash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return ahash_setkey_unaligned(tfm, key, keylen); + err = ahash_setkey_unaligned(tfm, key, keylen); + else + err = tfm->setkey(tfm, key, keylen); + + if (err) + return err; - return tfm->setkey(tfm, key, keylen); + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); @@ -368,7 +375,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_ahash_op(req, tfm->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -450,7 +462,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); hash->setkey = ahash_nosetkey; - hash->has_setkey = false; hash->export = ahash_no_export; hash->import = ahash_no_import; @@ -465,7 +476,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (alg->setkey) { hash->setkey = alg->setkey; - hash->has_setkey = true; + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); } if (alg->export) hash->export = alg->export; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 76d2e716..6c9b1927 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -34,11 +34,6 @@ struct hash_ctx { struct ahash_request req; }; -struct algif_hash_tfm { - struct crypto_ahash *hash; - bool has_key; -}; - static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct algif_hash_tfm *tfm; + struct crypto_ahash *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (!tfm->has_key) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; if (!pask->refcnt++) @@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = { static void *hash_bind(const char *name, u32 type, u32 mask) { - struct algif_hash_tfm *tfm; - struct crypto_ahash *hash; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - hash = crypto_alloc_ahash(name, type, mask); - if (IS_ERR(hash)) { - kfree(tfm); - return ERR_CAST(hash); - } - - tfm->hash = hash; - - return tfm; + return crypto_alloc_ahash(name, type, mask); } static void hash_release(void *private) { - struct algif_hash_tfm *tfm = private; - - crypto_free_ahash(tfm->hash); - kfree(tfm); + crypto_free_ahash(private); } static int hash_setkey(void *private, const u8 *key, unsigned int keylen) { - struct algif_hash_tfm *tfm = private; - int err; - - err 
= crypto_ahash_setkey(tfm->hash, key, keylen); - tfm->has_key = !err; - - return err; + return crypto_ahash_setkey(private, key, keylen); } static void hash_sock_destruct(struct sock *sk) @@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk) static int hash_accept_parent_nokey(void *private, struct sock *sk) { - struct hash_ctx *ctx; + struct crypto_ahash *tfm = private; struct alg_sock *ask = alg_sk(sk); - struct algif_hash_tfm *tfm = private; - struct crypto_ahash *hash = tfm->hash; - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); + struct hash_ctx *ctx; + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ask->private = ctx; - ahash_request_set_tfm(&ctx->req, hash); + ahash_request_set_tfm(&ctx->req, tfm); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &ctx->wait); @@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) static int hash_accept_parent(void *private, struct sock *sk) { - struct algif_hash_tfm *tfm = private; + struct crypto_ahash *tfm = private; - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return hash_accept_parent_nokey(private, sk); diff --git a/crypto/shash.c b/crypto/shash.c index e849d3ee..5d732c6b 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, { struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return shash_setkey_unaligned(tfm, key, keylen); + err = shash_setkey_unaligned(tfm, key, keylen); + else + err = shash->setkey(tfm, key, keylen); + + if (err) + return err; - return shash->setkey(tfm, key, keylen); + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_shash_setkey); @@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (((unsigned long)data | (unsigned long)out) & alignmask) return shash_digest_unaligned(desc, data, len, out); @@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->digest = shash_async_digest; crt->setkey = shash_async_setkey; - crt->has_setkey = alg->setkey != shash_no_setkey; + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); if (alg->export) crt->export = shash_async_export; @@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); + struct shash_alg *alg = crypto_shash_alg(hash); + + hash->descsize = alg->descsize; + + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); - hash->descsize = crypto_shash_alg(hash)->descsize; return 0; } -- cgit v1.2.3 From 76d35e2d799b3c594d5fde35f7e5f6a9a8d7e88d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:28 -0800 Subject: crypto: ghash - remove checks for key being set Now that the crypto API prevents a keyed hash from being used without 
setting the key, there's no need for GHASH to do this check itself. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ghash-generic.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index 12ad3e3a..1bffb3f7 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -56,9 +56,6 @@ static int ghash_update(struct shash_desc *desc, struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; - if (!ctx->gf128) - return -ENOKEY; - if (dctx->bytes) { int n = min(srclen, dctx->bytes); u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); @@ -111,9 +108,6 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; - if (!ctx->gf128) - return -ENOKEY; - ghash_flush(ctx, dctx); memcpy(dst, buf, GHASH_BLOCK_SIZE); -- cgit v1.2.3 From afef2cfab1cc0c1a6528fb266c6c609f5d028900 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:29 -0800 Subject: crypto: skcipher - prevent using skciphers without setting key Similar to what was done for the hash API, update the skcipher API to track whether each transform has been keyed, and reject encryption/decryption if a key is needed but one hasn't been set. This isn't as important as the equivalent fix for the hash API because symmetric ciphers almost always require a key (the "null cipher" is the only exception), so are unlikely to be used without one. Still, tracking the key will prevent accidental unkeyed use. algif_skcipher also had to track the key anyway, so the new flag replaces that and simplifies the algif_skcipher implementation. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/algif_skcipher.c | 59 +++++++++++-------------------------------------- crypto/skcipher.c | 30 +++++++++++++++++++++---- 2 files changed, 39 insertions(+), 50 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index c5c47b68..c88e5e4c 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -38,11 +38,6 @@ #include #include -struct skcipher_tfm { - struct crypto_skcipher *skcipher; - bool has_key; -}; - static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { @@ -50,8 +45,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct skcipher_tfm *skc = pask->private; - struct crypto_skcipher *tfm = skc->skcipher; + struct crypto_skcipher *tfm = pask->private; unsigned ivsize = crypto_skcipher_ivsize(tfm); return af_alg_sendmsg(sock, msg, size, ivsize); @@ -65,8 +59,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); struct af_alg_ctx *ctx = ask->private; - struct skcipher_tfm *skc = pask->private; - struct crypto_skcipher *tfm = skc->skcipher; + struct crypto_skcipher *tfm = pask->private; unsigned int bs = crypto_skcipher_blocksize(tfm); struct af_alg_async_req *areq; int err = 0; @@ -221,7 +214,7 @@ static int skcipher_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct skcipher_tfm *tfm; + struct crypto_skcipher *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -235,7 +228,7 @@ static int skcipher_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (!tfm->has_key) + if 
(crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; if (!pask->refcnt++) @@ -314,41 +307,17 @@ static struct proto_ops algif_skcipher_ops_nokey = { static void *skcipher_bind(const char *name, u32 type, u32 mask) { - struct skcipher_tfm *tfm; - struct crypto_skcipher *skcipher; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - skcipher = crypto_alloc_skcipher(name, type, mask); - if (IS_ERR(skcipher)) { - kfree(tfm); - return ERR_CAST(skcipher); - } - - tfm->skcipher = skcipher; - - return tfm; + return crypto_alloc_skcipher(name, type, mask); } static void skcipher_release(void *private) { - struct skcipher_tfm *tfm = private; - - crypto_free_skcipher(tfm->skcipher); - kfree(tfm); + crypto_free_skcipher(private); } static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) { - struct skcipher_tfm *tfm = private; - int err; - - err = crypto_skcipher_setkey(tfm->skcipher, key, keylen); - tfm->has_key = !err; - - return err; + return crypto_skcipher_setkey(private, key, keylen); } static void skcipher_sock_destruct(struct sock *sk) @@ -357,8 +326,7 @@ static void skcipher_sock_destruct(struct sock *sk) struct af_alg_ctx *ctx = ask->private; struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct skcipher_tfm *skc = pask->private; - struct crypto_skcipher *tfm = skc->skcipher; + struct crypto_skcipher *tfm = pask->private; af_alg_pull_tsgl(sk, ctx->used, NULL, 0); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); @@ -370,22 +338,21 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) { struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - struct skcipher_tfm *tfm = private; - struct crypto_skcipher *skcipher = tfm->skcipher; + struct crypto_skcipher *tfm = private; unsigned int len = sizeof(*ctx); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; - ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher), + ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), GFP_KERNEL); if (!ctx->iv) { sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); + memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; @@ -405,9 +372,9 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) static int skcipher_accept_parent(void *private, struct sock *sk) { - struct skcipher_tfm *tfm = private; + struct crypto_skcipher *tfm = private; - if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher)) + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return skcipher_accept_parent_nokey(private, sk); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 11af5fd6..0fe2a292 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -598,8 +598,11 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, err = crypto_blkcipher_setkey(blkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & CRYPTO_TFM_RES_MASK); + if (err) + return err; - return err; + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } static int skcipher_crypt_blkcipher(struct skcipher_request *req, @@ -674,6 +677,9 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); skcipher->keysize = calg->cra_blkcipher.max_keysize; + if (skcipher->keysize) + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + return 0; } @@ -692,8 
+698,11 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, crypto_skcipher_set_flags(tfm, crypto_ablkcipher_get_flags(ablkcipher) & CRYPTO_TFM_RES_MASK); + if (err) + return err; - return err; + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } static int skcipher_crypt_ablkcipher(struct skcipher_request *req, @@ -767,6 +776,9 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) sizeof(struct ablkcipher_request); skcipher->keysize = calg->cra_ablkcipher.max_keysize; + if (skcipher->keysize) + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + return 0; } @@ -796,6 +808,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); unsigned long alignmask = crypto_skcipher_alignmask(tfm); + int err; if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); @@ -803,9 +816,15 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } if ((unsigned long)key & alignmask) - return skcipher_setkey_unaligned(tfm, key, keylen); + err = skcipher_setkey_unaligned(tfm, key, keylen); + else + err = cipher->setkey(tfm, key, keylen); + + if (err) + return err; - return cipher->setkey(tfm, key, keylen); + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) @@ -834,6 +853,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; + if (skcipher->keysize) + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; -- cgit v1.2.3 From d2dd68415e32ac6ce141461815a29a119b458ccd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 3 Jan 2018 11:16:30 -0800 Subject: crypto: aead - prevent using AEADs without setting key Similar to what was done for the hash API, update the AEAD API to track whether each transform has been keyed, and reject encryption/decryption if a key is needed but one hasn't been set. This isn't quite as important as the equivalent fix for the hash API because AEADs always require a key, so are unlikely to be used without one. Still, tracking the key will prevent accidental unkeyed use. algif_aead also had to track the key anyway, so the new flag replaces that and slightly simplifies the algif_aead implementation. 
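A one-line sketch of how a caller can now test whether an AEAD transform is ready for use (hypothetical helper, not part of this patch):

#include <crypto/aead.h>

static bool aead_is_keyed(struct crypto_aead *tfm)
{
        /* CRYPTO_TFM_NEED_KEY is set at init time and cleared by a
         * successful crypto_aead_setkey() */
        return !(crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY);
}

algif_aead performs exactly this check in aead_check_key() and aead_accept_parent(), as the diff below shows.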
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/aead.c | 13 +++++++++++-- crypto/algif_aead.c | 11 +++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/crypto/aead.c b/crypto/aead.c index fe00cbd7..60b3bbe9 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -54,11 +54,18 @@ int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { unsigned long alignmask = crypto_aead_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return setkey_unaligned(tfm, key, keylen); + err = setkey_unaligned(tfm, key, keylen); + else + err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen); + + if (err) + return err; - return crypto_aead_alg(tfm)->setkey(tfm, key, keylen); + crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_aead_setkey); @@ -93,6 +100,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm) struct crypto_aead *aead = __crypto_aead_cast(tfm); struct aead_alg *alg = crypto_aead_alg(aead); + crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY); + aead->authsize = alg->maxauthsize; if (alg->exit) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index d963c8cf..4b07edd5 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -42,7 +42,6 @@ struct aead_tfm { struct crypto_aead *aead; - bool has_key; struct crypto_skcipher *null_tfm; }; @@ -398,7 +397,7 @@ static int aead_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (!tfm->has_key) + if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) goto unlock; if (!pask->refcnt++) @@ -523,12 +522,8 @@ static int aead_setauthsize(void *private, unsigned int authsize) static int aead_setkey(void *private, const u8 *key, unsigned int keylen) { struct aead_tfm *tfm = private; - int err; - - err = crypto_aead_setkey(tfm->aead, key, keylen); - tfm->has_key = !err; - return err; + return crypto_aead_setkey(tfm->aead, key, keylen); } static void aead_sock_destruct(struct sock *sk) @@ -589,7 +584,7 @@ static int aead_accept_parent(void *private, struct sock *sk) { struct aead_tfm *tfm = private; - if (!tfm->has_key) + if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return aead_accept_parent_nokey(private, sk); -- cgit v1.2.3 From 2f1095410977dcef14d8c10e01901ef959463750 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Jan 2018 23:39:27 +0100 Subject: crypto: aes-generic - build with -Os on gcc-7+ While testing other changes, I discovered that gcc-7.2.1 produces badly optimized code for aes_encrypt/aes_decrypt. This is especially true when CONFIG_UBSAN_SANITIZE_ALL is enabled, where it leads to extremely large stack usage that in turn might cause kernel stack overflows: crypto/aes_generic.c: In function 'aes_encrypt': crypto/aes_generic.c:1371:1: warning: the frame size of 4880 bytes is larger than 2048 bytes [-Wframe-larger-than=] crypto/aes_generic.c: In function 'aes_decrypt': crypto/aes_generic.c:1441:1: warning: the frame size of 4864 bytes is larger than 2048 bytes [-Wframe-larger-than=] I verified that this problem exists on all architectures that are supported by gcc-7.2, though arm64 in particular is less affected than the others. I also found that gcc-7.1 and gcc-8 do not show the extreme stack usage but still produce worse code than earlier versions for this file, apparently because of optimization passes that generally provide a substantial improvement in object code quality but understandably fail to find any shortcuts in the AES algorithm. 
Possible workarounds include:

a) disabling the -ftree-pre and -ftree-sra optimizations; this was an earlier patch I tried, which reliably fixed the stack usage but, as later testing found, caused a serious performance regression in some versions.

b) disabling UBSAN on this file or on all ciphers, as suggested by Ard Biesheuvel. This would lead to massively better crypto performance in UBSAN-enabled kernels and avoid the stack usage, but there is a concern over whether we should exclude arbitrary files from UBSAN at all.

c) forcing the optimization level in a different way. Similar to a), but rather than deselecting specific optimization stages, this now uses "gcc -Os" for this file, regardless of the CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE/SIZE option. This is a reliable workaround for the stack consumption on all architectures, and I've retested the performance results now on x86, cycles/byte (lower is better) for cbc(aes-generic) with 256-bit keys:

             -O2    -Os
gcc-6.3.1   14.9   15.1
gcc-7.0.1   14.7   15.3
gcc-7.1.1   15.3   14.7
gcc-7.2.1   16.8   15.9
gcc-8.0.0   15.5   15.6

This implements option c) by forcing -Os on all compiler versions starting with gcc-7.1. As a workaround for PR83356, it would only be needed for gcc-7.2+ with UBSAN enabled, but since it also shows better performance on gcc-7.1 without UBSAN, it seems appropriate to use the faster version here as well. Side note: during testing, I also played with the AES code in libressl, which had a similar performance regression from gcc-6 to gcc-7.2, but was three times slower overall. It might be interesting to investigate that further and possibly port the Linux implementation into it. Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83651 Cc: Richard Biener Cc: Jakub Jelinek Cc: Ard Biesheuvel Signed-off-by: Arnd Bergmann Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/Makefile b/crypto/Makefile index d674884b..daa69360 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o +CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o -- cgit v1.2.3 From d48a31edd6d0d3f6f814df7be7016404151a8ce0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 5 Jan 2018 11:09:57 -0800 Subject: crypto: salsa20-generic - cleanup and convert to skcipher API Convert salsa20-generic from the deprecated "blkcipher" API to the "skcipher" API, in the process fixing it up to be thread-safe (as the crypto API expects) by maintaining each request's state separately from the transform context. Also remove the unnecessary cra_alignmask and tighten validation of the key size by accepting only 16 or 32 bytes, not anything in between. These changes bring the code close to the way chacha20-generic does things, so hopefully it will be easier to maintain in the future. However, the way Salsa20 interprets the IV is still slightly different; that was not changed.
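For orientation, here is a minimal sketch of how an in-kernel caller would drive the converted cipher through the skcipher API; the function name, buffer handling, and error paths are illustrative and not part of this patch:

/*
 * Illustrative sketch only: encrypts a linear buffer in place with
 * "salsa20" via the skcipher API. Not taken from the patch.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int salsa20_encrypt_buf(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen,
			       u8 iv[8])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("salsa20", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* After this patch, only 16- or 32-byte keys are accepted. */
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/*
	 * The IV is per request, and the expanded cipher state lives on
	 * the stack of the crypt routine rather than in the tfm context,
	 * so concurrent users of one tfm no longer corrupt each other.
	 */
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}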
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/salsa20_generic.c | 240 ++++++++++++++++++++--------------------------- 1 file changed, 104 insertions(+), 136 deletions(-) diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index d7da0eea..8c77bc78 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -19,49 +19,27 @@ * */ -#include +#include +#include #include -#include -#include -#include -#include -#include -#include -#define SALSA20_IV_SIZE 8U -#define SALSA20_MIN_KEY_SIZE 16U -#define SALSA20_MAX_KEY_SIZE 32U +#define SALSA20_IV_SIZE 8 +#define SALSA20_MIN_KEY_SIZE 16 +#define SALSA20_MAX_KEY_SIZE 32 +#define SALSA20_BLOCK_SIZE 64 -/* - * Start of code taken from D. J. Bernstein's reference implementation. - * With some modifications and optimizations made to suit our needs. - */ - -/* -salsa20-ref.c version 20051118 -D. J. Bernstein -Public domain. -*/ - -#define U32TO8_LITTLE(p, v) \ - { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \ - (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; } -#define U8TO32_LITTLE(p) \ - (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \ - ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) ) - -struct salsa20_ctx -{ - u32 input[16]; +struct salsa20_ctx { + u32 initial_state[16]; }; -static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) +static void salsa20_block(u32 *state, __le32 *stream) { u32 x[16]; int i; - memcpy(x, input, sizeof(x)); - for (i = 20; i > 0; i -= 2) { + memcpy(x, state, sizeof(x)); + + for (i = 0; i < 20; i += 2) { x[ 4] ^= rol32((x[ 0] + x[12]), 7); x[ 8] ^= rol32((x[ 4] + x[ 0]), 9); x[12] ^= rol32((x[ 8] + x[ 4]), 13); @@ -95,145 +73,135 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) x[14] ^= rol32((x[13] + x[12]), 13); x[15] ^= rol32((x[14] + x[13]), 18); } - for (i = 0; i < 16; ++i) - x[i] += input[i]; - for (i = 0; i < 16; ++i) - U32TO8_LITTLE(output + 4 * i,x[i]); -} -static const char sigma[16] = "expand 32-byte k"; -static const char tau[16] = "expand 16-byte k"; + for (i = 0; i < 16; i++) + stream[i] = cpu_to_le32(x[i] + state[i]); + + if (++state[8] == 0) + state[9]++; +} -static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) +static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes) { - const char *constants; + __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)]; - ctx->input[1] = U8TO32_LITTLE(k + 0); - ctx->input[2] = U8TO32_LITTLE(k + 4); - ctx->input[3] = U8TO32_LITTLE(k + 8); - ctx->input[4] = U8TO32_LITTLE(k + 12); - if (kbytes == 32) { /* recommended */ - k += 16; - constants = sigma; - } else { /* kbytes == 16 */ - constants = tau; + if (dst != src) + memcpy(dst, src, bytes); + + while (bytes >= SALSA20_BLOCK_SIZE) { + salsa20_block(state, stream); + crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE); + bytes -= SALSA20_BLOCK_SIZE; + dst += SALSA20_BLOCK_SIZE; + } + if (bytes) { + salsa20_block(state, stream); + crypto_xor(dst, (const u8 *)stream, bytes); } - ctx->input[11] = U8TO32_LITTLE(k + 0); - ctx->input[12] = U8TO32_LITTLE(k + 4); - ctx->input[13] = U8TO32_LITTLE(k + 8); - ctx->input[14] = U8TO32_LITTLE(k + 12); - ctx->input[0] = U8TO32_LITTLE(constants + 0); - ctx->input[5] = U8TO32_LITTLE(constants + 4); - ctx->input[10] = U8TO32_LITTLE(constants + 8); - ctx->input[15] = U8TO32_LITTLE(constants + 12); } -static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) +static void salsa20_init(u32 *state, const struct salsa20_ctx *ctx, + const u8 *iv) { - 
ctx->input[6] = U8TO32_LITTLE(iv + 0); - ctx->input[7] = U8TO32_LITTLE(iv + 4); - ctx->input[8] = 0; - ctx->input[9] = 0; + memcpy(state, ctx->initial_state, sizeof(ctx->initial_state)); + state[6] = get_unaligned_le32(iv + 0); + state[7] = get_unaligned_le32(iv + 4); } -static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, - const u8 *src, unsigned int bytes) +static int salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize) { - u8 buf[64]; - - if (dst != src) - memcpy(dst, src, bytes); - - while (bytes) { - salsa20_wordtobyte(buf, ctx->input); - - ctx->input[8]++; - if (!ctx->input[8]) - ctx->input[9]++; + static const char sigma[16] = "expand 32-byte k"; + static const char tau[16] = "expand 16-byte k"; + struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); + const char *constants; - if (bytes <= 64) { - crypto_xor(dst, buf, bytes); - return; - } + if (keysize != SALSA20_MIN_KEY_SIZE && + keysize != SALSA20_MAX_KEY_SIZE) + return -EINVAL; - crypto_xor(dst, buf, 64); - bytes -= 64; - dst += 64; + ctx->initial_state[1] = get_unaligned_le32(key + 0); + ctx->initial_state[2] = get_unaligned_le32(key + 4); + ctx->initial_state[3] = get_unaligned_le32(key + 8); + ctx->initial_state[4] = get_unaligned_le32(key + 12); + if (keysize == 32) { /* recommended */ + key += 16; + constants = sigma; + } else { /* keysize == 16 */ + constants = tau; } -} - -/* - * End of code taken from D. J. Bernstein's reference implementation. - */ + ctx->initial_state[11] = get_unaligned_le32(key + 0); + ctx->initial_state[12] = get_unaligned_le32(key + 4); + ctx->initial_state[13] = get_unaligned_le32(key + 8); + ctx->initial_state[14] = get_unaligned_le32(key + 12); + ctx->initial_state[0] = get_unaligned_le32(constants + 0); + ctx->initial_state[5] = get_unaligned_le32(constants + 4); + ctx->initial_state[10] = get_unaligned_le32(constants + 8); + ctx->initial_state[15] = get_unaligned_le32(constants + 12); + + /* space for the nonce; it will be overridden for each request */ + ctx->initial_state[6] = 0; + ctx->initial_state[7] = 0; + + /* initial block number */ + ctx->initial_state[8] = 0; + ctx->initial_state[9] = 0; -static int setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keysize) -{ - struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); - salsa20_keysetup(ctx, key, keysize); return 0; } -static int encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int salsa20_crypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + u32 state[16]; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt_block(desc, &walk, 64); + err = skcipher_walk_virt(&walk, req, true); - salsa20_ivsetup(ctx, walk.iv); + salsa20_init(state, ctx, walk.iv); - while (walk.nbytes >= 64) { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, - walk.src.virt.addr, - walk.nbytes - (walk.nbytes % 64)); - err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); - } + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; - if (walk.nbytes) { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, - walk.src.virt.addr, walk.nbytes); - err = blkcipher_walk_done(desc, &walk, 0); + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + 
+ salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } -static struct crypto_alg alg = { - .cra_name = "salsa20", - .cra_driver_name = "salsa20-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_type = &crypto_blkcipher_type, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct salsa20_ctx), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .setkey = setkey, - .encrypt = encrypt, - .decrypt = encrypt, - .min_keysize = SALSA20_MIN_KEY_SIZE, - .max_keysize = SALSA20_MAX_KEY_SIZE, - .ivsize = SALSA20_IV_SIZE, - } - } +static struct skcipher_alg alg = { + .base.cra_name = "salsa20", + .base.cra_driver_name = "salsa20-generic", + .base.cra_priority = 100, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct salsa20_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = SALSA20_MIN_KEY_SIZE, + .max_keysize = SALSA20_MAX_KEY_SIZE, + .ivsize = SALSA20_IV_SIZE, + .chunksize = SALSA20_BLOCK_SIZE, + .setkey = salsa20_setkey, + .encrypt = salsa20_crypt, + .decrypt = salsa20_crypt, }; static int __init salsa20_generic_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_skcipher(&alg); } static void __exit salsa20_generic_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_skcipher(&alg); } module_init(salsa20_generic_mod_init); -- cgit v1.2.3 From fbadf07b25d5c9a0effac75205c099c733b6952d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 5 Jan 2018 11:09:58 -0800 Subject: crypto: salsa20 - export generic helpers Export the Salsa20 constants, transform context, and initialization functions so that they can be reused by the x86 implementation. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/salsa20_generic.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index 8c77bc78..5074006a 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -21,17 +21,9 @@ #include #include +#include #include -#define SALSA20_IV_SIZE 8 -#define SALSA20_MIN_KEY_SIZE 16 -#define SALSA20_MAX_KEY_SIZE 32 -#define SALSA20_BLOCK_SIZE 64 - -struct salsa20_ctx { - u32 initial_state[16]; -}; - static void salsa20_block(u32 *state, __le32 *stream) { u32 x[16]; @@ -101,15 +93,16 @@ static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src, } } -static void salsa20_init(u32 *state, const struct salsa20_ctx *ctx, +void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, const u8 *iv) { memcpy(state, ctx->initial_state, sizeof(ctx->initial_state)); state[6] = get_unaligned_le32(iv + 0); state[7] = get_unaligned_le32(iv + 4); } +EXPORT_SYMBOL_GPL(crypto_salsa20_init); -static int salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, +int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize) { static const char sigma[16] = "expand 32-byte k"; @@ -150,6 +143,7 @@ static int salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, return 0; } +EXPORT_SYMBOL_GPL(crypto_salsa20_setkey); static int salsa20_crypt(struct skcipher_request *req) { @@ -161,7 +155,7 @@ static int salsa20_crypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, true); - salsa20_init(state, ctx, walk.iv); + crypto_salsa20_init(state, ctx, walk.iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -189,7 +183,7 @@ static struct skcipher_alg alg = { 
.max_keysize = SALSA20_MAX_KEY_SIZE, .ivsize = SALSA20_IV_SIZE, .chunksize = SALSA20_BLOCK_SIZE, - .setkey = salsa20_setkey, + .setkey = crypto_salsa20_setkey, .encrypt = salsa20_crypt, .decrypt = salsa20_crypt, }; -- cgit v1.2.3 From 65c020d01125eb251162deeecb5593986e511a9e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 5 Jan 2018 11:09:59 -0800 Subject: crypto: x86/salsa20 - cleanup and convert to skcipher API Convert salsa20-asm from the deprecated "blkcipher" API to the "skcipher" API, in the process fixing it up to use the generic helpers. This allows removing the salsa20_keysetup() and salsa20_ivsetup() assembly functions, which aren't performance critical; the C versions do just fine. This also fixes the same bug that salsa20-generic had, where the state array was being maintained directly in the transform context rather than on the stack or in the request context. Thus, if multiple threads used the same Salsa20 transform concurrently, they produced the wrong results. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/Kconfig b/crypto/Kconfig index 9327fbfc..b44c0ae0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1339,6 +1339,7 @@ config CRYPTO_SALSA20_586 tristate "Salsa20 stream cipher algorithm (i586)" depends on (X86 || UML_X86) && !64BIT select CRYPTO_BLKCIPHER + select CRYPTO_SALSA20 help Salsa20 stream cipher algorithm. @@ -1352,6 +1353,7 @@ config CRYPTO_SALSA20_X86_64 tristate "Salsa20 stream cipher algorithm (x86_64)" depends on (X86 || UML_X86) && 64BIT select CRYPTO_BLKCIPHER + select CRYPTO_SALSA20 help Salsa20 stream cipher algorithm. -- cgit v1.2.3 From 9d0d0d182abf0201013ab6fe404657884700d852 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 15 Jan 2018 17:07:22 +0100 Subject: crypto: aes-generic - fix aes-generic regression on powerpc My last bugfix added -Os on the command line, which unfortunately caused a build regression on powerpc in some configurations. I've done some more analysis of the original problem and found a slightly different workaround that avoids this regression and also results in better performance on gcc-7.0: -fcode-hoisting is an optimization step that got added in gcc-7 and that for all gcc-7 versions causes worse performance. This disables -fcode-hoisting on all compilers that understand the option. For gcc-7.1 and 7.2 I found the same performance as my previous patch (using -Os); in gcc-7.0 it was even better. On gcc-8 I could see no change in performance from this patch. In theory, code hoisting should not be able to make things better for the AES cipher, so leaving it disabled for gcc-8 only serves to simplify the Makefile change.
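For readers unfamiliar with the pass: code hoisting moves an expression that is evaluated on every path out in front of the branch. A toy illustration of the effect, generic C rather than anything from the kernel sources:

/* Toy example of what gcc's -fcode-hoisting does; not kernel code. */
int hoist_demo(int a, int b, int flag)
{
	int r;

	if (flag)
		r = (a * b) + 1;	/* a * b evaluated here ... */
	else
		r = (a * b) - 1;	/* ... and here */

	/*
	 * With -fcode-hoisting, gcc computes a * b once before the
	 * branch and keeps the product live in a register across it.
	 * In register-starved code such as AES's unrolled table
	 * lookups, extra live values like this can turn into stack
	 * spills, which is consistent with the regression above.
	 */
	return r;
}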
Reported-by: kbuild test robot Link: https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg30418.html Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83651 Fixes: 2f1095410977 ("crypto: aes-generic - build with -Os on gcc-7+") Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu --- crypto/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Makefile b/crypto/Makefile index daa69360..cdbc03b3 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -99,7 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o -CFLAGS_aes_generic.o := $(call cc-ifversion, -ge, 0701, -Os) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 +CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o -- cgit v1.2.3 From 7f975af478b6c083dbdfa08350a1499307468bea Mon Sep 17 00:00:00 2001 From: Kamil Konieczny Date: Tue, 16 Jan 2018 15:26:13 +0100 Subject: crypto: testmgr - test misuse of result in ahash Async hash operations can use result pointer in final/finup/digest, but not in init/update/export/import, so test it for misuse. Signed-off-by: Kamil Konieczny Signed-off-by: Herbert Xu --- crypto/testmgr.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 44a85d4b..d5e23a14 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -177,6 +177,18 @@ static void testmgr_free_buf(char *buf[XBUFSIZE]) free_page((unsigned long)buf[i]); } +static int ahash_guard_result(char *result, char c, int size) +{ + int i; + + for (i = 0; i < size; i++) { + if (result[i] != c) + return -EINVAL; + } + + return 0; +} + static int ahash_partial_update(struct ahash_request **preq, struct crypto_ahash *tfm, const struct hash_testvec *template, void *hash_buff, int k, int temp, struct scatterlist *sg, @@ -186,6 +198,7 @@ static int ahash_partial_update(struct ahash_request **preq, struct ahash_request *req; int statesize, ret = -EINVAL; static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 }; + int digestsize = crypto_ahash_digestsize(tfm); req = *preq; statesize = crypto_ahash_statesize( @@ -196,12 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq, goto out_nostate; } memcpy(state + statesize, guard, sizeof(guard)); + memset(result, 1, digestsize); ret = crypto_ahash_export(req, state); WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); if (ret) { pr_err("alg: hash: Failed to export() for %s\n", algo); goto out; } + ret = ahash_guard_result(result, 1, digestsize); + if (ret) { + pr_err("alg: hash: Failed, export used req->result for %s\n", + algo); + goto out; + } ahash_request_free(req); req = ahash_request_alloc(tfm, GFP_KERNEL); if (!req) { @@ -221,6 +241,12 @@ static int ahash_partial_update(struct ahash_request **preq, pr_err("alg: hash: Failed to import() for %s\n", algo); goto out; } + ret = ahash_guard_result(result, 1, digestsize); + if (ret) { + pr_err("alg: hash: Failed, import used req->result for %s\n", + algo); + goto out; + } ret = crypto_wait_req(crypto_ahash_update(req), wait); if (ret) goto out; @@ 
-316,18 +342,31 @@ static int __test_hash(struct crypto_ahash *tfm, goto out; } } else { + memset(result, 1, digest_size); ret = crypto_wait_req(crypto_ahash_init(req), &wait); if (ret) { pr_err("alg: hash: init failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } + ret = ahash_guard_result(result, 1, digest_size); + if (ret) { + pr_err("alg: hash: init failed on test %d " + "for %s: used req->result\n", j, algo); + goto out; + } ret = crypto_wait_req(crypto_ahash_update(req), &wait); if (ret) { pr_err("alg: hash: update failed on test %d " "for %s: ret=%d\n", j, algo, -ret); goto out; } + ret = ahash_guard_result(result, 1, digest_size); + if (ret) { + pr_err("alg: hash: update failed on test %d " + "for %s: used req->result\n", j, algo); + goto out; + } ret = crypto_wait_req(crypto_ahash_final(req), &wait); if (ret) { pr_err("alg: hash: final failed on test %d " -- cgit v1.2.3 From 50a0dceaa2634cd5a2a645ae0714da22fbbb2358 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 19 Jan 2018 12:04:33 +0000 Subject: crypto: sha3-generic - fixes for alignment and big endian operation Ensure that the input is byte swabbed before injecting it into the SHA3 transform. Use the get_unaligned() accessor for this so that we don't perform unaligned access inadvertently on architectures that do not support that. Cc: Fixes: dc4b27a1bc222c3b ("crypto: sha3 - Add SHA-3 hash algorithm") Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/sha3_generic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 7e8ed962..a68be626 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -18,6 +18,7 @@ #include #include #include +#include #define KECCAK_ROUNDS 24 @@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, unsigned int i; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) src)[i]; + sctx->st[i] ^= get_unaligned_le64(src + 8 * i); keccakf(sctx->st); done += sctx->rsiz; @@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out) sctx->buf[sctx->rsiz - 1] |= 0x80; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) sctx->buf)[i]; + sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); keccakf(sctx->st); -- cgit v1.2.3 From 116121f60112aefdcda040aae3f8677e778e7303 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 19 Jan 2018 12:04:34 +0000 Subject: crypto: sha3-generic - rewrite KECCAK transform to help the compiler optimize The way the KECCAK transform is currently coded involves many references into the state array using indexes that are calculated at runtime using simple but non-trivial arithmetic. This forces the compiler to treat the state matrix as an array in memory rather than keep it in registers, which results in poor performance. So instead, let's rephrase the algorithm using fixed array indexes only. 
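In sketch form, the change looks like this (a contrast distilled from the diff below, not the complete round; the declarations match the respective versions of the file):

/* Before: the Rho/Pi step walks st[] through lookup tables, so each
 * index is only known at run time and st[] has to stay in memory. */
t = st[1];
for (i = 0; i < 24; i++) {
	j = keccakf_piln[i];		/* data-dependent index */
	bc[0] = st[j];
	st[j] = ROTL64(t, keccakf_rotc[i]);
	t = bc[0];
}

/* After: the permutation is spelled out with constant indexes (with the
 * Theta xor folded in), so each st[n] can live in its own register: */
tt = st[1];
st[ 1] = rol64(st[ 6] ^ t[1], 44);
st[ 6] = rol64(st[ 9] ^ t[4], 20);
/* ... the remaining lanes continue in the same fixed pattern ... */
st[10] = rol64( tt     ^ t[1], 1);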
This helps the compiler keep the state matrix in registers, resulting in the following speedup (SHA3-256 performance in cycles per byte):

                                      before   after   speedup
Intel Core i7 @ 2.0 GHz (2.9 turbo)    100.6    35.7      2.8x
Cortex-A57 @ 2.0 GHz (64-bit mode)     101.6    12.7      8.0x
Cortex-A53 @ 1.0 GHz                   224.4    15.8     14.2x
Cortex-A57 @ 2.0 GHz (32-bit mode)     201.8    63.0      3.2x

Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/sha3_generic.c | 134 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 96 insertions(+), 38 deletions(-) diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index a68be626..5fecb609 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -5,6 +5,7 @@ * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf * * SHA-3 code by Jeff Garzik + * Ard Biesheuvel * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -22,8 +23,6 @@ #define KECCAK_ROUNDS 24 -#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) - static const u64 keccakf_rndc[24] = { 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, @@ -35,53 +34,112 @@ static const u64 keccakf_rndc[24] = { 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL }; -static const int keccakf_rotc[24] = { - 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, - 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 -}; - -static const int keccakf_piln[24] = { - 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, - 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 -}; - /* update the state with given number of rounds */ -static void keccakf(u64 st[25]) +static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) { - int i, j, round; - u64 t, bc[5]; + u64 t[5], tt, bc[5]; + int round; for (round = 0; round < KECCAK_ROUNDS; round++) { /* Theta */ - for (i = 0; i < 5; i++) - bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] - ^ st[i + 20]; - - for (i = 0; i < 5; i++) { - t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); - for (j = 0; j < 25; j += 5) - st[j + i] ^= t; - } + bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; + bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; + bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; + bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; + bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; + + t[0] = bc[4] ^ rol64(bc[1], 1); + t[1] = bc[0] ^ rol64(bc[2], 1); + t[2] = bc[1] ^ rol64(bc[3], 1); + t[3] = bc[2] ^ rol64(bc[4], 1); + t[4] = bc[3] ^ rol64(bc[0], 1); + + st[0] ^= t[0]; /* Rho Pi */ - t = st[1]; - for (i = 0; i < 24; i++) { - j = keccakf_piln[i]; - bc[0] = st[j]; - st[j] = ROTL64(t, keccakf_rotc[i]); - t = bc[0]; - } + tt = st[1]; + st[ 1] = rol64(st[ 6] ^ t[1], 44); + st[ 6] = rol64(st[ 9] ^ t[4], 20); + st[ 9] = rol64(st[22] ^ t[2], 61); + st[22] = rol64(st[14] ^ t[4], 39); + st[14] = rol64(st[20] ^ t[0], 18); + st[20] = rol64(st[ 2] ^ t[2], 62); + st[ 2] = rol64(st[12] ^ t[2], 43); + st[12] = rol64(st[13] ^ t[3], 25); + st[13] = rol64(st[19] ^ t[4], 8); + st[19] = rol64(st[23] ^ t[3], 56); + st[23] = rol64(st[15] ^ t[0], 41); + st[15] = rol64(st[ 4] ^ t[4], 27); + st[ 4] = rol64(st[24] ^ t[4], 14); + st[24] = rol64(st[21] ^ t[1], 2); + st[21] = rol64(st[ 8] ^ t[3], 55); + st[ 8] = rol64(st[16] ^ t[1], 45); + st[16] = rol64(st[ 5] ^ t[0], 36); + st[ 5] = rol64(st[ 3] ^ t[3], 28); + st[ 3] = rol64(st[18] ^ t[3], 21); + st[18] = rol64(st[17] ^ t[2], 15); + st[17] = rol64(st[11] ^ t[1], 10);
+ st[11] = rol64(st[ 7] ^ t[2], 6); + st[ 7] = rol64(st[10] ^ t[0], 3); + st[10] = rol64( tt ^ t[1], 1); /* Chi */ - for (j = 0; j < 25; j += 5) { - for (i = 0; i < 5; i++) - bc[i] = st[j + i]; - for (i = 0; i < 5; i++) - st[j + i] ^= (~bc[(i + 1) % 5]) & - bc[(i + 2) % 5]; - } + bc[ 0] = ~st[ 1] & st[ 2]; + bc[ 1] = ~st[ 2] & st[ 3]; + bc[ 2] = ~st[ 3] & st[ 4]; + bc[ 3] = ~st[ 4] & st[ 0]; + bc[ 4] = ~st[ 0] & st[ 1]; + st[ 0] ^= bc[ 0]; + st[ 1] ^= bc[ 1]; + st[ 2] ^= bc[ 2]; + st[ 3] ^= bc[ 3]; + st[ 4] ^= bc[ 4]; + + bc[ 0] = ~st[ 6] & st[ 7]; + bc[ 1] = ~st[ 7] & st[ 8]; + bc[ 2] = ~st[ 8] & st[ 9]; + bc[ 3] = ~st[ 9] & st[ 5]; + bc[ 4] = ~st[ 5] & st[ 6]; + st[ 5] ^= bc[ 0]; + st[ 6] ^= bc[ 1]; + st[ 7] ^= bc[ 2]; + st[ 8] ^= bc[ 3]; + st[ 9] ^= bc[ 4]; + + bc[ 0] = ~st[11] & st[12]; + bc[ 1] = ~st[12] & st[13]; + bc[ 2] = ~st[13] & st[14]; + bc[ 3] = ~st[14] & st[10]; + bc[ 4] = ~st[10] & st[11]; + st[10] ^= bc[ 0]; + st[11] ^= bc[ 1]; + st[12] ^= bc[ 2]; + st[13] ^= bc[ 3]; + st[14] ^= bc[ 4]; + + bc[ 0] = ~st[16] & st[17]; + bc[ 1] = ~st[17] & st[18]; + bc[ 2] = ~st[18] & st[19]; + bc[ 3] = ~st[19] & st[15]; + bc[ 4] = ~st[15] & st[16]; + st[15] ^= bc[ 0]; + st[16] ^= bc[ 1]; + st[17] ^= bc[ 2]; + st[18] ^= bc[ 3]; + st[19] ^= bc[ 4]; + + bc[ 0] = ~st[21] & st[22]; + bc[ 1] = ~st[22] & st[23]; + bc[ 2] = ~st[23] & st[24]; + bc[ 3] = ~st[24] & st[20]; + bc[ 4] = ~st[20] & st[21]; + st[20] ^= bc[ 0]; + st[21] ^= bc[ 1]; + st[22] ^= bc[ 2]; + st[23] ^= bc[ 3]; + st[24] ^= bc[ 4]; /* Iota */ st[0] ^= keccakf_rndc[round]; -- cgit v1.2.3 From 9fb5b09b8d34b49c67c05eab0d3d92e806c03b24 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 19 Jan 2018 12:04:35 +0000 Subject: crypto: sha3-generic - simplify code In preparation of exposing the generic SHA3 implementation to other versions as a fallback, simplify the code, and remove an inconsistency in the output handling (endian swabbing rsizw words of state before writing the output does not make sense) Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/sha3_generic.c | 184 ++++++++++++++++---------------------------------- 1 file changed, 59 insertions(+), 125 deletions(-) diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 5fecb609..c7084a24 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #define KECCAK_ROUNDS 24 @@ -146,43 +145,16 @@ static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) } } -static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz) -{ - memset(sctx, 0, sizeof(*sctx)); - sctx->md_len = digest_sz; - sctx->rsiz = 200 - 2 * digest_sz; - sctx->rsizw = sctx->rsiz / 8; -} - -static int sha3_224_init(struct shash_desc *desc) +static int sha3_init(struct shash_desc *desc) { struct sha3_state *sctx = shash_desc_ctx(desc); + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); - sha3_init(sctx, SHA3_224_DIGEST_SIZE); - return 0; -} - -static int sha3_256_init(struct shash_desc *desc) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - - sha3_init(sctx, SHA3_256_DIGEST_SIZE); - return 0; -} - -static int sha3_384_init(struct shash_desc *desc) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); - - sha3_init(sctx, SHA3_384_DIGEST_SIZE); - return 0; -} - -static int sha3_512_init(struct shash_desc *desc) -{ - struct sha3_state *sctx = shash_desc_ctx(desc); + sctx->rsiz = 200 - 2 * digest_size; + sctx->rsizw = sctx->rsiz / 8; + sctx->partial = 0; - sha3_init(sctx, 
SHA3_512_DIGEST_SIZE); + memset(sctx->st, 0, sizeof(sctx->st)); return 0; } @@ -227,6 +199,8 @@ static int sha3_final(struct shash_desc *desc, u8 *out) { struct sha3_state *sctx = shash_desc_ctx(desc); unsigned int i, inlen = sctx->partial; + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); + __le64 *digest = (__le64 *)out; sctx->buf[inlen++] = 0x06; memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); @@ -237,110 +211,70 @@ static int sha3_final(struct shash_desc *desc, u8 *out) keccakf(sctx->st); - for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] = cpu_to_le64(sctx->st[i]); + for (i = 0; i < digest_size / 8; i++) + put_unaligned_le64(sctx->st[i], digest++); - memcpy(out, sctx->st, sctx->md_len); + if (digest_size & 4) + put_unaligned_le32(sctx->st[i], (__le32 *)digest); memset(sctx, 0, sizeof(*sctx)); return 0; } -static struct shash_alg sha3_224 = { - .digestsize = SHA3_224_DIGEST_SIZE, - .init = sha3_224_init, - .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), - .base = { - .cra_name = "sha3-224", - .cra_driver_name = "sha3-224-generic", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA3_224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg sha3_256 = { - .digestsize = SHA3_256_DIGEST_SIZE, - .init = sha3_256_init, - .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), - .base = { - .cra_name = "sha3-256", - .cra_driver_name = "sha3-256-generic", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA3_256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg sha3_384 = { - .digestsize = SHA3_384_DIGEST_SIZE, - .init = sha3_384_init, - .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), - .base = { - .cra_name = "sha3-384", - .cra_driver_name = "sha3-384-generic", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA3_384_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg sha3_512 = { - .digestsize = SHA3_512_DIGEST_SIZE, - .init = sha3_512_init, - .update = sha3_update, - .final = sha3_final, - .descsize = sizeof(struct sha3_state), - .base = { - .cra_name = "sha3-512", - .cra_driver_name = "sha3-512-generic", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, - .cra_blocksize = SHA3_512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; +static struct shash_alg algs[] = { { + .digestsize = SHA3_224_DIGEST_SIZE, + .init = sha3_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-224", + .base.cra_driver_name = "sha3-224-generic", + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_blocksize = SHA3_224_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = SHA3_256_DIGEST_SIZE, + .init = sha3_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-256", + .base.cra_driver_name = "sha3-256-generic", + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_blocksize = SHA3_256_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = SHA3_384_DIGEST_SIZE, + .init = sha3_init, + .update = sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-384", + .base.cra_driver_name = "sha3-384-generic", + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_blocksize = SHA3_384_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = SHA3_512_DIGEST_SIZE, + .init = sha3_init, + .update = 
sha3_update, + .final = sha3_final, + .descsize = sizeof(struct sha3_state), + .base.cra_name = "sha3-512", + .base.cra_driver_name = "sha3-512-generic", + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, + .base.cra_blocksize = SHA3_512_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +} }; static int __init sha3_generic_mod_init(void) { - int ret; - - ret = crypto_register_shash(&sha3_224); - if (ret < 0) - goto err_out; - ret = crypto_register_shash(&sha3_256); - if (ret < 0) - goto err_out_224; - ret = crypto_register_shash(&sha3_384); - if (ret < 0) - goto err_out_256; - ret = crypto_register_shash(&sha3_512); - if (ret < 0) - goto err_out_384; - - return 0; - -err_out_384: - crypto_unregister_shash(&sha3_384); -err_out_256: - crypto_unregister_shash(&sha3_256); -err_out_224: - crypto_unregister_shash(&sha3_224); -err_out: - return ret; + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } static void __exit sha3_generic_mod_fini(void) { - crypto_unregister_shash(&sha3_224); - crypto_unregister_shash(&sha3_256); - crypto_unregister_shash(&sha3_384); - crypto_unregister_shash(&sha3_512); + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } module_init(sha3_generic_mod_init); -- cgit v1.2.3 From 05c0463c8af9154c19d7824aed5fe1b540b7c396 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 19 Jan 2018 12:04:36 +0000 Subject: crypto: sha3-generic - export init/update/final routines To allow accelerated implementations to fall back to the generic routines, e.g., in contexts where a SIMD based implementation is not allowed to run, expose the generic SHA3 init/update/final routines to other modules. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/sha3_generic.c | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index c7084a24..a965b9d8 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -145,7 +145,7 @@ static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) } } -static int sha3_init(struct shash_desc *desc) +int crypto_sha3_init(struct shash_desc *desc) { struct sha3_state *sctx = shash_desc_ctx(desc); unsigned int digest_size = crypto_shash_digestsize(desc->tfm); @@ -157,8 +157,9 @@ static int sha3_init(struct shash_desc *desc) memset(sctx->st, 0, sizeof(sctx->st)); return 0; } +EXPORT_SYMBOL(crypto_sha3_init); -static int sha3_update(struct shash_desc *desc, const u8 *data, +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha3_state *sctx = shash_desc_ctx(desc); @@ -194,8 +195,9 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, return 0; } +EXPORT_SYMBOL(crypto_sha3_update); -static int sha3_final(struct shash_desc *desc, u8 *out) +int crypto_sha3_final(struct shash_desc *desc, u8 *out) { struct sha3_state *sctx = shash_desc_ctx(desc); unsigned int i, inlen = sctx->partial; @@ -220,12 +222,13 @@ static int sha3_final(struct shash_desc *desc, u8 *out) memset(sctx, 0, sizeof(*sctx)); return 0; } +EXPORT_SYMBOL(crypto_sha3_final); static struct shash_alg algs[] = { { .digestsize = SHA3_224_DIGEST_SIZE, - .init = sha3_init, - .update = sha3_update, - .final = sha3_final, + .init = crypto_sha3_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-224", .base.cra_driver_name = "sha3-224-generic", @@ -234,9 +237,9 @@ static struct shash_alg algs[] = { { .base.cra_module = THIS_MODULE, }, { .digestsize = 
SHA3_256_DIGEST_SIZE, - .init = sha3_init, - .update = sha3_update, - .final = sha3_final, + .init = crypto_sha3_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-256", .base.cra_driver_name = "sha3-256-generic", @@ -245,9 +248,9 @@ static struct shash_alg algs[] = { { .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_384_DIGEST_SIZE, - .init = sha3_init, - .update = sha3_update, - .final = sha3_final, + .init = crypto_sha3_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-384", .base.cra_driver_name = "sha3-384-generic", @@ -256,9 +259,9 @@ static struct shash_alg algs[] = { { .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_512_DIGEST_SIZE, - .init = sha3_init, - .update = sha3_update, - .final = sha3_final, + .init = crypto_sha3_init, + .update = crypto_sha3_update, + .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-512", .base.cra_driver_name = "sha3-512-generic", -- cgit v1.2.3 From dfb7ab89469cb59097346193733dd3843da6ddcf Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 19 Jan 2018 12:04:37 +0000 Subject: crypto: testmgr - add new testcases for sha3 All current SHA3 test cases are smaller than the SHA3 block size, which means not all code paths are being exercised. So add a new test case to each variant, and make one of the existing test cases chunked. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/testmgr.h | 550 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 550 insertions(+) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index a714b629..6044f690 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -1052,6 +1052,142 @@ static const struct hash_testvec sha3_224_tv_template[] = { "\xc9\xfd\x55\x74\x49\x44\x79\xba" "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" "\xd0\xfc\xce\x33", + .np = 2, + .tap = { 28, 28 }, + }, { + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" + "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" + "\x53\xea\x81\x18\x8c\x23\xba\x2e" + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" + "\x37\xce\x42\xd9\x70\x07\x7b\x12" + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" + 
"\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" + "\x38\xcf\x43\xda\x71\x08\x7c\x13" + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" + "\xef\x63\xfa\x91\x05\x9c\x33\xca" + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" + "\xde\x75\x0c\x80\x17\xae\x22\xb9" + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" + "\x67\xfe\x72\x09\xa0\x14\xab\x42" + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" + "\x95\x09\xa0\x37\xce\x42\xd9\x70" + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" + "\x68\xff\x73\x0a\xa1\x15\xac\x43" + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" + "\x29\xc0\x57\xee\x62\xf9\x90\x04" + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" + "\x08\x7c\x13\xaa\x1e\xb5\x4c", + .psize = 1023, + .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26" + "\xc3\x88\x20\x71\x15\x06\xe8\x2d" + "\xa3\x92\x44\xab\x3e\xe7\xff\x86" + "\xb6\x79\x10\x72", }, }; @@ -1077,6 +1213,142 @@ static const struct hash_testvec sha3_256_tv_template[] = { "\x49\x10\x03\x76\xa8\x23\x5e\x2c" "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" 
"\xdb\x32\xdd\x97\x49\x6d\x33\x76", + .np = 2, + .tap = { 28, 28 }, + }, { + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" + "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" + "\x53\xea\x81\x18\x8c\x23\xba\x2e" + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" + "\x37\xce\x42\xd9\x70\x07\x7b\x12" + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" + "\x38\xcf\x43\xda\x71\x08\x7c\x13" + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" + "\xef\x63\xfa\x91\x05\x9c\x33\xca" + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" + "\xde\x75\x0c\x80\x17\xae\x22\xb9" + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" + "\x67\xfe\x72\x09\xa0\x14\xab\x42" + 
"\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" + "\x95\x09\xa0\x37\xce\x42\xd9\x70" + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" + "\x68\xff\x73\x0a\xa1\x15\xac\x43" + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" + "\x29\xc0\x57\xee\x62\xf9\x90\x04" + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" + "\x08\x7c\x13\xaa\x1e\xb5\x4c", + .psize = 1023, + .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71" + "\xf7\xfa\x80\xf5\xea\x11\x03\xb1" + "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7" + "\x8a\x97\xbb\xf2\x07\x08\x38\x30", }, }; @@ -1109,6 +1381,144 @@ static const struct hash_testvec sha3_384_tv_template[] = { "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" "\x9e\xef\x51\xac\xd0\x65\x7c\x22", + .np = 2, + .tap = { 28, 28 }, + }, { + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" + "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" + "\x53\xea\x81\x18\x8c\x23\xba\x2e" + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" + "\x37\xce\x42\xd9\x70\x07\x7b\x12" + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" + 
"\x7c\x13\x87\x1e\xb5\x29\xc0\x57" + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" + "\x38\xcf\x43\xda\x71\x08\x7c\x13" + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" + "\xef\x63\xfa\x91\x05\x9c\x33\xca" + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" + "\xde\x75\x0c\x80\x17\xae\x22\xb9" + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" + "\x67\xfe\x72\x09\xa0\x14\xab\x42" + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" + "\x95\x09\xa0\x37\xce\x42\xd9\x70" + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" + "\x68\xff\x73\x0a\xa1\x15\xac\x43" + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" + "\x29\xc0\x57\xee\x62\xf9\x90\x04" + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" + "\x08\x7c\x13\xaa\x1e\xb5\x4c", + .psize = 1023, + .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71" + "\xcf\xca\x30\x85\x9b\xc1\x25\xc7" + "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b" + "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51" + "\xd2\x21\xae\x2d\xc7\xae\x8c\x40" + "\xb9\xe6\x56\x48\x03\xcd\x88\x6b", }, }; @@ -1147,6 +1557,146 @@ static const struct hash_testvec sha3_512_tv_template[] = { "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" "\x46\xb5\x33\xb4\x9c\x03\x0d\x99" "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", + .np = 2, + .tap = { 28, 28 }, + }, { + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" + 
"\x3b\xd2\x69\x00\x74\x0b\xa2\x16" + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" + "\x53\xea\x81\x18\x8c\x23\xba\x2e" + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" + "\x37\xce\x42\xd9\x70\x07\x7b\x12" + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" + "\x38\xcf\x43\xda\x71\x08\x7c\x13" + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" + "\xef\x63\xfa\x91\x05\x9c\x33\xca" + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" + "\xde\x75\x0c\x80\x17\xae\x22\xb9" + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" + "\x67\xfe\x72\x09\xa0\x14\xab\x42" + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" + 
"\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" + "\x95\x09\xa0\x37\xce\x42\xd9\x70" + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" + "\x68\xff\x73\x0a\xa1\x15\xac\x43" + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" + "\x29\xc0\x57\xee\x62\xf9\x90\x04" + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" + "\x08\x7c\x13\xaa\x1e\xb5\x4c", + .psize = 1023, + .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde" + "\xf0\xc6\x42\x17\xd7\xb2\x26\x47" + "\x90\x28\xa6\x84\xe8\x49\x7a\x86" + "\xd6\xb8\x9e\xf8\x07\x59\x21\x03" + "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0" + "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b" + "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41" + "\x32\xc2\x8e\x50\x91\x86\x90\xfb", }, }; -- cgit v1.2.3