From 69017f14631c4376c8a372a026d77b4f11876fbc Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Tue, 2 Jul 2019 14:39:20 +0300 Subject: crypto: fips - add FIPS test failure notification chain Crypto test failures in FIPS mode cause an immediate panic, but on some systems the cryptographic boundary extends beyond just the Linux controlled domain. Add a simple atomic notification chain to allow interested parties to register to receive notification prior to us kicking the bucket. Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- crypto/fips.c | 11 +++++++++++ crypto/testmgr.c | 4 +++- 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/fips.c b/crypto/fips.c index c0b3a3c3..7b1d8cae 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -11,10 +11,14 @@ #include #include #include +#include <linux/notifier.h> int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); +ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain); +EXPORT_SYMBOL_GPL(fips_fail_notif_chain); + /* Process kernel command-line parameter at boot time. fips=0 or fips=1 */ static int fips_enable(char *str) { @@ -58,6 +62,13 @@ static void crypto_proc_fips_exit(void) unregister_sysctl_table(crypto_sysctls); } +void fips_fail_notify(void) +{ + if (fips_enabled) + atomic_notifier_call_chain(&fips_fail_notif_chain, 0, NULL); +} +EXPORT_SYMBOL_GPL(fips_fail_notify); + static int __init fips_init(void) { crypto_proc_fips_init(); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index d0b5b338..8ba1e75c 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5240,9 +5240,11 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) type, mask); test_done: - if (rc && (fips_enabled || panic_on_fail)) + if (rc && (fips_enabled || panic_on_fail)) { + fips_fail_notify(); panic("alg: self-tests for %s (%s) failed in %s mode!\n", driver, alg, fips_enabled ? "fips" : "panic_on_fail"); + } if (fips_enabled && !rc) pr_info("alg: self-tests for %s (%s) passed\n", driver, alg); -- cgit v1.2.3 From 0d4cf96bf6e6280cfb9f59fd6024efed09b1c01c Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:20 +0200 Subject: crypto: aes - rename local routines to prevent future clashes Rename some local AES encrypt/decrypt routines so they don't clash with the names we are about to introduce for the routines exposed by the generic AES library.
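A note on the FIPS failure chain introduced in the first patch above: consumers inside the wider cryptographic boundary register on it with the usual atomic notifier API. A minimal sketch of such a consumer follows — the handler and module are hypothetical, and it assumes the extern declaration of fips_fail_notif_chain is in scope (presumably via include/linux/fips.h, which the crypto/-filtered diff does not show):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/fips.h>

/*
 * Runs via atomic_notifier_call_chain(), i.e. in atomic context just
 * before the panic() in testmgr, so it must not sleep.
 */
static int example_fips_fail(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* e.g. signal a device that forms part of the FIPS boundary */
	return NOTIFY_OK;
}

static struct notifier_block example_fips_fail_nb = {
	.notifier_call = example_fips_fail,
};

static int __init example_init(void)
{
	return atomic_notifier_chain_register(&fips_fail_notif_chain,
					      &example_fips_fail_nb);
}

static void __exit example_exit(void)
{
	atomic_notifier_chain_unregister(&fips_fail_notif_chain,
					 &example_fips_fail_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");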
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index f2175689..3aa4a715 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1332,7 +1332,7 @@ EXPORT_SYMBOL_GPL(crypto_aes_set_key); f_rl(bo, bi, 3, k); \ } while (0) -static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); u32 b0[4], b1[4]; @@ -1402,7 +1402,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) i_rl(bo, bi, 3, k); \ } while (0) -static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); u32 b0[4], b1[4]; @@ -1454,8 +1454,8 @@ static struct crypto_alg aes_alg = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = crypto_aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt + .cia_encrypt = crypto_aes_encrypt, + .cia_decrypt = crypto_aes_decrypt } } }; -- cgit v1.2.3 From 2756463d0838ef5db48b67c8f6e85232aaef11ec Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:21 +0200 Subject: crypto: aes/fixed-time - align key schedule with other implementations The fixed time AES code mangles the key schedule so that xoring the first round key with values at fixed offsets across the Sbox produces the correct value. This primes the D-cache with the entire Sbox before any data dependent lookups are done, making it more difficult to infer key bits from timing variances when the plaintext is known. The downside of this approach is that it renders the key schedule incompatible with other implementations of AES in the kernel, which makes it cumbersome to use this implementation as a fallback for SIMD based AES in contexts where this is not allowed. So let's tweak the fixed Sbox indexes so that they add up to zero under the xor operation. While at it, increase the granularity to 16 bytes so we cover the entire Sbox even on systems with 16 byte cachelines. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/aes_ti.c | 52 +++++++++++++++++++++------------------------------- 1 file changed, 21 insertions(+), 31 deletions(-) (limited to 'crypto') diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c index 798fc9a2..b3ebdc56 100644 --- a/crypto/aes_ti.c +++ b/crypto/aes_ti.c @@ -234,30 +234,8 @@ static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - int err; - err = aesti_expand_key(ctx, in_key, key_len); - if (err) - return err; - - /* - * In order to force the compiler to emit data independent Sbox lookups - * at the start of each block, xor the first round key with values at - * fixed indexes in the Sbox. This will need to be repeated each time - * the key is used, which will pull the entire Sbox into the D-cache - * before any data dependent Sbox lookups are performed. 
- */ - ctx->key_enc[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128]; - ctx->key_enc[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160]; - ctx->key_enc[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192]; - ctx->key_enc[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224]; - - ctx->key_dec[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128]; - ctx->key_dec[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160]; - ctx->key_dec[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192]; - ctx->key_dec[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224]; - - return 0; + return aesti_expand_key(ctx, in_key, key_len); } static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) @@ -280,10 +258,16 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ local_irq_save(flags); - st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128]; - st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160]; - st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192]; - st0[3] ^= __aesti_sbox[96] ^ __aesti_sbox[224]; + /* + * Force the compiler to emit data independent Sbox references, + * by xoring the input with Sbox values that are known to add up + * to zero. This pulls the entire Sbox into the D-cache before any + * data dependent lookups are done. + */ + st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[ 64] ^ __aesti_sbox[134] ^ __aesti_sbox[195]; + st0[1] ^= __aesti_sbox[16] ^ __aesti_sbox[ 82] ^ __aesti_sbox[158] ^ __aesti_sbox[221]; + st0[2] ^= __aesti_sbox[32] ^ __aesti_sbox[ 96] ^ __aesti_sbox[160] ^ __aesti_sbox[234]; + st0[3] ^= __aesti_sbox[48] ^ __aesti_sbox[112] ^ __aesti_sbox[186] ^ __aesti_sbox[241]; for (round = 0;; round += 2, rkp += 8) { st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0]; @@ -328,10 +312,16 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ local_irq_save(flags); - st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128]; - st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160]; - st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192]; - st0[3] ^= __aesti_inv_sbox[96] ^ __aesti_inv_sbox[224]; + /* + * Force the compiler to emit data independent Sbox references, + * by xoring the input with Sbox values that are known to add up + * to zero. This pulls the entire Sbox into the D-cache before any + * data dependent lookups are done. 
+ */ + st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[ 64] ^ __aesti_inv_sbox[129] ^ __aesti_inv_sbox[200]; + st0[1] ^= __aesti_inv_sbox[16] ^ __aesti_inv_sbox[ 83] ^ __aesti_inv_sbox[150] ^ __aesti_inv_sbox[212]; + st0[2] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[ 96] ^ __aesti_inv_sbox[160] ^ __aesti_inv_sbox[236]; + st0[3] ^= __aesti_inv_sbox[48] ^ __aesti_inv_sbox[112] ^ __aesti_inv_sbox[187] ^ __aesti_inv_sbox[247]; for (round = 0;; round += 2, rkp += 8) { st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0]; -- cgit v1.2.3 From 247c33988ca367d0bdb1ed515b729156bff95b50 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:22 +0200 Subject: crypto: aes - create AES library based on the fixed time AES code Take the existing small footprint and mostly time invariant C code and turn it into an AES library that can be used for non-performance critical, casual use of AES, and as a fallback for, e.g., SIMD code that needs a secondary path that can be taken in contexts where the SIMD unit is off limits (e.g., in hard interrupts taken from kernel context). Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 4 + crypto/aes_ti.c | 303 +------------------------------------------------------- 2 files changed, 7 insertions(+), 300 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index e801450b..091ebbbc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1066,6 +1066,9 @@ config CRYPTO_GHASH_CLMUL_NI_INTEL comment "Ciphers" +config CRYPTO_LIB_AES + tristate + config CRYPTO_AES tristate "AES cipher algorithms" select CRYPTO_ALGAPI @@ -1089,6 +1092,7 @@ config CRYPTO_AES config CRYPTO_AES_TI tristate "Fixed time AES cipher" select CRYPTO_ALGAPI + select CRYPTO_LIB_AES help This is a generic implementation of AES that attempts to eliminate data dependent latencies as much as possible without affecting diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c index b3ebdc56..205c2c25 100644 --- a/crypto/aes_ti.c +++ b/crypto/aes_ti.c @@ -8,249 +8,19 @@ #include <crypto/aes.h> #include <linux/crypto.h> #include <linux/module.h> -#include <asm/unaligned.h> - -/* - * Emit the sbox as volatile const to prevent the compiler from doing - * constant folding on sbox references involving fixed indexes.
- */ -static volatile const u8 __cacheline_aligned __aesti_sbox[] = { - 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, - 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, - 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, - 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, - 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, - 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, - 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, - 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, - 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, - 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, - 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, - 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, - 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, - 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, - 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, - 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, - 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, - 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, - 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, - 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, - 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, - 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, - 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, - 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, - 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, - 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, - 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, - 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, - 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, - 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, - 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, - 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16, -}; - -static volatile const u8 __cacheline_aligned __aesti_inv_sbox[] = { - 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, - 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, - 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, - 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, - 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, - 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, - 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, - 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, - 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, - 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, - 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, - 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, - 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, - 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, - 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, - 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, - 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, - 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, - 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, - 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, - 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, - 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, - 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, - 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, - 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, - 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, - 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, - 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, - 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, - 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, - 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, - 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d, -}; - -static u32 mul_by_x(u32 w) -{ - u32 x = w & 0x7f7f7f7f; - u32 y = w & 0x80808080; - - /* multiply by polynomial 'x' (0b10) in GF(2^8) */ - return (x << 1) ^ (y >> 7) * 0x1b; -} - -static u32 
mul_by_x2(u32 w) -{ - u32 x = w & 0x3f3f3f3f; - u32 y = w & 0x80808080; - u32 z = w & 0x40404040; - - /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */ - return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b; -} - -static u32 mix_columns(u32 x) -{ - /* - * Perform the following matrix multiplication in GF(2^8) - * - * | 0x2 0x3 0x1 0x1 | | x[0] | - * | 0x1 0x2 0x3 0x1 | | x[1] | - * | 0x1 0x1 0x2 0x3 | x | x[2] | - * | 0x3 0x1 0x1 0x2 | | x[3] | - */ - u32 y = mul_by_x(x) ^ ror32(x, 16); - - return y ^ ror32(x ^ y, 8); -} - -static u32 inv_mix_columns(u32 x) -{ - /* - * Perform the following matrix multiplication in GF(2^8) - * - * | 0xe 0xb 0xd 0x9 | | x[0] | - * | 0x9 0xe 0xb 0xd | | x[1] | - * | 0xd 0x9 0xe 0xb | x | x[2] | - * | 0xb 0xd 0x9 0xe | | x[3] | - * - * which can conveniently be reduced to - * - * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] | - * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] | - * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] | - * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] | - */ - u32 y = mul_by_x2(x); - - return mix_columns(x ^ y ^ ror32(y, 16)); -} - -static __always_inline u32 subshift(u32 in[], int pos) -{ - return (__aesti_sbox[in[pos] & 0xff]) ^ - (__aesti_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^ - (__aesti_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^ - (__aesti_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24); -} - -static __always_inline u32 inv_subshift(u32 in[], int pos) -{ - return (__aesti_inv_sbox[in[pos] & 0xff]) ^ - (__aesti_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^ - (__aesti_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^ - (__aesti_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24); -} - -static u32 subw(u32 in) -{ - return (__aesti_sbox[in & 0xff]) ^ - (__aesti_sbox[(in >> 8) & 0xff] << 8) ^ - (__aesti_sbox[(in >> 16) & 0xff] << 16) ^ - (__aesti_sbox[(in >> 24) & 0xff] << 24); -} - -static int aesti_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, - unsigned int key_len) -{ - u32 kwords = key_len / sizeof(u32); - u32 rc, i, j; - - if (key_len != AES_KEYSIZE_128 && - key_len != AES_KEYSIZE_192 && - key_len != AES_KEYSIZE_256) - return -EINVAL; - - ctx->key_length = key_len; - - for (i = 0; i < kwords; i++) - ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32)); - - for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) { - u32 *rki = ctx->key_enc + (i * kwords); - u32 *rko = rki + kwords; - - rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0]; - rko[1] = rko[0] ^ rki[1]; - rko[2] = rko[1] ^ rki[2]; - rko[3] = rko[2] ^ rki[3]; - - if (key_len == 24) { - if (i >= 7) - break; - rko[4] = rko[3] ^ rki[4]; - rko[5] = rko[4] ^ rki[5]; - } else if (key_len == 32) { - if (i >= 6) - break; - rko[4] = subw(rko[3]) ^ rki[4]; - rko[5] = rko[4] ^ rki[5]; - rko[6] = rko[5] ^ rki[6]; - rko[7] = rko[6] ^ rki[7]; - } - } - - /* - * Generate the decryption keys for the Equivalent Inverse Cipher. - * This involves reversing the order of the round keys, and applying - * the Inverse Mix Columns transformation to all but the first and - * the last one. 
- */ - ctx->key_dec[0] = ctx->key_enc[key_len + 24]; - ctx->key_dec[1] = ctx->key_enc[key_len + 25]; - ctx->key_dec[2] = ctx->key_enc[key_len + 26]; - ctx->key_dec[3] = ctx->key_enc[key_len + 27]; - - for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) { - ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]); - ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]); - ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]); - ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]); - } - - ctx->key_dec[i] = ctx->key_enc[0]; - ctx->key_dec[i + 1] = ctx->key_enc[1]; - ctx->key_dec[i + 2] = ctx->key_enc[2]; - ctx->key_dec[i + 3] = ctx->key_enc[3]; - - return 0; -} static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - return aesti_expand_key(ctx, in_key, key_len); + return aes_expandkey(ctx, in_key, key_len); } static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - const u32 *rkp = ctx->key_enc + 4; - int rounds = 6 + ctx->key_length / 4; - u32 st0[4], st1[4]; unsigned long flags; - int round; - - st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in); - st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4); - st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8); - st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12); /* * Temporarily disable interrupts to avoid races where cachelines are @@ -258,36 +28,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ local_irq_save(flags); - /* - * Force the compiler to emit data independent Sbox references, - * by xoring the input with Sbox values that are known to add up - * to zero. This pulls the entire Sbox into the D-cache before any - * data dependent lookups are done. 
- */ - st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[ 64] ^ __aesti_sbox[134] ^ __aesti_sbox[195]; - st0[1] ^= __aesti_sbox[16] ^ __aesti_sbox[ 82] ^ __aesti_sbox[158] ^ __aesti_sbox[221]; - st0[2] ^= __aesti_sbox[32] ^ __aesti_sbox[ 96] ^ __aesti_sbox[160] ^ __aesti_sbox[234]; - st0[3] ^= __aesti_sbox[48] ^ __aesti_sbox[112] ^ __aesti_sbox[186] ^ __aesti_sbox[241]; - - for (round = 0;; round += 2, rkp += 8) { - st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0]; - st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1]; - st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2]; - st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3]; - - if (round == rounds - 2) - break; - - st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4]; - st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5]; - st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6]; - st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7]; - } - - put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out); - put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4); - put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8); - put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12); + aes_encrypt(ctx, out, in); local_irq_restore(flags); } @@ -295,16 +36,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - const u32 *rkp = ctx->key_dec + 4; - int rounds = 6 + ctx->key_length / 4; - u32 st0[4], st1[4]; unsigned long flags; - int round; - - st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in); - st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4); - st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8); - st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12); /* * Temporarily disable interrupts to avoid races where cachelines are @@ -312,36 +44,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ local_irq_save(flags); - /* - * Force the compiler to emit data independent Sbox references, - * by xoring the input with Sbox values that are known to add up - * to zero. This pulls the entire Sbox into the D-cache before any - * data dependent lookups are done. 
- */ - st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[ 64] ^ __aesti_inv_sbox[129] ^ __aesti_inv_sbox[200]; - st0[1] ^= __aesti_inv_sbox[16] ^ __aesti_inv_sbox[ 83] ^ __aesti_inv_sbox[150] ^ __aesti_inv_sbox[212]; - st0[2] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[ 96] ^ __aesti_inv_sbox[160] ^ __aesti_inv_sbox[236]; - st0[3] ^= __aesti_inv_sbox[48] ^ __aesti_inv_sbox[112] ^ __aesti_inv_sbox[187] ^ __aesti_inv_sbox[247]; - - for (round = 0;; round += 2, rkp += 8) { - st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0]; - st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1]; - st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2]; - st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3]; - - if (round == rounds - 2) - break; - - st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4]; - st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5]; - st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6]; - st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7]; - } - - put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out); - put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4); - put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8); - put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12); + aes_decrypt(ctx, out, in); local_irq_restore(flags); } -- cgit v1.2.3 From 01490fc88e550935c57c668ee686dc02aca6d7d7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:23 +0200 Subject: crypto: x86/aes-ni - switch to generic for fallback and key routines The AES-NI code contains fallbacks for invocations that occur from a context where the SIMD unit is unavailable, which really only occurs when running in softirq context that was entered from a hard IRQ that was taken while running kernel code that was already using the FPU. That means performance is not really a consideration, and we can just use the new library code for this use case, which has a smaller footprint and is believed to be time invariant. This will allow us to drop the non-SIMD asm routines in a subsequent patch. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 091ebbbc..20af5806 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1156,8 +1156,7 @@ config CRYPTO_AES_NI_INTEL tristate "AES cipher algorithms (AES-NI)" depends on X86 select CRYPTO_AEAD - select CRYPTO_AES_X86_64 if 64BIT - select CRYPTO_AES_586 if !64BIT + select CRYPTO_LIB_AES select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER select CRYPTO_GLUE_HELPER_X86 if 64BIT -- cgit v1.2.3 From bfef5b97787c4d41b2128863dc301a2d3e92525b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:24 +0200 Subject: crypto: x86/aes - drop scalar assembler implementations The AES assembler code for x86 isn't actually faster than code generated by the compiler from aes_generic.c, and considering the disproportionate maintenance burden of assembler code on x86, it is better just to drop it entirely. Modern x86 systems will use AES-NI anyway, and given that the modules being removed have a dependency on aes_generic already, we can remove them without running the risk of regressions. 
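With the scalar asm gone, the only non-AES-NI code paths left on x86 are the generic C driver and the AES library fallback wired up in the previous patch. For reference, the pattern that fallback takes in the glue code is roughly the following sketch — aes_ctx() and aesni_enc() are internals of arch/x86/crypto/aesni-intel_glue.c, which the crypto/-filtered diffs here do not show:

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		/* SIMD off limits (e.g. hard IRQ context): AES library */
		aes_encrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}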
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 44 -------------------------------------------- 1 file changed, 44 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 20af5806..df6f0be6 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1108,50 +1108,6 @@ config CRYPTO_AES_TI block. Interrupts are also disabled to avoid races where cachelines are evicted when the CPU is interrupted to do something else. -config CRYPTO_AES_586 - tristate "AES cipher algorithms (i586)" - depends on (X86 || UML_X86) && !64BIT - select CRYPTO_ALGAPI - select CRYPTO_AES - help - AES cipher algorithms (FIPS-197). AES uses the Rijndael - algorithm. - - Rijndael appears to be consistently a very good performer in - both hardware and software across a wide range of computing - environments regardless of its use in feedback or non-feedback - modes. Its key setup time is excellent, and its key agility is - good. Rijndael's very low memory requirements make it very well - suited for restricted-space environments, in which it also - demonstrates excellent performance. Rijndael's operations are - among the easiest to defend against power and timing attacks. - - The AES specifies three key sizes: 128, 192 and 256 bits - - See for more information. - -config CRYPTO_AES_X86_64 - tristate "AES cipher algorithms (x86_64)" - depends on (X86 || UML_X86) && 64BIT - select CRYPTO_ALGAPI - select CRYPTO_AES - help - AES cipher algorithms (FIPS-197). AES uses the Rijndael - algorithm. - - Rijndael appears to be consistently a very good performer in - both hardware and software across a wide range of computing - environments regardless of its use in feedback or non-feedback - modes. Its key setup time is excellent, and its key agility is - good. Rijndael's very low memory requirements make it very well - suited for restricted-space environments, in which it also - demonstrates excellent performance. Rijndael's operations are - among the easiest to defend against power and timing attacks. - - The AES specifies three key sizes: 128, 192 and 256 bits - - See for more information. - config CRYPTO_AES_NI_INTEL tristate "AES cipher algorithms (AES-NI)" depends on X86 -- cgit v1.2.3 From 7f35cf68eb9f1ab4210b622dad2779a91a98dc44 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:33 +0200 Subject: crypto: aes-generic - drop key expansion routine in favor of library version Drop aes-generic's version of crypto_aes_expand_key(), and switch to the key expansion routine provided by the AES library. AES key expansion is not performance critical, and it is better to have a single version shared by all AES implementations. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/aes_generic.c | 153 +-------------------------------------------------- 2 files changed, 3 insertions(+), 151 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index df6f0be6..80ea1186 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1072,6 +1072,7 @@ config CRYPTO_LIB_AES config CRYPTO_AES tristate "AES cipher algorithms" select CRYPTO_ALGAPI + select CRYPTO_LIB_AES help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. 
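Before the aes_generic.c hunk below, it is worth seeing the replacement from the caller's side. A minimal sketch (the helper is hypothetical; aes_expandkey() and aes_encrypt() come from <crypto/aes.h> once CRYPTO_LIB_AES is selected):

#include <crypto/aes.h>
#include <linux/string.h>

/* Hypothetical one-block helper built on the AES library. */
static int aes_demo_one_block(const u8 *key, unsigned int key_len,
			      const u8 in[AES_BLOCK_SIZE],
			      u8 out[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_expandkey(&ctx, key, key_len); /* -EINVAL on a bad key_len */
	if (err)
		return err;

	aes_encrypt(&ctx, out, in);
	memzero_explicit(&ctx, sizeof(ctx)); /* scrub the round keys */
	return 0;
}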
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 3aa4a715..426deb43 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1125,155 +1125,6 @@ EXPORT_SYMBOL_GPL(crypto_fl_tab); EXPORT_SYMBOL_GPL(crypto_it_tab); EXPORT_SYMBOL_GPL(crypto_il_tab); -/* initialise the key schedule from the user supplied key */ - -#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b) - -#define imix_col(y, x) do { \ - u = star_x(x); \ - v = star_x(u); \ - w = star_x(v); \ - t = w ^ (x); \ - (y) = u ^ v ^ w; \ - (y) ^= ror32(u ^ t, 8) ^ \ - ror32(v ^ t, 16) ^ \ - ror32(t, 24); \ -} while (0) - -#define ls_box(x) \ - crypto_fl_tab[0][byte(x, 0)] ^ \ - crypto_fl_tab[1][byte(x, 1)] ^ \ - crypto_fl_tab[2][byte(x, 2)] ^ \ - crypto_fl_tab[3][byte(x, 3)] - -#define loop4(i) do { \ - t = ror32(t, 8); \ - t = ls_box(t) ^ rco_tab[i]; \ - t ^= ctx->key_enc[4 * i]; \ - ctx->key_enc[4 * i + 4] = t; \ - t ^= ctx->key_enc[4 * i + 1]; \ - ctx->key_enc[4 * i + 5] = t; \ - t ^= ctx->key_enc[4 * i + 2]; \ - ctx->key_enc[4 * i + 6] = t; \ - t ^= ctx->key_enc[4 * i + 3]; \ - ctx->key_enc[4 * i + 7] = t; \ -} while (0) - -#define loop6(i) do { \ - t = ror32(t, 8); \ - t = ls_box(t) ^ rco_tab[i]; \ - t ^= ctx->key_enc[6 * i]; \ - ctx->key_enc[6 * i + 6] = t; \ - t ^= ctx->key_enc[6 * i + 1]; \ - ctx->key_enc[6 * i + 7] = t; \ - t ^= ctx->key_enc[6 * i + 2]; \ - ctx->key_enc[6 * i + 8] = t; \ - t ^= ctx->key_enc[6 * i + 3]; \ - ctx->key_enc[6 * i + 9] = t; \ - t ^= ctx->key_enc[6 * i + 4]; \ - ctx->key_enc[6 * i + 10] = t; \ - t ^= ctx->key_enc[6 * i + 5]; \ - ctx->key_enc[6 * i + 11] = t; \ -} while (0) - -#define loop8tophalf(i) do { \ - t = ror32(t, 8); \ - t = ls_box(t) ^ rco_tab[i]; \ - t ^= ctx->key_enc[8 * i]; \ - ctx->key_enc[8 * i + 8] = t; \ - t ^= ctx->key_enc[8 * i + 1]; \ - ctx->key_enc[8 * i + 9] = t; \ - t ^= ctx->key_enc[8 * i + 2]; \ - ctx->key_enc[8 * i + 10] = t; \ - t ^= ctx->key_enc[8 * i + 3]; \ - ctx->key_enc[8 * i + 11] = t; \ -} while (0) - -#define loop8(i) do { \ - loop8tophalf(i); \ - t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ - ctx->key_enc[8 * i + 12] = t; \ - t ^= ctx->key_enc[8 * i + 5]; \ - ctx->key_enc[8 * i + 13] = t; \ - t ^= ctx->key_enc[8 * i + 6]; \ - ctx->key_enc[8 * i + 14] = t; \ - t ^= ctx->key_enc[8 * i + 7]; \ - ctx->key_enc[8 * i + 15] = t; \ -} while (0) - -/** - * crypto_aes_expand_key - Expands the AES key as described in FIPS-197 - * @ctx: The location where the computed key will be stored. - * @in_key: The supplied key. - * @key_len: The length of the supplied key. - * - * Returns 0 on success. The function fails only if an invalid key size (or - * pointer) is supplied. - * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes - * key schedule plus a 16 bytes key which is used before the first round). - * The decryption key is prepared for the "Equivalent Inverse Cipher" as - * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is - * for the initial combination, the second slot for the first round and so on. 
- */ -int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, - unsigned int key_len) -{ - u32 i, t, u, v, w, j; - - if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && - key_len != AES_KEYSIZE_256) - return -EINVAL; - - ctx->key_length = key_len; - - ctx->key_enc[0] = get_unaligned_le32(in_key); - ctx->key_enc[1] = get_unaligned_le32(in_key + 4); - ctx->key_enc[2] = get_unaligned_le32(in_key + 8); - ctx->key_enc[3] = get_unaligned_le32(in_key + 12); - - ctx->key_dec[key_len + 24] = ctx->key_enc[0]; - ctx->key_dec[key_len + 25] = ctx->key_enc[1]; - ctx->key_dec[key_len + 26] = ctx->key_enc[2]; - ctx->key_dec[key_len + 27] = ctx->key_enc[3]; - - switch (key_len) { - case AES_KEYSIZE_128: - t = ctx->key_enc[3]; - for (i = 0; i < 10; ++i) - loop4(i); - break; - - case AES_KEYSIZE_192: - ctx->key_enc[4] = get_unaligned_le32(in_key + 16); - t = ctx->key_enc[5] = get_unaligned_le32(in_key + 20); - for (i = 0; i < 8; ++i) - loop6(i); - break; - - case AES_KEYSIZE_256: - ctx->key_enc[4] = get_unaligned_le32(in_key + 16); - ctx->key_enc[5] = get_unaligned_le32(in_key + 20); - ctx->key_enc[6] = get_unaligned_le32(in_key + 24); - t = ctx->key_enc[7] = get_unaligned_le32(in_key + 28); - for (i = 0; i < 6; ++i) - loop8(i); - loop8tophalf(i); - break; - } - - ctx->key_dec[0] = ctx->key_enc[key_len + 24]; - ctx->key_dec[1] = ctx->key_enc[key_len + 25]; - ctx->key_dec[2] = ctx->key_enc[key_len + 26]; - ctx->key_dec[3] = ctx->key_enc[key_len + 27]; - - for (i = 4; i < key_len + 24; ++i) { - j = key_len + 24 - (i & ~3) + (i & 3); - imix_col(ctx->key_dec[j], ctx->key_enc[i]); - } - return 0; -} -EXPORT_SYMBOL_GPL(crypto_aes_expand_key); - /** * crypto_aes_set_key - Set the AES key. * @tfm: The %crypto_tfm that is used in the context. @@ -1281,7 +1132,7 @@ EXPORT_SYMBOL_GPL(crypto_aes_expand_key); * @key_len: The size of the key. * * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm - * is set. The function uses crypto_aes_expand_key() to expand the key. + * is set. The function uses aes_expandkey() to expand the key. * &crypto_aes_ctx _must_ be the private data embedded in @tfm which is * retrieved with crypto_tfm_ctx(). */ @@ -1292,7 +1143,7 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, u32 *flags = &tfm->crt_flags; int ret; - ret = crypto_aes_expand_key(ctx, in_key, key_len); + ret = aes_expandkey(ctx, in_key, key_len); if (!ret) return 0; -- cgit v1.2.3 From 23013696179de41a1f86a99288bcb19460be82d4 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 2 Jul 2019 21:41:45 +0200 Subject: crypto: aes-generic - unexport last-round AES tables The versions of the AES lookup tables that are only used during the last round are never used outside of the driver, so there is no need to export their symbols.
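Background on why dropping the exports is safe (a side note, not part of the patch): the last-round tables carry no MixColumns factor — lane i of crypto_fl_tab/crypto_il_tab holds just the forward/inverse S-box byte shifted up by 8*i bits, which is why the hunks below open with 0x00000063, 0x0000007c, ..., the first S-box bytes zero-extended. A standalone sketch of that identity, reusing the leading __aesti_sbox bytes quoted earlier in the series:

#include <stdio.h>
#include <stdint.h>

/* First four AES S-box bytes, as in the __aesti_sbox table above. */
static const uint8_t sbox_head[4] = { 0x63, 0x7c, 0x77, 0x7b };

int main(void)
{
	/*
	 * crypto_fl_tab[i][x] == (uint32_t)sbox[x] << (8 * i); the
	 * last-round tables are plain per-lane S-box lookups, so only
	 * the combined tables crypto_ft_tab/crypto_it_tab stay exported.
	 */
	for (int i = 0; i < 4; i++)
		for (int x = 0; x < 4; x++)
			printf("crypto_fl_tab[%d][%d] == 0x%08x\n",
			       i, x, (uint32_t)sbox_head[x] << (8 * i));
	return 0;
}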
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 426deb43..71a5c190 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -328,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = { } }; -__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = { +static const u32 crypto_fl_tab[4][256] ____cacheline_aligned = { { 0x00000063, 0x0000007c, 0x00000077, 0x0000007b, 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5, @@ -856,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = { } }; -__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = { +static const u32 crypto_il_tab[4][256] ____cacheline_aligned = { { 0x00000052, 0x00000009, 0x0000006a, 0x000000d5, 0x00000030, 0x00000036, 0x000000a5, 0x00000038, @@ -1121,9 +1121,7 @@ __visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = { }; EXPORT_SYMBOL_GPL(crypto_ft_tab); -EXPORT_SYMBOL_GPL(crypto_fl_tab); EXPORT_SYMBOL_GPL(crypto_it_tab); -EXPORT_SYMBOL_GPL(crypto_il_tab); /** * crypto_aes_set_key - Set the AES key. -- cgit v1.2.3 From a20cf9d5e7b44b7d1c5e8ee3adb61c8628d30d1e Mon Sep 17 00:00:00 2001 From: Hannah Pan Date: Tue, 2 Jul 2019 15:16:02 -0700 Subject: crypto: testmgr - add tests for lzo-rle Add self-tests for the lzo-rle algorithm. Signed-off-by: Hannah Pan Signed-off-by: Herbert Xu --- crypto/testmgr.c | 10 +++++++ crypto/testmgr.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 8ba1e75c..5fe90ea4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4740,6 +4740,16 @@ static const struct alg_test_desc alg_test_descs[] = { .decomp = __VECS(lzo_decomp_tv_template) } } + }, { + .alg = "lzo-rle", + .test = alg_test_comp, + .fips_allowed = 1, + .suite = { + .comp = { + .comp = __VECS(lzorle_comp_tv_template), + .decomp = __VECS(lzorle_decomp_tv_template) + } + } }, { .alg = "md4", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 073bd2ef..b743ca6d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -32454,6 +32454,86 @@ static const struct comp_testvec lzo_decomp_tv_template[] = { }, }; +static const struct comp_testvec lzorle_comp_tv_template[] = { + { + .inlen = 70, + .outlen = 59, + .input = "Join us now and share the software " + "Join us now and share the software ", + .output = "\x11\x01\x00\x0d\x4a\x6f\x69\x6e" + "\x20\x75\x73\x20\x6e\x6f\x77\x20" + "\x61\x6e\x64\x20\x73\x68\x61\x72" + "\x65\x20\x74\x68\x65\x20\x73\x6f" + "\x66\x74\x77\x70\x01\x32\x88\x00" + "\x0c\x65\x20\x74\x68\x65\x20\x73" + "\x6f\x66\x74\x77\x61\x72\x65\x20" + "\x11\x00\x00", + }, { + .inlen = 159, + .outlen = 133, + .input = "This document describes a compression method based on the LZO " + "compression algorithm. 
This document defines the application of " + "the LZO algorithm used in UBIFS.", + .output = "\x11\x01\x00\x2c\x54\x68\x69\x73" + "\x20\x64\x6f\x63\x75\x6d\x65\x6e" + "\x74\x20\x64\x65\x73\x63\x72\x69" + "\x62\x65\x73\x20\x61\x20\x63\x6f" + "\x6d\x70\x72\x65\x73\x73\x69\x6f" + "\x6e\x20\x6d\x65\x74\x68\x6f\x64" + "\x20\x62\x61\x73\x65\x64\x20\x6f" + "\x6e\x20\x74\x68\x65\x20\x4c\x5a" + "\x4f\x20\x2a\x8c\x00\x09\x61\x6c" + "\x67\x6f\x72\x69\x74\x68\x6d\x2e" + "\x20\x20\x2e\x54\x01\x03\x66\x69" + "\x6e\x65\x73\x20\x74\x06\x05\x61" + "\x70\x70\x6c\x69\x63\x61\x74\x76" + "\x0a\x6f\x66\x88\x02\x60\x09\x27" + "\xf0\x00\x0c\x20\x75\x73\x65\x64" + "\x20\x69\x6e\x20\x55\x42\x49\x46" + "\x53\x2e\x11\x00\x00", + }, +}; + +static const struct comp_testvec lzorle_decomp_tv_template[] = { + { + .inlen = 133, + .outlen = 159, + .input = "\x00\x2b\x54\x68\x69\x73\x20\x64" + "\x6f\x63\x75\x6d\x65\x6e\x74\x20" + "\x64\x65\x73\x63\x72\x69\x62\x65" + "\x73\x20\x61\x20\x63\x6f\x6d\x70" + "\x72\x65\x73\x73\x69\x6f\x6e\x20" + "\x6d\x65\x74\x68\x6f\x64\x20\x62" + "\x61\x73\x65\x64\x20\x6f\x6e\x20" + "\x74\x68\x65\x20\x4c\x5a\x4f\x2b" + "\x8c\x00\x0d\x61\x6c\x67\x6f\x72" + "\x69\x74\x68\x6d\x2e\x20\x20\x54" + "\x68\x69\x73\x2a\x54\x01\x02\x66" + "\x69\x6e\x65\x73\x94\x06\x05\x61" + "\x70\x70\x6c\x69\x63\x61\x74\x76" + "\x0a\x6f\x66\x88\x02\x60\x09\x27" + "\xf0\x00\x0c\x20\x75\x73\x65\x64" + "\x20\x69\x6e\x20\x55\x42\x49\x46" + "\x53\x2e\x11\x00\x00", + .output = "This document describes a compression method based on the LZO " + "compression algorithm. This document defines the application of " + "the LZO algorithm used in UBIFS.", + }, { + .inlen = 59, + .outlen = 70, + .input = "\x11\x01\x00\x0d\x4a\x6f\x69\x6e" + "\x20\x75\x73\x20\x6e\x6f\x77\x20" + "\x61\x6e\x64\x20\x73\x68\x61\x72" + "\x65\x20\x74\x68\x65\x20\x73\x6f" + "\x66\x74\x77\x70\x01\x32\x88\x00" + "\x0c\x65\x20\x74\x68\x65\x20\x73" + "\x6f\x66\x74\x77\x61\x72\x65\x20" + "\x11\x00\x00", + .output = "Join us now and share the software " + "Join us now and share the software ", + }, +}; + /* * Michael MIC test vectors from IEEE 802.11i */ -- cgit v1.2.3 From 79f40edaefd4303f647356869c64d863b0d4ed03 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:06 +0200 Subject: crypto: morus - remove generic and x86 implementations MORUS was not selected as a winner in the CAESAR competition, which is not surprising since it is considered to be cryptographically broken [0]. (Note that this is not an implementation defect, but a flaw in the underlying algorithm). Since it is unlikely to be in use currently, let's remove it before we're stuck with it. [0] https://eprint.iacr.org/2019/172.pdf Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 56 -- crypto/Makefile | 2 - crypto/morus1280.c | 542 ----------------- crypto/morus640.c | 533 ---------------- crypto/testmgr.c | 12 - crypto/testmgr.h | 1707 ---------------------------------------------------- 6 files changed, 2852 deletions(-) delete mode 100644 crypto/morus1280.c delete mode 100644 crypto/morus640.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 80ea1186..160dc1a9 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -344,62 +344,6 @@ config CRYPTO_AEGIS256_AESNI_SSE2 help AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm. -config CRYPTO_MORUS640 - tristate "MORUS-640 AEAD algorithm" - select CRYPTO_AEAD - help - Support for the MORUS-640 dedicated AEAD algorithm. 
- -config CRYPTO_MORUS640_GLUE - tristate - depends on X86 - select CRYPTO_AEAD - select CRYPTO_SIMD - help - Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD - algorithm. - -config CRYPTO_MORUS640_SSE2 - tristate "MORUS-640 AEAD algorithm (x86_64 SSE2 implementation)" - depends on X86 && 64BIT - select CRYPTO_AEAD - select CRYPTO_MORUS640_GLUE - help - SSE2 implementation of the MORUS-640 dedicated AEAD algorithm. - -config CRYPTO_MORUS1280 - tristate "MORUS-1280 AEAD algorithm" - select CRYPTO_AEAD - help - Support for the MORUS-1280 dedicated AEAD algorithm. - -config CRYPTO_MORUS1280_GLUE - tristate - depends on X86 - select CRYPTO_AEAD - select CRYPTO_SIMD - help - Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD - algorithm. - -config CRYPTO_MORUS1280_SSE2 - tristate "MORUS-1280 AEAD algorithm (x86_64 SSE2 implementation)" - depends on X86 && 64BIT - select CRYPTO_AEAD - select CRYPTO_MORUS1280_GLUE - help - SSE2 optimizedimplementation of the MORUS-1280 dedicated AEAD - algorithm. - -config CRYPTO_MORUS1280_AVX2 - tristate "MORUS-1280 AEAD algorithm (x86_64 AVX2 implementation)" - depends on X86 && 64BIT - select CRYPTO_AEAD - select CRYPTO_MORUS1280_GLUE - help - AVX2 optimized implementation of the MORUS-1280 dedicated AEAD - algorithm. - config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD diff --git a/crypto/Makefile b/crypto/Makefile index 9479e1a4..4bc1951d 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -92,8 +92,6 @@ obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o -obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o -obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/morus1280.c b/crypto/morus1280.c deleted file mode 100644 index f8734c65..00000000 --- a/crypto/morus1280.c +++ /dev/null @@ -1,542 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The MORUS-1280 Authenticated-Encryption Algorithm - * - * Copyright (c) 2016-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MORUS1280_WORD_SIZE 8 -#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE) -#define MORUS1280_BLOCK_ALIGN (__alignof__(__le64)) -#define MORUS1280_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS1280_BLOCK_ALIGN) - -struct morus1280_block { - u64 words[MORUS_BLOCK_WORDS]; -}; - -union morus1280_block_in { - __le64 words[MORUS_BLOCK_WORDS]; - u8 bytes[MORUS1280_BLOCK_SIZE]; -}; - -struct morus1280_state { - struct morus1280_block s[MORUS_STATE_BLOCKS]; -}; - -struct morus1280_ctx { - struct morus1280_block key; -}; - -struct morus1280_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct morus1280_state *state, - u8 *dst, const u8 *src, unsigned int size); -}; - -static const struct morus1280_block crypto_morus1280_const[1] = { - { .words = { - U64_C(0x0d08050302010100), - U64_C(0x6279e99059372215), - U64_C(0xf12fc26d55183ddb), - U64_C(0xdd28b57342311120), - } }, -}; - -static void crypto_morus1280_round(struct morus1280_block *b0, - struct morus1280_block *b1, - struct morus1280_block *b2, - struct morus1280_block *b3, - struct morus1280_block *b4, - const struct morus1280_block *m, - unsigned int b, unsigned int w) -{ - unsigned int i; - struct morus1280_block tmp; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - b0->words[i] ^= b1->words[i] & b2->words[i]; - b0->words[i] ^= b3->words[i]; - b0->words[i] ^= m->words[i]; - b0->words[i] = rol64(b0->words[i], b); - } - - tmp = *b3; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i]; -} - -static void crypto_morus1280_update(struct morus1280_state *state, - const struct morus1280_block *m) -{ - static const struct morus1280_block z = {}; - - struct morus1280_block *s = state->s; - - crypto_morus1280_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 13, 1); - crypto_morus1280_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 46, 2); - crypto_morus1280_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 38, 3); - crypto_morus1280_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 7, 2); - crypto_morus1280_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 4, 1); -} - -static void crypto_morus1280_load_a(struct morus1280_block *dst, const u8 *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - dst->words[i] = le64_to_cpu(*(const __le64 *)src); - src += MORUS1280_WORD_SIZE; - } -} - -static void crypto_morus1280_load_u(struct morus1280_block *dst, const u8 *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - dst->words[i] = get_unaligned_le64(src); - src += MORUS1280_WORD_SIZE; - } -} - -static void crypto_morus1280_load(struct morus1280_block *dst, const u8 *src) -{ - if (MORUS1280_ALIGNED(src)) - crypto_morus1280_load_a(dst, src); - else - crypto_morus1280_load_u(dst, src); -} - -static void crypto_morus1280_store_a(u8 *dst, const struct morus1280_block *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - *(__le64 *)dst = cpu_to_le64(src->words[i]); - dst += MORUS1280_WORD_SIZE; - } -} - -static void crypto_morus1280_store_u(u8 *dst, const struct morus1280_block *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - put_unaligned_le64(src->words[i], dst); - dst += MORUS1280_WORD_SIZE; - } -} - -static void crypto_morus1280_store(u8 *dst, const struct morus1280_block *src) -{ - if (MORUS1280_ALIGNED(dst)) - crypto_morus1280_store_a(dst, 
src); - else - crypto_morus1280_store_u(dst, src); -} - -static void crypto_morus1280_ad(struct morus1280_state *state, const u8 *src, - unsigned int size) -{ - struct morus1280_block m; - - if (MORUS1280_ALIGNED(src)) { - while (size >= MORUS1280_BLOCK_SIZE) { - crypto_morus1280_load_a(&m, src); - crypto_morus1280_update(state, &m); - - size -= MORUS1280_BLOCK_SIZE; - src += MORUS1280_BLOCK_SIZE; - } - } else { - while (size >= MORUS1280_BLOCK_SIZE) { - crypto_morus1280_load_u(&m, src); - crypto_morus1280_update(state, &m); - - size -= MORUS1280_BLOCK_SIZE; - src += MORUS1280_BLOCK_SIZE; - } - } -} - -static void crypto_morus1280_core(const struct morus1280_state *state, - struct morus1280_block *blk) -{ - unsigned int i; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i]; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - blk->words[i] ^= state->s[0].words[i]; - blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i]; - } -} - -static void crypto_morus1280_encrypt_chunk(struct morus1280_state *state, - u8 *dst, const u8 *src, - unsigned int size) -{ - struct morus1280_block c, m; - - if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) { - while (size >= MORUS1280_BLOCK_SIZE) { - crypto_morus1280_load_a(&m, src); - c = m; - crypto_morus1280_core(state, &c); - crypto_morus1280_store_a(dst, &c); - crypto_morus1280_update(state, &m); - - src += MORUS1280_BLOCK_SIZE; - dst += MORUS1280_BLOCK_SIZE; - size -= MORUS1280_BLOCK_SIZE; - } - } else { - while (size >= MORUS1280_BLOCK_SIZE) { - crypto_morus1280_load_u(&m, src); - c = m; - crypto_morus1280_core(state, &c); - crypto_morus1280_store_u(dst, &c); - crypto_morus1280_update(state, &m); - - src += MORUS1280_BLOCK_SIZE; - dst += MORUS1280_BLOCK_SIZE; - size -= MORUS1280_BLOCK_SIZE; - } - } - - if (size > 0) { - union morus1280_block_in tail; - - memcpy(tail.bytes, src, size); - memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size); - - crypto_morus1280_load_a(&m, tail.bytes); - c = m; - crypto_morus1280_core(state, &c); - crypto_morus1280_store_a(tail.bytes, &c); - crypto_morus1280_update(state, &m); - - memcpy(dst, tail.bytes, size); - } -} - -static void crypto_morus1280_decrypt_chunk(struct morus1280_state *state, - u8 *dst, const u8 *src, - unsigned int size) -{ - struct morus1280_block m; - - if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) { - while (size >= MORUS1280_BLOCK_SIZE) { - crypto_morus1280_load_a(&m, src); - crypto_morus1280_core(state, &m); - crypto_morus1280_store_a(dst, &m); - crypto_morus1280_update(state, &m); - - src += MORUS1280_BLOCK_SIZE; - dst += MORUS1280_BLOCK_SIZE; - size -= MORUS1280_BLOCK_SIZE; - } - } else { - while (size >= MORUS1280_BLOCK_SIZE) { - crypto_morus1280_load_u(&m, src); - crypto_morus1280_core(state, &m); - crypto_morus1280_store_u(dst, &m); - crypto_morus1280_update(state, &m); - - src += MORUS1280_BLOCK_SIZE; - dst += MORUS1280_BLOCK_SIZE; - size -= MORUS1280_BLOCK_SIZE; - } - } - - if (size > 0) { - union morus1280_block_in tail; - - memcpy(tail.bytes, src, size); - memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size); - - crypto_morus1280_load_a(&m, tail.bytes); - crypto_morus1280_core(state, &m); - crypto_morus1280_store_a(tail.bytes, &m); - memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size); - crypto_morus1280_load_a(&m, tail.bytes); - crypto_morus1280_update(state, &m); - - memcpy(dst, tail.bytes, size); - } -} - -static void crypto_morus1280_init(struct morus1280_state *state, - const struct morus1280_block 
*key, - const u8 *iv) -{ - static const struct morus1280_block z = {}; - - union morus1280_block_in tmp; - unsigned int i; - - memcpy(tmp.bytes, iv, MORUS_NONCE_SIZE); - memset(tmp.bytes + MORUS_NONCE_SIZE, 0, - MORUS1280_BLOCK_SIZE - MORUS_NONCE_SIZE); - - crypto_morus1280_load(&state->s[0], tmp.bytes); - state->s[1] = *key; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - state->s[2].words[i] = U64_C(0xFFFFFFFFFFFFFFFF); - state->s[3] = z; - state->s[4] = crypto_morus1280_const[0]; - - for (i = 0; i < 16; i++) - crypto_morus1280_update(state, &z); - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - state->s[1].words[i] ^= key->words[i]; -} - -static void crypto_morus1280_process_ad(struct morus1280_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - struct morus1280_block m; - union morus1280_block_in buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= MORUS1280_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = MORUS1280_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - - crypto_morus1280_load_a(&m, buf.bytes); - crypto_morus1280_update(state, &m); - - pos = 0; - left -= fill; - src += fill; - } - - crypto_morus1280_ad(state, src, left); - src += left & ~(MORUS1280_BLOCK_SIZE - 1); - left &= MORUS1280_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos); - - crypto_morus1280_load_a(&m, buf.bytes); - crypto_morus1280_update(state, &m); - } -} - -static void crypto_morus1280_process_crypt(struct morus1280_state *state, - struct aead_request *req, - const struct morus1280_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_morus1280_final(struct morus1280_state *state, - struct morus1280_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - struct morus1280_block tmp; - unsigned int i; - - tmp.words[0] = assoclen * 8; - tmp.words[1] = cryptlen * 8; - tmp.words[2] = 0; - tmp.words[3] = 0; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - state->s[4].words[i] ^= state->s[0].words[i]; - - for (i = 0; i < 10; i++) - crypto_morus1280_update(state, &tmp); - - crypto_morus1280_core(state, tag_xor); -} - -static int crypto_morus1280_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct morus1280_ctx *ctx = crypto_aead_ctx(aead); - union morus1280_block_in tmp; - - if (keylen == MORUS1280_BLOCK_SIZE) - crypto_morus1280_load(&ctx->key, key); - else if (keylen == MORUS1280_BLOCK_SIZE / 2) { - memcpy(tmp.bytes, key, keylen); - memcpy(tmp.bytes + keylen, key, keylen); - - crypto_morus1280_load(&ctx->key, tmp.bytes); - } else { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return 0; -} - -static int crypto_morus1280_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - return (authsize <= 
MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL; -} - -static void crypto_morus1280_crypt(struct aead_request *req, - struct morus1280_block *tag_xor, - unsigned int cryptlen, - const struct morus1280_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); - struct morus1280_state state; - - crypto_morus1280_init(&state, &ctx->key, req->iv); - crypto_morus1280_process_ad(&state, req->src, req->assoclen); - crypto_morus1280_process_crypt(&state, req, ops); - crypto_morus1280_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_morus1280_encrypt(struct aead_request *req) -{ - static const struct morus1280_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_morus1280_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus1280_block tag = {}; - union morus1280_block_in tag_out; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_morus1280_crypt(req, &tag, cryptlen, &ops); - crypto_morus1280_store(tag_out.bytes, &tag); - - scatterwalk_map_and_copy(tag_out.bytes, req->dst, - req->assoclen + cryptlen, authsize, 1); - return 0; -} - -static int crypto_morus1280_decrypt(struct aead_request *req) -{ - static const struct morus1280_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_morus1280_decrypt_chunk, - }; - static const u8 zeros[MORUS1280_BLOCK_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union morus1280_block_in tag_in; - struct morus1280_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag_in.bytes, req->src, - req->assoclen + cryptlen, authsize, 0); - - crypto_morus1280_load(&tag, tag_in.bytes); - crypto_morus1280_crypt(req, &tag, cryptlen, &ops); - crypto_morus1280_store(tag_in.bytes, &tag); - - return crypto_memneq(tag_in.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static int crypto_morus1280_init_tfm(struct crypto_aead *tfm) -{ - return 0; -} - -static void crypto_morus1280_exit_tfm(struct crypto_aead *tfm) -{ -} - -static struct aead_alg crypto_morus1280_alg = { - .setkey = crypto_morus1280_setkey, - .setauthsize = crypto_morus1280_setauthsize, - .encrypt = crypto_morus1280_encrypt, - .decrypt = crypto_morus1280_decrypt, - .init = crypto_morus1280_init_tfm, - .exit = crypto_morus1280_exit_tfm, - - .ivsize = MORUS_NONCE_SIZE, - .maxauthsize = MORUS_MAX_AUTH_SIZE, - .chunksize = MORUS1280_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct morus1280_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "morus1280", - .cra_driver_name = "morus1280-generic", - - .cra_module = THIS_MODULE, - } -}; - - -static int __init crypto_morus1280_module_init(void) -{ - return crypto_register_aead(&crypto_morus1280_alg); -} - -static void __exit crypto_morus1280_module_exit(void) -{ - crypto_unregister_aead(&crypto_morus1280_alg); -} - -subsys_initcall(crypto_morus1280_module_init); -module_exit(crypto_morus1280_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("morus1280"); -MODULE_ALIAS_CRYPTO("morus1280-generic"); diff --git a/crypto/morus640.c b/crypto/morus640.c deleted file mode 100644 index ae5aa948..00000000 --- a/crypto/morus640.c +++ /dev/null @@ -1,533 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The MORUS-640 Authenticated-Encryption Algorithm - * - * Copyright (c) 2016-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MORUS640_WORD_SIZE 4 -#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE) -#define MORUS640_BLOCK_ALIGN (__alignof__(__le32)) -#define MORUS640_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS640_BLOCK_ALIGN) - -struct morus640_block { - u32 words[MORUS_BLOCK_WORDS]; -}; - -union morus640_block_in { - __le32 words[MORUS_BLOCK_WORDS]; - u8 bytes[MORUS640_BLOCK_SIZE]; -}; - -struct morus640_state { - struct morus640_block s[MORUS_STATE_BLOCKS]; -}; - -struct morus640_ctx { - struct morus640_block key; -}; - -struct morus640_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct morus640_state *state, - u8 *dst, const u8 *src, unsigned int size); -}; - -static const struct morus640_block crypto_morus640_const[2] = { - { .words = { - U32_C(0x02010100), - U32_C(0x0d080503), - U32_C(0x59372215), - U32_C(0x6279e990), - } }, - { .words = { - U32_C(0x55183ddb), - U32_C(0xf12fc26d), - U32_C(0x42311120), - U32_C(0xdd28b573), - } }, -}; - -static void crypto_morus640_round(struct morus640_block *b0, - struct morus640_block *b1, - struct morus640_block *b2, - struct morus640_block *b3, - struct morus640_block *b4, - const struct morus640_block *m, - unsigned int b, unsigned int w) -{ - unsigned int i; - struct morus640_block tmp; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - b0->words[i] ^= b1->words[i] & b2->words[i]; - b0->words[i] ^= b3->words[i]; - b0->words[i] ^= m->words[i]; - b0->words[i] = rol32(b0->words[i], b); - } - - tmp = *b3; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i]; -} - -static void crypto_morus640_update(struct morus640_state *state, - const struct 
morus640_block *m) -{ - static const struct morus640_block z = {}; - - struct morus640_block *s = state->s; - - crypto_morus640_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 5, 1); - crypto_morus640_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 31, 2); - crypto_morus640_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 7, 3); - crypto_morus640_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 22, 2); - crypto_morus640_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 13, 1); -} - -static void crypto_morus640_load_a(struct morus640_block *dst, const u8 *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - dst->words[i] = le32_to_cpu(*(const __le32 *)src); - src += MORUS640_WORD_SIZE; - } -} - -static void crypto_morus640_load_u(struct morus640_block *dst, const u8 *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - dst->words[i] = get_unaligned_le32(src); - src += MORUS640_WORD_SIZE; - } -} - -static void crypto_morus640_load(struct morus640_block *dst, const u8 *src) -{ - if (MORUS640_ALIGNED(src)) - crypto_morus640_load_a(dst, src); - else - crypto_morus640_load_u(dst, src); -} - -static void crypto_morus640_store_a(u8 *dst, const struct morus640_block *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - *(__le32 *)dst = cpu_to_le32(src->words[i]); - dst += MORUS640_WORD_SIZE; - } -} - -static void crypto_morus640_store_u(u8 *dst, const struct morus640_block *src) -{ - unsigned int i; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - put_unaligned_le32(src->words[i], dst); - dst += MORUS640_WORD_SIZE; - } -} - -static void crypto_morus640_store(u8 *dst, const struct morus640_block *src) -{ - if (MORUS640_ALIGNED(dst)) - crypto_morus640_store_a(dst, src); - else - crypto_morus640_store_u(dst, src); -} - -static void crypto_morus640_ad(struct morus640_state *state, const u8 *src, - unsigned int size) -{ - struct morus640_block m; - - if (MORUS640_ALIGNED(src)) { - while (size >= MORUS640_BLOCK_SIZE) { - crypto_morus640_load_a(&m, src); - crypto_morus640_update(state, &m); - - size -= MORUS640_BLOCK_SIZE; - src += MORUS640_BLOCK_SIZE; - } - } else { - while (size >= MORUS640_BLOCK_SIZE) { - crypto_morus640_load_u(&m, src); - crypto_morus640_update(state, &m); - - size -= MORUS640_BLOCK_SIZE; - src += MORUS640_BLOCK_SIZE; - } - } -} - -static void crypto_morus640_core(const struct morus640_state *state, - struct morus640_block *blk) -{ - unsigned int i; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i]; - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) { - blk->words[i] ^= state->s[0].words[i]; - blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i]; - } -} - -static void crypto_morus640_encrypt_chunk(struct morus640_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - struct morus640_block c, m; - - if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) { - while (size >= MORUS640_BLOCK_SIZE) { - crypto_morus640_load_a(&m, src); - c = m; - crypto_morus640_core(state, &c); - crypto_morus640_store_a(dst, &c); - crypto_morus640_update(state, &m); - - src += MORUS640_BLOCK_SIZE; - dst += MORUS640_BLOCK_SIZE; - size -= MORUS640_BLOCK_SIZE; - } - } else { - while (size >= MORUS640_BLOCK_SIZE) { - crypto_morus640_load_u(&m, src); - c = m; - crypto_morus640_core(state, &c); - crypto_morus640_store_u(dst, &c); - crypto_morus640_update(state, &m); - - src += MORUS640_BLOCK_SIZE; - dst += MORUS640_BLOCK_SIZE; - size -= MORUS640_BLOCK_SIZE; - } - } - - if (size > 0) { - union morus640_block_in tail; - 
- memcpy(tail.bytes, src, size); - memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); - - crypto_morus640_load_a(&m, tail.bytes); - c = m; - crypto_morus640_core(state, &c); - crypto_morus640_store_a(tail.bytes, &c); - crypto_morus640_update(state, &m); - - memcpy(dst, tail.bytes, size); - } -} - -static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - struct morus640_block m; - - if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) { - while (size >= MORUS640_BLOCK_SIZE) { - crypto_morus640_load_a(&m, src); - crypto_morus640_core(state, &m); - crypto_morus640_store_a(dst, &m); - crypto_morus640_update(state, &m); - - src += MORUS640_BLOCK_SIZE; - dst += MORUS640_BLOCK_SIZE; - size -= MORUS640_BLOCK_SIZE; - } - } else { - while (size >= MORUS640_BLOCK_SIZE) { - crypto_morus640_load_u(&m, src); - crypto_morus640_core(state, &m); - crypto_morus640_store_u(dst, &m); - crypto_morus640_update(state, &m); - - src += MORUS640_BLOCK_SIZE; - dst += MORUS640_BLOCK_SIZE; - size -= MORUS640_BLOCK_SIZE; - } - } - - if (size > 0) { - union morus640_block_in tail; - - memcpy(tail.bytes, src, size); - memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); - - crypto_morus640_load_a(&m, tail.bytes); - crypto_morus640_core(state, &m); - crypto_morus640_store_a(tail.bytes, &m); - memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); - crypto_morus640_load_a(&m, tail.bytes); - crypto_morus640_update(state, &m); - - memcpy(dst, tail.bytes, size); - } -} - -static void crypto_morus640_init(struct morus640_state *state, - const struct morus640_block *key, - const u8 *iv) -{ - static const struct morus640_block z = {}; - - unsigned int i; - - crypto_morus640_load(&state->s[0], iv); - state->s[1] = *key; - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - state->s[2].words[i] = U32_C(0xFFFFFFFF); - state->s[3] = crypto_morus640_const[0]; - state->s[4] = crypto_morus640_const[1]; - - for (i = 0; i < 16; i++) - crypto_morus640_update(state, &z); - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - state->s[1].words[i] ^= key->words[i]; -} - -static void crypto_morus640_process_ad(struct morus640_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - struct morus640_block m; - union morus640_block_in buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= MORUS640_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = MORUS640_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - - crypto_morus640_load_a(&m, buf.bytes); - crypto_morus640_update(state, &m); - - pos = 0; - left -= fill; - src += fill; - } - - crypto_morus640_ad(state, src, left); - src += left & ~(MORUS640_BLOCK_SIZE - 1); - left &= MORUS640_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos); - - crypto_morus640_load_a(&m, buf.bytes); - crypto_morus640_update(state, &m); - } -} - -static void crypto_morus640_process_crypt(struct morus640_state *state, - struct aead_request *req, - const struct morus640_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - 
while (walk.nbytes) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_morus640_final(struct morus640_state *state, - struct morus640_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - struct morus640_block tmp; - unsigned int i; - - tmp.words[0] = lower_32_bits(assoclen * 8); - tmp.words[1] = upper_32_bits(assoclen * 8); - tmp.words[2] = lower_32_bits(cryptlen * 8); - tmp.words[3] = upper_32_bits(cryptlen * 8); - - for (i = 0; i < MORUS_BLOCK_WORDS; i++) - state->s[4].words[i] ^= state->s[0].words[i]; - - for (i = 0; i < 10; i++) - crypto_morus640_update(state, &tmp); - - crypto_morus640_core(state, tag_xor); -} - -static int crypto_morus640_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct morus640_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != MORUS640_BLOCK_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - crypto_morus640_load(&ctx->key, key); - return 0; -} - -static int crypto_morus640_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL; -} - -static void crypto_morus640_crypt(struct aead_request *req, - struct morus640_block *tag_xor, - unsigned int cryptlen, - const struct morus640_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus640_ctx *ctx = crypto_aead_ctx(tfm); - struct morus640_state state; - - crypto_morus640_init(&state, &ctx->key, req->iv); - crypto_morus640_process_ad(&state, req->src, req->assoclen); - crypto_morus640_process_crypt(&state, req, ops); - crypto_morus640_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_morus640_encrypt(struct aead_request *req) -{ - static const struct morus640_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_morus640_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus640_block tag = {}; - union morus640_block_in tag_out; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_morus640_crypt(req, &tag, cryptlen, &ops); - crypto_morus640_store(tag_out.bytes, &tag); - - scatterwalk_map_and_copy(tag_out.bytes, req->dst, - req->assoclen + cryptlen, authsize, 1); - return 0; -} - -static int crypto_morus640_decrypt(struct aead_request *req) -{ - static const struct morus640_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_morus640_decrypt_chunk, - }; - static const u8 zeros[MORUS640_BLOCK_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union morus640_block_in tag_in; - struct morus640_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag_in.bytes, req->src, - req->assoclen + cryptlen, authsize, 0); - - crypto_morus640_load(&tag, tag_in.bytes); - crypto_morus640_crypt(req, &tag, cryptlen, &ops); - crypto_morus640_store(tag_in.bytes, &tag); - - return crypto_memneq(tag_in.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static int crypto_morus640_init_tfm(struct crypto_aead *tfm) -{ - return 0; -} - -static void crypto_morus640_exit_tfm(struct crypto_aead *tfm) -{ -} - -static struct aead_alg crypto_morus640_alg = { - .setkey = crypto_morus640_setkey, - .setauthsize = crypto_morus640_setauthsize, - .encrypt = crypto_morus640_encrypt, - .decrypt = crypto_morus640_decrypt, - .init = crypto_morus640_init_tfm, - .exit = crypto_morus640_exit_tfm, - - .ivsize = MORUS_NONCE_SIZE, - .maxauthsize = MORUS_MAX_AUTH_SIZE, - .chunksize = MORUS640_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct morus640_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "morus640", - .cra_driver_name = "morus640-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_morus640_module_init(void) -{ - return crypto_register_aead(&crypto_morus640_alg); -} - -static void __exit crypto_morus640_module_exit(void) -{ - crypto_unregister_aead(&crypto_morus640_alg); -} - -subsys_initcall(crypto_morus640_module_init); -module_exit(crypto_morus640_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("MORUS-640 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("morus640"); -MODULE_ALIAS_CRYPTO("morus640-generic"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5fe90ea4..6258581a 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4768,18 +4768,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(michael_mic_tv_template) } - }, { - .alg = "morus1280", - .test = alg_test_aead, - .suite = { - .aead = __VECS(morus1280_tv_template) - } - }, { - .alg = "morus640", - .test = alg_test_aead, - .suite = { - .aead = __VECS(morus640_tv_template) - } }, { .alg = "nhpoly1305", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index b743ca6d..fca03fa0 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -20472,1713 +20472,6 @@ static const struct aead_testvec aegis256_tv_template[] = { }, }; -/* - * MORUS-640 test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/morus640128v2/) - */ -static const struct aead_testvec morus640_tv_template[] = { - { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .assoc = "", - .alen = 0, - .ptext = "", - .plen = 0, - .ctext = "\x89\x62\x7d\xf3\x07\x9d\x52\x05" - "\x53\xc3\x04\x60\x93\xb4\x37\x9a", - .clen = 16, - }, { - .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" - "\x80\xda\xb2\x91\xf9\x24\xc2\x06", - .klen = 16, - .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .assoc = "", - .alen = 0, - .ptext = "\x69", - .plen = 1, - .ctext = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78" - "\xb6\x10\x9a\x59\x5f\x61\x37\x70" - "\x09", - .clen = 17, - }, { - .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" - "\x01\xb4\x64\x22\xf3\x48\x85\x0c", - .klen = 16, - .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .assoc = "", - .alen = 0, - .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" - "\x62\x58\xe9\x8f\xef\xa4\x17", - .plen = 15, - .ctext = "\x76\xdd\xb9\x05\x3d\xce\x61\x38" - "\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5" - "\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b" - "\xd4\x6e\xfe\xd9\xc8\x63\x4b", - .clen = 31, - }, { - .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - 
"\x82\x8e\x16\xb4\xed\x6d\x47\x12", - .klen = 16, - .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .assoc = "", - .alen = 0, - .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" - "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97", - .plen = 16, - .ctext = "\xdc\x72\xe8\x14\xfb\x63\xad\x72" - "\x1f\x57\x9a\x1f\x88\x81\xdb\xd6" - "\xc1\x91\x9d\xb9\x25\xc4\x99\x4c" - "\x97\xcd\x8a\x0c\x9d\x68\x00\x1c", - .clen = 32, - }, { - .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .klen = 16, - .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .assoc = "", - .alen = 0, - .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" - "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" - "\x09", - .plen = 17, - .ctext = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82" - "\x0a\xb8\x55\xee\xeb\x73\x4d\x7f" - "\x54\x11\x3a\x8a\x31\xa3\xb5\xf2" - "\xcd\x49\xdb\xf3\xee\x26\xbd\xa2" - "\x0d", - .clen = 33, - }, { - .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", - .klen = 16, - .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .assoc = "", - .alen = 0, - .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" - "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" - "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" - "\x57\x05\x01\x1c\x66\x22\xd3", - .plen = 31, - .ctext = "\x59\xd1\x0f\x6b\xee\x27\x84\x92" - "\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5" - "\x50\x32\xb4\x9a\x2e\x35\x83\x55" - "\x36\x12\x12\xed\xa3\x31\xc5\x30" - "\xa7\xe2\x4a\x6d\x05\x59\x43\x91" - "\x75\xfa\x6c\x17\xc6\x73\xca", - .clen = 47, - }, { - .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", - .klen = 16, - .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .assoc = "", - .alen = 0, - .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" - "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" - "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" - "\x19\x33\xe0\xf4\x40\x81\x72\x28", - .plen = 32, - .ctext = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1" - "\xcf\x50\xb2\x4c\x32\xe1\xa6\x69" - "\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39" - "\x1b\xde\x68\x38\xcc\x27\x52\xc5" - "\xf6\x3e\x74\xea\x66\x5b\x5f\x0c" - "\x65\x9e\x58\xe6\x52\xa2\xfe\x59", - .clen = 48, - }, { - .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", - .klen = 16, - .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .assoc = "\xc5", - .alen = 1, - .ptext = "", - .plen = 0, - .ctext = "\x56\xe7\x24\x52\xdd\x95\x60\x5b" - "\x09\x48\x39\x69\x9c\xb3\x62\x46", - .clen = 16, - }, { - .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" - "\x07\xd1\x90\x8b\xcf\x23\x15\x31", - .klen = 16, - .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76", - .alen = 15, - .ptext = "", - .plen = 0, - .ctext = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01" - "\x13\xe5\x73\x46\x46\xf2\x5c\xe1", - .clen = 16, - }, { - .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" - "\x88\xab\x42\x1c\xc9\x47\xd7\x38", - .klen = 16, - .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", - .alen = 16, - .ptext = "", - .plen = 0, - .ctext = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac" - "\xa9\x21\x45\x0b\x16\x52\xf7\xe1", - .clen = 16, - }, { - .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" - "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", - .klen = 16, - .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .assoc = 
"\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c", - .alen = 17, - .ptext = "", - .plen = 0, - .ctext = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9" - "\xbb\xa8\x62\xad\x0a\xf5\x48\x60", - .clen = 16, - }, { - .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" - "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", - .klen = 16, - .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7", - .alen = 31, - .ptext = "", - .plen = 0, - .ctext = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d" - "\x4b\xef\x75\x16\x0a\x99\xae\x6b", - .clen = 16, - }, { - .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" - "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", - .klen = 16, - .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", - .alen = 32, - .ptext = "", - .plen = 0, - .ctext = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f" - "\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0", - .clen = 16, - }, { - .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" - "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", - .klen = 16, - .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .assoc = "\x31", - .alen = 1, - .ptext = "\x40", - .plen = 1, - .ctext = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38" - "\x01\xfe\x84\x14\x85\xf8\xd1\xe3" - "\x22", - .clen = 17, - }, { - .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" - "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", - .klen = 16, - .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06", - .alen = 15, - .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .plen = 15, - .ctext = "\x77\x32\x61\xeb\xb4\x33\x29\x92" - "\x29\x95\xc5\x8e\x85\x76\xab\xfc" - "\x07\x95\xa7\x44\x74\xf7\x22\xff" - "\xd8\xd8\x36\x3d\x8a\x7f\x9e", - .clen = 31, - }, { - .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", - .klen = 16, - .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .alen = 16, - .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .plen = 16, - .ctext = "\xd8\xfd\x44\x45\xf6\x42\x12\x38" - "\xf2\x0b\xea\x4f\x9e\x11\x61\x07" - "\x48\x67\x98\x18\x9b\xd0\x0c\x59" - "\x67\xa4\x11\xb3\x2b\xd6\xc1\x70", - .clen = 32, - }, { - .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .klen = 16, - .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b", - .alen = 17, - .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .plen = 17, - .ctext = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6" - "\x71\x3a\x00\x9f\x41\x88\xb0\xb2" - "\x71\x83\x85\x5f\xc8\x79\x0a\x99" - "\x99\xdc\x89\x1c\x88\xd2\x3e\xf9" - "\x83", - .clen = 33, - }, { - .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", - .klen = 16, - .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62", - .alen 
= 31, - .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .plen = 31, - .ctext = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06" - "\x5c\x7b\xef\x64\x87\x00\xd1\x37" - "\xa7\x08\xbc\x7f\x8f\x41\x54\xd0" - "\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a" - "\x2d\x21\x30\xf9\x02\xdb\x06\x0c" - "\xf1\x5a\x66\x69\xe0\xca\x83", - .clen = 47, - }, { - .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70", - .klen = 16, - .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", - .alen = 32, - .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .plen = 32, - .ctext = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2" - "\x70\x57\x37\xc5\xc2\x4f\x8d\x14" - "\xc6\xbf\x8b\xec\xf5\x62\x67\xf2" - "\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54" - "\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa" - "\x7a\x00\x2e\x4d\x7f\x31\x2e\x81", - .clen = 48, - }, { - .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76", - .klen = 16, - .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58" - "\x1a", - .alen = 33, - .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" - "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" - "\x75\x73\x20\x30\x59\x54\xb2\xf0" - "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" - "\x8a", - .plen = 65, - .ctext = "\xc7\xca\x26\x61\x57\xee\xa2\xb9" - "\xb1\x37\xde\x95\x06\x90\x11\x08" - "\x4d\x30\x9f\x24\xc0\x56\xb7\xe1" - "\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76" - "\x56\x9a\xb4\x58\xc5\x08\xfc\xb5" - "\xf2\x31\x9b\xc9\xcd\xb3\x64\xdb" - "\x6f\x50\xbf\xf4\x73\x9d\xfb\x6b" - "\xef\x35\x25\x48\xed\xcf\x29\xa8" - "\xac\xc3\xb9\xcb\x61\x8f\x73\x92" - "\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1" - "\xc4", - .clen = 81, - }, { - .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", - .klen = 16, - .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e" - "\x28\xce\x57\x34\xcd\x6e\x84\x4c" - "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1" - "\x96\x41\x0d\x69\xe8\x54\x0a\xc8" - "\x15\x4e\x91\x92\x89\x4b\xb7\x9b" - "\x21", - .alen = 65, - .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac", - .plen = 33, - .ctext = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b" - "\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40" - "\xf3\x78\x63\x34\x89\x79\x39\x6b" - "\x61\x64\x4a\x9a\xfa\x70\xa4\xd3" - "\x54\x0b\xea\x05\xa6\x95\x64\xed" - "\x3d\x69\xa2\x0c\x27\x56\x2f\x34" - "\x66", - .clen = 49, - }, { - .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", - .klen = 16, - .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .alen = 16, - .ptext = 
"\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .plen = 16, - .ctext = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f" - "\x33\x98\x87\xde\x08\xb6\xb6\xae" - "\x3e\xa4\xf8\x19\xf1\x92\x60\x39" - "\xb9\x6b\x3f\xdf\xc8\xcb\x30", - .clen = 31, - }, { - .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .klen = 16, - .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .alen = 16, - .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .plen = 16, - .ctext = "\x74\x7d\x70\x07\xe9\xba\x01\xee" - "\x6c\xc6\x6f\x50\x25\x33\xbe\x50" - "\x17\xb8\x17\x62\xed\x80\xa2\xf5" - "\x03\xde\x85\x71\x5d\x34", - .clen = 30, - }, { - .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .klen = 16, - .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .alen = 16, - .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .plen = 16, - .ctext = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38" - "\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf" - "\xba\xea\xd4\xfa\x3f\x11\x33\x98", - .clen = 24, - }, { - .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .klen = 16, - .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22" - "\x36\xab\xde\xc6\x6d\x32\x70\x17", - .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9" - "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98", - .alen = 16, - .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" - "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a", - .plen = 16, - .ctext = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86" - "\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c" - "\x6e", - .clen = 17, - }, -}; - -/* - * MORUS-1280 test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/morus1280128v2/ and crypto_aead/morus1280256v2/ ) - */ -static const struct aead_testvec morus1280_tv_template[] = { - { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .assoc = "", - .alen = 0, - .ptext = "", - .plen = 0, - .ctext = "\x91\x85\x0f\xf5\x52\x9e\xce\xce" - "\x65\x99\xc7\xbf\xd3\x76\xe8\x98", - .clen = 16, - }, { - .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" - "\x80\xda\xb2\x91\xf9\x24\xc2\x06", - .klen = 16, - .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .assoc = "", - .alen = 0, - .ptext = "\x69", - .plen = 1, - .ctext = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13" - "\x96\xda\x76\x34\x33\x4e\xd5\x39" - "\x73", - .clen = 17, - }, { - .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" - "\x01\xb4\x64\x22\xf3\x48\x85\x0c", - .klen = 16, - .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .assoc = "", - .alen = 0, - .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" - "\x62\x58\xe9\x8f\xef\xa4\x17\x91" - "\xb4\x96\x9f\x6b\xce\x38\xa5\x46" - "\x13\x7d\x64\x93\xd7\x05\xf5", - .plen = 31, - .ctext = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22" - "\x75\x0b\x24\xa6\x0e\xc3\xde\x52" - "\x97\x0b\x64\xd4\xce\x90\x52\xf7" - "\xef\xdb\x6a\x38\xd2\xa8\xa1\x0d" - "\xe0\x61\x33\x24\xc6\x4d\x51\xbc" - "\xa4\x21\x74\xcf\x19\x16\x59", - .clen = 47, - }, { - .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47\x12", - .klen = 
16, - .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .assoc = "", - .alen = 0, - .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" - "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97" - "\xde\x58\xab\xf0\xd3\xd8\x27\x60" - "\xd5\xaa\x43\x6b\xb1\x64\x95\xa4", - .plen = 32, - .ctext = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f" - "\xde\x9f\x77\xb2\xda\x92\x61\x5c" - "\x09\x0b\x2d\x9a\x26\xaa\x1c\x06" - "\xab\x74\xb7\x2b\x95\x5f\x9f\xa1" - "\x9a\xff\x50\xa0\xa2\xff\xc5\xad" - "\x21\x8e\x84\x5c\x12\x61\xb2\xae", - .clen = 48, - }, { - .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .klen = 16, - .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .assoc = "", - .alen = 0, - .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" - "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" - "\x09\x1a\xb7\x74\xd8\x78\xa9\x79" - "\x96\xd8\x22\x43\x8c\xc3\x34\x7b" - "\xc4", - .plen = 33, - .ctext = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17" - "\xc1\x1d\x2a\xdd\x88\x67\xda\x1f" - "\x6d\xe8\x37\x28\x5a\xc1\x5e\x9f" - "\xa6\xec\xc6\x92\x05\x4b\xc0\xa3" - "\x63\xef\x88\xa4\x9b\x0a\x5c\xed" - "\x2b\x6a\xac\x63\x52\xaa\x10\x94" - "\xd0", - .clen = 49, - }, { - .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", - .klen = 16, - .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .assoc = "", - .alen = 0, - .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" - "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" - "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" - "\x57\x05\x01\x1c\x66\x22\xd3\x51" - "\xd3\xdf\x18\xc9\x30\x66\xed\xb1" - "\x96\x58\xd5\x8c\x64\x8c\x7c\xf5" - "\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1" - "\xe6\x16\xa2\xac\xde\x47\x40", - .plen = 63, - .ctext = "\x7d\x61\x1a\x35\x20\xcc\x07\x88" - "\x03\x98\x87\xcf\xc0\x6e\x4d\x19" - "\xe3\xd4\x0b\xfb\x29\x8f\x49\x1a" - "\x3a\x06\x77\xce\x71\x2c\xcd\xdd" - "\xed\xf6\xc9\xbe\xa6\x3b\xb8\xfc" - "\x6c\xbe\x77\xed\x74\x0e\x20\x85" - "\xd0\x65\xde\x24\x6f\xe3\x25\xc5" - "\xdf\x5b\x0f\xbd\x8a\x88\x78\xc9" - "\xe5\x81\x37\xde\x84\x7a\xf6\x84" - "\x99\x7a\x72\x9c\x54\x31\xa1", - .clen = 79, - }, { - .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", - .klen = 16, - .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .assoc = "", - .alen = 0, - .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" - "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" - "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" - "\x19\x33\xe0\xf4\x40\x81\x72\x28" - "\xe1\x8b\x1c\xf8\x91\x78\xff\xaf" - "\xb0\x68\x69\xf2\x27\x35\x91\x84" - "\x2e\x37\x5b\x00\x04\xff\x16\x9c" - "\xb5\x19\x39\xeb\xd9\xcd\x29\x9a", - .plen = 64, - .ctext = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c" - "\xa5\x07\x12\xa7\x12\x39\x60\x66" - "\x30\x81\x4a\x03\x78\x28\x45\x52" - "\xd2\x2b\x24\xfd\x8b\xa5\xb7\x66" - "\x6f\x45\xd7\x3b\x67\x6f\x51\xb9" - "\xc0\x3d\x6c\xca\x1e\xae\xff\xb6" - "\x79\xa9\xe4\x82\x5d\x4c\x2d\xdf" - "\xeb\x71\x40\xc9\x2c\x40\x45\x6d" - "\x73\x77\x01\xf3\x4f\xf3\x9d\x2a" - "\x5d\x57\xa8\xa1\x18\xa2\xad\xcb", - .clen = 80, - }, { - .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", - .klen = 16, - .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .assoc = "\xc5", - .alen = 1, - .ptext = "", - .plen = 0, - .ctext = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e" - "\x89\x3b\x9d\x0f\x83\x1c\x08\xc3", - .clen = 16, - }, { - .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" - "\x07\xd1\x90\x8b\xcf\x23\x15\x31", - .klen = 16, - .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - 
.assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34" - "\xe8\x73\x62\x64\xab\x50\xd0\xda" - "\x6b\x83\x66\xaf\x3e\x27\xc9", - .alen = 31, - .ptext = "", - .plen = 0, - .ctext = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38" - "\x03\x12\xf9\xcc\x9e\x46\x42\x92", - .clen = 16, - }, { - .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" - "\x88\xab\x42\x1c\xc9\x47\xd7\x38", - .klen = 16, - .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b" - "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3" - "\x2d\xb0\x45\x87\x18\x86\x68\xf6", - .alen = 32, - .ptext = "", - .plen = 0, - .ctext = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2" - "\x6d\x65\xe0\x67\x9c\x1d\xa0\xf0", - .clen = 16, - }, { - .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" - "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", - .klen = 16, - .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d" - "\xee\xde\x23\x60\xf2\xe5\x08\xcc" - "\x97", - .alen = 33, - .ptext = "", - .plen = 0, - .ctext = "\x28\x64\x78\x51\x55\xd8\x56\x4a" - "\x58\x3e\xf7\xbe\xee\x21\xfe\x94", - .clen = 16, - }, { - .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" - "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", - .klen = 16, - .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3" - "\xa6\xbf\x31\x93\x60\xcd\xda\x63" - "\x2c\xb1\xaa\x19\xc8\x19\xf8\xeb" - "\x03\xa1\xe8\xbe\x37\x54\xec\xa2" - "\xcd\x2c\x45\x58\xbd\x8e\x80", - .alen = 63, - .ptext = "", - .plen = 0, - .ctext = "\xb3\xa6\x00\x4e\x09\x20\xac\x21" - "\x77\x72\x69\x76\x2d\x36\xe5\xc8", - .clen = 16, - }, { - .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" - "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", - .klen = 16, - .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a" - "\xb4\x6b\x35\xc2\xc1\xdf\xed\x60" - "\x46\xc1\x3e\x7f\x8c\xc2\x0e\x7a" - "\x30\x08\xd0\x5f\xa0\xaa\x0c\x6d" - "\x9c\x2f\xdb\x97\xb8\x15\x69\x01", - .alen = 64, - .ptext = "", - .plen = 0, - .ctext = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd" - "\xe4\xb9\x4a\xaa\x9a\x21\xaa\x14", - .clen = 16, - }, { - .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" - "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", - .klen = 16, - .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .assoc = "\x31", - .alen = 1, - .ptext = "\x40", - .plen = 1, - .ctext = "\x1d\x47\x17\x34\x86\xf5\x54\x1a" - "\x6d\x28\xb8\x5d\x6c\xcf\xa0\xb9" - "\xbf", - .clen = 17, - }, { - .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" - "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", - .klen = 16, - .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a" - "\xe6\x01\xa8\x7e\xca\x10\xdc\x73" - "\xf4\x94\x9f\xc1\x5a\x61\x85", - .alen = 31, - .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37\xdb" - "\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a" - "\x24\xa0\xd6\xb7\x11\x79\x6c", - .plen = 31, - .ctext = "\x78\x90\x52\xae\x0f\xf7\x2e\xef" - "\x63\x09\x08\x58\xb5\x56\xbd\x72" - "\x6e\x42\xcf\x27\x04\x7c\xdb\x92" - "\x18\xe9\xa4\x33\x90\xba\x62\xb5" - 
"\x70\xd3\x88\x9b\x4f\x05\xa7\x51" - "\x85\x87\x17\x09\x42\xed\x4e", - .clen = 47, - }, { - .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", - .klen = 16, - .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60" - "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d" - "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd", - .alen = 32, - .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2" - "\xdb\x74\x36\x23\x11\x58\x3f\x93" - "\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3", - .plen = 32, - .ctext = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41" - "\x2e\x71\xc8\x3b\x92\x43\x58\xaf" - "\x5a\xfb\xad\x8f\xd9\xd5\x8a\x5e" - "\xdb\xf3\xcd\x3a\x2b\xe1\x2c\x1a" - "\xb0\xed\xe3\x0c\x6e\xf9\xf2\xd6" - "\x90\xe6\xb1\x0e\xa5\x8a\xac\xb7", - .clen = 48, - }, { - .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .klen = 16, - .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7" - "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4" - "\xee", - .alen = 33, - .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05\x36\x42\xa7\x16\xf8\xc1\xad" - "\xa7\xfb\x94\x68\xc5\x37\xab\x8a" - "\x72", - .plen = 33, - .ctext = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc" - "\xfd\x2e\x4b\x46\x84\xff\x78\x4e" - "\x50\xda\x5c\xb9\x61\x1d\xf5\xb9" - "\xfe\xbb\x7f\xae\x8c\xc1\x24\xbd" - "\x8c\x6f\x1f\x9b\xce\xc6\xc1\x37" - "\x08\x06\x5a\xe5\x96\x10\x95\xc2" - "\x5e", - .clen = 49, - }, { - .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", - .klen = 16, - .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa" - "\xfd\xc9\x4a\xa9\xa9\x39\x4b\x54" - "\xc8\x0e\x24\x7f\x5e\x10\x7a\x45" - "\x10\x0b\x56\x85\xad\x54\xaa\x66" - "\xa8\x43\xcd\xd4\x9b\xb7\xfa", - .alen = 63, - .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a\x60" - "\x80\xf4\x4b\xf4\xc1\x3d\xd0\x93" - "\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8" - "\x1b\xa5\x50\xed\x87\xa9\x72\x59" - "\x9c\x44\xb2\xa4\x99\x98\x34", - .plen = 63, - .ctext = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22" - "\x49\x2d\x07\x92\xfc\x3d\x6d\x5f" - "\xef\x36\x19\xae\x91\xfa\xd6\x63" - "\x46\xea\x8a\x39\x14\x21\xa6\x37" - "\x18\xfc\x97\x3e\x16\xa5\x4d\x39" - "\x45\x2e\x69\xcc\x9c\x5f\xdf\x6d" - "\x5e\xa2\xbf\xac\x83\x32\x72\x52" - "\x58\x58\x23\x40\xfd\xa5\xc2\xe6" - "\xe9\x5a\x50\x98\x00\x58\xc9\x86" - "\x4f\x20\x37\xdb\x7b\x22\xa3", - .clen = 79, - }, { - .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70", - .klen = 16, - .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81" - "\x0b\x76\x4f\xd7\x0a\x4b\x5e\x51" - "\xe3\x1d\xb9\xe5\x21\xb9\x8f\xd4" - "\x3d\x72\x3e\x26\x16\xa9\xca\x32" - "\x77\x47\x63\x14\x95\x3d\xe4\x34", - .alen = 64, - .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37" - 
"\x8f\xa1\x50\x23\x22\x4f\xe3\x91" - "\xe9\x21\x5e\xbf\x52\x23\x95\x37" - "\x48\x0c\x38\x8f\xf0\xff\x92\x24" - "\x6b\x47\x49\xe3\x94\x1f\x1e\x01", - .plen = 64, - .ctext = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb" - "\x23\xec\x35\xe3\xae\xc9\xfb\x0b" - "\x90\x14\x46\xeb\xa8\x8d\xb0\x9b" - "\x39\xda\x8b\x48\xec\xb2\x00\x4e" - "\x80\x6f\x46\x4f\x9b\x1e\xbb\x35" - "\xea\x5a\xbc\xa2\x36\xa5\x89\x45" - "\xc2\xd6\xd7\x15\x0b\xf6\x6c\x56" - "\xec\x99\x7d\x61\xb3\x15\x93\xed" - "\x83\x1e\xd9\x48\x84\x0b\x37\xfe" - "\x95\x74\x44\xd5\x54\xa6\x27\x06", - .clen = 80, - }, { - .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76", - .klen = 16, - .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58" - "\x1a\x22\x53\x05\x6b\x5c\x71\x4f" - "\xfd\x2d\x4d\x4c\xe5\x62\xa5\x63" - "\x6a\xda\x26\xc8\x7f\xff\xea\xfd" - "\x46\x4a\xfa\x53\x8f\xc4\xcd\x68" - "\x58", - .alen = 65, - .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" - "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" - "\x75\x73\x20\x30\x59\x54\xb2\xf0" - "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" - "\x8a\xdf\x27\xa0\xe4\x60\x99\xae" - "\x8e\x43\xd9\x39\x7b\x10\x40\x67" - "\x5c\x7e\xc9\x70\x63\x34\xca\x59" - "\xfe\x86\xbc\xb7\x9c\x39\xf3\x6d" - "\x6a\x41\x64\x6f\x16\x7f\x65\x7e" - "\x89\x84\x68\xeb\xb0\x51\xbe\x55" - "\x33\x16\x59\x6c\x3b\xef\x88\xad" - "\x2f\xab\xbc\x25\x76\x87\x41\x2f" - "\x36", - .plen = 129, - .ctext = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9" - "\xd1\xcd\xdc\x16\xdd\x2c\xc1\xfb" - "\x52\xb5\xb3\xab\x50\x99\x3f\xa0" - "\x38\xa4\x74\xa5\x04\x15\x63\x05" - "\x8f\x54\x81\x06\x5a\x6b\xa4\x63" - "\x6d\xa7\x21\xcb\xff\x42\x30\x8e" - "\x3b\xd1\xca\x3f\x4b\x1a\xb8\xc3" - "\x42\x01\xe6\xbc\x75\x15\x87\xee" - "\xc9\x8e\x65\x01\xd9\xd8\xb5\x9f" - "\x48\x86\xa6\x5f\x2c\xc7\xb5\xb0" - "\xed\x5d\x14\x7c\x3f\x40\xb1\x0b" - "\x72\xef\x94\x8d\x7a\x85\x56\xe5" - "\x56\x08\x15\x56\xba\xaf\xbd\xf0" - "\x20\xef\xa0\xf6\xa9\xad\xa2\xc9" - "\x1c\x3b\x28\x51\x7e\x77\xb2\x18" - "\x4f\x61\x64\x37\x22\x36\x6d\x78" - "\xed\xed\x35\xe8\x83\xa5\xec\x25" - "\x6b\xff\x5f\x1a\x09\x96\x3d\xdc" - "\x20", - .clen = 145, - }, { - .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", - .klen = 16, - .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e" - "\x28\xce\x57\x34\xcd\x6e\x84\x4c" - "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1" - "\x96\x41\x0d\x69\xe8\x54\x0a\xc8" - "\x15\x4e\x91\x92\x89\x4b\xb7\x9b" - "\x21\xf7\x42\x89\xac\x12\x2a\x54" - "\x69\xee\x18\xc7\x8d\xed\xe8\xfd" - "\xbb\x04\x28\xe6\x8a\x3c\x98\xc1" - "\x04\x2d\xa9\xa1\x24\x83\xff\xe9" - "\x55\x7a\xf0\xd1\xf6\x63\x05\xe1" - "\xd9\x1e\x75\x72\xc1\x9f\xae\x32" - "\xe1\x6b\xcd\x9e\x61\x19\x23\x86" - "\xd9\xd2\xaf\x8e\xd5\xd3\xa8\xa9" - "\x51", - .alen = 129, - .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .plen = 65, - .ctext = 
"\x36\x78\xb9\x22\xde\x62\x35\x55" - "\x1a\x7a\xf5\x45\xbc\xd7\x15\x82" - "\x01\xe9\x5a\x07\xea\x46\xaf\x91" - "\xcb\x73\xa5\xee\xe1\xb4\xbf\xc2" - "\xdb\xd2\x9d\x59\xde\xfc\x83\x00" - "\xf5\x46\xac\x97\xd5\x57\xa9\xb9" - "\x1f\x8c\xe8\xca\x68\x8b\x91\x0c" - "\x01\xbe\x0a\xaf\x7c\xf6\x67\xa4" - "\xbf\xbc\x88\x3f\x5d\xd1\xf9\x19" - "\x0f\x9d\xb2\xaf\xb9\x6e\x17\xdf" - "\xa2", - .clen = 81, - }, { - .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", - .klen = 16, - .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85" - "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" - "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", - .alen = 32, - .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07" - "\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d" - "\x6e\xde\xee\xa2\x08\x12\xc7\xba", - .plen = 32, - .ctext = "\x08\x1b\x95\x0e\x41\x95\x02\x4b" - "\x9c\xbb\xa8\xd0\x7c\xd3\x44\x6e" - "\x89\x14\x33\x70\x0a\xbc\xea\x39" - "\x88\xaa\x2b\xd5\x73\x11\x55\xf5" - "\x33\x33\x9c\xd7\x42\x34\x49\x8e" - "\x2f\x03\x30\x05\x47\xaf\x34", - .clen = 47, - }, { - .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .klen = 16, - .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c" - "\x39\x14\x05\xa0\xf3\x10\xec\x41" - "\xff\x01\x95\x84\x2b\x59\x7f\xdb", - .alen = 32, - .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d" - "\x03\xc4\x88\xc1\x35\xb8\xcd\x47" - "\x2f\x0c\xcd\x7a\xe2\x71\x66\x91", - .plen = 32, - .ctext = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68" - "\x0c\x60\xb9\x27\xdf\xaa\x41\xc6" - "\x25\xd8\xf7\x1f\x10\x15\x48\x61" - "\x4c\x95\x00\xdf\x51\x9b\x7f\xe6" - "\x24\x40\x9e\xbe\x3b\xeb\x1b\x98" - "\xb9\x9c\xe5\xef\xf2\x05", - .clen = 46, - }, { - .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .klen = 16, - .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92" - "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a" - "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2", - .alen = 32, - .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13" - "\x2e\x86\x93\x45\x3a\x58\x4f\x61" - "\xf0\x3a\xac\x53\xbc\xd0\x06\x68", - .plen = 32, - .ctext = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d" - "\xb5\xec\x9b\x4e\x12\x23\xa3\xcf" - "\x1a\x5a\x70\x15\x5a\x10\x40\x51" - "\xca\x47\x4c\x9d\xc9\x97\xf4\x77" - "\xdb\xc8\x10\x2d\xdc\x65\x20\x3f", - .clen = 40, - }, { - .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .klen = 16, - .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22" - "\x36\xab\xde\xc6\x6d\x32\x70\x17", - .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9" - "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98" - "\x8d\x98\x1c\xa8\xfe\x50\xf0\x74" - "\x81\x5c\x53\x35\xe0\x17\xbd\x88", - .alen = 32, - .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" - "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a" - "\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a" - "\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e", - .plen = 32, - .ctext = "\xf1\x62\x44\xc7\x5f\x19\xca\x43" - "\x47\x2c\xaf\x68\x82\xbd\x51\xef" - "\x3d\x65\xd8\x45\x2d\x06\x07\x78" - "\x08\x2e\xb3\x23\xcd\x81\x12\x55" - "\x1a", - .clen = 33, - }, { - .key = "\xe9\x95\xa2\x8f\x93\x13\x7b\xb7" - "\x96\x4e\x63\x33\x69\x8d\x02\x9b" - "\x23\xf9\x22\xeb\x80\xa0\xb1\x81" - "\xe2\x73\xc3\x21\x4d\x47\x8d\xf4", - .klen = 32, - 
.iv = "\xf8\x5e\x31\xf7\xd7\xb2\x25\x3e" - "\xb7\x85\x90\x58\x67\x57\x33\x1d", - .assoc = "", - .alen = 0, - .ptext = "", - .plen = 0, - .ctext = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf" - "\xb9\xd2\x41\xf6\x80\xa1\x52\x70", - .clen = 16, - }, { - .key = "\x25\xba\xdc\x2e\xa3\x8f\x24\xd3" - "\x17\x29\x15\xc5\x63\xb2\xc5\xa1" - "\x4d\xbc\x2d\x6f\x85\x40\x33\x9a" - "\xa3\xa0\xa1\xfa\x27\xa6\x2c\xca", - .klen = 32, - .iv = "\x34\x83\x6a\x96\xe7\x2d\xce\x5a" - "\x38\x5f\x42\xe9\x61\x7b\xf5\x23", - .assoc = "", - .alen = 0, - .ptext = "\x53", - .plen = 1, - .ctext = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7" - "\x01\xf4\x08\xe3\x0d\xf7\xf0\x78" - "\x53", - .clen = 17, - }, { - .key = "\x62\xdf\x16\xcd\xb3\x0a\xcc\xef" - "\x98\x03\xc7\x56\x5d\xd6\x87\xa8" - "\x77\x7e\x39\xf3\x8a\xe0\xb5\xb4" - "\x65\xce\x80\xd2\x01\x05\xcb\xa1", - .klen = 32, - .iv = "\x71\xa8\xa4\x35\xf7\xa9\x76\x75" - "\xb8\x39\xf4\x7a\x5b\x9f\xb8\x29", - .assoc = "", - .alen = 0, - .ptext = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83" - "\xf9\xa6\x4d\xc3\x58\x31\x19\x2c" - "\xd7\x90\xc2\x56\x4e\xd8\x57\xc7" - "\xf6\xf0\x27\xb4\x25\x4c\x83", - .plen = 31, - .ctext = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07" - "\xff\x8e\x74\xf8\xa1\xa6\xd5\x37" - "\xa5\x64\x31\x5c\xca\x73\x9b\x43" - "\xe6\x70\x63\x46\x95\xcb\xf7\xb5" - "\x20\x8c\x75\x7a\x2a\x17\x2f\xa9" - "\xb8\x4d\x11\x42\xd1\xf8\xf1", - .clen = 47, - }, { - .key = "\x9e\x03\x4f\x6d\xc3\x86\x75\x0a" - "\x19\xdd\x79\xe8\x57\xfb\x4a\xae" - "\xa2\x40\x45\x77\x90\x80\x37\xce" - "\x26\xfb\x5f\xaa\xdb\x64\x6b\x77", - .klen = 32, - .iv = "\xae\xcc\xde\xd5\x07\x25\x1f\x91" - "\x39\x14\xa6\x0c\x55\xc4\x7b\x30", - .assoc = "", - .alen = 0, - .ptext = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f" - "\x7a\x81\xff\x55\x52\x56\xdc\x33" - "\x01\x52\xcd\xdb\x53\x78\xd9\xe1" - "\xb7\x1d\x06\x8d\xff\xab\x22\x98", - .plen = 32, - .ctext = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37" - "\x0a\xee\xc4\x30\x19\xd7\x3a\x6f" - "\xf8\x2b\x67\xf5\x3b\x84\x87\x2a" - "\xfb\x07\x7a\x82\xb5\xe4\x85\x26" - "\x1e\xa8\xe5\x04\x54\xce\xe5\x5f" - "\xb5\x3f\xc1\xd5\x7f\xbd\xd2\xa6", - .clen = 48, - }, { - .key = "\xdb\x28\x89\x0c\xd3\x01\x1e\x26" - "\x9a\xb7\x2b\x79\x51\x1f\x0d\xb4" - "\xcc\x03\x50\xfc\x95\x20\xb9\xe7" - "\xe8\x29\x3e\x83\xb5\xc3\x0a\x4e", - .klen = 32, - .iv = "\xea\xf1\x18\x74\x17\xa0\xc8\xad" - "\xba\xee\x58\x9d\x4f\xe8\x3d\x36", - .assoc = "", - .alen = 0, - .ptext = "\x08\x84\x34\x44\x9f\xde\x1c\xbb" - "\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39" - "\x2c\x14\xd9\x5f\x59\x18\x5b\xfb" - "\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f" - "\x2e", - .plen = 33, - .ctext = "\xc2\xf4\x40\x55\xf9\x59\xff\x73" - "\x08\xf5\x98\x92\x0c\x7b\x35\x9a" - "\xa8\xf4\x42\x7e\x6f\x93\xca\x22" - "\x23\x06\x1e\xf8\x89\x22\xf4\x46" - "\x7c\x7c\x67\x75\xab\xe5\x75\xaa" - "\x15\xd7\x83\x19\xfd\x31\x59\x5b" - "\x32", - .clen = 49, - }, { - .key = "\x17\x4d\xc3\xab\xe3\x7d\xc7\x42" - "\x1b\x91\xdd\x0a\x4b\x43\xcf\xba" - "\xf6\xc5\x5c\x80\x9a\xc0\x3b\x01" - "\xa9\x56\x1d\x5b\x8f\x22\xa9\x25", - .klen = 32, - .iv = "\x27\x16\x51\x13\x27\x1c\x71\xc9" - "\x3b\xc8\x0a\x2f\x49\x0c\x00\x3c", - .assoc = "", - .alen = 0, - .ptext = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7" - "\x7c\x35\x63\x77\x46\x9f\x61\x3f" - "\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14" - "\x3a\x79\xc4\x3e\xb3\x69\x61\x46" - "\x3c\xb6\x83\x4e\xb4\x26\xc7\x73" - "\x22\xda\x52\x8b\x7d\x11\x98\xea" - "\x62\xe1\x14\x1e\xdc\xfe\x0f\xad" - "\x20\x76\x5a\xdc\x4e\x71\x13", - .plen = 63, - .ctext = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb" - "\xa0\x8c\xd3\x3e\x7f\x8d\xe8\x28" - "\x2a\xdc\xfa\x01\x84\x87\x9a\x70" - "\x81\x75\x37\x0a\xd2\x75\xa9\xb6" - 
"\x21\x72\xee\x7e\x65\x95\xe5\xcc" - "\x01\xb7\x39\xa6\x51\x15\xca\xff" - "\x61\xdc\x97\x38\xcc\xf4\xca\xc7" - "\x83\x9b\x05\x11\x72\x60\xf0\xb4" - "\x7e\x06\xab\x0a\xc0\xbb\x59\x23" - "\xaa\x2d\xfc\x4e\x35\x05\x59", - .clen = 79, - }, { - .key = "\x54\x71\xfd\x4b\xf3\xf9\x6f\x5e" - "\x9c\x6c\x8f\x9c\x45\x68\x92\xc1" - "\x21\x87\x67\x04\x9f\x60\xbd\x1b" - "\x6a\x84\xfc\x34\x6a\x81\x48\xfb", - .klen = 32, - .iv = "\x63\x3b\x8b\xb3\x37\x98\x1a\xe5" - "\xbc\xa2\xbc\xc0\x43\x31\xc2\x42", - .assoc = "", - .alen = 0, - .ptext = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3" - "\xfd\x0f\x15\x09\x40\xc3\x24\x45" - "\x81\x99\xf0\x67\x63\x58\x5e\x2e" - "\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c" - "\x4b\x62\x87\x7c\x15\x38\xda\x70" - "\x3d\xea\xe7\xf2\x40\xba\xae\x79" - "\x8f\x48\xfc\xbf\x45\x53\x2e\x78" - "\xef\x79\xf0\x1b\x49\xf7\xfd\x9c", - .plen = 64, - .ctext = "\x11\x7c\x7d\xef\xce\x29\x95\xec" - "\x7e\x9f\x42\xa6\x26\x07\xa1\x75" - "\x2f\x4e\x09\x9a\xf6\x6b\xc2\xfa" - "\x0d\xd0\x17\xdc\x25\x1e\x9b\xdc" - "\x5f\x8c\x1c\x60\x15\x4f\x9b\x20" - "\x7b\xff\xcd\x82\x60\x84\xf4\xa5" - "\x20\x9a\x05\x19\x5b\x02\x0a\x72" - "\x43\x11\x26\x58\xcf\xc5\x41\xcf" - "\x13\xcc\xde\x32\x92\xfa\x86\xf2" - "\xaf\x16\xe8\x8f\xca\xb6\xfd\x54", - .clen = 80, - }, { - .key = "\x90\x96\x36\xea\x03\x74\x18\x7a" - "\x1d\x46\x42\x2d\x3f\x8c\x54\xc7" - "\x4b\x4a\x73\x89\xa4\x00\x3f\x34" - "\x2c\xb1\xdb\x0c\x44\xe0\xe8\xd2", - .klen = 32, - .iv = "\xa0\x5f\xc5\x52\x47\x13\xc2\x01" - "\x3d\x7c\x6e\x52\x3d\x55\x85\x48", - .assoc = "\xaf", - .alen = 1, - .ptext = "", - .plen = 0, - .ctext = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe" - "\x69\xdf\xc4\xc4\x02\x46\x3a\xf0", - .clen = 16, - }, { - .key = "\xcd\xbb\x70\x89\x13\xf0\xc1\x95" - "\x9e\x20\xf4\xbf\x39\xb1\x17\xcd" - "\x76\x0c\x7f\x0d\xa9\xa0\xc1\x4e" - "\xed\xdf\xb9\xe4\x1e\x3f\x87\xa8", - .klen = 32, - .iv = "\xdc\x84\xfe\xf1\x58\x8f\x6b\x1c" - "\xbe\x57\x20\xe3\x37\x7a\x48\x4f", - .assoc = "\xeb\x4d\x8d\x59\x9c\x2e\x15\xa3" - "\xde\x8d\x4d\x07\x36\x43\x78\xd0" - "\x0b\x6d\x84\x4f\x2c\xf0\x82\x5b" - "\x4e\xf6\x29\xd1\x8b\x6f\x56", - .alen = 31, - .ptext = "", - .plen = 0, - .ctext = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d" - "\x2e\x9a\xd6\x61\x43\xc0\x74\x69", - .clen = 16, - }, { - .key = "\x0a\xe0\xaa\x29\x24\x6c\x6a\xb1" - "\x1f\xfa\xa6\x50\x33\xd5\xda\xd3" - "\xa0\xce\x8a\x91\xae\x40\x43\x68" - "\xae\x0d\x98\xbd\xf8\x9e\x26\x7f", - .klen = 32, - .iv = "\x19\xa9\x38\x91\x68\x0b\x14\x38" - "\x3f\x31\xd2\x74\x31\x9e\x0a\x55", - .assoc = "\x28\x72\xc7\xf8\xac\xaa\xbe\xbf" - "\x5f\x67\xff\x99\x30\x67\x3b\xd6" - "\x35\x2f\x90\xd3\x31\x90\x04\x74" - "\x0f\x23\x08\xa9\x65\xce\xf6\xea", - .alen = 32, - .ptext = "", - .plen = 0, - .ctext = "\xb9\x57\x13\x3e\x82\x31\x61\x65" - "\x0d\x7f\x6c\x96\x93\x5c\x50\xe2", - .clen = 16, - }, { - .key = "\x46\x04\xe3\xc8\x34\xe7\x12\xcd" - "\xa0\xd4\x58\xe2\x2d\xf9\x9c\xda" - "\xca\x91\x96\x15\xb4\xe0\xc5\x81" - "\x70\x3a\x77\x95\xd2\xfd\xc5\x55", - .klen = 32, - .iv = "\x55\xcd\x72\x30\x78\x86\xbd\x54" - "\xc0\x0b\x84\x06\x2b\xc2\xcd\x5b", - .assoc = "\x64\x97\x00\x98\xbc\x25\x67\xdb" - "\xe0\x41\xb1\x2a\x2a\x8c\xfe\xdd" - "\x5f\xf2\x9c\x58\x36\x30\x86\x8e" - "\xd1\x51\xe6\x81\x3f\x2d\x95\xc1" - "\x01", - .alen = 33, - .ptext = "", - .plen = 0, - .ctext = "\x81\x96\x34\xde\xbb\x36\xdd\x3e" - "\x4e\x5e\xcb\x44\x21\xb8\x3f\xf1", - .clen = 16, - }, { - .key = "\x83\x29\x1d\x67\x44\x63\xbb\xe9" - "\x20\xaf\x0a\x73\x27\x1e\x5f\xe0" - "\xf5\x53\xa1\x9a\xb9\x80\x47\x9b" - "\x31\x68\x56\x6e\xac\x5c\x65\x2c", - .klen = 32, - .iv = "\x92\xf2\xac\xcf\x88\x02\x65\x70" - 
"\x41\xe5\x36\x97\x25\xe7\x90\x61", - .assoc = "\xa1\xbb\x3a\x37\xcc\xa1\x10\xf7" - "\x61\x1c\x63\xbc\x24\xb0\xc0\xe3" - "\x8a\xb4\xa7\xdc\x3b\xd0\x08\xa8" - "\x92\x7f\xc5\x5a\x19\x8c\x34\x97" - "\x0f\x95\x9b\x18\xe4\x8d\xb4\x24" - "\xb9\x33\x28\x18\xe1\x9d\x14\xe0" - "\x64\xb2\x89\x7d\x78\xa8\x05\x7e" - "\x07\x8c\xfc\x88\x2d\xb8\x53", - .alen = 63, - .ptext = "", - .plen = 0, - .ctext = "\x2e\x99\xb6\x79\x57\x56\x80\x36" - "\x8e\xc4\x1c\x12\x7d\x71\x36\x0c", - .clen = 16, - }, { - .key = "\xbf\x4e\x57\x07\x54\xdf\x64\x05" - "\xa1\x89\xbc\x04\x21\x42\x22\xe6" - "\x1f\x15\xad\x1e\xbe\x20\xc9\xb4" - "\xf3\x95\x35\x46\x86\xbb\x04\x03", - .klen = 32, - .iv = "\xce\x17\xe5\x6f\x98\x7e\x0e\x8c" - "\xc2\xbf\xe8\x29\x1f\x0b\x52\x68", - .assoc = "\xdd\xe0\x74\xd6\xdc\x1d\xb8\x13" - "\xe2\xf6\x15\x4d\x1e\xd4\x83\xe9" - "\xb4\x76\xb3\x60\x40\x70\x8a\xc1" - "\x53\xac\xa4\x32\xf3\xeb\xd3\x6e" - "\x1e\x42\xa0\x46\x45\x9f\xc7\x22" - "\xd3\x43\xbc\x7e\xa5\x47\x2a\x6f" - "\x91\x19\x70\x1e\xe1\xfe\x25\x49" - "\xd6\x8f\x93\xc7\x28\x3f\x3d\x03", - .alen = 64, - .ptext = "", - .plen = 0, - .ctext = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce" - "\x3b\x89\x40\x36\xba\x6d\x0e\xa2", - .clen = 16, - }, { - .key = "\xfc\x72\x90\xa6\x64\x5a\x0d\x21" - "\x22\x63\x6e\x96\x1b\x67\xe4\xec" - "\x49\xd7\xb9\xa2\xc3\xc0\x4b\xce" - "\xb4\xc3\x14\x1e\x61\x1a\xa3\xd9", - .klen = 32, - .iv = "\x0b\x3c\x1f\x0e\xa8\xf9\xb7\xa7" - "\x42\x9a\x9a\xba\x19\x30\x15\x6e", - .assoc = "\x1a", - .alen = 1, - .ptext = "\x29", - .plen = 1, - .ctext = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6" - "\x17\x75\x81\x16\xdf\x26\xff\x67" - "\x92", - .clen = 17, - }, { - .key = "\x38\x97\xca\x45\x74\xd6\xb6\x3c" - "\xa3\x3d\x20\x27\x15\x8b\xa7\xf2" - "\x74\x9a\xc4\x27\xc8\x60\xcd\xe8" - "\x75\xf0\xf2\xf7\x3b\x79\x42\xb0", - .klen = 32, - .iv = "\x47\x60\x59\xad\xb8\x75\x60\xc3" - "\xc3\x74\x4c\x4c\x13\x54\xd8\x74", - .assoc = "\x56\x29\xe7\x15\xfc\x14\x0a\x4a" - "\xe4\xaa\x79\x70\x12\x1d\x08\xf6" - "\x09\xfb\xca\x69\x4b\xb0\x8e\xf5" - "\xd6\x07\x62\xe3\xa8\xa9\x12", - .alen = 31, - .ptext = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1" - "\x04\xe1\xa6\x94\x10\xe6\x39\x77" - "\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb" - "\x06\x13\x9a\xd9\x5e\xc0\xfa", - .plen = 31, - .ctext = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd" - "\x3c\xd1\x2a\xd4\x15\x86\x9d\xda" - "\xea\x6c\x6f\xa1\x33\xb0\x7a\x01" - "\x57\xe7\xf3\x7b\x73\xe7\x54\x10" - "\xc6\x91\xe2\xc6\xa0\x69\xe7\xe6" - "\x76\xc3\xf5\x3a\x76\xfd\x4a", - .clen = 47, - }, { - .key = "\x75\xbc\x04\xe5\x84\x52\x5e\x58" - "\x24\x17\xd2\xb9\x0e\xaf\x6a\xf9" - "\x9e\x5c\xd0\xab\xcd\x00\x4f\x01" - "\x37\x1e\xd1\xcf\x15\xd8\xe2\x86", - .klen = 32, - .iv = "\x84\x85\x92\x4d\xc8\xf1\x08\xdf" - "\x44\x4e\xff\xdd\x0d\x78\x9a\x7a", - .assoc = "\x93\x4e\x21\xb4\x0c\x90\xb3\x66" - "\x65\x84\x2b\x01\x0b\x42\xcb\xfc" - "\x33\xbd\xd6\xed\x50\x50\x10\x0e" - "\x97\x35\x41\xbb\x82\x08\xb1\xf2", - .alen = 32, - .ptext = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed" - "\x85\xbb\x58\x26\x0a\x0b\xfc\x7d" - "\xfe\x6e\x59\x0e\x91\xf8\xf0\x15" - "\xc8\x40\x78\xb1\x38\x1f\x99\xa7", - .plen = 32, - .ctext = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a" - "\x71\xce\xe4\xaa\x45\x70\xe6\x84" - "\x62\x48\x08\x64\x86\x6a\xdf\xec" - "\xb4\xa0\xfb\x34\x03\x0c\x19\xf4" - "\x2b\x7b\x36\x73\xec\x54\xa9\x1e" - "\x30\x85\xdb\xe4\xac\xe9\x2c\xca", - .clen = 48, - }, { - .key = "\xb1\xe1\x3e\x84\x94\xcd\x07\x74" - "\xa5\xf2\x84\x4a\x08\xd4\x2c\xff" - "\xc8\x1e\xdb\x2f\xd2\xa0\xd1\x1b" - "\xf8\x4c\xb0\xa8\xef\x37\x81\x5d", - .klen = 32, - .iv = "\xc0\xaa\xcc\xec\xd8\x6c\xb1\xfb" - "\xc5\x28\xb1\x6e\x07\x9d\x5d\x81", - 
.assoc = "\xd0\x73\x5a\x54\x1d\x0b\x5b\x82" - "\xe5\x5f\xdd\x93\x05\x66\x8e\x02" - "\x5e\x80\xe1\x71\x55\xf0\x92\x28" - "\x59\x62\x20\x94\x5c\x67\x50\xc8" - "\x58", - .alen = 33, - .ptext = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09" - "\x06\x95\x0a\xb7\x04\x2f\xbe\x84" - "\x28\x30\x64\x92\x96\x98\x72\x2e" - "\x89\x6e\x57\x8a\x13\x7e\x38\x7e" - "\xdb", - .plen = 33, - .ctext = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60" - "\xcb\x4a\x54\x94\xcf\xf5\x7e\x34" - "\xe9\xf8\x80\x65\x53\xd0\x72\x70" - "\x4f\x7d\x9d\xd1\x15\x6f\xb9\x2c" - "\xfa\xe8\xdd\xac\x2e\xe1\x3f\x67" - "\x63\x0f\x1a\x59\xb7\x89\xdb\xf4" - "\xc3", - .clen = 49, - }, { - .key = "\xee\x05\x77\x23\xa5\x49\xb0\x90" - "\x26\xcc\x36\xdc\x02\xf8\xef\x05" - "\xf3\xe1\xe7\xb3\xd8\x40\x53\x35" - "\xb9\x79\x8f\x80\xc9\x96\x20\x33", - .klen = 32, - .iv = "\xfd\xce\x06\x8b\xe9\xe8\x5a\x17" - "\x46\x02\x63\x00\x01\xc1\x20\x87", - .assoc = "\x0c\x98\x94\xf3\x2d\x87\x04\x9e" - "\x66\x39\x8f\x24\xff\x8a\x50\x08" - "\x88\x42\xed\xf6\x5a\x90\x14\x42" - "\x1a\x90\xfe\x6c\x36\xc6\xf0\x9f" - "\x66\xa0\xb5\x2d\x2c\xf8\x25\x15" - "\x55\x90\xa2\x7e\x77\x94\x96\x3a" - "\x71\x1c\xf7\x44\xee\xa8\xc3\x42" - "\xe2\xa3\x84\x04\x0b\xe1\xce", - .alen = 63, - .ptext = "\x1b\x61\x23\x5b\x71\x26\xae\x25" - "\x87\x6f\xbc\x49\xfe\x53\x81\x8a" - "\x53\xf2\x70\x17\x9b\x38\xf4\x48" - "\x4b\x9b\x36\x62\xed\xdd\xd8\x54" - "\xea\xcb\xb6\x79\x45\xfc\xaa\x54" - "\x5c\x94\x47\x58\xa7\xff\x9c\x9e" - "\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35" - "\xd5\xa4\x6a\xd4\x09\xc2\x08", - .plen = 63, - .ctext = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a" - "\xda\x1f\xd3\xff\xbb\xb2\xb0\xf8" - "\xef\xe9\xeb\x9e\x7c\x80\xf4\x2b" - "\x59\xc0\x79\xbc\x17\xa0\x15\x01" - "\xf5\x72\xfb\x5a\xe7\xaf\x07\xe3" - "\x1b\x49\x21\x34\x23\x63\x55\x5e" - "\xee\x4f\x34\x17\xfa\xfe\xa5\x0c" - "\xed\x0b\x23\xea\x9b\xda\x57\x2f" - "\xf6\xa9\xae\x0d\x4e\x40\x96\x45" - "\x7f\xfa\xf0\xbf\xc4\x98\x78", - .clen = 79, - }, { - .key = "\x2a\x2a\xb1\xc3\xb5\xc5\x59\xac" - "\xa7\xa6\xe8\x6d\xfc\x1d\xb2\x0b" - "\x1d\xa3\xf3\x38\xdd\xe0\xd5\x4e" - "\x7b\xa7\x6e\x58\xa3\xf5\xbf\x0a", - .klen = 32, - .iv = "\x39\xf3\x3f\x2b\xf9\x64\x03\x33" - "\xc7\xdd\x15\x91\xfb\xe6\xe2\x8d", - .assoc = "\x49\xbc\xce\x92\x3d\x02\xad\xba" - "\xe7\x13\x41\xb6\xf9\xaf\x13\x0f" - "\xb2\x04\xf8\x7a\x5f\x30\x96\x5b" - "\xdc\xbd\xdd\x44\x10\x25\x8f\x75" - "\x75\x4d\xb9\x5b\x8e\x0a\x38\x13" - "\x6f\x9f\x36\xe4\x3a\x3e\xac\xc9" - "\x9d\x83\xde\xe5\x57\xfd\xe3\x0e" - "\xb1\xa7\x1b\x44\x05\x67\xb7\x37", - .alen = 64, - .ptext = "\x58\x85\x5c\xfa\x81\xa1\x57\x40" - "\x08\x4a\x6e\xda\xf8\x78\x44\x90" - "\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62" - "\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b" - "\xf8\x78\xba\xa7\xa6\x0e\xbd\x52" - "\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d" - "\xa9\x1d\xd8\x4e\x31\x53\xab\x00" - "\xa5\xa7\x01\x13\x04\x49\xf2\x04", - .plen = 64, - .ctext = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1" - "\x58\x06\x1a\x9b\x8c\x67\xdf\xeb" - "\x35\x35\x60\x9d\x06\x40\x65\xc1" - "\x93\xe8\xb3\x82\x50\x29\xdd\xb5" - "\x2b\xcb\xde\x18\x78\x6b\x42\xbe" - "\x6d\x24\xd0\xb2\x7d\xd7\x08\x8f" - "\x4a\x18\x98\xad\x8c\xf2\x97\xb4" - "\xf4\x77\xe4\xbf\x41\x3b\xc4\x06" - "\xce\x9e\x34\x81\xf0\x89\x11\x13" - "\x02\x65\xa1\x7c\xdf\x07\x33\x06", - .clen = 80, - }, { - .key = "\x67\x4f\xeb\x62\xc5\x40\x01\xc7" - "\x28\x80\x9a\xfe\xf6\x41\x74\x12" - "\x48\x65\xfe\xbc\xe2\x80\x57\x68" - "\x3c\xd4\x4d\x31\x7d\x54\x5f\xe1", - .klen = 32, - .iv = "\x76\x18\x79\xca\x09\xdf\xac\x4e" - "\x48\xb7\xc7\x23\xf5\x0a\xa5\x93", - .assoc = "\x85\xe1\x08\x32\x4d\x7e\x56\xd5" - "\x68\xed\xf3\x47\xf3\xd3\xd6\x15" - 
"\xdd\xc7\x04\xfe\x64\xd0\x18\x75" - "\x9d\xeb\xbc\x1d\xea\x84\x2e\x4c" - "\x83\xf9\xbe\x8a\xef\x1c\x4b\x10" - "\x89\xaf\xcb\x4b\xfe\xe7\xc1\x58" - "\xca\xea\xc6\x87\xc0\x53\x03\xd9" - "\x80\xaa\xb2\x83\xff\xee\xa1\x6a" - "\x04", - .alen = 65, - .ptext = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c" - "\x88\x24\x20\x6b\xf2\x9c\x06\x96" - "\xa7\x77\x87\x1f\xa6\x78\xf8\x7b" - "\xcd\xf6\xf4\x13\xa1\x9b\x16\x02" - "\x07\x24\xbf\xd5\x08\x20\xd0\x4f" - "\x90\xb3\x70\x24\x2f\x51\xc7\xbb" - "\xd6\x84\xc0\xef\x9a\xa8\xca\xcc" - "\x74\xab\x97\x53\xfe\xd0\xdb\x37" - "\x37\x6a\x0e\x9f\x3f\xa3\x2a\xe3" - "\x1b\x34\x6d\x51\x72\x2b\x17\xe7" - "\x4d\xaa\x2c\x18\xda\xa3\x33\x89" - "\x2a\x9f\xf4\xd2\xed\x76\x3d\x3f" - "\x3c\x15\x9d\x8e\x4f\x3c\x27\xb0" - "\x42\x3f\x2f\x8a\xd4\xc2\x10\xb2" - "\x27\x7f\xe3\x34\x80\x02\x49\x4b" - "\x07\x68\x22\x2a\x88\x25\x53\xb2" - "\x2f", - .plen = 129, - .ctext = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6" - "\x85\x43\x88\xd0\xd7\x78\x60\x19" - "\x3e\x1f\xb1\xa4\xd6\xc5\x96\xec" - "\xf7\x84\x85\xc7\x27\x0f\x74\x57" - "\x28\x9e\xdd\x90\x3c\x43\x12\xc5" - "\x51\x3d\x39\x8f\xa5\xf4\xe0\x0b" - "\x57\x04\xf1\x6d\xfe\x9b\x84\x27" - "\xe8\xeb\x4d\xda\x02\x0a\xc5\x49" - "\x1a\x55\x5e\x50\x56\x4d\x94\xda" - "\x20\xf8\x12\x54\x50\xb3\x11\xda" - "\xed\x44\x27\x67\xd5\xd1\x8b\x4b" - "\x38\x67\x56\x65\x59\xda\xe6\x97" - "\x81\xae\x2f\x92\x3b\xae\x22\x1c" - "\x91\x59\x38\x18\x00\xe8\xba\x92" - "\x04\x19\x56\xdf\xb0\x82\xeb\x6f" - "\x2e\xdb\x54\x3c\x4b\xbb\x60\x90" - "\x4c\x50\x10\x62\xba\x7a\xb1\x68" - "\x37\xd7\x87\x4e\xe4\x66\x09\x1f" - "\xa5", - .clen = 145, - }, { - .key = "\xa3\x73\x24\x01\xd5\xbc\xaa\xe3" - "\xa9\x5a\x4c\x90\xf0\x65\x37\x18" - "\x72\x28\x0a\x40\xe7\x20\xd9\x82" - "\xfe\x02\x2b\x09\x57\xb3\xfe\xb7", - .klen = 32, - .iv = "\xb3\x3d\xb3\x69\x19\x5b\x54\x6a" - "\xc9\x91\x79\xb4\xef\x2e\x68\x99", - .assoc = "\xc2\x06\x41\xd1\x5d\xfa\xff\xf1" - "\xe9\xc7\xa5\xd9\xed\xf8\x98\x1b" - "\x07\x89\x10\x82\x6a\x70\x9a\x8f" - "\x5e\x19\x9b\xf5\xc5\xe3\xcd\x22" - "\x92\xa5\xc2\xb8\x51\x2e\x5e\x0e" - "\xa4\xbe\x5f\xb1\xc1\x90\xd7\xe7" - "\xf7\x52\xae\x28\x29\xa8\x22\xa4" - "\x4f\xae\x48\xc2\xfa\x75\x8b\x9e" - "\xce\x83\x2a\x88\x07\x55\xbb\x89" - "\xf6\xdf\xac\xdf\x83\x08\xbf\x7d" - "\xac\x30\x8b\x8e\x02\xac\x00\xf1" - "\x30\x46\xe1\xbc\x75\xbf\x49\xbb" - "\x26\x4e\x29\xf0\x2f\x21\xc6\x13" - "\x92\xd9\x3d\x11\xe4\x10\x00\x8e" - "\xd4\xd4\x58\x65\xa6\x2b\xe3\x25" - "\xb1\x8f\x15\x93\xe7\x71\xb9\x2c" - "\x4b", - .alen = 129, - .ptext = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78" - "\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d" - "\xd2\x39\x93\xa3\xab\x18\x7a\x95" - "\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8" - "\x15\xd1\xc3\x04\x69\x32\xe3\x4d" - "\xaa\xc2\x04\x8b\xf2\xfa\xdc\x4a" - "\x02\xeb\xa8\x90\x03\xfd\xea\x97" - "\x43\xaf\x2e\x92\xf8\x57\xc5\x6a" - "\x00", - .plen = 65, - .ctext = "\x7d\xde\x53\x22\xe4\x23\x3b\x30" - "\x78\xde\x35\x90\x7a\xd9\x0b\x93" - "\xf6\x0e\x0b\xed\x40\xee\x10\x9c" - "\x96\x3a\xd3\x34\xb2\xd0\x67\xcf" - "\x63\x7f\x2d\x0c\xcf\x96\xec\x64" - "\x1a\x87\xcc\x7d\x2c\x5e\x81\x4b" - "\xd2\x8f\x4c\x7c\x00\xb1\xb4\xe0" - "\x87\x4d\xb1\xbc\xd8\x78\x2c\x17" - "\xf2\x3b\xd8\x28\x40\xe2\x76\xf6" - "\x20\x13\x83\x46\xaf\xff\xe3\x0f" - "\x72", - .clen = 81, - }, { - .key = "\xe0\x98\x5e\xa1\xe5\x38\x53\xff" - "\x2a\x35\xfe\x21\xea\x8a\xfa\x1e" - "\x9c\xea\x15\xc5\xec\xc0\x5b\x9b" - "\xbf\x2f\x0a\xe1\x32\x12\x9d\x8e", - .klen = 32, - .iv = "\xef\x61\xed\x08\x29\xd7\xfd\x86" - "\x4a\x6b\x2b\x46\xe9\x53\x2a\xa0", - .assoc = "\xfe\x2a\x7b\x70\x6d\x75\xa7\x0d" - "\x6a\xa2\x57\x6a\xe7\x1c\x5b\x21" - 
"\x31\x4b\x1b\x07\x6f\x10\x1c\xa8" - "\x20\x46\x7a\xce\x9f\x42\x6d\xf9", - .alen = 32, - .ptext = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94" - "\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3" - "\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf" - "\x50\x52\xb1\xc4\x55\x59\x55\xaf", - .plen = 32, - .ctext = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe" - "\x53\xc7\xaa\x9a\x60\x74\x9c\xc4" - "\xa2\xc2\xd0\x6d\xe1\x03\x63\xdc" - "\xbb\x51\x7e\x9c\x89\x73\xde\x4e" - "\x24\xf8\x52\x7c\x15\x41\x0e\xba" - "\x69\x0e\x36\x5f\x2f\x22\x8c", - .clen = 47, - }, { - .key = "\x1c\xbd\x98\x40\xf5\xb3\xfc\x1b" - "\xaa\x0f\xb0\xb3\xe4\xae\xbc\x24" - "\xc7\xac\x21\x49\xf1\x60\xdd\xb5" - "\x80\x5d\xe9\xba\x0c\x71\x3c\x64", - .klen = 32, - .iv = "\x2c\x86\x26\xa8\x39\x52\xa6\xa2" - "\xcb\x45\xdd\xd7\xe3\x77\xed\xa6", - .assoc = "\x3b\x4f\xb5\x10\x7d\xf1\x50\x29" - "\xeb\x7c\x0a\xfb\xe1\x40\x1e\x27" - "\x5c\x0d\x27\x8b\x74\xb0\x9e\xc2" - "\xe1\x74\x59\xa6\x79\xa1\x0c\xd0", - .alen = 32, - .ptext = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0" - "\x0b\xb2\x36\x20\xe0\x09\x4e\xa9" - "\x26\xbe\xaa\xac\xb5\x58\x7e\xc8" - "\x11\x7f\x90\x9c\x2f\xb8\xf4\x85", - .plen = 32, - .ctext = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51" - "\xb8\xda\x92\x3c\xfd\xda\xac\x8e" - "\x8d\x88\xd7\x4d\x90\xe5\xeb\xa1" - "\xab\xd6\x7c\x76\xad\xea\x7d\x76" - "\x53\xee\xb0\xcd\xd0\x02\xbb\x70" - "\x5b\x6f\x7b\xe2\x8c\xe8", - .clen = 46, - }, { - .key = "\x59\xe1\xd2\xdf\x05\x2f\xa4\x37" - "\x2b\xe9\x63\x44\xde\xd3\x7f\x2b" - "\xf1\x6f\x2d\xcd\xf6\x00\x5f\xcf" - "\x42\x8a\xc8\x92\xe6\xd0\xdc\x3b", - .klen = 32, - .iv = "\x68\xab\x60\x47\x49\xce\x4f\xbe" - "\x4c\x20\x8f\x68\xdd\x9c\xb0\xac", - .assoc = "\x77\x74\xee\xaf\x8d\x6d\xf9\x45" - "\x6c\x56\xbc\x8d\xdb\x65\xe0\x2e" - "\x86\xd0\x32\x0f\x79\x50\x20\xdb" - "\xa2\xa1\x37\x7e\x53\x00\xab\xa6", - .alen = 32, - .ptext = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc" - "\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf" - "\x51\x80\xb5\x30\xba\xf8\x00\xe2" - "\xd3\xad\x6f\x75\x09\x18\x93\x5c", - .plen = 32, - .ctext = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b" - "\xe7\x68\x81\x51\xbb\xfb\xdf\x60" - "\xbb\xac\xe8\xc1\xdc\x68\xae\x68" - "\x3a\xcd\x7a\x06\x49\xfe\x80\x11" - "\xe6\x61\x99\xe2\xdd\xbe\x2c\xbf", - .clen = 40, - }, { - .key = "\x96\x06\x0b\x7f\x15\xab\x4d\x53" - "\xac\xc3\x15\xd6\xd8\xf7\x42\x31" - "\x1b\x31\x38\x51\xfc\xa0\xe1\xe8" - "\x03\xb8\xa7\x6b\xc0\x2f\x7b\x11", - .klen = 32, - .iv = "\xa5\xcf\x9a\xe6\x59\x4a\xf7\xd9" - "\xcd\xfa\x41\xfa\xd7\xc0\x72\xb2", - .assoc = "\xb4\x99\x28\x4e\x9d\xe8\xa2\x60" - "\xed\x30\x6e\x1e\xd5\x89\xa3\x34" - "\xb1\x92\x3e\x93\x7e\xf0\xa2\xf5" - "\x64\xcf\x16\x57\x2d\x5f\x4a\x7d", - .alen = 32, - .ptext = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7" - "\x0d\x67\x9a\x43\xd4\x52\xd4\xb5" - "\x7b\x43\xc1\xb5\xbf\x98\x82\xfc" - "\x94\xda\x4e\x4d\xe4\x77\x32\x32", - .plen = 32, - .ctext = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e" - "\x5b\x4e\x0b\x15\x6e\x39\xfb\x0c" - "\x73\xc7\xd9\x6b\xbe\xce\x9b\x70" - "\xc7\x4f\x96\x16\x03\xfc\xea\xfb" - "\x56", - .clen = 33, - }, -}; - /* * All key wrapping test vectors taken from * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip -- cgit v1.2.3 From e44051d41ee4ca4114043f3acf6222a2aa655ee8 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:07 +0200 Subject: crypto: aegis128l/aegis256 - remove x86 and generic implementations Three variants of AEGIS were proposed for the CAESAR competition, and only one was selected for the final portfolio: AEGIS128. 
The other variants, AEGIS128L and AEGIS256, are not likely to ever turn up in networking protocols or other places where interoperability between Linux and other systems is a concern, nor are they likely to be subjected to further cryptanalysis. However, uninformed users may think that AEGIS128L (which is faster) is equally fit for use. So let's remove them now, before anyone starts using them and we are forced to support them forever. Note that there are no known flaws in the algorithms or in any of these implementations, but they have simply outlived their usefulness. Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 30 -- crypto/Makefile | 2 - crypto/aegis128l.c | 522 ---------------------------- crypto/aegis256.c | 473 ------------------------- crypto/testmgr.c | 12 - crypto/testmgr.h | 984 ----------------------------------------------------- 6 files changed, 2023 deletions(-) delete mode 100644 crypto/aegis128l.c delete mode 100644 crypto/aegis256.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 160dc1a9..559494bb 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -306,20 +306,6 @@ config CRYPTO_AEGIS128 help Support for the AEGIS-128 dedicated AEAD algorithm. -config CRYPTO_AEGIS128L - tristate "AEGIS-128L AEAD algorithm" - select CRYPTO_AEAD - select CRYPTO_AES # for AES S-box tables - help - Support for the AEGIS-128L dedicated AEAD algorithm. - -config CRYPTO_AEGIS256 - tristate "AEGIS-256 AEAD algorithm" - select CRYPTO_AEAD - select CRYPTO_AES # for AES S-box tables - help - Support for the AEGIS-256 dedicated AEAD algorithm. - config CRYPTO_AEGIS128_AESNI_SSE2 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" depends on X86 && 64BIT @@ -328,22 +314,6 @@ config CRYPTO_AEGIS128_AESNI_SSE2 help AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm. -config CRYPTO_AEGIS128L_AESNI_SSE2 - tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)" - depends on X86 && 64BIT - select CRYPTO_AEAD - select CRYPTO_SIMD - help - AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm. - -config CRYPTO_AEGIS256_AESNI_SSE2 - tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" - depends on X86 && 64BIT - select CRYPTO_AEAD - select CRYPTO_SIMD - help - AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm. - config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD diff --git a/crypto/Makefile b/crypto/Makefile index 4bc1951d..93375e12 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -90,8 +90,6 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o -obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o -obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c deleted file mode 100644 index 9bca3d61..00000000 --- a/crypto/aegis128l.c +++ /dev/null @@ -1,522 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The AEGIS-128L Authenticated-Encryption Algorithm - * - * Copyright (c) 2017-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "aegis.h" - -#define AEGIS128L_CHUNK_BLOCKS 2 -#define AEGIS128L_CHUNK_SIZE (AEGIS128L_CHUNK_BLOCKS * AEGIS_BLOCK_SIZE) -#define AEGIS128L_NONCE_SIZE 16 -#define AEGIS128L_STATE_BLOCKS 8 -#define AEGIS128L_KEY_SIZE 16 -#define AEGIS128L_MIN_AUTH_SIZE 8 -#define AEGIS128L_MAX_AUTH_SIZE 16 - -union aegis_chunk { - union aegis_block blocks[AEGIS128L_CHUNK_BLOCKS]; - u8 bytes[AEGIS128L_CHUNK_SIZE]; -}; - -struct aegis_state { - union aegis_block blocks[AEGIS128L_STATE_BLOCKS]; -}; - -struct aegis_ctx { - union aegis_block key; -}; - -struct aegis128l_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -}; - -static void crypto_aegis128l_update(struct aegis_state *state) -{ - union aegis_block tmp; - unsigned int i; - - tmp = state->blocks[AEGIS128L_STATE_BLOCKS - 1]; - for (i = AEGIS128L_STATE_BLOCKS - 1; i > 0; i--) - crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], - &state->blocks[i]); - crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); -} - -static void crypto_aegis128l_update_a(struct aegis_state *state, - const union aegis_chunk *msg) -{ - crypto_aegis128l_update(state); - crypto_aegis_block_xor(&state->blocks[0], &msg->blocks[0]); - crypto_aegis_block_xor(&state->blocks[4], &msg->blocks[1]); -} - -static void crypto_aegis128l_update_u(struct aegis_state *state, - const void *msg) -{ - crypto_aegis128l_update(state); - crypto_xor(state->blocks[0].bytes, msg + 0 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - crypto_xor(state->blocks[4].bytes, msg + 1 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); -} - -static void crypto_aegis128l_init(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv) -{ - union aegis_block key_iv; - union aegis_chunk chunk; - unsigned int i; - - memcpy(chunk.blocks[0].bytes, iv, AEGIS_BLOCK_SIZE); - chunk.blocks[1] = *key; - - key_iv = *key; - crypto_aegis_block_xor(&key_iv, &chunk.blocks[0]); - - state->blocks[0] = key_iv; - state->blocks[1] = crypto_aegis_const[1]; - state->blocks[2] = crypto_aegis_const[0]; - state->blocks[3] = crypto_aegis_const[1]; - state->blocks[4] = key_iv; - state->blocks[5] = *key; - state->blocks[6] = *key; - state->blocks[7] = *key; - - crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[0]); - crypto_aegis_block_xor(&state->blocks[6], &crypto_aegis_const[1]); - crypto_aegis_block_xor(&state->blocks[7], &crypto_aegis_const[0]); - - for (i = 0; i < 10; i++) { - crypto_aegis128l_update_a(state, &chunk); - } -} - -static void crypto_aegis128l_ad(struct aegis_state *state, - const u8 *src, unsigned int size) -{ - if (AEGIS_ALIGNED(src)) { - const union aegis_chunk *src_chunk = - (const union aegis_chunk *)src; - - while (size >= AEGIS128L_CHUNK_SIZE) { - crypto_aegis128l_update_a(state, src_chunk); - - size -= AEGIS128L_CHUNK_SIZE; - src_chunk += 1; - } - } else { - while (size >= AEGIS128L_CHUNK_SIZE) { - crypto_aegis128l_update_u(state, src); - - size -= AEGIS128L_CHUNK_SIZE; - src += AEGIS128L_CHUNK_SIZE; - } - } -} - -static void crypto_aegis128l_encrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_chunk tmp; - union aegis_block *tmp0 = &tmp.blocks[0]; - union aegis_block *tmp1 = &tmp.blocks[1]; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS128L_CHUNK_SIZE) { - 
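/*
 * Aligned fast path: tmp0 and tmp1 below become the two keystream
 * blocks z0 = S1 ^ S6 ^ (S2 & S3) and z1 = S2 ^ S5 ^ (S6 & S7); each
 * 32-byte chunk is XORed with them, and the state is then updated
 * with the plaintext chunk.
 */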
union aegis_chunk *dst_blk = - (union aegis_chunk *)dst; - const union aegis_chunk *src_blk = - (const union aegis_chunk *)src; - - *tmp0 = state->blocks[2]; - crypto_aegis_block_and(tmp0, &state->blocks[3]); - crypto_aegis_block_xor(tmp0, &state->blocks[6]); - crypto_aegis_block_xor(tmp0, &state->blocks[1]); - crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]); - - *tmp1 = state->blocks[6]; - crypto_aegis_block_and(tmp1, &state->blocks[7]); - crypto_aegis_block_xor(tmp1, &state->blocks[5]); - crypto_aegis_block_xor(tmp1, &state->blocks[2]); - crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]); - - crypto_aegis128l_update_a(state, src_blk); - - *dst_blk = tmp; - - size -= AEGIS128L_CHUNK_SIZE; - src += AEGIS128L_CHUNK_SIZE; - dst += AEGIS128L_CHUNK_SIZE; - } - } else { - while (size >= AEGIS128L_CHUNK_SIZE) { - *tmp0 = state->blocks[2]; - crypto_aegis_block_and(tmp0, &state->blocks[3]); - crypto_aegis_block_xor(tmp0, &state->blocks[6]); - crypto_aegis_block_xor(tmp0, &state->blocks[1]); - crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - - *tmp1 = state->blocks[6]; - crypto_aegis_block_and(tmp1, &state->blocks[7]); - crypto_aegis_block_xor(tmp1, &state->blocks[5]); - crypto_aegis_block_xor(tmp1, &state->blocks[2]); - crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - - crypto_aegis128l_update_u(state, src); - - memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE); - - size -= AEGIS128L_CHUNK_SIZE; - src += AEGIS128L_CHUNK_SIZE; - dst += AEGIS128L_CHUNK_SIZE; - } - } - - if (size > 0) { - union aegis_chunk msg = {}; - memcpy(msg.bytes, src, size); - - *tmp0 = state->blocks[2]; - crypto_aegis_block_and(tmp0, &state->blocks[3]); - crypto_aegis_block_xor(tmp0, &state->blocks[6]); - crypto_aegis_block_xor(tmp0, &state->blocks[1]); - - *tmp1 = state->blocks[6]; - crypto_aegis_block_and(tmp1, &state->blocks[7]); - crypto_aegis_block_xor(tmp1, &state->blocks[5]); - crypto_aegis_block_xor(tmp1, &state->blocks[2]); - - crypto_aegis128l_update_a(state, &msg); - - crypto_aegis_block_xor(&msg.blocks[0], tmp0); - crypto_aegis_block_xor(&msg.blocks[1], tmp1); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128l_decrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_chunk tmp; - union aegis_block *tmp0 = &tmp.blocks[0]; - union aegis_block *tmp1 = &tmp.blocks[1]; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS128L_CHUNK_SIZE) { - union aegis_chunk *dst_blk = - (union aegis_chunk *)dst; - const union aegis_chunk *src_blk = - (const union aegis_chunk *)src; - - *tmp0 = state->blocks[2]; - crypto_aegis_block_and(tmp0, &state->blocks[3]); - crypto_aegis_block_xor(tmp0, &state->blocks[6]); - crypto_aegis_block_xor(tmp0, &state->blocks[1]); - crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]); - - *tmp1 = state->blocks[6]; - crypto_aegis_block_and(tmp1, &state->blocks[7]); - crypto_aegis_block_xor(tmp1, &state->blocks[5]); - crypto_aegis_block_xor(tmp1, &state->blocks[2]); - crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]); - - crypto_aegis128l_update_a(state, &tmp); - - *dst_blk = tmp; - - size -= AEGIS128L_CHUNK_SIZE; - src += AEGIS128L_CHUNK_SIZE; - dst += AEGIS128L_CHUNK_SIZE; - } - } else { - while (size >= AEGIS128L_CHUNK_SIZE) { - *tmp0 = state->blocks[2]; - crypto_aegis_block_and(tmp0, &state->blocks[3]); - crypto_aegis_block_xor(tmp0, &state->blocks[6]); - crypto_aegis_block_xor(tmp0, &state->blocks[1]); - crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE, - 
AEGIS_BLOCK_SIZE); - - *tmp1 = state->blocks[6]; - crypto_aegis_block_and(tmp1, &state->blocks[7]); - crypto_aegis_block_xor(tmp1, &state->blocks[5]); - crypto_aegis_block_xor(tmp1, &state->blocks[2]); - crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - - crypto_aegis128l_update_a(state, &tmp); - - memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE); - - size -= AEGIS128L_CHUNK_SIZE; - src += AEGIS128L_CHUNK_SIZE; - dst += AEGIS128L_CHUNK_SIZE; - } - } - - if (size > 0) { - union aegis_chunk msg = {}; - memcpy(msg.bytes, src, size); - - *tmp0 = state->blocks[2]; - crypto_aegis_block_and(tmp0, &state->blocks[3]); - crypto_aegis_block_xor(tmp0, &state->blocks[6]); - crypto_aegis_block_xor(tmp0, &state->blocks[1]); - crypto_aegis_block_xor(&msg.blocks[0], tmp0); - - *tmp1 = state->blocks[6]; - crypto_aegis_block_and(tmp1, &state->blocks[7]); - crypto_aegis_block_xor(tmp1, &state->blocks[5]); - crypto_aegis_block_xor(tmp1, &state->blocks[2]); - crypto_aegis_block_xor(&msg.blocks[1], tmp1); - - memset(msg.bytes + size, 0, AEGIS128L_CHUNK_SIZE - size); - - crypto_aegis128l_update_a(state, &msg); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128l_process_ad(struct aegis_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - union aegis_chunk buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= AEGIS128L_CHUNK_SIZE) { - if (pos > 0) { - unsigned int fill = AEGIS128L_CHUNK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - crypto_aegis128l_update_a(state, &buf); - pos = 0; - left -= fill; - src += fill; - } - - crypto_aegis128l_ad(state, src, left); - src += left & ~(AEGIS128L_CHUNK_SIZE - 1); - left &= AEGIS128L_CHUNK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, AEGIS128L_CHUNK_SIZE - pos); - crypto_aegis128l_update_a(state, &buf); - } -} - -static void crypto_aegis128l_process_crypt(struct aegis_state *state, - struct aead_request *req, - const struct aegis128l_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_aegis128l_final(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - union aegis_chunk tmp; - unsigned int i; - - tmp.blocks[0].words64[0] = cpu_to_le64(assocbits); - tmp.blocks[0].words64[1] = cpu_to_le64(cryptbits); - - crypto_aegis_block_xor(&tmp.blocks[0], &state->blocks[2]); - - tmp.blocks[1] = tmp.blocks[0]; - for (i = 0; i < 7; i++) - crypto_aegis128l_update_a(state, &tmp); - - for (i = 0; i < 7; i++) - crypto_aegis_block_xor(tag_xor, &state->blocks[i]); -} - -static int crypto_aegis128l_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct aegis_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != 
AEGIS128L_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key.bytes, key, AEGIS128L_KEY_SIZE); - return 0; -} - -static int crypto_aegis128l_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > AEGIS128L_MAX_AUTH_SIZE) - return -EINVAL; - if (authsize < AEGIS128L_MIN_AUTH_SIZE) - return -EINVAL; - return 0; -} - -static void crypto_aegis128l_crypt(struct aead_request *req, - union aegis_block *tag_xor, - unsigned int cryptlen, - const struct aegis128l_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aegis_ctx *ctx = crypto_aead_ctx(tfm); - struct aegis_state state; - - crypto_aegis128l_init(&state, &ctx->key, req->iv); - crypto_aegis128l_process_ad(&state, req->src, req->assoclen); - crypto_aegis128l_process_crypt(&state, req, ops); - crypto_aegis128l_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_aegis128l_encrypt(struct aead_request *req) -{ - static const struct aegis128l_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128l_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_aegis128l_crypt(req, &tag, cryptlen, &ops); - - scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, - authsize, 1); - return 0; -} - -static int crypto_aegis128l_decrypt(struct aead_request *req) -{ - static const struct aegis128l_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128l_decrypt_chunk, - }; - static const u8 zeros[AEGIS128L_MAX_AUTH_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, - authsize, 0); - - crypto_aegis128l_crypt(req, &tag, cryptlen, &ops); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static int crypto_aegis128l_init_tfm(struct crypto_aead *tfm) -{ - return 0; -} - -static void crypto_aegis128l_exit_tfm(struct crypto_aead *tfm) -{ -} - -static struct aead_alg crypto_aegis128l_alg = { - .setkey = crypto_aegis128l_setkey, - .setauthsize = crypto_aegis128l_setauthsize, - .encrypt = crypto_aegis128l_encrypt, - .decrypt = crypto_aegis128l_decrypt, - .init = crypto_aegis128l_init_tfm, - .exit = crypto_aegis128l_exit_tfm, - - .ivsize = AEGIS128L_NONCE_SIZE, - .maxauthsize = AEGIS128L_MAX_AUTH_SIZE, - .chunksize = AEGIS128L_CHUNK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "aegis128l", - .cra_driver_name = "aegis128l-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_aegis128l_module_init(void) -{ - return crypto_register_aead(&crypto_aegis128l_alg); -} - -static void __exit crypto_aegis128l_module_exit(void) -{ - crypto_unregister_aead(&crypto_aegis128l_alg); -} - -subsys_initcall(crypto_aegis128l_module_init); -module_exit(crypto_aegis128l_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("AEGIS-128L AEAD algorithm"); -MODULE_ALIAS_CRYPTO("aegis128l"); -MODULE_ALIAS_CRYPTO("aegis128l-generic"); diff --git a/crypto/aegis256.c b/crypto/aegis256.c deleted file mode 100644 index b47fd395..00000000 --- a/crypto/aegis256.c +++ /dev/null @@ -1,473 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The AEGIS-256 Authenticated-Encryption Algorithm - * - * Copyright (c) 2017-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "aegis.h" - -#define AEGIS256_NONCE_SIZE 32 -#define AEGIS256_STATE_BLOCKS 6 -#define AEGIS256_KEY_SIZE 32 -#define AEGIS256_MIN_AUTH_SIZE 8 -#define AEGIS256_MAX_AUTH_SIZE 16 - -struct aegis_state { - union aegis_block blocks[AEGIS256_STATE_BLOCKS]; -}; - -struct aegis_ctx { - union aegis_block key[AEGIS256_KEY_SIZE / AEGIS_BLOCK_SIZE]; -}; - -struct aegis256_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -}; - -static void crypto_aegis256_update(struct aegis_state *state) -{ - union aegis_block tmp; - unsigned int i; - - tmp = state->blocks[AEGIS256_STATE_BLOCKS - 1]; - for (i = AEGIS256_STATE_BLOCKS - 1; i > 0; i--) - crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], - &state->blocks[i]); - crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); -} - -static void crypto_aegis256_update_a(struct aegis_state *state, - const union aegis_block *msg) -{ - crypto_aegis256_update(state); - crypto_aegis_block_xor(&state->blocks[0], msg); -} - -static void crypto_aegis256_update_u(struct aegis_state *state, const void *msg) -{ - crypto_aegis256_update(state); - crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); -} - -static void crypto_aegis256_init(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv) -{ - union aegis_block key_iv[2]; - unsigned int i; - - key_iv[0] = key[0]; - key_iv[1] = key[1]; - crypto_xor(key_iv[0].bytes, iv + 0 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - crypto_xor(key_iv[1].bytes, iv + 1 * AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - - state->blocks[0] = key_iv[0]; - state->blocks[1] = key_iv[1]; - 
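/*
 * The rest of the initial state follows the AEGIS-256 spec: S2 and S3
 * hold the Fibonacci-derived constants, S4 and S5 the two key halves
 * XORed with those constants; the loop below then mixes the state
 * with 16 update rounds fed by the key and key^iv words.
 */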
state->blocks[2] = crypto_aegis_const[1]; - state->blocks[3] = crypto_aegis_const[0]; - state->blocks[4] = key[0]; - state->blocks[5] = key[1]; - - crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[0]); - crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[1]); - - for (i = 0; i < 4; i++) { - crypto_aegis256_update_a(state, &key[0]); - crypto_aegis256_update_a(state, &key[1]); - crypto_aegis256_update_a(state, &key_iv[0]); - crypto_aegis256_update_a(state, &key_iv[1]); - } -} - -static void crypto_aegis256_ad(struct aegis_state *state, - const u8 *src, unsigned int size) -{ - if (AEGIS_ALIGNED(src)) { - const union aegis_block *src_blk = - (const union aegis_block *)src; - - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis256_update_a(state, src_blk); - - size -= AEGIS_BLOCK_SIZE; - src_blk++; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis256_update_u(state, src); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - } - } -} - -static void crypto_aegis256_encrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[5]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis256_update_a(state, src_blk); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[5]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis256_update_u(state, src); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[5]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - - crypto_aegis256_update_a(state, &msg); - - crypto_aegis_block_xor(&msg, &tmp); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis256_decrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[5]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis256_update_a(state, &tmp); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= 
AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[5]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis256_update_a(state, &tmp); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[5]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&msg, &tmp); - - memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); - - crypto_aegis256_update_a(state, &msg); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis256_process_ad(struct aegis_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - union aegis_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= AEGIS_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = AEGIS_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - crypto_aegis256_update_a(state, &buf); - pos = 0; - left -= fill; - src += fill; - } - - crypto_aegis256_ad(state, src, left); - src += left & ~(AEGIS_BLOCK_SIZE - 1); - left &= AEGIS_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); - crypto_aegis256_update_a(state, &buf); - } -} - -static void crypto_aegis256_process_crypt(struct aegis_state *state, - struct aead_request *req, - const struct aegis256_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_aegis256_final(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - union aegis_block tmp; - unsigned int i; - - tmp.words64[0] = cpu_to_le64(assocbits); - tmp.words64[1] = cpu_to_le64(cryptbits); - - crypto_aegis_block_xor(&tmp, &state->blocks[3]); - - for (i = 0; i < 7; i++) - crypto_aegis256_update_a(state, &tmp); - - for (i = 0; i < AEGIS256_STATE_BLOCKS; i++) - crypto_aegis_block_xor(tag_xor, &state->blocks[i]); -} - -static int crypto_aegis256_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct aegis_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != AEGIS256_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key[0].bytes, key, AEGIS_BLOCK_SIZE); - memcpy(ctx->key[1].bytes, key + AEGIS_BLOCK_SIZE, - AEGIS_BLOCK_SIZE); - return 0; -} - -static int 
crypto_aegis256_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > AEGIS256_MAX_AUTH_SIZE) - return -EINVAL; - if (authsize < AEGIS256_MIN_AUTH_SIZE) - return -EINVAL; - return 0; -} - -static void crypto_aegis256_crypt(struct aead_request *req, - union aegis_block *tag_xor, - unsigned int cryptlen, - const struct aegis256_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aegis_ctx *ctx = crypto_aead_ctx(tfm); - struct aegis_state state; - - crypto_aegis256_init(&state, ctx->key, req->iv); - crypto_aegis256_process_ad(&state, req->src, req->assoclen); - crypto_aegis256_process_crypt(&state, req, ops); - crypto_aegis256_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_aegis256_encrypt(struct aead_request *req) -{ - static const struct aegis256_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis256_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_aegis256_crypt(req, &tag, cryptlen, &ops); - - scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, - authsize, 1); - return 0; -} - -static int crypto_aegis256_decrypt(struct aead_request *req) -{ - static const struct aegis256_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis256_decrypt_chunk, - }; - static const u8 zeros[AEGIS256_MAX_AUTH_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, - authsize, 0); - - crypto_aegis256_crypt(req, &tag, cryptlen, &ops); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static int crypto_aegis256_init_tfm(struct crypto_aead *tfm) -{ - return 0; -} - -static void crypto_aegis256_exit_tfm(struct crypto_aead *tfm) -{ -} - -static struct aead_alg crypto_aegis256_alg = { - .setkey = crypto_aegis256_setkey, - .setauthsize = crypto_aegis256_setauthsize, - .encrypt = crypto_aegis256_encrypt, - .decrypt = crypto_aegis256_decrypt, - .init = crypto_aegis256_init_tfm, - .exit = crypto_aegis256_exit_tfm, - - .ivsize = AEGIS256_NONCE_SIZE, - .maxauthsize = AEGIS256_MAX_AUTH_SIZE, - .chunksize = AEGIS_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "aegis256", - .cra_driver_name = "aegis256-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_aegis256_module_init(void) -{ - return crypto_register_aead(&crypto_aegis256_alg); -} - -static void __exit crypto_aegis256_module_exit(void) -{ - crypto_unregister_aead(&crypto_aegis256_alg); -} - -subsys_initcall(crypto_aegis256_module_init); -module_exit(crypto_aegis256_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("AEGIS-256 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("aegis256"); -MODULE_ALIAS_CRYPTO("aegis256-generic"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6258581a..d990eba7 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3886,18 +3886,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .aead = __VECS(aegis128_tv_template) } - }, { - .alg = "aegis128l", - .test = alg_test_aead, - .suite = { - .aead = __VECS(aegis128l_tv_template) - } - }, { - .alg = "aegis256", - .test = alg_test_aead, - .suite = { - .aead = __VECS(aegis256_tv_template) - } }, { .alg = "ansi_cprng", .test = alg_test_cprng, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index fca03fa0..154052d0 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -19488,990 +19488,6 @@ static const struct aead_testvec aegis128_tv_template[] = { }, }; -/* - * AEGIS-128L test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/aegis128l/) - */ -static const struct aead_testvec aegis128l_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .klen = 16, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03", - .assoc = "", - .alen = 0, - .ptext = "", - .plen = 0, - .ctext = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6" - "\x20\x72\x78\xdd\x93\xc8\x57\xef", - .clen = 16, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .klen = 16, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", - .assoc = "", - .alen = 0, - .ptext = "\x79", - .plen = 1, - .ctext = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb" - "\x40\xb3\x71\xc5\x22\x58\x31\x77" - "\x6d", - .clen = 17, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .klen = 16, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", - .assoc = "", - .alen = 0, - .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .plen = 15, - .ctext = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03" - "\x2b\xee\x62\x99\x7b\x98\x13\x1f" - "\xe0\x76\x4c\x2e\x53\x99\x4f\xbe" - "\xe1\xa8\x04\x7f\xe1\x71\xbe", - .clen = 31, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - 
"\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .klen = 16, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", - .assoc = "", - .alen = 0, - .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .plen = 16, - .ctext = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c" - "\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e" - "\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb" - "\xde\x54\x9b\xf5\x92\x8b\x93\xc5", - .clen = 32, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .klen = 16, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", - .assoc = "", - .alen = 0, - .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .plen = 17, - .ctext = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b" - "\x71\xc3\x8a\x91\x22\x4f\x1b\xd2" - "\x33\x6d\x86\xbc\xf8\x2f\x06\xf9" - "\x82\x64\xc7\x72\x00\x30\xfc\xf0" - "\xf8", - .clen = 33, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .klen = 16, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", - .assoc = "", - .alen = 0, - .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .plen = 31, - .ctext = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5" - "\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1" - "\x45\x10\x96\xe0\xbf\x55\x0f\x4c" - "\x1a\xfd\xf4\xda\x4e\x10\xde\xc9" - "\x0e\x6f\xc7\x3c\x49\x94\x41\xfc" - "\x59\x28\x88\x3c\x79\x10\x6b", - .clen = 47, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .klen = 16, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", - .assoc = "", - .alen = 0, - .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .plen = 32, - .ctext = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d" - "\x1c\x9f\x06\x54\x8b\x0b\xb4\x40" - "\xde\x11\x59\x3e\xfd\x74\xf6\x42" - "\x97\x17\xf7\x24\xb6\x7e\xc4\xc6" - "\x06\xa3\x94\xda\x3d\x7f\x55\x0a" - "\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc", - .clen = 48, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .klen = 16, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", - .assoc = "\xd5", - .alen = 1, - .ptext = "", - .plen = 0, - .ctext = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8" - "\x0f\x9a\x15\x60\x0e\x9b\x13\x8f", - .clen = 16, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .klen = 16, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .ptext = "", - .plen = 0, - .ctext = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c" - "\x84\xfb\x26\xfb\xd5\xca\x62\x39", - .clen = 16, - }, { - .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .klen = 16, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .ptext = "", - .plen = 0, - .ctext = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1" - "\x99\x4d\x6d\xb4\x67\x32\xb0\x3a", - .clen = 16, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .klen = 16, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", - .assoc = 
"\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .ptext = "", - .plen = 0, - .ctext = "\x33\xb1\x42\x97\x8e\x16\x7b\x63" - "\x06\xba\x5b\xcb\xae\x6d\x8b\x56", - .clen = 16, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .klen = 16, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .ptext = "", - .plen = 0, - .ctext = "\xda\x44\x08\x8c\x2a\xa5\x07\x35" - "\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f", - .clen = 16, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .klen = 16, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .ptext = "", - .plen = 0, - .ctext = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88" - "\x40\x7f\x7b\x19\x7a\x52\x8c\xf0", - .clen = 16, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .klen = 16, - .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54", - .assoc = "\x40", - .alen = 1, - .ptext = "\x4f", - .plen = 1, - .ctext = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9" - "\xa0\x98\x09\x85\xbe\x56\x8e\x79" - "\xf4", - .clen = 17, - }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .klen = 16, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .plen = 15, - .ctext = "\x99\x2e\x84\x50\x64\x5c\xab\x29" - "\x20\xba\xb9\x2f\x62\x3a\xce\x2a" - "\x75\x25\x3b\xe3\x40\xe0\x1d\xfc" - "\x20\x63\x0b\x49\x7e\x97\x08", - .clen = 31, - }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .klen = 16, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .plen = 16, - .ctext = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee" - "\x78\x08\x12\xec\x09\xaf\x53\x14" - "\x90\x3e\x3d\x76\xad\x71\x21\x08" - "\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb", - .clen = 32, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .klen = 16, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .plen = 17, - .ctext = "\xf3\xe7\x95\x86\xcf\x34\x95\x96" - "\x17\xfe\x1b\xae\x1b\x31\xf2\x1a" - "\xbd\xbc\xc9\x4e\x11\x29\x09\x5c" - "\x05\xd3\xb4\x2e\x4a\x74\x59\x49" - "\x7d", - .clen = 33, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .klen = 16, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen 
= 31, - .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .plen = 31, - .ctext = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24" - "\x0d\x19\x15\x61\x65\x3b\x06\x26" - "\x71\xe8\x7e\x16\xdb\x96\x01\x01" - "\x52\xcd\x49\x5b\x07\x33\x4e\xe7" - "\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5" - "\xed\x90\xce\xb9\xcd\xcc\xa1", - .clen = 47, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .klen = 16, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .plen = 32, - .ctext = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1" - "\xbc\x0f\x35\x97\x97\x0c\x4b\x18" - "\xce\x58\xc8\x3b\xd4\x85\x93\x79" - "\xcc\x9c\xea\xc1\x73\x13\x0b\x4c" - "\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56" - "\x64\x4e\x47\xce\xb2\xb4\x92\xb4", - .clen = 48, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .klen = 16, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .plen = 65, - .ctext = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8" - "\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f" - "\x4b\xfa\xc8\x2e\x67\x43\xf3\x63" - "\x42\x8e\x99\x5a\x9c\x0b\x84\x77" - "\xbc\x46\x76\x48\x82\xc7\x57\x96" - "\xe1\x65\xd1\xed\x1d\xdd\x80\x24" - "\xa6\x4d\xa9\xf1\x53\x8b\x5e\x0e" - "\x26\xb9\xcc\x37\xe5\x43\xe1\x5a" - "\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d" - "\xf7\x33\x64\xc1\xd3\xf2\xfc\x35" - "\x01", - .clen = 81, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .klen = 16, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .plen = 33, - .ctext = "\x4c\xa9\xac\x71\xed\x10\xa6\x24" - "\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb" - "\x05\xc9\xd6\x97\xb6\x10\x7f\x17" - "\xc2\xc0\x93\xcf\xe0\x94\xfd\x99" - "\xf2\x62\x25\x28\x01\x23\x6f\x8b" - "\x04\x52\xbc\xb0\x3e\x66\x52\x90" - "\x9f", - .clen = 49, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .klen = 16, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .ptext = 
"\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .plen = 16, - .ctext = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5" - "\x96\xe6\x97\xe4\x10\xeb\x40\x95" - "\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec" - "\x05\xa8\x31\x50\x11\x19\x44", - .clen = 31, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .klen = 16, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .plen = 16, - .ctext = "\x30\x95\x7d\xea\xdc\x62\xc0\x88" - "\xa1\xe3\x8d\x8c\xac\x04\x10\xa7" - "\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb" - "\x21\x93\x2e\x31\x84\x83", - .clen = 30, - }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .klen = 16, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .plen = 16, - .ctext = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16" - "\x63\x0d\x43\xd5\x49\xca\xa8\x85" - "\x49\xc0\xae\x13\xbc\x26\x1d\x4b", - .clen = 24, - }, -}; - -/* - * AEGIS-256 test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/aegis256/) - */ -static const struct aead_testvec aegis256_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81" - "\xca\xb0\x82\x21\x41\xa8\xe0\x06" - "\x30\x0b\x37\xf6\xb6\x17\xe7\xb5", - .klen = 32, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03" - "\x95\x61\x05\x42\x82\x50\xc0\x0c" - "\x60\x16\x6f\xec\x6d\x2f\xcf\x6b", - .assoc = "", - .alen = 0, - .ptext = "", - .plen = 0, - .ctext = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa" - "\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2", - .clen = 16, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87" - "\xf4\x72\x8e\xa5\x46\x48\x62\x20" - "\xf1\x38\x16\xce\x90\x76\x87\x8c", - .klen = 32, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09" - "\xbf\x23\x11\xc6\x87\xf0\x42\x26" - "\x22\x44\x4e\xc4\x47\x8e\x6e\x41", - .assoc = "", - .alen = 0, - .ptext = "\x79", - .plen = 1, - .ctext = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16" - "\x9e\x89\xd9\x06\xa6\xa8\x14\x29" - "\x8b", - .clen = 17, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e" - "\x1f\x35\x9a\x29\x4b\xe8\xe4\x39" - "\xb3\x66\xf5\xa6\x6a\xd5\x26\x62", - .klen = 32, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f" - "\xe9\xe5\x1d\x4a\x8c\x90\xc4\x40" - "\xe3\x71\x2d\x9c\x21\xed\x0e\x18", - .assoc = "", - .alen = 0, - .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .plen = 15, - .ctext = "\x09\x94\x1f\xa6\x13\xc3\x74\x75" - "\x17\xad\x8a\x0e\xd8\x66\x9a\x28" - "\xd7\x30\x66\x09\x2a\xdc\xfa\x2a" - "\x9f\x3b\xd7\xdd\x66\xd1\x2b", - .clen = 31, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94" - "\x49\xf7\xa5\xad\x50\x88\x66\x53" - "\x74\x94\xd4\x7f\x44\x34\xc5\x39", - .klen = 32, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15" - "\x14\xa8\x28\xce\x92\x30\x46\x59" - "\xa4\x9f\x0b\x75\xfb\x4c\xad\xee", 
- .assoc = "", - .alen = 0, - .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .plen = 16, - .ctext = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f" - "\x54\x63\x4e\x7f\xc9\x8e\xfa\x70" - "\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1" - "\x29\x17\xc8\x3b\x52\xa4\x98\x72", - .clen = 32, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a" - "\x74\xb9\xb1\x32\x55\x28\xe8\x6d" - "\x35\xc1\xb3\x57\x1f\x93\x64\x0f", - .klen = 32, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c" - "\x3e\x6a\x34\x53\x97\xd0\xc8\x73" - "\x66\xcd\xea\x4d\xd5\xab\x4c\xc5", - .assoc = "", - .alen = 0, - .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .plen = 17, - .ctext = "\x71\x6b\x37\x0b\x02\x61\x28\x12" - "\x83\xab\x66\x90\x84\xc7\xd1\xc5" - "\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2" - "\xc0\x00\x39\x13\xb5\x51\x68\x44" - "\xad", - .clen = 33, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0" - "\x9e\x7c\xbc\xb6\x5b\xc8\x6a\x86" - "\xf7\xef\x91\x30\xf9\xf2\x04\xe6", - .klen = 32, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22" - "\x69\x2c\x3f\xd7\x9c\x70\x4a\x8d" - "\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c", - .assoc = "", - .alen = 0, - .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .plen = 31, - .ctext = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f" - "\x06\x3b\x52\x18\x49\x75\x1b\xf0" - "\x53\x09\x72\x7b\x45\x79\xe0\xbe" - "\x89\x85\x23\x15\xb8\x79\x07\x4c" - "\x53\x7a\x15\x37\x0a\xee\xb7\xfb" - "\xc4\x1f\x12\x27\xcf\x77\x90", - .clen = 47, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6" - "\xc8\x3e\xc8\x3a\x60\x68\xec\xa0" - "\xb8\x1c\x70\x08\xd3\x51\xa3\xbd", - .klen = 32, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28" - "\x93\xef\x4b\x5b\xa1\x10\xcc\xa6" - "\xe8\x28\xa8\xfe\x89\x69\x8b\x72", - .assoc = "", - .alen = 0, - .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .plen = 32, - .ctext = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4" - "\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7" - "\xeb\x0f\x05\x2b\xba\xb3\xca\xef" - "\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5" - "\xbf\x7a\x3e\xcc\x31\x76\x09\x80" - "\x32\x5d\xbb\xe8\x38\x0e\x77\xd3", - .clen = 48, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad" - "\xf3\x00\xd4\xbf\x65\x08\x6e\xb9" - "\x7a\x4a\x4f\xe0\xad\xb0\x42\x93", - .klen = 32, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e" - "\xbd\xb1\x57\xe0\xa6\xb0\x4e\xc0" - "\xaa\x55\x87\xd6\x63\xc8\x2a\x49", - .assoc = "\xd5", - .alen = 1, - .ptext = "", - .plen = 0, - .ctext = "\x96\x43\x30\xca\x6c\x4f\xd7\x12" - "\xba\xd9\xb3\x18\x86\xdf\xc3\x52", - .clen = 16, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3" - "\x1d\xc3\xdf\x43\x6a\xa8\xf0\xd3" - "\x3b\x77\x2e\xb9\x87\x0f\xe1\x6a", - .klen = 32, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34" - "\xe8\x73\x62\x64\xab\x50\xd0\xda" - "\x6b\x83\x66\xaf\x3e\x27\xc9\x1f", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .ptext = "", - .plen = 0, - .ctext = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83" - "\x11\x9f\xb0\x74\xee\xc7\x03\xdd", - .clen = 16, - }, { - .key = 
"\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9" - "\x47\x85\xeb\xc7\x6f\x48\x72\xed" - "\xfc\xa5\x0d\x91\x61\x6e\x81\x40", - .klen = 32, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b" - "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3" - "\x2d\xb0\x45\x87\x18\x86\x68\xf6", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .ptext = "", - .plen = 0, - .ctext = "\x16\x44\x73\x33\x5d\xf2\xb9\x04" - "\x6b\x79\x98\xef\xdb\xd5\xc5\xf1", - .clen = 16, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf" - "\x72\x47\xf6\x4b\x74\xe8\xf4\x06" - "\xbe\xd3\xec\x6a\x3b\xcd\x20\x17", - .klen = 32, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d" - "\xee\xde\x23\x60\xf2\xe5\x08\xcc", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .ptext = "", - .plen = 0, - .ctext = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45" - "\x98\x54\x8c\xed\x3d\x17\xf0\xdd", - .clen = 16, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6" - "\x9c\x0a\x02\xd0\x79\x88\x76\x20" - "\x7f\x00\xca\x42\x15\x2c\xbf\xed", - .klen = 32, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .ptext = "", - .plen = 0, - .ctext = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0" - "\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc", - .clen = 16, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc" - "\xc6\xcc\x0e\x54\x7f\x28\xf8\x3a" - "\x40\x2e\xa9\x1a\xf0\x8b\x5e\xc4", - .klen = 32, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .ptext = "", - .plen = 0, - .ctext = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1" - "\xd2\x64\x3e\x66\x6a\xb2\x03\xc0", - .clen = 16, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2" - "\xf1\x8e\x19\xd8\x84\xc8\x7a\x53" - "\x02\x5b\x88\xf3\xca\xea\xfe\x9b", - .klen = 32, - .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54" - "\xbb\x3f\x9c\xf9\xc5\x70\x5a\x5a" - "\x32\x67\xc0\xe9\x80\x02\xe5\x50", - .assoc = "\x40", - .alen = 1, - .ptext = "\x4f", - .plen = 1, - .ctext = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b" - "\x7a\x3f\x81\xf7\xfc\x1b\x79\x83" - "\xc7", - .clen = 17, - }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8" - "\x1b\x50\x25\x5d\x89\x68\xfc\x6d" - "\xc3\x89\x67\xcb\xa4\x49\x9d\x71", - .klen = 32, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a" - "\xe6\x01\xa8\x7e\xca\x10\xdc\x73" - "\xf4\x94\x9f\xc1\x5a\x61\x85\x27", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .plen = 15, - .ctext = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba" - "\x7e\x98\x83\x02\x34\x23\xf7\x94" - "\xde\x35\xe6\x1d\x14\x18\xe5\x38" - "\x14\x80\x6a\xa7\x1b\xae\x1d", - .clen = 31, - 
}, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf" - "\x46\x13\x31\xe1\x8e\x08\x7e\x87" - "\x85\xb6\x46\xa3\x7e\xa8\x3c\x48", - .klen = 32, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60" - "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d" - "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .plen = 16, - .ctext = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01" - "\xbe\x28\x98\x10\x6f\xe9\x61\x32" - "\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9" - "\x46\x2d\x55\xe3\x42\x67\xf2\xaf", - .clen = 32, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5" - "\x70\xd5\x3c\x65\x93\xa8\x00\xa0" - "\x46\xe4\x25\x7c\x58\x08\xdb\x1e", - .klen = 32, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7" - "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .plen = 17, - .ctext = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8" - "\x45\x65\x1b\x72\x9b\xaa\xa3\x1e" - "\x87\x9d\x26\xdf\xff\x81\x11\xd2" - "\x47\x41\xb9\x24\xc1\x8a\xa3\x8b" - "\x55", - .clen = 33, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb" - "\x9a\x97\x48\xe9\x98\x48\x82\xba" - "\x07\x11\x04\x54\x32\x67\x7b\xf5", - .klen = 32, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .plen = 31, - .ctext = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5" - "\x69\x7f\x7a\xdd\x8c\x46\xda\xba" - "\x0a\x5c\x0e\x7f\xac\xee\x02\xd2" - "\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66" - "\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b" - "\xdf\xb8\xea\xd8\xa9\x38\xed", - .clen = 47, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1" - "\xc5\x5a\x53\x6e\x9d\xe8\x04\xd4" - "\xc9\x3f\xe2\x2d\x0c\xc6\x1a\xcb", - .klen = 32, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .plen = 32, - .ctext = "\xd3\x68\x14\x70\x3c\x01\x43\x86" - "\x02\xab\xbe\x75\xaa\xe7\xf5\x53" - "\x5c\x05\xbd\x9b\x19\xbb\x2a\x61" - "\x8f\x69\x05\x75\x8e\xca\x60\x0c" - "\x5b\xa2\x48\x61\x32\x74\x11\x2b" - "\xf6\xcf\x06\x78\x6f\x78\x1a\x4a", - .clen = 48, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7" - "\xef\x1c\x5f\xf2\xa3\x88\x86\xed" - "\x8a\x6d\xc1\x05\xe7\x25\xb9\xa2", - .klen = 32, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - 
"\xba\x78\xf9\xfb\x9d\x3c\xa1\x58", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .plen = 65, - .ctext = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2" - "\x15\x3a\x6c\x72\x83\x9b\xb1\x75" - "\xea\xf2\xfc\xff\xc6\xf1\x13\xa4" - "\x1a\x93\x33\x79\x97\x82\x81\xc0" - "\x96\xc2\x00\xab\x39\xae\xa1\x62" - "\x53\xa3\x86\xc9\x07\x8c\xaf\x22" - "\x47\x31\x29\xca\x4a\x95\xf5\xd5" - "\x20\x63\x5a\x54\x80\x2c\x4a\x63" - "\xfb\x18\x73\x31\x4f\x08\x21\x5d" - "\x20\xe9\xc3\x7e\xea\x25\x77\x3a" - "\x65", - .clen = 81, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe" - "\x19\xde\x6b\x76\xa8\x28\x08\x07" - "\x4b\x9a\xa0\xdd\xc1\x84\x58\x79", - .klen = 32, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .plen = 33, - .ctext = "\x33\xc1\xda\xfa\x15\x21\x07\x8e" - "\x93\x68\xea\x64\x7b\x3d\x4b\x6b" - "\x71\x5e\x5e\x6b\x92\xaa\x65\xc2" - "\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81" - "\x26\x3a\x5a\x09\xe8\xce\x73\x72" - "\xde\x7b\x58\x9e\x85\xb9\xa4\x28" - "\xda", - .clen = 49, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04" - "\x44\xa1\x76\xfb\xad\xc8\x8a\x21" - "\x0d\xc8\x7f\xb6\x9b\xe3\xf8\x4f", - .klen = 32, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85" - "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" - "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .plen = 16, - .ctext = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02" - "\x0f\xdf\xc9\x6e\x37\x1e\x57\x99" - "\x07\x2a\x1a\xac\xd1\xda\xfd\x3b" - "\xc7\xff\xbd\xbc\x85\x09\x0b", - .clen = 31, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a" - "\x6e\x63\x82\x7f\xb2\x68\x0c\x3a" - "\xce\xf5\x5e\x8e\x75\x42\x97\x26", - .klen = 32, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c" - "\x39\x14\x05\xa0\xf3\x10\xec\x41" - "\xff\x01\x95\x84\x2b\x59\x7f\xdb", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .plen = 16, - .ctext = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e" - "\x6c\xd9\x84\x63\x70\x97\x61\x37" - "\x08\x2f\x16\x90\x9e\x62\x30\x0d" - "\x62\xd5\xc8\xf0\x46\x1a", - .clen = 30, - }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10" - 
"\x98\x25\x8d\x03\xb7\x08\x8e\x54" - "\x90\x23\x3d\x67\x4f\xa1\x36\xfc", - .klen = 32, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92" - "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a" - "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .plen = 16, - .ctext = "\xce\xf3\x17\x87\x49\xc2\x00\x46" - "\xc6\x12\x5c\x8f\x81\x38\xaa\x55" - "\xf8\x67\x75\xf1\x75\xe3\x2a\x24", - .clen = 24, - }, -}; - /* * All key wrapping test vectors taken from * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip -- cgit v1.2.3 From 91629665d924844502d7625346d44e99911d1e1e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:08 +0200 Subject: crypto: aegis128 - drop empty TFM init/exit routines TFM init/exit routines are optional, so no need to provide empty ones. Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/aegis128.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'crypto') diff --git a/crypto/aegis128.c b/crypto/aegis128.c index d78f77fc..32840d5e 100644 --- a/crypto/aegis128.c +++ b/crypto/aegis128.c @@ -403,22 +403,11 @@ static int crypto_aegis128_decrypt(struct aead_request *req) return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0; } -static int crypto_aegis128_init_tfm(struct crypto_aead *tfm) -{ - return 0; -} - -static void crypto_aegis128_exit_tfm(struct crypto_aead *tfm) -{ -} - static struct aead_alg crypto_aegis128_alg = { .setkey = crypto_aegis128_setkey, .setauthsize = crypto_aegis128_setauthsize, .encrypt = crypto_aegis128_encrypt, .decrypt = crypto_aegis128_decrypt, - .init = crypto_aegis128_init_tfm, - .exit = crypto_aegis128_exit_tfm, .ivsize = AEGIS128_NONCE_SIZE, .maxauthsize = AEGIS128_MAX_AUTH_SIZE, -- cgit v1.2.3 From a0384638c5e45347d9a1508bdecaa247be49b385 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:09 +0200 Subject: crypto: aegis - avoid prerotated AES tables The generic AES code provides four sets of lookup tables, where each set consists of four tables containing the same 32-bit values, but rotated by 0, 8, 16 and 24 bits, respectively. This makes sense for CISC architectures such as x86 which support memory operands, but for other architectures, the rotates are quite cheap, and using all four tables needlessly thrashes the D-cache, and actually hurts rather than helps performance. Since x86 already has its own implementation of AEGIS based on AES-NI instructions, let's tweak the generic implementation towards other architectures, and avoid the prerotated tables, and perform the rotations inline. On ARM Cortex-A53, this results in a ~8% speedup. 
Acked-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/aegis.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/aegis.h b/crypto/aegis.h index 41a3090c..3308066d 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -10,6 +10,7 @@ #define _CRYPTO_AEGIS_H #include +#include #include #define AEGIS_BLOCK_SIZE 16 @@ -53,16 +54,13 @@ static void crypto_aegis_aesenc(union aegis_block *dst, const union aegis_block *key) { const u8 *s = src->bytes; - const u32 *t0 = crypto_ft_tab[0]; - const u32 *t1 = crypto_ft_tab[1]; - const u32 *t2 = crypto_ft_tab[2]; - const u32 *t3 = crypto_ft_tab[3]; + const u32 *t = crypto_ft_tab[0]; u32 d0, d1, d2, d3; - d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]]; - d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]]; - d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]]; - d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]]; + d0 = t[s[ 0]] ^ rol32(t[s[ 5]], 8) ^ rol32(t[s[10]], 16) ^ rol32(t[s[15]], 24); + d1 = t[s[ 4]] ^ rol32(t[s[ 9]], 8) ^ rol32(t[s[14]], 16) ^ rol32(t[s[ 3]], 24); + d2 = t[s[ 8]] ^ rol32(t[s[13]], 8) ^ rol32(t[s[ 2]], 16) ^ rol32(t[s[ 7]], 24); + d3 = t[s[12]] ^ rol32(t[s[ 1]], 8) ^ rol32(t[s[ 6]], 16) ^ rol32(t[s[11]], 24); dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0]; dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1]; -- cgit v1.2.3 From 467920a4b6c1cc41fdd55776be2d4a74d9d10469 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:10 +0200 Subject: crypto: aegis128 - add support for SIMD acceleration Add some plumbing to allow the AEGIS128 code to be built with SIMD routines for acceleration. Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/aegis.h | 14 +- crypto/aegis128-core.c | 481 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/aegis128.c | 447 --------------------------------------------- 4 files changed, 489 insertions(+), 454 deletions(-) create mode 100644 crypto/aegis128-core.c delete mode 100644 crypto/aegis128.c (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index 93375e12..362a36f0 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o +aegis128-y := aegis128-core.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis.h b/crypto/aegis.h index 3308066d..6cb65a49 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -35,23 +35,23 @@ static const union aegis_block crypto_aegis_const[2] = { } }, }; -static void crypto_aegis_block_xor(union aegis_block *dst, - const union aegis_block *src) +static inline void crypto_aegis_block_xor(union aegis_block *dst, + const union aegis_block *src) { dst->words64[0] ^= src->words64[0]; dst->words64[1] ^= src->words64[1]; } -static void crypto_aegis_block_and(union aegis_block *dst, - const union aegis_block *src) +static inline void crypto_aegis_block_and(union aegis_block *dst, + const union aegis_block *src) { dst->words64[0] &= src->words64[0]; dst->words64[1] &= src->words64[1]; } -static void crypto_aegis_aesenc(union aegis_block *dst, - const union aegis_block *src, - const union aegis_block *key) +static inline void crypto_aegis_aesenc(union aegis_block *dst, + const union aegis_block *src, + 
const union aegis_block *key) { const u8 *s = src->bytes; const u32 *t = crypto_ft_tab[0]; diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c new file mode 100644 index 00000000..f815b468 --- /dev/null +++ b/crypto/aegis128-core.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * The AEGIS-128 Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aegis.h" + +#define AEGIS128_NONCE_SIZE 16 +#define AEGIS128_STATE_BLOCKS 5 +#define AEGIS128_KEY_SIZE 16 +#define AEGIS128_MIN_AUTH_SIZE 8 +#define AEGIS128_MAX_AUTH_SIZE 16 + +struct aegis_state { + union aegis_block blocks[AEGIS128_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key; +}; + +struct aegis128_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static bool have_simd; + +bool crypto_aegis128_have_simd(void); +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); + +static void crypto_aegis128_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; + for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis128_update_a(struct aegis_state *state, + const union aegis_block *msg) +{ + if (have_simd && crypto_simd_usable()) { + crypto_aegis128_update_simd(state, msg); + return; + } + + crypto_aegis128_update(state); + crypto_aegis_block_xor(&state->blocks[0], msg); +} + +static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) +{ + if (have_simd && crypto_simd_usable()) { + crypto_aegis128_update_simd(state, msg); + return; + } + + crypto_aegis128_update(state); + crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis128_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv; + unsigned int i; + + key_iv = *key; + crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); + + state->blocks[0] = key_iv; + state->blocks[1] = crypto_aegis_const[1]; + state->blocks[2] = crypto_aegis_const[0]; + state->blocks[3] = *key; + state->blocks[4] = *key; + + crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); + + for (i = 0; i < 5; i++) { + crypto_aegis128_update_a(state, key); + crypto_aegis128_update_a(state, &key_iv); + } +} + +static void crypto_aegis128_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_block *src_blk = + (const union aegis_block *)src; + + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_a(state, src_blk); + + size -= AEGIS_BLOCK_SIZE; + src_blk++; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_u(state, src); + + size -= 
AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + } + } +} + +static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + + crypto_aegis128_update_a(state, &msg); + + crypto_aegis_block_xor(&msg, &tmp); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&msg, &tmp); + + memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); + + crypto_aegis128_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned 
int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128_ad(state, src, left); + src += left & ~(AEGIS_BLOCK_SIZE - 1); + left &= AEGIS_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); + crypto_aegis128_update_a(state, &buf); + } +} + +static void crypto_aegis128_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis128_ops *ops) +{ + struct skcipher_walk walk; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); + } +} + +static void crypto_aegis128_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_block tmp; + unsigned int i; + + tmp.words64[0] = cpu_to_le64(assocbits); + tmp.words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp, &state->blocks[3]); + + for (i = 0; i < 7; i++) + crypto_aegis128_update_a(state, &tmp); + + for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS128_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); + return 0; +} + +static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis128_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen); + crypto_aegis128_process_crypt(&state, req, ops); + crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis128_encrypt(struct aead_request *req) +{ + const struct aegis128_ops *ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + if (have_simd && crypto_simd_usable()) + ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk_simd }; + + 
crypto_aegis128_crypt(req, &tag, cryptlen, ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis128_decrypt(struct aead_request *req) +{ + const struct aegis128_ops *ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk, + }; + static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + if (have_simd && crypto_simd_usable()) + ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk_simd }; + + crypto_aegis128_crypt(req, &tag, cryptlen, ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0; +} + +static struct aead_alg crypto_aegis128_alg = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt, + .decrypt = crypto_aegis128_decrypt, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base = { + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis128", + .cra_driver_name = "aegis128-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis128_module_init(void) +{ + if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD)) + have_simd = crypto_aegis128_have_simd(); + + return crypto_register_aead(&crypto_aegis128_alg); +} + +static void __exit crypto_aegis128_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis128_alg); +} + +subsys_initcall(crypto_aegis128_module_init); +module_exit(crypto_aegis128_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis128"); +MODULE_ALIAS_CRYPTO("aegis128-generic"); diff --git a/crypto/aegis128.c b/crypto/aegis128.c deleted file mode 100644 index 32840d5e..00000000 --- a/crypto/aegis128.c +++ /dev/null @@ -1,447 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The AEGIS-128 Authenticated-Encryption Algorithm - * - * Copyright (c) 2017-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "aegis.h" - -#define AEGIS128_NONCE_SIZE 16 -#define AEGIS128_STATE_BLOCKS 5 -#define AEGIS128_KEY_SIZE 16 -#define AEGIS128_MIN_AUTH_SIZE 8 -#define AEGIS128_MAX_AUTH_SIZE 16 - -struct aegis_state { - union aegis_block blocks[AEGIS128_STATE_BLOCKS]; -}; - -struct aegis_ctx { - union aegis_block key; -}; - -struct aegis128_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -}; - -static void crypto_aegis128_update(struct aegis_state *state) -{ - union aegis_block tmp; - unsigned int i; - - tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; - for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) - crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], - &state->blocks[i]); - crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); -} - -static void crypto_aegis128_update_a(struct aegis_state *state, - const union aegis_block *msg) -{ - crypto_aegis128_update(state); - crypto_aegis_block_xor(&state->blocks[0], msg); -} - -static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) -{ - crypto_aegis128_update(state); - crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); -} - -static void crypto_aegis128_init(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv) -{ - union aegis_block key_iv; - unsigned int i; - - key_iv = *key; - crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); - - state->blocks[0] = key_iv; - state->blocks[1] = crypto_aegis_const[1]; - state->blocks[2] = crypto_aegis_const[0]; - state->blocks[3] = *key; - state->blocks[4] = *key; - - crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); - crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); - - for (i = 0; i < 5; i++) { - crypto_aegis128_update_a(state, key); - crypto_aegis128_update_a(state, &key_iv); - } -} - -static void crypto_aegis128_ad(struct aegis_state *state, - const u8 *src, unsigned int size) -{ - if (AEGIS_ALIGNED(src)) { - const union aegis_block *src_blk = - (const union aegis_block *)src; - - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_a(state, src_blk); - - size -= AEGIS_BLOCK_SIZE; - src_blk++; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_u(state, src); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - } - } -} - -static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, src_blk); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - 
crypto_aegis128_update_u(state, src); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - - crypto_aegis128_update_a(state, &msg); - - crypto_aegis_block_xor(&msg, &tmp); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, &tmp); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis128_update_a(state, &tmp); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&msg, &tmp); - - memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); - - crypto_aegis128_update_a(state, &msg); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_process_ad(struct aegis_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - union aegis_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= AEGIS_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = AEGIS_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - crypto_aegis128_update_a(state, &buf); - pos = 0; - left -= fill; - src += fill; - } - - crypto_aegis128_ad(state, src, left); - src += left & ~(AEGIS_BLOCK_SIZE - 1); - left &= AEGIS_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); - crypto_aegis128_update_a(state, &buf); - } -} - -static void crypto_aegis128_process_crypt(struct aegis_state *state, - struct aead_request *req, - const struct aegis128_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = 
walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_aegis128_final(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - union aegis_block tmp; - unsigned int i; - - tmp.words64[0] = cpu_to_le64(assocbits); - tmp.words64[1] = cpu_to_le64(cryptbits); - - crypto_aegis_block_xor(&tmp, &state->blocks[3]); - - for (i = 0; i < 7; i++) - crypto_aegis128_update_a(state, &tmp); - - for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) - crypto_aegis_block_xor(tag_xor, &state->blocks[i]); -} - -static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct aegis_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != AEGIS128_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); - return 0; -} - -static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > AEGIS128_MAX_AUTH_SIZE) - return -EINVAL; - if (authsize < AEGIS128_MIN_AUTH_SIZE) - return -EINVAL; - return 0; -} - -static void crypto_aegis128_crypt(struct aead_request *req, - union aegis_block *tag_xor, - unsigned int cryptlen, - const struct aegis128_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aegis_ctx *ctx = crypto_aead_ctx(tfm); - struct aegis_state state; - - crypto_aegis128_init(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, ops); - crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_aegis128_encrypt(struct aead_request *req) -{ - static const struct aegis128_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_aegis128_crypt(req, &tag, cryptlen, &ops); - - scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, - authsize, 1); - return 0; -} - -static int crypto_aegis128_decrypt(struct aead_request *req) -{ - static const struct aegis128_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128_decrypt_chunk, - }; - static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, - authsize, 0); - - crypto_aegis128_crypt(req, &tag, cryptlen, &ops); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static struct aead_alg crypto_aegis128_alg = { - .setkey = crypto_aegis128_setkey, - .setauthsize = crypto_aegis128_setauthsize, - .encrypt = crypto_aegis128_encrypt, - .decrypt = crypto_aegis128_decrypt, - - .ivsize = AEGIS128_NONCE_SIZE, - .maxauthsize = AEGIS128_MAX_AUTH_SIZE, - .chunksize = AEGIS_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "aegis128", - .cra_driver_name = "aegis128-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_aegis128_module_init(void) -{ - return crypto_register_aead(&crypto_aegis128_alg); -} - -static void __exit crypto_aegis128_module_exit(void) -{ - crypto_unregister_aead(&crypto_aegis128_alg); -} - -subsys_initcall(crypto_aegis128_module_init); -module_exit(crypto_aegis128_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("aegis128"); -MODULE_ALIAS_CRYPTO("aegis128-generic"); -- cgit v1.2.3 From 3cd22a9ca7fcacfa00025ecf925c8a0121c7cc92 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:11 +0200 Subject: crypto: aegis128 - provide a SIMD implementation based on NEON intrinsics Provide an accelerated implementation of aegis128 by wiring up the SIMD hooks in the generic driver to an implementation based on NEON intrinsics, which can be compiled to both ARM and arm64 code. This results in a performance of 2.2 cycles per byte on Cortex-A53, which is a performance increase of ~11x compared to the generic code. Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 ++ crypto/Makefile | 11 ++++ crypto/aegis128-neon-inner.c | 149 +++++++++++++++++++++++++++++++++++++++++++ crypto/aegis128-neon.c | 43 +++++++++++++ 4 files changed, 208 insertions(+) create mode 100644 crypto/aegis128-neon-inner.c create mode 100644 crypto/aegis128-neon.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 559494bb..2e7f08ba 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -306,6 +306,11 @@ config CRYPTO_AEGIS128 help Support for the AEGIS-128 dedicated AEAD algorithm. 
+config CRYPTO_AEGIS128_SIMD + bool "Support SIMD acceleration for AEGIS-128" + depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) + default y + config CRYPTO_AEGIS128_AESNI_SSE2 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" depends on X86 && 64BIT diff --git a/crypto/Makefile b/crypto/Makefile index 362a36f0..b3e16b4f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -91,6 +91,17 @@ obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o aegis128-y := aegis128-core.o + +ifeq ($(ARCH),arm) +CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp -mfpu=crypto-neon-fp-armv8 +aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o +endif +ifeq ($(ARCH),arm64) +CFLAGS_aegis128-neon-inner.o += -ffreestanding -mcpu=generic+crypto +CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only +aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o +endif + obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c new file mode 100644 index 00000000..26e9450a --- /dev/null +++ b/crypto/aegis128-neon-inner.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2019 Linaro, Ltd. + */ + +#ifdef CONFIG_ARM64 +#include + +#define AES_ROUND "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b" +#else +#include + +#define AES_ROUND "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0" +#endif + +#define AEGIS_BLOCK_SIZE 16 + +#include + +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); + +struct aegis128_state { + uint8x16_t v[5]; +}; + +static struct aegis128_state aegis128_load_state_neon(const void *state) +{ + return (struct aegis128_state){ { + vld1q_u8(state), + vld1q_u8(state + 16), + vld1q_u8(state + 32), + vld1q_u8(state + 48), + vld1q_u8(state + 64) + } }; +} + +static void aegis128_save_state_neon(struct aegis128_state st, void *state) +{ + vst1q_u8(state, st.v[0]); + vst1q_u8(state + 16, st.v[1]); + vst1q_u8(state + 32, st.v[2]); + vst1q_u8(state + 48, st.v[3]); + vst1q_u8(state + 64, st.v[4]); +} + +static uint8x16_t aegis_aes_round(uint8x16_t w) +{ + uint8x16_t z = {}; + + /* + * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics + * to force the compiler to issue the aese/aesmc instructions in pairs. + * This is much faster on many cores, where the instruction pair can + * execute in a single cycle. 
+ */ + asm(AES_ROUND : "+w"(w) : "w"(z)); + return w; +} + +static struct aegis128_state aegis128_update_neon(struct aegis128_state st, + uint8x16_t m) +{ + uint8x16_t t; + + t = aegis_aes_round(st.v[3]); + st.v[3] ^= aegis_aes_round(st.v[2]); + st.v[2] ^= aegis_aes_round(st.v[1]); + st.v[1] ^= aegis_aes_round(st.v[0]); + st.v[0] ^= aegis_aes_round(st.v[4]) ^ m; + st.v[4] ^= t; + + return st; +} + +void crypto_aegis128_update_neon(void *state, const void *msg) +{ + struct aegis128_state st = aegis128_load_state_neon(state); + + st = aegis128_update_neon(st, vld1q_u8(msg)); + + aegis128_save_state_neon(st, state); +} + +void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size) +{ + struct aegis128_state st = aegis128_load_state_neon(state); + uint8x16_t tmp; + + while (size >= AEGIS_BLOCK_SIZE) { + uint8x16_t s = vld1q_u8(src); + + tmp = s ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + st = aegis128_update_neon(st, s); + vst1q_u8(dst, tmp); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + + if (size > 0) { + uint8_t buf[AEGIS_BLOCK_SIZE] = {}; + uint8x16_t msg; + + memcpy(buf, src, size); + msg = vld1q_u8(buf); + tmp = msg ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + st = aegis128_update_neon(st, msg); + vst1q_u8(buf, tmp); + memcpy(dst, buf, size); + } + + aegis128_save_state_neon(st, state); +} + +void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size) +{ + struct aegis128_state st = aegis128_load_state_neon(state); + uint8x16_t tmp; + + while (size >= AEGIS_BLOCK_SIZE) { + tmp = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + st = aegis128_update_neon(st, tmp); + vst1q_u8(dst, tmp); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + + if (size > 0) { + uint8_t buf[AEGIS_BLOCK_SIZE] = {}; + uint8x16_t msg; + + memcpy(buf, src, size); + msg = vld1q_u8(buf) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + vst1q_u8(buf, msg); + memcpy(dst, buf, size); + + memset(buf + size, 0, AEGIS_BLOCK_SIZE - size); + msg = vld1q_u8(buf); + st = aegis128_update_neon(st, msg); + } + + aegis128_save_state_neon(st, state); +} diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c new file mode 100644 index 00000000..c1c0a168 --- /dev/null +++ b/crypto/aegis128-neon.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2019 Linaro Ltd + */ + +#include +#include + +#include "aegis.h" + +void crypto_aegis128_update_neon(void *state, const void *msg); +void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); +void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); + +bool crypto_aegis128_have_simd(void) +{ + return cpu_have_feature(cpu_feature(AES)); +} + +void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) +{ + kernel_neon_begin(); + crypto_aegis128_update_neon(state, msg); + kernel_neon_end(); +} + +void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, + const u8 *src, unsigned int size) +{ + kernel_neon_begin(); + crypto_aegis128_encrypt_chunk_neon(state, dst, src, size); + kernel_neon_end(); +} + +void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, + const u8 *src, unsigned int size) +{ + kernel_neon_begin(); + crypto_aegis128_decrypt_chunk_neon(state, dst, src, size); + kernel_neon_end(); +} -- cgit v1.2.3 From 
f3b8e93c4d6ded77e3c1edf651a496f0323f5e39 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 3 Jul 2019 10:55:12 +0200 Subject: crypto: tcrypt - add a speed test for AEGIS128 Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index ad78ab5b..c578ccd9 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2327,6 +2327,13 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) 0, speed_template_32); break; + case 221: + test_aead_speed("aegis128", ENCRYPT, sec, + NULL, 0, 16, 8, speed_template_16); + test_aead_speed("aegis128", DECRYPT, sec, + NULL, 0, 16, 8, speed_template_16); + break; + case 300: if (alg) { test_hash_speed(alg, sec, generic_hash_speed_template); -- cgit v1.2.3 From 21ab11901c29e9b4fdbfe036f910ee5bd0f256e6 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Tue, 9 Jul 2019 13:11:24 +0200 Subject: crypto: user - make NETLINK_CRYPTO work inside netns Currently, NETLINK_CRYPTO works only in the init network namespace. It doesn't make much sense to cut it out of the other network namespaces, so do the minor plumbing work necessary to make it work in any network namespace. Code inspired by net/core/sock_diag.c. Tested using kcapi-dgst from libkcapi [1]: Before: # unshare -n kcapi-dgst -c sha256 Signed-off-by: Herbert Xu --- crypto/crypto_user_base.c | 37 +++++++++++++++++++++++++------------ crypto/crypto_user_stat.c | 4 +++- 2 files changed, 28 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c index c65e3900..910e0b46 100644 --- a/crypto/crypto_user_base.c +++ b/crypto/crypto_user_base.c @@ -10,9 +10,10 @@ #include #include #include -#include #include +#include #include +#include #include #include #include @@ -25,9 +26,6 @@ static DEFINE_MUTEX(crypto_cfg_mutex); -/* The crypto netlink socket */ -struct sock *crypto_nlsk; - struct crypto_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; @@ -186,6 +184,7 @@ out: static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) { + struct net *net = sock_net(in_skb->sk); struct crypto_user_alg *p = nlmsg_data(in_nlh); struct crypto_alg *alg; struct sk_buff *skb; @@ -217,7 +216,7 @@ drop_alg: if (err) return err; - return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); + return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb) @@ -420,6 +419,7 @@ static const struct crypto_link { static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct net *net = sock_net(skb->sk); struct nlattr *attrs[CRYPTOCFGA_MAX+1]; const struct crypto_link *link; int type, err; @@ -450,7 +450,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, .done = link->done, .min_dump_alloc = min(dump_alloc, 65535UL), }; - err = netlink_dump_start(crypto_nlsk, skb, nlh, &c); + err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c); } return err; @@ -474,22 +474,35 @@ static void crypto_netlink_rcv(struct sk_buff *skb) mutex_unlock(&crypto_cfg_mutex); } -static int __init crypto_user_init(void) +static int __net_init crypto_netlink_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = crypto_netlink_rcv, }; - crypto_nlsk = netlink_kernel_create(&init_net, 
NETLINK_CRYPTO, &cfg); - if (!crypto_nlsk) - return -ENOMEM; + net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg); + return net->crypto_nlsk == NULL ? -ENOMEM : 0; +} - return 0; +static void __net_exit crypto_netlink_exit(struct net *net) +{ + netlink_kernel_release(net->crypto_nlsk); + net->crypto_nlsk = NULL; +} + +static struct pernet_operations crypto_netlink_net_ops = { + .init = crypto_netlink_init, + .exit = crypto_netlink_exit, +}; + +static int __init crypto_user_init(void) +{ + return register_pernet_subsys(&crypto_netlink_net_ops); } static void __exit crypto_user_exit(void) { - netlink_kernel_release(crypto_nlsk); + unregister_pernet_subsys(&crypto_netlink_net_ops); } module_init(crypto_user_init); diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c index a03f326a..8bad8841 100644 --- a/crypto/crypto_user_stat.c +++ b/crypto/crypto_user_stat.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -298,6 +299,7 @@ out: int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) { + struct net *net = sock_net(in_skb->sk); struct crypto_user_alg *p = nlmsg_data(in_nlh); struct crypto_alg *alg; struct sk_buff *skb; @@ -329,7 +331,7 @@ drop_alg: if (err) return err; - return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); + return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); } MODULE_LICENSE("GPL"); -- cgit v1.2.3 From fd8933366cb9dc6c9852c653c9284baa87e7d8a9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Jul 2019 15:50:04 +0200 Subject: crypto: aegis - fix badly optimized clang output Clang sometimes makes very different inlining decisions from gcc. In case of the aegis crypto algorithms, it decides to turn the innermost primitives (and, xor, ...) into separate functions but inline most of the rest. This results in a huge amount of variables spilled on the stack, leading to rather slow execution as well as kernel stack usage beyond the 32-bit warning limit when CONFIG_KASAN is enabled: crypto/aegis256.c:123:13: warning: stack frame size of 648 bytes in function 'crypto_aegis256_encrypt_chunk' [-Wframe-larger-than=] crypto/aegis256.c:366:13: warning: stack frame size of 1264 bytes in function 'crypto_aegis256_crypt' [-Wframe-larger-than=] crypto/aegis256.c:187:13: warning: stack frame size of 656 bytes in function 'crypto_aegis256_decrypt_chunk' [-Wframe-larger-than=] crypto/aegis128l.c:135:13: warning: stack frame size of 832 bytes in function 'crypto_aegis128l_encrypt_chunk' [-Wframe-larger-than=] crypto/aegis128l.c:415:13: warning: stack frame size of 1480 bytes in function 'crypto_aegis128l_crypt' [-Wframe-larger-than=] crypto/aegis128l.c:218:13: warning: stack frame size of 848 bytes in function 'crypto_aegis128l_decrypt_chunk' [-Wframe-larger-than=] crypto/aegis128.c:116:13: warning: stack frame size of 584 bytes in function 'crypto_aegis128_encrypt_chunk' [-Wframe-larger-than=] crypto/aegis128.c:351:13: warning: stack frame size of 1064 bytes in function 'crypto_aegis128_crypt' [-Wframe-larger-than=] crypto/aegis128.c:177:13: warning: stack frame size of 592 bytes in function 'crypto_aegis128_decrypt_chunk' [-Wframe-larger-than=] Forcing the primitives to all get inlined avoids the issue and the resulting code is similar to what gcc produces. 
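The shape of the fix, as a minimal standalone sketch (illustrative only; the struct and helper names here are hypothetical, and the fallback macro mirrors the kernel's definition of __always_inline as inline plus the always_inline function attribute):

/* Outside the kernel, provide the attribute the patch relies on. */
#ifndef __always_inline
#define __always_inline inline __attribute__((__always_inline__))
#endif

struct blk { unsigned long long w[2]; };

/*
 * "static inline" is only a hint: clang may still outline this helper,
 * passing the 16-byte blocks through the stack at every call site.
 */
static inline void blk_xor_hinted(struct blk *dst, const struct blk *src)
{
	dst->w[0] ^= src->w[0];
	dst->w[1] ^= src->w[1];
}

/*
 * __always_inline forces expansion at every call site, keeping the blocks
 * in registers so the generated code resembles what gcc produces.
 */
static __always_inline void blk_xor_forced(struct blk *dst,
					   const struct blk *src)
{
	dst->w[0] ^= src->w[0];
	dst->w[1] ^= src->w[1];
}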
Signed-off-by: Arnd Bergmann Acked-by: Nick Desaulniers Signed-off-by: Herbert Xu --- crypto/aegis.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/aegis.h b/crypto/aegis.h index 6cb65a49..4d56a85a 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -35,23 +35,23 @@ static const union aegis_block crypto_aegis_const[2] = { } }, }; -static inline void crypto_aegis_block_xor(union aegis_block *dst, - const union aegis_block *src) +static __always_inline void crypto_aegis_block_xor(union aegis_block *dst, + const union aegis_block *src) { dst->words64[0] ^= src->words64[0]; dst->words64[1] ^= src->words64[1]; } -static inline void crypto_aegis_block_and(union aegis_block *dst, - const union aegis_block *src) +static __always_inline void crypto_aegis_block_and(union aegis_block *dst, + const union aegis_block *src) { dst->words64[0] &= src->words64[0]; dst->words64[1] &= src->words64[1]; } -static inline void crypto_aegis_aesenc(union aegis_block *dst, - const union aegis_block *src, - const union aegis_block *key) +static __always_inline void crypto_aegis_aesenc(union aegis_block *dst, + const union aegis_block *src, + const union aegis_block *key) { const u8 *s = src->bytes; const u32 *t = crypto_ft_tab[0]; -- cgit v1.2.3 From 31b7e024fb864e4947af8c7bef444ce01ebb5c5c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 19 Jul 2019 23:09:18 -0700 Subject: crypto: ghash - add comment and improve help text To help avoid confusion, add a comment to ghash-generic.c which explains the convention that the kernel's implementation of GHASH uses. Also update the Kconfig help text and module descriptions to call GHASH a "hash function" rather than a "message digest", since the latter normally means a real cryptographic hash function, which GHASH is not. Cc: Pascal Van Leeuwen Signed-off-by: Eric Biggers Reviewed-by: Ard Biesheuvel Acked-by: Pascal Van Leeuwen Signed-off-by: Herbert Xu --- crypto/Kconfig | 11 ++++++----- crypto/ghash-generic.c | 31 ++++++++++++++++++++++++++++--- 2 files changed, 34 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 2e7f08ba..455a3354 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -647,11 +647,12 @@ config CRYPTO_VPMSUM_TESTER Unless you are testing these algorithms, you don't need this. config CRYPTO_GHASH - tristate "GHASH digest algorithm" + tristate "GHASH hash function" select CRYPTO_GF128MUL select CRYPTO_HASH help - GHASH is message digest algorithm for GCM (Galois/Counter Mode). + GHASH is the hash function used in GCM (Galois/Counter Mode). + It is not a general-purpose cryptographic hash function. config CRYPTO_POLY1305 tristate "Poly1305 authenticator algorithm" @@ -976,12 +977,12 @@ config CRYPTO_WP512 config CRYPTO_GHASH_CLMUL_NI_INTEL - tristate "GHASH digest algorithm (CLMUL-NI accelerated)" + tristate "GHASH hash function (CLMUL-NI accelerated)" depends on X86 && 64BIT select CRYPTO_CRYPTD help - GHASH is message digest algorithm for GCM (Galois/Counter Mode). - The implementation is accelerated by CLMUL-NI of Intel. + This is the x86_64 CLMUL-NI accelerated implementation of + GHASH, the hash function used in GCM (Galois/Counter mode). comment "Ciphers" diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index dad9e1f9..5027b346 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -1,12 +1,37 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * GHASH: digest algorithm for GCM (Galois/Counter Mode). 
+ * GHASH: hash function for GCM (Galois/Counter Mode). * * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen * Copyright (c) 2009 Intel Corp. * Author: Huang Ying + */ + +/* + * GHASH is a keyed hash function used in GCM authentication tag generation. + * + * The original GCM paper [1] presents GHASH as a function GHASH(H, A, C) which + * takes a 16-byte hash key H, additional authenticated data A, and a ciphertext + * C. It formats A and C into a single byte string X, interprets X as a + * polynomial over GF(2^128), and evaluates this polynomial at the point H. + * + * However, the NIST standard for GCM [2] presents GHASH as GHASH(H, X) where X + * is the already-formatted byte string containing both A and C. + * + * "ghash" in the Linux crypto API uses the 'X' (pre-formatted) convention, + * since the API supports only a single data stream per hash. Thus, the + * formatting of 'A' and 'C' is done in the "gcm" template, not in "ghash". + * + * The reason "ghash" is separate from "gcm" is to allow "gcm" to use an + * accelerated "ghash" when a standalone accelerated "gcm(aes)" is unavailable. + * It is generally inappropriate to use "ghash" for other purposes, since it is + * an "ε-almost-XOR-universal hash function", not a cryptographic hash function. + * It can only be used securely in crypto modes specially designed to use it. * - * The algorithm implementation is copied from gcm.c. + * [1] The Galois/Counter Mode of Operation (GCM) + * (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.694.695&rep=rep1&type=pdf) + * [2] Recommendation for Block Cipher Modes of Operation: Galois/Counter Mode (GCM) and GMAC + * (https://csrc.nist.gov/publications/detail/sp/800-38d/final) */ #include @@ -156,6 +181,6 @@ subsys_initcall(ghash_mod_init); module_exit(ghash_mod_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); +MODULE_DESCRIPTION("GHASH hash function"); MODULE_ALIAS_CRYPTO("ghash"); MODULE_ALIAS_CRYPTO("ghash-generic"); -- cgit v1.2.3 From 0e875603a56e8580f7f9a61416733ce4da1ff163 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 2 Aug 2019 13:31:35 +1000 Subject: Revert "crypto: aegis128 - add support for SIMD acceleration" This reverts commit 3cd22a9ca7fcacfa00025ecf925c8a0121c7cc92 ("crypto: aegis128 - provide a SIMD implementation based on NEON intrinsics") and commit 467920a4b6c1cc41fdd55776be2d4a74d9d10469 ("crypto: aegis128 - add support for SIMD acceleration"). They cause compile errors on platforms other than ARM because the mechanism to selectively compile the SIMD code is broken. Reported-by: Heiko Carstens Reported-by: Stephen Rothwell Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 - crypto/Makefile | 12 -- crypto/aegis128-core.c | 481 ------------------------------------------- crypto/aegis128-neon-inner.c | 149 -------------- crypto/aegis128-neon.c | 43 ---- crypto/aegis128.c | 447 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 447 insertions(+), 690 deletions(-) delete mode 100644 crypto/aegis128-core.c delete mode 100644 crypto/aegis128-neon-inner.c delete mode 100644 crypto/aegis128-neon.c create mode 100644 crypto/aegis128.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 455a3354..8880c1fc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -306,11 +306,6 @@ config CRYPTO_AEGIS128 help Support for the AEGIS-128 dedicated AEAD algorithm.
-config CRYPTO_AEGIS128_SIMD - bool "Support SIMD acceleration for AEGIS-128" - depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) - default y - config CRYPTO_AEGIS128_AESNI_SSE2 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" depends on X86 && 64BIT diff --git a/crypto/Makefile b/crypto/Makefile index b3e16b4f..93375e12 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -90,18 +90,6 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o -aegis128-y := aegis128-core.o - -ifeq ($(ARCH),arm) -CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp -mfpu=crypto-neon-fp-armv8 -aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o -endif -ifeq ($(ARCH),arm64) -CFLAGS_aegis128-neon-inner.o += -ffreestanding -mcpu=generic+crypto -CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only -aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o -endif - obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c deleted file mode 100644 index f815b468..00000000 --- a/crypto/aegis128-core.c +++ /dev/null @@ -1,481 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The AEGIS-128 Authenticated-Encryption Algorithm - * - * Copyright (c) 2017-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "aegis.h" - -#define AEGIS128_NONCE_SIZE 16 -#define AEGIS128_STATE_BLOCKS 5 -#define AEGIS128_KEY_SIZE 16 -#define AEGIS128_MIN_AUTH_SIZE 8 -#define AEGIS128_MAX_AUTH_SIZE 16 - -struct aegis_state { - union aegis_block blocks[AEGIS128_STATE_BLOCKS]; -}; - -struct aegis_ctx { - union aegis_block key; -}; - -struct aegis128_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -}; - -static bool have_simd; - -bool crypto_aegis128_have_simd(void); -void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); -void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); - -static void crypto_aegis128_update(struct aegis_state *state) -{ - union aegis_block tmp; - unsigned int i; - - tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; - for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) - crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], - &state->blocks[i]); - crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); -} - -static void crypto_aegis128_update_a(struct aegis_state *state, - const union aegis_block *msg) -{ - if (have_simd && crypto_simd_usable()) { - crypto_aegis128_update_simd(state, msg); - return; - } - - crypto_aegis128_update(state); - crypto_aegis_block_xor(&state->blocks[0], msg); -} - -static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) -{ - if (have_simd && crypto_simd_usable()) { - crypto_aegis128_update_simd(state, msg); - return; - } - - crypto_aegis128_update(state); - 
crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); -} - -static void crypto_aegis128_init(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv) -{ - union aegis_block key_iv; - unsigned int i; - - key_iv = *key; - crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); - - state->blocks[0] = key_iv; - state->blocks[1] = crypto_aegis_const[1]; - state->blocks[2] = crypto_aegis_const[0]; - state->blocks[3] = *key; - state->blocks[4] = *key; - - crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); - crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); - - for (i = 0; i < 5; i++) { - crypto_aegis128_update_a(state, key); - crypto_aegis128_update_a(state, &key_iv); - } -} - -static void crypto_aegis128_ad(struct aegis_state *state, - const u8 *src, unsigned int size) -{ - if (AEGIS_ALIGNED(src)) { - const union aegis_block *src_blk = - (const union aegis_block *)src; - - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_a(state, src_blk); - - size -= AEGIS_BLOCK_SIZE; - src_blk++; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_u(state, src); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - } - } -} - -static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, src_blk); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis128_update_u(state, src); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - - crypto_aegis128_update_a(state, &msg); - - crypto_aegis_block_xor(&msg, &tmp); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, &tmp); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size 
>= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis128_update_a(state, &tmp); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&msg, &tmp); - - memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); - - crypto_aegis128_update_a(state, &msg); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_process_ad(struct aegis_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - union aegis_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= AEGIS_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = AEGIS_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - crypto_aegis128_update_a(state, &buf); - pos = 0; - left -= fill; - src += fill; - } - - crypto_aegis128_ad(state, src, left); - src += left & ~(AEGIS_BLOCK_SIZE - 1); - left &= AEGIS_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); - crypto_aegis128_update_a(state, &buf); - } -} - -static void crypto_aegis128_process_crypt(struct aegis_state *state, - struct aead_request *req, - const struct aegis128_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_aegis128_final(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - union aegis_block tmp; - unsigned int i; - - tmp.words64[0] = cpu_to_le64(assocbits); - tmp.words64[1] = cpu_to_le64(cryptbits); - - crypto_aegis_block_xor(&tmp, &state->blocks[3]); - - for (i = 0; i < 7; i++) - crypto_aegis128_update_a(state, &tmp); - - for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) - crypto_aegis_block_xor(tag_xor, &state->blocks[i]); -} - -static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct aegis_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != AEGIS128_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); - return 0; -} - -static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > AEGIS128_MAX_AUTH_SIZE) - return -EINVAL; - if (authsize < AEGIS128_MIN_AUTH_SIZE) - return 
-EINVAL; - return 0; -} - -static void crypto_aegis128_crypt(struct aead_request *req, - union aegis_block *tag_xor, - unsigned int cryptlen, - const struct aegis128_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aegis_ctx *ctx = crypto_aead_ctx(tfm); - struct aegis_state state; - - crypto_aegis128_init(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, ops); - crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_aegis128_encrypt(struct aead_request *req) -{ - const struct aegis128_ops *ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - if (have_simd && crypto_simd_usable()) - ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128_encrypt_chunk_simd }; - - crypto_aegis128_crypt(req, &tag, cryptlen, ops); - - scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, - authsize, 1); - return 0; -} - -static int crypto_aegis128_decrypt(struct aead_request *req) -{ - const struct aegis128_ops *ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128_decrypt_chunk, - }; - static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, - authsize, 0); - - if (have_simd && crypto_simd_usable()) - ops = &(struct aegis128_ops){ - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128_decrypt_chunk_simd }; - - crypto_aegis128_crypt(req, &tag, cryptlen, ops); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static struct aead_alg crypto_aegis128_alg = { - .setkey = crypto_aegis128_setkey, - .setauthsize = crypto_aegis128_setauthsize, - .encrypt = crypto_aegis128_encrypt, - .decrypt = crypto_aegis128_decrypt, - - .ivsize = AEGIS128_NONCE_SIZE, - .maxauthsize = AEGIS128_MAX_AUTH_SIZE, - .chunksize = AEGIS_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "aegis128", - .cra_driver_name = "aegis128-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_aegis128_module_init(void) -{ - if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD)) - have_simd = crypto_aegis128_have_simd(); - - return crypto_register_aead(&crypto_aegis128_alg); -} - -static void __exit crypto_aegis128_module_exit(void) -{ - crypto_unregister_aead(&crypto_aegis128_alg); -} - -subsys_initcall(crypto_aegis128_module_init); -module_exit(crypto_aegis128_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("aegis128"); -MODULE_ALIAS_CRYPTO("aegis128-generic"); diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c deleted file mode 100644 index 26e9450a..00000000 --- a/crypto/aegis128-neon-inner.c +++ /dev/null @@ -1,149 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2019 Linaro, Ltd. - */ - -#ifdef CONFIG_ARM64 -#include - -#define AES_ROUND "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b" -#else -#include - -#define AES_ROUND "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0" -#endif - -#define AEGIS_BLOCK_SIZE 16 - -#include - -void *memcpy(void *dest, const void *src, size_t n); -void *memset(void *s, int c, size_t n); - -struct aegis128_state { - uint8x16_t v[5]; -}; - -static struct aegis128_state aegis128_load_state_neon(const void *state) -{ - return (struct aegis128_state){ { - vld1q_u8(state), - vld1q_u8(state + 16), - vld1q_u8(state + 32), - vld1q_u8(state + 48), - vld1q_u8(state + 64) - } }; -} - -static void aegis128_save_state_neon(struct aegis128_state st, void *state) -{ - vst1q_u8(state, st.v[0]); - vst1q_u8(state + 16, st.v[1]); - vst1q_u8(state + 32, st.v[2]); - vst1q_u8(state + 48, st.v[3]); - vst1q_u8(state + 64, st.v[4]); -} - -static uint8x16_t aegis_aes_round(uint8x16_t w) -{ - uint8x16_t z = {}; - - /* - * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics - * to force the compiler to issue the aese/aesmc instructions in pairs. - * This is much faster on many cores, where the instruction pair can - * execute in a single cycle. 
- */ - asm(AES_ROUND : "+w"(w) : "w"(z)); - return w; -} - -static struct aegis128_state aegis128_update_neon(struct aegis128_state st, - uint8x16_t m) -{ - uint8x16_t t; - - t = aegis_aes_round(st.v[3]); - st.v[3] ^= aegis_aes_round(st.v[2]); - st.v[2] ^= aegis_aes_round(st.v[1]); - st.v[1] ^= aegis_aes_round(st.v[0]); - st.v[0] ^= aegis_aes_round(st.v[4]) ^ m; - st.v[4] ^= t; - - return st; -} - -void crypto_aegis128_update_neon(void *state, const void *msg) -{ - struct aegis128_state st = aegis128_load_state_neon(state); - - st = aegis128_update_neon(st, vld1q_u8(msg)); - - aegis128_save_state_neon(st, state); -} - -void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size) -{ - struct aegis128_state st = aegis128_load_state_neon(state); - uint8x16_t tmp; - - while (size >= AEGIS_BLOCK_SIZE) { - uint8x16_t s = vld1q_u8(src); - - tmp = s ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - st = aegis128_update_neon(st, s); - vst1q_u8(dst, tmp); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - - if (size > 0) { - uint8_t buf[AEGIS_BLOCK_SIZE] = {}; - uint8x16_t msg; - - memcpy(buf, src, size); - msg = vld1q_u8(buf); - tmp = msg ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - st = aegis128_update_neon(st, msg); - vst1q_u8(buf, tmp); - memcpy(dst, buf, size); - } - - aegis128_save_state_neon(st, state); -} - -void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size) -{ - struct aegis128_state st = aegis128_load_state_neon(state); - uint8x16_t tmp; - - while (size >= AEGIS_BLOCK_SIZE) { - tmp = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - st = aegis128_update_neon(st, tmp); - vst1q_u8(dst, tmp); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - - if (size > 0) { - uint8_t buf[AEGIS_BLOCK_SIZE] = {}; - uint8x16_t msg; - - memcpy(buf, src, size); - msg = vld1q_u8(buf) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; - vst1q_u8(buf, msg); - memcpy(dst, buf, size); - - memset(buf + size, 0, AEGIS_BLOCK_SIZE - size); - msg = vld1q_u8(buf); - st = aegis128_update_neon(st, msg); - } - - aegis128_save_state_neon(st, state); -} diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c deleted file mode 100644 index c1c0a168..00000000 --- a/crypto/aegis128-neon.c +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2019 Linaro Ltd - */ - -#include -#include - -#include "aegis.h" - -void crypto_aegis128_update_neon(void *state, const void *msg); -void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); -void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, - unsigned int size); - -bool crypto_aegis128_have_simd(void) -{ - return cpu_have_feature(cpu_feature(AES)); -} - -void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) -{ - kernel_neon_begin(); - crypto_aegis128_update_neon(state, msg); - kernel_neon_end(); -} - -void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, - const u8 *src, unsigned int size) -{ - kernel_neon_begin(); - crypto_aegis128_encrypt_chunk_neon(state, dst, src, size); - kernel_neon_end(); -} - -void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, - const u8 *src, unsigned int size) -{ - kernel_neon_begin(); - crypto_aegis128_decrypt_chunk_neon(state, dst, src, size); - kernel_neon_end(); -} diff --git a/crypto/aegis128.c 
b/crypto/aegis128.c new file mode 100644 index 00000000..32840d5e --- /dev/null +++ b/crypto/aegis128.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * The AEGIS-128 Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aegis.h" + +#define AEGIS128_NONCE_SIZE 16 +#define AEGIS128_STATE_BLOCKS 5 +#define AEGIS128_KEY_SIZE 16 +#define AEGIS128_MIN_AUTH_SIZE 8 +#define AEGIS128_MAX_AUTH_SIZE 16 + +struct aegis_state { + union aegis_block blocks[AEGIS128_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key; +}; + +struct aegis128_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static void crypto_aegis128_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; + for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis128_update_a(struct aegis_state *state, + const union aegis_block *msg) +{ + crypto_aegis128_update(state); + crypto_aegis_block_xor(&state->blocks[0], msg); +} + +static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) +{ + crypto_aegis128_update(state); + crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis128_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv; + unsigned int i; + + key_iv = *key; + crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); + + state->blocks[0] = key_iv; + state->blocks[1] = crypto_aegis_const[1]; + state->blocks[2] = crypto_aegis_const[0]; + state->blocks[3] = *key; + state->blocks[4] = *key; + + crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); + + for (i = 0; i < 5; i++) { + crypto_aegis128_update_a(state, key); + crypto_aegis128_update_a(state, &key_iv); + } +} + +static void crypto_aegis128_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_block *src_blk = + (const union aegis_block *)src; + + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_a(state, src_blk); + + size -= AEGIS_BLOCK_SIZE; + src_blk++; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_u(state, src); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + } + } +} + +static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += 
AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + + crypto_aegis128_update_a(state, &msg); + + crypto_aegis_block_xor(&msg, &tmp); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&msg, &tmp); + + memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); + + crypto_aegis128_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128_ad(state, src, left); + src += left & ~(AEGIS_BLOCK_SIZE - 1); + left &= AEGIS_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, 
AEGIS_BLOCK_SIZE - pos); + crypto_aegis128_update_a(state, &buf); + } +} + +static void crypto_aegis128_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis128_ops *ops) +{ + struct skcipher_walk walk; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); + } +} + +static void crypto_aegis128_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_block tmp; + unsigned int i; + + tmp.words64[0] = cpu_to_le64(assocbits); + tmp.words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp, &state->blocks[3]); + + for (i = 0; i < 7; i++) + crypto_aegis128_update_a(state, &tmp); + + for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS128_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); + return 0; +} + +static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis128_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen); + crypto_aegis128_process_crypt(&state, req, ops); + crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis128_encrypt(struct aead_request *req) +{ + static const struct aegis128_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis128_crypt(req, &tag, cryptlen, &ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis128_decrypt(struct aead_request *req) +{ + static const struct aegis128_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk, + }; + static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + crypto_aegis128_crypt(req, &tag, cryptlen, &ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} + +static struct aead_alg crypto_aegis128_alg = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt, + .decrypt = crypto_aegis128_decrypt, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base = { + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis128", + .cra_driver_name = "aegis128-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis128_module_init(void) +{ + return crypto_register_aead(&crypto_aegis128_alg); +} + +static void __exit crypto_aegis128_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis128_alg); +} + +subsys_initcall(crypto_aegis128_module_init); +module_exit(crypto_aegis128_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis128"); +MODULE_ALIAS_CRYPTO("aegis128-generic"); -- cgit v1.2.3 From 8e0e027e58c1bc0da6ab132e8c4dc35864b99d83 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 24 Jul 2019 20:51:55 +0200 Subject: crypto: jitterentropy - build without sanitizer Recent clang-9 snapshots double the kernel stack usage when building this file with -O0 -fsanitize=kernel-hwaddress, compared to clang-8 and older snapshots, this changed between commits svn364966 and svn366056: crypto/jitterentropy.c:516:5: error: stack frame size of 2640 bytes in function 'jent_entropy_init' [-Werror,-Wframe-larger-than=] int jent_entropy_init(void) ^ crypto/jitterentropy.c:185:14: error: stack frame size of 2224 bytes in function 'jent_lfsr_time' [-Werror,-Wframe-larger-than=] static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) ^ I prepared a reduced test case in case any clang developers want to take a closer look, but from looking at the earlier output it seems that even with clang-8, something was very wrong here. Turn off any KASAN and UBSAN sanitizing for this file, as that likely clashes with -O0 anyway. Turning off just KASAN avoids the warning already, but I suspect both of these have undesired side-effects for jitterentropy. Link: https://godbolt.org/z/fDcwZ5 Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu --- crypto/Makefile | 2 ++ 1 file changed, 2 insertions(+) (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index 93375e12..cfcc954e 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -132,6 +132,8 @@ obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o obj-$(CONFIG_CRYPTO_DRBG) += drbg.o obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy_rng.o CFLAGS_jitterentropy.o = -O0 +KASAN_SANITIZE_jitterentropy.o = n +UBSAN_SANITIZE_jitterentropy.o = n jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o -- cgit v1.2.3 From 28769e25edecd083e85a20e315861489f21b879a Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 31 Jul 2019 16:05:54 +0300 Subject: crypto: gcm - helper functions for assoclen/authsize check Added inline helper functions to check authsize and assoclen for gcm, rfc4106 and rfc4543. These are used in the generic implementation of gcm, rfc4106 and rfc4543. 
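The helpers themselves land in a header outside the 'crypto' directory covered by this log (presumably include/crypto/gcm.h), so only their call sites appear in the diff below. A sketch consistent with the open-coded checks being removed:

  static inline int crypto_gcm_check_authsize(unsigned int authsize)
  {
          switch (authsize) {
          case 4:
          case 8:
          case 12:
          case 13:
          case 14:
          case 15:
          case 16:
                  break;
          default:
                  return -EINVAL;
          }
          return 0;
  }

  static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
  {
          switch (assoclen) {
          case 16:
          case 20:
                  break;
          default:
                  return -EINVAL;
          }
          return 0;
  }

crypto_rfc4106_check_authsize() follows the same pattern, restricted to 8, 12 and 16 as in the switch removed from crypto_rfc4106_setauthsize() below.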
Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- crypto/gcm.c | 41 +++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 26 deletions(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index f254e2d4..2f3b50f8 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -152,20 +152,7 @@ out: static int crypto_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - switch (authsize) { - case 4: - case 8: - case 12: - case 13: - case 14: - case 15: - case 16: - break; - default: - return -EINVAL; - } - - return 0; + return crypto_gcm_check_authsize(authsize); } static void crypto_gcm_init_common(struct aead_request *req) @@ -762,15 +749,11 @@ static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); + int err; - switch (authsize) { - case 8: - case 12: - case 16: - break; - default: - return -EINVAL; - } + err = crypto_rfc4106_check_authsize(authsize); + if (err) + return err; return crypto_aead_setauthsize(ctx->child, authsize); } @@ -818,8 +801,11 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) static int crypto_rfc4106_encrypt(struct aead_request *req) { - if (req->assoclen != 16 && req->assoclen != 20) - return -EINVAL; + int err; + + err = crypto_ipsec_check_assoclen(req->assoclen); + if (err) + return err; req = crypto_rfc4106_crypt(req); @@ -828,8 +814,11 @@ static int crypto_rfc4106_decrypt(struct aead_request *req) { - if (req->assoclen != 16 && req->assoclen != 20) - return -EINVAL; + int err; + + err = crypto_ipsec_check_assoclen(req->assoclen); + if (err) + return err; req = crypto_rfc4106_crypt(req); -- cgit v1.2.3 From a3c1bdb28648a3a461ca8e7ba44d8e35f7bc6780 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 1 Aug 2019 13:13:51 +0200 Subject: crypto: engine - Reduce default RT priority The crypto engine initializes its kworker thread to FIFO-99 (when requesting RT priority); reduce this to FIFO-50. FIFO-99 is the very highest priority available to SCHED_FIFO and is not a suitable default; it would indicate the crypto work is the most important work on the machine. Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index d7502ec3..055d1797 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -425,7 +425,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop); */ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; + struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; struct crypto_engine *engine; if (!dev) -- cgit v1.2.3 From fa711c56054638f10f5dd11f746a04086c782554 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Fri, 2 Aug 2019 11:47:33 +0300 Subject: crypto: gcm - restrict assoclen for rfc4543 Based on seqiv, IPsec ESP and rfc4543/rfc4106, the assoclen can be 16 or 20 bytes. From esp4/esp6, assoclen is the size of the IP header. This includes spi, seq_no and extended seq_no, that is 8 or 12 bytes. In seqiv, the IV size (8 bytes) is added to the assoclen. Therefore the assoclen for rfc4543 should be restricted to 16 or 20 bytes, as for rfc4106.
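Spelled out, the arithmetic behind the 16-or-20 restriction (field sizes as described above):

  SPI (4) + sequence number (4)                 =  8 bytes
  SPI (4) + seq (4) + extended seq high (4)     = 12 bytes
  seqiv then appends the 8-byte IV:    8 + 8    = 16
                                      12 + 8    = 20

hence crypto_ipsec_check_assoclen() accepts exactly those two values.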
Signed-off-by: Iuliana Prodan Reviewed-by: Horia Geanta Signed-off-by: Herbert Xu --- crypto/gcm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/gcm.c b/crypto/gcm.c index 2f3b50f8..73884208 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -1034,12 +1034,14 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) static int crypto_rfc4543_encrypt(struct aead_request *req) { - return crypto_rfc4543_crypt(req, true); + return crypto_ipsec_check_assoclen(req->assoclen) ?: + crypto_rfc4543_crypt(req, true); } static int crypto_rfc4543_decrypt(struct aead_request *req) { - return crypto_rfc4543_crypt(req, false); + return crypto_ipsec_check_assoclen(req->assoclen) ?: + crypto_rfc4543_crypt(req, false); } static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) -- cgit v1.2.3 From b852c19fb03b54077c8f650d7515ce4135c16f1f Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Thu, 8 Aug 2019 16:00:22 +0800 Subject: crypto: cryptd - Use refcount_t for refcount Reference counters are preferred to use refcount_t instead of atomic_t. This is because the implementation of refcount_t can prevent overflows and detect possible use-after-free. So convert atomic_t ref counters to refcount_t. Signed-off-by: Chuhong Yuan Signed-off-by: Herbert Xu --- crypto/cryptd.c | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 3748f9b4..927760b3 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -63,7 +63,7 @@ struct aead_instance_ctx { }; struct cryptd_skcipher_ctx { - atomic_t refcnt; + refcount_t refcnt; struct crypto_sync_skcipher *child; }; @@ -72,7 +72,7 @@ struct cryptd_skcipher_request_ctx { }; struct cryptd_hash_ctx { - atomic_t refcnt; + refcount_t refcnt; struct crypto_shash *child; }; @@ -82,7 +82,7 @@ struct cryptd_hash_request_ctx { }; struct cryptd_aead_ctx { - atomic_t refcnt; + refcount_t refcnt; struct crypto_aead *child; }; @@ -127,7 +127,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, { int cpu, err; struct cryptd_cpu_queue *cpu_queue; - atomic_t *refcnt; + refcount_t *refcnt; cpu = get_cpu(); cpu_queue = this_cpu_ptr(queue->cpu_queue); @@ -140,10 +140,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, queue_work_on(cpu, cryptd_wq, &cpu_queue->work); - if (!atomic_read(refcnt)) + if (!refcount_read(refcnt)) goto out_put_cpu; - atomic_inc(refcnt); + refcount_inc(refcnt); out_put_cpu: put_cpu(); @@ -270,13 +270,13 @@ static void cryptd_skcipher_complete(struct skcipher_request *req, int err) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); - int refcnt = atomic_read(&ctx->refcnt); + int refcnt = refcount_read(&ctx->refcnt); local_bh_disable(); rctx->complete(&req->base, err); local_bh_enable(); - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) crypto_free_skcipher(tfm); } @@ -521,13 +521,13 @@ static void cryptd_hash_complete(struct ahash_request *req, int err) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); - int refcnt = 
atomic_read(&ctx->refcnt); + int refcnt = refcount_read(&ctx->refcnt); local_bh_disable(); rctx->complete(&req->base, err); local_bh_enable(); - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) crypto_free_ahash(tfm); } @@ -772,13 +772,13 @@ static void cryptd_aead_crypt(struct aead_request *req, out: ctx = crypto_aead_ctx(tfm); - refcnt = atomic_read(&ctx->refcnt); + refcnt = refcount_read(&ctx->refcnt); local_bh_disable(); compl(&req->base, err); local_bh_enable(); - if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) + if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt)) crypto_free_aead(tfm); } @@ -979,7 +979,7 @@ struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, } ctx = crypto_skcipher_ctx(tfm); - atomic_set(&ctx->refcnt, 1); + refcount_set(&ctx->refcnt, 1); return container_of(tfm, struct cryptd_skcipher, base); } @@ -997,7 +997,7 @@ bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - return atomic_read(&ctx->refcnt) - 1; + return refcount_read(&ctx->refcnt) - 1; } EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); @@ -1005,7 +1005,7 @@ void cryptd_free_skcipher(struct cryptd_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - if (atomic_dec_and_test(&ctx->refcnt)) + if (refcount_dec_and_test(&ctx->refcnt)) crypto_free_skcipher(&tfm->base); } EXPORT_SYMBOL_GPL(cryptd_free_skcipher); @@ -1029,7 +1029,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, } ctx = crypto_ahash_ctx(tfm); - atomic_set(&ctx->refcnt, 1); + refcount_set(&ctx->refcnt, 1); return __cryptd_ahash_cast(tfm); } @@ -1054,7 +1054,7 @@ bool cryptd_ahash_queued(struct cryptd_ahash *tfm) { struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); - return atomic_read(&ctx->refcnt) - 1; + return refcount_read(&ctx->refcnt) - 1; } EXPORT_SYMBOL_GPL(cryptd_ahash_queued); @@ -1062,7 +1062,7 @@ void cryptd_free_ahash(struct cryptd_ahash *tfm) { struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); - if (atomic_dec_and_test(&ctx->refcnt)) + if (refcount_dec_and_test(&ctx->refcnt)) crypto_free_ahash(&tfm->base); } EXPORT_SYMBOL_GPL(cryptd_free_ahash); @@ -1086,7 +1086,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, } ctx = crypto_aead_ctx(tfm); - atomic_set(&ctx->refcnt, 1); + refcount_set(&ctx->refcnt, 1); return __cryptd_aead_cast(tfm); } @@ -1104,7 +1104,7 @@ bool cryptd_aead_queued(struct cryptd_aead *tfm) { struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); - return atomic_read(&ctx->refcnt) - 1; + return refcount_read(&ctx->refcnt) - 1; } EXPORT_SYMBOL_GPL(cryptd_aead_queued); @@ -1112,7 +1112,7 @@ void cryptd_free_aead(struct cryptd_aead *tfm) { struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); - if (atomic_dec_and_test(&ctx->refcnt)) + if (refcount_dec_and_test(&ctx->refcnt)) crypto_free_aead(&tfm->base); } EXPORT_SYMBOL_GPL(cryptd_free_aead); -- cgit v1.2.3 From f94daba2cecf8211b9acec1ed89325d407b8793a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 9 Aug 2019 16:29:19 +0800 Subject: crypto: aes-generic - remove unused variable 'rco_tab' crypto/aes_generic.c:64:18: warning: rco_tab defined but not used [-Wunused-const-variable=] It is never used, so can be removed. 
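For reference, the removed table held the AES key-schedule round constants (x^i in GF(2^8), reduced modulo x^8 + x^4 + x^3 + x + 1); should they ever be needed again they are trivially regenerated, e.g. (standalone sketch, not kernel code):

  static u32 rcon(unsigned int i)
  {
          u32 rc = 1;

          while (i--)
                  rc = (rc << 1) ^ ((rc & 0x80) ? 0x11b : 0);
          return rc; /* i = 0..9 yields 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 */
  }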
Reported-by: Hulk Robot Signed-off-by: YueHaibing Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'crypto') diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 71a5c190..22e58671 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -61,8 +61,6 @@ static inline u8 byte(const u32 x, const unsigned n) return x >> (n << 3); } -static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 }; - /* cacheline-aligned to facilitate prefetching into cache */ __visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = { { -- cgit v1.2.3 From 7e28a4ed64ca3a3f2ff2264a1afe776c6722b7f9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 9 Aug 2019 16:49:05 +0800 Subject: crypto: streebog - remove two unused variables crypto/streebog_generic.c:162:17: warning: Pi defined but not used [-Wunused-const-variable=] crypto/streebog_generic.c:151:17: warning: Tau defined but not used [-Wunused-const-variable=] They are never used, so can be removed. Reported-by: Hulk Robot Signed-off-by: YueHaibing Reviewed-by: Vitaly Chikunov Signed-off-by: Herbert Xu --- crypto/streebog_generic.c | 46 ---------------------------------------------- 1 file changed, 46 deletions(-) (limited to 'crypto') diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c index 63663c3b..dc625ffc 100644 --- a/crypto/streebog_generic.c +++ b/crypto/streebog_generic.c @@ -148,52 +148,6 @@ static const struct streebog_uint512 C[12] = { } } }; -static const u8 Tau[64] = { - 0, 8, 16, 24, 32, 40, 48, 56, - 1, 9, 17, 25, 33, 41, 49, 57, - 2, 10, 18, 26, 34, 42, 50, 58, - 3, 11, 19, 27, 35, 43, 51, 59, - 4, 12, 20, 28, 36, 44, 52, 60, - 5, 13, 21, 29, 37, 45, 53, 61, - 6, 14, 22, 30, 38, 46, 54, 62, - 7, 15, 23, 31, 39, 47, 55, 63 -}; - -static const u8 Pi[256] = { - 252, 238, 221, 17, 207, 110, 49, 22, - 251, 196, 250, 218, 35, 197, 4, 77, - 233, 119, 240, 219, 147, 46, 153, 186, - 23, 54, 241, 187, 20, 205, 95, 193, - 249, 24, 101, 90, 226, 92, 239, 33, - 129, 28, 60, 66, 139, 1, 142, 79, - 5, 132, 2, 174, 227, 106, 143, 160, - 6, 11, 237, 152, 127, 212, 211, 31, - 235, 52, 44, 81, 234, 200, 72, 171, - 242, 42, 104, 162, 253, 58, 206, 204, - 181, 112, 14, 86, 8, 12, 118, 18, - 191, 114, 19, 71, 156, 183, 93, 135, - 21, 161, 150, 41, 16, 123, 154, 199, - 243, 145, 120, 111, 157, 158, 178, 177, - 50, 117, 25, 61, 255, 53, 138, 126, - 109, 84, 198, 128, 195, 189, 13, 87, - 223, 245, 36, 169, 62, 168, 67, 201, - 215, 121, 214, 246, 124, 34, 185, 3, - 224, 15, 236, 222, 122, 148, 176, 188, - 220, 232, 40, 80, 78, 51, 10, 74, - 167, 151, 96, 115, 30, 0, 98, 68, - 26, 184, 56, 130, 100, 159, 38, 65, - 173, 69, 70, 146, 39, 94, 85, 47, - 140, 163, 165, 125, 105, 213, 149, 59, - 7, 88, 179, 64, 134, 172, 29, 247, - 48, 55, 107, 228, 136, 217, 231, 137, - 225, 27, 131, 73, 76, 63, 248, 254, - 141, 83, 170, 144, 202, 216, 133, 97, - 32, 113, 103, 164, 45, 43, 9, 91, - 203, 155, 37, 208, 190, 229, 108, 82, - 89, 166, 116, 210, 230, 244, 180, 192, - 209, 102, 175, 194, 57, 75, 99, 182 -}; - static const unsigned long long Ax[8][256] = { { 0xd01f715b5c7ef8e6ULL, 0x16fa240980778325ULL, 0xa8a42e857ee049c8ULL, -- cgit v1.2.3 From 9e9b33a2d019edc4ba465ae5721256dd610f0991 Mon Sep 17 00:00:00 2001 From: Pascal van Leeuwen Date: Fri, 9 Aug 2019 17:51:07 +0200 Subject: crypto: aead - Do not allow authsize=0 if auth. alg has digestsize>0 Return -EINVAL on an attempt to set the authsize to 0 with an auth. algorithm with a non-zero digestsize (i.e. 
anything but digest_null) as authenticating the data and then throwing away the result does not make any sense at all. The digestsize zero exception is for use with digest_null for testing purposes only. Signed-off-by: Pascal van Leeuwen Signed-off-by: Herbert Xu --- crypto/aead.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/aead.c b/crypto/aead.c index fbf0ec93..ce035589 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -70,7 +70,8 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { int err; - if (authsize > crypto_aead_maxauthsize(tfm)) + if ((!authsize && crypto_aead_maxauthsize(tfm)) || + authsize > crypto_aead_maxauthsize(tfm)) return -EINVAL; if (crypto_aead_alg(tfm)->setauthsize) { -- cgit v1.2.3 From 8118cd4cf772aa4026e1c57ab8e7dee80f4c6e85 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 9 Aug 2019 20:14:57 +0300 Subject: crypto: xts - add support for ciphertext stealing Add support for the missing ciphertext stealing part of the XTS-AES specification, which permits inputs of any size >= the block size. Cc: Pascal van Leeuwen Cc: Ondrej Mosnacek Tested-by: Milan Broz Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/xts.c | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 132 insertions(+), 20 deletions(-) (limited to 'crypto') diff --git a/crypto/xts.c b/crypto/xts.c index 11211003..ab117633 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* XTS: as defined in IEEE1619/D16 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf - * (sector sizes which are not a multiple of 16 bytes are, - * however currently unsupported) * * Copyright (c) 2007 Rik Snel * @@ -34,6 +32,8 @@ struct xts_instance_ctx { struct rctx { le128 t; + struct scatterlist *tail; + struct scatterlist sg[2]; struct skcipher_request subreq; }; @@ -84,10 +84,11 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * mutliple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the gf128mul_x_ble() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass) +static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) { struct rctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); const int bs = XTS_BLOCK_SIZE; struct skcipher_walk w; le128 t = rctx->t; @@ -109,6 +110,20 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) wdst = w.dst.virt.addr; do { + if (unlikely(cts) && + w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) { + if (!enc) { + if (second_pass) + rctx->t = t; + gf128mul_x_ble(&t, &t); + } + le128_xor(wdst, &t, wsrc); + if (enc && second_pass) + gf128mul_x_ble(&rctx->t, &t); + skcipher_walk_done(&w, avail - bs); + return 0; + } + le128_xor(wdst++, &t, wsrc++); gf128mul_x_ble(&t, &t); } while ((avail -= bs) >= bs); @@ -119,17 +134,71 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) return err; } -static int xor_tweak_pre(struct skcipher_request *req) +static int xor_tweak_pre(struct skcipher_request *req, bool enc) { - return xor_tweak(req, false); + return xor_tweak(req, false, enc); } -static int xor_tweak_post(struct skcipher_request *req) +static int xor_tweak_post(struct skcipher_request *req, bool enc) { - return xor_tweak(req, true); + return xor_tweak(req, true, enc); } -static void crypt_done(struct crypto_async_request *areq, int err) +static void cts_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; + le128 b; + + if (!err) { + struct rctx *rctx = skcipher_request_ctx(req); + + scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); + le128_xor(&b, &rctx->t, &b); + scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1); + } + + skcipher_request_complete(req, err); +} + +static int cts_final(struct skcipher_request *req, + int (*crypt)(struct skcipher_request *req)) +{ + struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1); + struct rctx *rctx = skcipher_request_ctx(req); + struct skcipher_request *subreq = &rctx->subreq; + int tail = req->cryptlen % XTS_BLOCK_SIZE; + le128 b[2]; + int err; + + rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst, + offset - XTS_BLOCK_SIZE); + + scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); + memcpy(b + 1, b, tail); + scatterwalk_map_and_copy(b, req->src, offset, tail, 0); + + le128_xor(b, &rctx->t, b); + + scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1); + + skcipher_request_set_tfm(subreq, ctx->child); + skcipher_request_set_callback(subreq, req->base.flags, cts_done, req); + skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail, + XTS_BLOCK_SIZE, NULL); + + err = crypt(subreq); + if (err) + return err; + + scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); + le128_xor(b, &rctx->t, b); + scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1); + + return 0; +} + +static void encrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; @@ -137,47 +206,90 @@ static void crypt_done(struct crypto_async_request *areq, int err) struct rctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req); + err = xor_tweak_post(req, true); + + if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { + err = cts_final(req, crypto_skcipher_encrypt); + if (err == -EINPROGRESS) + return; + } } 
skcipher_request_complete(req, err); } -static void init_crypt(struct skcipher_request *req) +static void decrypt_done(struct crypto_async_request *areq, int err) +{ + struct skcipher_request *req = areq->data; + + if (!err) { + struct rctx *rctx = skcipher_request_ctx(req); + + rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = xor_tweak_post(req, false); + + if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { + err = cts_final(req, crypto_skcipher_decrypt); + if (err == -EINPROGRESS) + return; + } + } + + skcipher_request_complete(req, err); +} + +static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) { struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct rctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; + if (req->cryptlen < XTS_BLOCK_SIZE) + return -EINVAL; + skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); + skcipher_request_set_callback(subreq, req->base.flags, compl, req); skcipher_request_set_crypt(subreq, req->dst, req->dst, - req->cryptlen, NULL); + req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL); /* calculate first value of T */ crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv); + + return 0; } static int encrypt(struct skcipher_request *req) { struct rctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; + int err; - init_crypt(req); - return xor_tweak_pre(req) ?: - crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req); + err = init_crypt(req, encrypt_done) ?: + xor_tweak_pre(req, true) ?: + crypto_skcipher_encrypt(subreq) ?: + xor_tweak_post(req, true); + + if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) + return err; + + return cts_final(req, crypto_skcipher_encrypt); } static int decrypt(struct skcipher_request *req) { struct rctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; + int err; + + err = init_crypt(req, decrypt_done) ?: + xor_tweak_pre(req, false) ?: + crypto_skcipher_decrypt(subreq) ?: + xor_tweak_post(req, false); + + if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) + return err; - init_crypt(req); - return xor_tweak_pre(req) ?: - crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req); + return cts_final(req, crypto_skcipher_decrypt); } static int init_tfm(struct crypto_skcipher *tfm) -- cgit v1.2.3 From b474dfe38245907280efb4cec5e7d9178ce1572a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 12 Aug 2019 01:59:10 +0300 Subject: crypto: aegis128 - add support for SIMD acceleration Add some plumbing to allow the AEGIS128 code to be built with SIMD routines for acceleration. 
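Unlike the reverted attempt, the core now reaches the SIMD entry points only through a guard that the compiler can eliminate when CONFIG_CRYPTO_AEGIS128_SIMD is unset, so builds for other architectures never reference the NEON objects. The key piece of the plumbing, as it appears in the aegis128-core.c hunk below:

  static bool aegis128_do_simd(void)
  {
  #ifdef CONFIG_CRYPTO_AEGIS128_SIMD
          if (have_simd)
                  return crypto_simd_usable();
  #endif
          return false;
  }

With every call site guarded this way, the unconditional prototypes for the crypto_aegis128_*_simd() helpers are harmless on configurations without the arch glue.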
Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/aegis128-core.c | 491 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/aegis128.c | 447 -------------------------------------------- 3 files changed, 492 insertions(+), 447 deletions(-) create mode 100644 crypto/aegis128-core.c delete mode 100644 crypto/aegis128.c (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index cfcc954e..92e98571 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o +aegis128-y := aegis128-core.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c new file mode 100644 index 00000000..fa69e999 --- /dev/null +++ b/crypto/aegis128-core.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * The AEGIS-128 Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "aegis.h" + +#define AEGIS128_NONCE_SIZE 16 +#define AEGIS128_STATE_BLOCKS 5 +#define AEGIS128_KEY_SIZE 16 +#define AEGIS128_MIN_AUTH_SIZE 8 +#define AEGIS128_MAX_AUTH_SIZE 16 + +struct aegis_state { + union aegis_block blocks[AEGIS128_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key; +}; + +struct aegis128_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static bool have_simd; + +static bool aegis128_do_simd(void) +{ +#ifdef CONFIG_CRYPTO_AEGIS128_SIMD + if (have_simd) + return crypto_simd_usable(); +#endif + return false; +} + +bool crypto_aegis128_have_simd(void); +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); + +static void crypto_aegis128_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; + for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis128_update_a(struct aegis_state *state, + const union aegis_block *msg) +{ + if (aegis128_do_simd()) { + crypto_aegis128_update_simd(state, msg); + return; + } + + crypto_aegis128_update(state); + crypto_aegis_block_xor(&state->blocks[0], msg); +} + +static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) +{ + if (aegis128_do_simd()) { + crypto_aegis128_update_simd(state, msg); + return; + } + + crypto_aegis128_update(state); + crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis128_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv; + unsigned int i; + + key_iv = *key; + 
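/* initial state per the AEGIS-128 spec: { K ^ IV, C1, C0, K ^ C0, K ^ C1 }, followed by ten update rounds */ +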
crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); + + state->blocks[0] = key_iv; + state->blocks[1] = crypto_aegis_const[1]; + state->blocks[2] = crypto_aegis_const[0]; + state->blocks[3] = *key; + state->blocks[4] = *key; + + crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); + + for (i = 0; i < 5; i++) { + crypto_aegis128_update_a(state, key); + crypto_aegis128_update_a(state, &key_iv); + } +} + +static void crypto_aegis128_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_block *src_blk = + (const union aegis_block *)src; + + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_a(state, src_blk); + + size -= AEGIS_BLOCK_SIZE; + src_blk++; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_u(state, src); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + } + } +} + +static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + + crypto_aegis128_update_a(state, &msg); + + crypto_aegis_block_xor(&msg, &tmp); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, 
AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&msg, &tmp); + + memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); + + crypto_aegis128_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128_ad(state, src, left); + src += left & ~(AEGIS_BLOCK_SIZE - 1); + left &= AEGIS_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); + crypto_aegis128_update_a(state, &buf); + } +} + +static void crypto_aegis128_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis128_ops *ops) +{ + struct skcipher_walk walk; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); + + skcipher_walk_done(&walk, walk.nbytes - nbytes); + } +} + +static void crypto_aegis128_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_block tmp; + unsigned int i; + + tmp.words64[0] = cpu_to_le64(assocbits); + tmp.words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp, &state->blocks[3]); + + for (i = 0; i < 7; i++) + crypto_aegis128_update_a(state, &tmp); + + for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS128_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); + return 0; +} + +static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis128_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + 
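/* ops carries either the scalar or the SIMD chunk routines, chosen by the caller */ +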
struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen); + crypto_aegis128_process_crypt(&state, req, ops); + crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis128_encrypt(struct aead_request *req) +{ + const struct aegis128_ops *ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + if (aegis128_do_simd()) + ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk_simd }; + + crypto_aegis128_crypt(req, &tag, cryptlen, ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis128_decrypt(struct aead_request *req) +{ + const struct aegis128_ops *ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk, + }; + static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + if (aegis128_do_simd()) + ops = &(struct aegis128_ops){ + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk_simd }; + + crypto_aegis128_crypt(req, &tag, cryptlen, ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0; +} + +static struct aead_alg crypto_aegis128_alg = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt, + .decrypt = crypto_aegis128_decrypt, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base = { + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis128", + .cra_driver_name = "aegis128-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis128_module_init(void) +{ + if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD)) + have_simd = crypto_aegis128_have_simd(); + + return crypto_register_aead(&crypto_aegis128_alg); +} + +static void __exit crypto_aegis128_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis128_alg); +} + +subsys_initcall(crypto_aegis128_module_init); +module_exit(crypto_aegis128_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis128"); +MODULE_ALIAS_CRYPTO("aegis128-generic"); diff --git a/crypto/aegis128.c b/crypto/aegis128.c deleted file mode 100644 index 32840d5e..00000000 --- a/crypto/aegis128.c +++ /dev/null @@ -1,447 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * The AEGIS-128 Authenticated-Encryption Algorithm - * - * Copyright (c) 2017-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "aegis.h" - -#define AEGIS128_NONCE_SIZE 16 -#define AEGIS128_STATE_BLOCKS 5 -#define AEGIS128_KEY_SIZE 16 -#define AEGIS128_MIN_AUTH_SIZE 8 -#define AEGIS128_MAX_AUTH_SIZE 16 - -struct aegis_state { - union aegis_block blocks[AEGIS128_STATE_BLOCKS]; -}; - -struct aegis_ctx { - union aegis_block key; -}; - -struct aegis128_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_chunk)(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -}; - -static void crypto_aegis128_update(struct aegis_state *state) -{ - union aegis_block tmp; - unsigned int i; - - tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; - for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) - crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], - &state->blocks[i]); - crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); -} - -static void crypto_aegis128_update_a(struct aegis_state *state, - const union aegis_block *msg) -{ - crypto_aegis128_update(state); - crypto_aegis_block_xor(&state->blocks[0], msg); -} - -static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) -{ - crypto_aegis128_update(state); - crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); -} - -static void crypto_aegis128_init(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv) -{ - union aegis_block key_iv; - unsigned int i; - - key_iv = *key; - crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); - - state->blocks[0] = key_iv; - state->blocks[1] = crypto_aegis_const[1]; - state->blocks[2] = crypto_aegis_const[0]; - state->blocks[3] = *key; - state->blocks[4] = *key; - - crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); - crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); - - for (i = 0; i < 5; i++) { - crypto_aegis128_update_a(state, key); - crypto_aegis128_update_a(state, &key_iv); - } -} - -static void crypto_aegis128_ad(struct aegis_state *state, - const u8 *src, unsigned int size) -{ - if (AEGIS_ALIGNED(src)) { - const union aegis_block *src_blk = - (const union aegis_block *)src; - - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_a(state, src_blk); - - size -= AEGIS_BLOCK_SIZE; - src_blk++; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - crypto_aegis128_update_u(state, src); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - } - } -} - -static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, src_blk); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - 
crypto_aegis128_update_u(state, src); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - - crypto_aegis128_update_a(state, &msg); - - crypto_aegis_block_xor(&msg, &tmp); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size) -{ - union aegis_block tmp; - - if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { - while (size >= AEGIS_BLOCK_SIZE) { - union aegis_block *dst_blk = - (union aegis_block *)dst; - const union aegis_block *src_blk = - (const union aegis_block *)src; - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&tmp, src_blk); - - crypto_aegis128_update_a(state, &tmp); - - *dst_blk = tmp; - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } else { - while (size >= AEGIS_BLOCK_SIZE) { - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); - - crypto_aegis128_update_a(state, &tmp); - - memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); - - size -= AEGIS_BLOCK_SIZE; - src += AEGIS_BLOCK_SIZE; - dst += AEGIS_BLOCK_SIZE; - } - } - - if (size > 0) { - union aegis_block msg = {}; - memcpy(msg.bytes, src, size); - - tmp = state->blocks[2]; - crypto_aegis_block_and(&tmp, &state->blocks[3]); - crypto_aegis_block_xor(&tmp, &state->blocks[4]); - crypto_aegis_block_xor(&tmp, &state->blocks[1]); - crypto_aegis_block_xor(&msg, &tmp); - - memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); - - crypto_aegis128_update_a(state, &msg); - - memcpy(dst, msg.bytes, size); - } -} - -static void crypto_aegis128_process_ad(struct aegis_state *state, - struct scatterlist *sg_src, - unsigned int assoclen) -{ - struct scatter_walk walk; - union aegis_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= AEGIS_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = AEGIS_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - crypto_aegis128_update_a(state, &buf); - pos = 0; - left -= fill; - src += fill; - } - - crypto_aegis128_ad(state, src, left); - src += left & ~(AEGIS_BLOCK_SIZE - 1); - left &= AEGIS_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); - crypto_aegis128_update_a(state, &buf); - } -} - -static void crypto_aegis128_process_crypt(struct aegis_state *state, - struct aead_request *req, - const struct aegis128_ops *ops) -{ - struct skcipher_walk walk; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - unsigned int nbytes = 
walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes); - - skcipher_walk_done(&walk, walk.nbytes - nbytes); - } -} - -static void crypto_aegis128_final(struct aegis_state *state, - union aegis_block *tag_xor, - u64 assoclen, u64 cryptlen) -{ - u64 assocbits = assoclen * 8; - u64 cryptbits = cryptlen * 8; - - union aegis_block tmp; - unsigned int i; - - tmp.words64[0] = cpu_to_le64(assocbits); - tmp.words64[1] = cpu_to_le64(cryptbits); - - crypto_aegis_block_xor(&tmp, &state->blocks[3]); - - for (i = 0; i < 7; i++) - crypto_aegis128_update_a(state, &tmp); - - for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) - crypto_aegis_block_xor(tag_xor, &state->blocks[i]); -} - -static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct aegis_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != AEGIS128_KEY_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); - return 0; -} - -static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > AEGIS128_MAX_AUTH_SIZE) - return -EINVAL; - if (authsize < AEGIS128_MIN_AUTH_SIZE) - return -EINVAL; - return 0; -} - -static void crypto_aegis128_crypt(struct aead_request *req, - union aegis_block *tag_xor, - unsigned int cryptlen, - const struct aegis128_ops *ops) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aegis_ctx *ctx = crypto_aead_ctx(tfm); - struct aegis_state state; - - crypto_aegis128_init(&state, &ctx->key, req->iv); - crypto_aegis128_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_process_crypt(&state, req, ops); - crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); -} - -static int crypto_aegis128_encrypt(struct aead_request *req) -{ - static const struct aegis128_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_chunk = crypto_aegis128_encrypt_chunk, - }; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_aegis128_crypt(req, &tag, cryptlen, &ops); - - scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, - authsize, 1); - return 0; -} - -static int crypto_aegis128_decrypt(struct aead_request *req) -{ - static const struct aegis128_ops ops = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_chunk = crypto_aegis128_decrypt_chunk, - }; - static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - union aegis_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, - authsize, 0); - - crypto_aegis128_crypt(req, &tag, cryptlen, &ops); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} - -static struct aead_alg crypto_aegis128_alg = { - .setkey = crypto_aegis128_setkey, - .setauthsize = crypto_aegis128_setauthsize, - .encrypt = crypto_aegis128_encrypt, - .decrypt = crypto_aegis128_decrypt, - - .ivsize = AEGIS128_NONCE_SIZE, - .maxauthsize = AEGIS128_MAX_AUTH_SIZE, - .chunksize = AEGIS_BLOCK_SIZE, - - .base = { - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct aegis_ctx), - .cra_alignmask = 0, - - .cra_priority = 100, - - .cra_name = "aegis128", - .cra_driver_name = "aegis128-generic", - - .cra_module = THIS_MODULE, - } -}; - -static int __init crypto_aegis128_module_init(void) -{ - return crypto_register_aead(&crypto_aegis128_alg); -} - -static void __exit crypto_aegis128_module_exit(void) -{ - crypto_unregister_aead(&crypto_aegis128_alg); -} - -subsys_initcall(crypto_aegis128_module_init); -module_exit(crypto_aegis128_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); -MODULE_ALIAS_CRYPTO("aegis128"); -MODULE_ALIAS_CRYPTO("aegis128-generic"); -- cgit v1.2.3 From 254ed9c88f9e6983530ac65abe5beae630248e8e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 12 Aug 2019 01:59:11 +0300 Subject: crypto: aegis128 - provide a SIMD implementation based on NEON intrinsics Provide an accelerated implementation of aegis128 by wiring up the SIMD hooks in the generic driver to an implementation based on NEON intrinsics, which can be compiled to both ARM and arm64 code. This results in a performance of 2.2 cycles per byte on Cortex-A53, which is a performance increase of ~11x compared to the generic code. Reviewed-by: Ondrej Mosnacek Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 5 ++ crypto/Makefile | 12 ++++ crypto/aegis128-neon-inner.c | 147 +++++++++++++++++++++++++++++++++++++++++++ crypto/aegis128-neon.c | 43 +++++++++++++ 4 files changed, 207 insertions(+) create mode 100644 crypto/aegis128-neon-inner.c create mode 100644 crypto/aegis128-neon.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 8880c1fc..455a3354 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -306,6 +306,11 @@ config CRYPTO_AEGIS128 help Support for the AEGIS-128 dedicated AEAD algorithm. 
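+# The SIMD glue is optional: the core driver probes for it once at boot
+# and falls back to the scalar code whenever NEON cannot be used.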
+config CRYPTO_AEGIS128_SIMD + bool "Support SIMD acceleration for AEGIS-128" + depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) + default y + config CRYPTO_AEGIS128_AESNI_SSE2 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" depends on X86 && 64BIT diff --git a/crypto/Makefile b/crypto/Makefile index 92e98571..99a9fa90 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -91,6 +91,18 @@ obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o aegis128-y := aegis128-core.o + +ifeq ($(ARCH),arm) +CFLAGS_aegis128-neon-inner.o += -ffreestanding -march=armv7-a -mfloat-abi=softfp +CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8 +aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o +endif +ifeq ($(ARCH),arm64) +CFLAGS_aegis128-neon-inner.o += -ffreestanding -mcpu=generic+crypto +CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only +aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o +endif + obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des_generic.o diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c new file mode 100644 index 00000000..3d8043c4 --- /dev/null +++ b/crypto/aegis128-neon-inner.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2019 Linaro, Ltd. + */ + +#ifdef CONFIG_ARM64 +#include + +#define AES_ROUND "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b" +#else +#include + +#define AES_ROUND "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0" +#endif + +#define AEGIS_BLOCK_SIZE 16 + +#include + +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); + +struct aegis128_state { + uint8x16_t v[5]; +}; + +static struct aegis128_state aegis128_load_state_neon(const void *state) +{ + return (struct aegis128_state){ { + vld1q_u8(state), + vld1q_u8(state + 16), + vld1q_u8(state + 32), + vld1q_u8(state + 48), + vld1q_u8(state + 64) + } }; +} + +static void aegis128_save_state_neon(struct aegis128_state st, void *state) +{ + vst1q_u8(state, st.v[0]); + vst1q_u8(state + 16, st.v[1]); + vst1q_u8(state + 32, st.v[2]); + vst1q_u8(state + 48, st.v[3]); + vst1q_u8(state + 64, st.v[4]); +} + +static inline __attribute__((always_inline)) +uint8x16_t aegis_aes_round(uint8x16_t w) +{ + uint8x16_t z = {}; + + /* + * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics + * to force the compiler to issue the aese/aesmc instructions in pairs. + * This is much faster on many cores, where the instruction pair can + * execute in a single cycle. 
+ */ + asm(AES_ROUND : "+w"(w) : "w"(z)); + return w; +} + +static inline __attribute__((always_inline)) +struct aegis128_state aegis128_update_neon(struct aegis128_state st, + uint8x16_t m) +{ + m ^= aegis_aes_round(st.v[4]); + st.v[4] ^= aegis_aes_round(st.v[3]); + st.v[3] ^= aegis_aes_round(st.v[2]); + st.v[2] ^= aegis_aes_round(st.v[1]); + st.v[1] ^= aegis_aes_round(st.v[0]); + st.v[0] ^= m; + + return st; +} + +void crypto_aegis128_update_neon(void *state, const void *msg) +{ + struct aegis128_state st = aegis128_load_state_neon(state); + + st = aegis128_update_neon(st, vld1q_u8(msg)); + + aegis128_save_state_neon(st, state); +} + +void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size) +{ + struct aegis128_state st = aegis128_load_state_neon(state); + uint8x16_t msg; + + while (size >= AEGIS_BLOCK_SIZE) { + uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + + msg = vld1q_u8(src); + st = aegis128_update_neon(st, msg); + vst1q_u8(dst, msg ^ s); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + + if (size > 0) { + uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + uint8_t buf[AEGIS_BLOCK_SIZE] = {}; + + memcpy(buf, src, size); + msg = vld1q_u8(buf); + st = aegis128_update_neon(st, msg); + vst1q_u8(buf, msg ^ s); + memcpy(dst, buf, size); + } + + aegis128_save_state_neon(st, state); +} + +void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size) +{ + struct aegis128_state st = aegis128_load_state_neon(state); + uint8x16_t msg; + + while (size >= AEGIS_BLOCK_SIZE) { + msg = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + st = aegis128_update_neon(st, msg); + vst1q_u8(dst, msg); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + + if (size > 0) { + uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; + uint8_t buf[AEGIS_BLOCK_SIZE]; + + vst1q_u8(buf, s); + memcpy(buf, src, size); + msg = vld1q_u8(buf) ^ s; + vst1q_u8(buf, msg); + memcpy(dst, buf, size); + + st = aegis128_update_neon(st, msg); + } + + aegis128_save_state_neon(st, state); +} diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c new file mode 100644 index 00000000..c1c0a168 --- /dev/null +++ b/crypto/aegis128-neon.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2019 Linaro Ltd + */ + +#include +#include + +#include "aegis.h" + +void crypto_aegis128_update_neon(void *state, const void *msg); +void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); +void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, + unsigned int size); + +bool crypto_aegis128_have_simd(void) +{ + return cpu_have_feature(cpu_feature(AES)); +} + +void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) +{ + kernel_neon_begin(); + crypto_aegis128_update_neon(state, msg); + kernel_neon_end(); +} + +void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, + const u8 *src, unsigned int size) +{ + kernel_neon_begin(); + crypto_aegis128_encrypt_chunk_neon(state, dst, src, size); + kernel_neon_end(); +} + +void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, + const u8 *src, unsigned int size) +{ + kernel_neon_begin(); + crypto_aegis128_decrypt_chunk_neon(state, dst, src, size); + kernel_neon_end(); +} -- cgit v1.2.3 From 64095d77e80e74b96e57157522834eb845314d4c Mon Sep 17 00:00:00 2001 From: Ard 
Biesheuvel Date: Mon, 12 Aug 2019 01:59:12 +0300 Subject: crypto: arm64/aegis128 - implement plain NEON version Provide a version of the core AES transform to the aegis128 SIMD code that does not rely on the special AES instructions, but uses plain NEON instructions instead. This allows the SIMD version of the aegis128 driver to be used on arm64 systems that do not implement those instructions (which are not mandatory in the architecture), such as the Raspberry Pi 3. Since GCC makes a mess of this when using the tbl/tbx intrinsics to perform the sbox substitution, preload the Sbox into v16..v31 in this case and use inline asm to emit the tbl/tbx instructions. Clang does not support this approach, nor does it require it, since it does a much better job at code generation, so there we use the intrinsics as usual. Cc: Nick Desaulniers Signed-off-by: Ard Biesheuvel Acked-by: Nick Desaulniers Signed-off-by: Herbert Xu --- crypto/Makefile | 9 +++++- crypto/aegis128-neon-inner.c | 65 ++++++++++++++++++++++++++++++++++++++++++++ crypto/aegis128-neon.c | 8 +++++- 3 files changed, 80 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index 99a9fa90..0d2cdd52 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -98,7 +98,14 @@ CFLAGS_aegis128-neon-inner.o += -mfpu=crypto-neon-fp-armv8 aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o endif ifeq ($(ARCH),arm64) -CFLAGS_aegis128-neon-inner.o += -ffreestanding -mcpu=generic+crypto +aegis128-cflags-y := -ffreestanding -mcpu=generic+crypto +aegis128-cflags-$(CONFIG_CC_IS_GCC) += -ffixed-q16 -ffixed-q17 -ffixed-q18 \ + -ffixed-q19 -ffixed-q20 -ffixed-q21 \ + -ffixed-q22 -ffixed-q23 -ffixed-q24 \ + -ffixed-q25 -ffixed-q26 -ffixed-q27 \ + -ffixed-q28 -ffixed-q29 -ffixed-q30 \ + -ffixed-q31 +CFLAGS_aegis128-neon-inner.o += $(aegis128-cflags-y) CFLAGS_REMOVE_aegis128-neon-inner.o += -mgeneral-regs-only aegis128-$(CONFIG_CRYPTO_AEGIS128_SIMD) += aegis128-neon.o aegis128-neon-inner.o endif diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c index 3d8043c4..ed55568a 100644 --- a/crypto/aegis128-neon-inner.c +++ b/crypto/aegis128-neon-inner.c @@ -17,6 +17,8 @@ #include +extern int aegis128_have_aes_insn; + void *memcpy(void *dest, const void *src, size_t n); void *memset(void *s, int c, size_t n); @@ -24,6 +26,8 @@ struct aegis128_state { uint8x16_t v[5]; }; +extern const uint8x16x4_t crypto_aes_sbox[]; + static struct aegis128_state aegis128_load_state_neon(const void *state) { return (struct aegis128_state){ { @@ -49,6 +53,46 @@ uint8x16_t aegis_aes_round(uint8x16_t w) { uint8x16_t z = {}; +#ifdef CONFIG_ARM64 + if (!__builtin_expect(aegis128_have_aes_insn, 1)) { + static const uint8x16_t shift_rows = { + 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3, + 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb, + }; + static const uint8x16_t ror32by8 = { + 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4, + 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc, + }; + uint8x16_t v; + + // shift rows + w = vqtbl1q_u8(w, shift_rows); + + // sub bytes + if (!IS_ENABLED(CONFIG_CC_IS_GCC)) { + v = vqtbl4q_u8(crypto_aes_sbox[0], w); + v = vqtbx4q_u8(v, crypto_aes_sbox[1], w - 0x40); + v = vqtbx4q_u8(v, crypto_aes_sbox[2], w - 0x80); + v = vqtbx4q_u8(v, crypto_aes_sbox[3], w - 0xc0); + } else { + asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w)); + w -= 0x40; + asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w)); + w -= 0x40; + asm("tbx %0.16b, {v24.16b-v27.16b}, %1.16b" : "+w"(v) : "w"(w)); + 
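/* rebase the last Sbox quarter (0xc0..0xff) for the final tbx */ +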
w -= 0x40; + asm("tbx %0.16b, {v28.16b-v31.16b}, %1.16b" : "+w"(v) : "w"(w)); + } + + // mix columns + w = (v << 1) ^ (uint8x16_t)(((int8x16_t)v >> 7) & 0x1b); + w ^= (uint8x16_t)vrev32q_u16((uint16x8_t)v); + w ^= vqtbl1q_u8(v ^ w, ror32by8); + + return w; + } +#endif + /* * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics * to force the compiler to issue the aese/aesmc instructions in pairs. @@ -73,10 +117,27 @@ struct aegis128_state aegis128_update_neon(struct aegis128_state st, return st; } +static inline __attribute__((always_inline)) +void preload_sbox(void) +{ + if (!IS_ENABLED(CONFIG_ARM64) || + !IS_ENABLED(CONFIG_CC_IS_GCC) || + __builtin_expect(aegis128_have_aes_insn, 1)) + return; + + asm("ld1 {v16.16b-v19.16b}, [%0], #64 \n\t" + "ld1 {v20.16b-v23.16b}, [%0], #64 \n\t" + "ld1 {v24.16b-v27.16b}, [%0], #64 \n\t" + "ld1 {v28.16b-v31.16b}, [%0] \n\t" + :: "r"(crypto_aes_sbox)); +} + void crypto_aegis128_update_neon(void *state, const void *msg) { struct aegis128_state st = aegis128_load_state_neon(state); + preload_sbox(); + st = aegis128_update_neon(st, vld1q_u8(msg)); aegis128_save_state_neon(st, state); @@ -88,6 +149,8 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, struct aegis128_state st = aegis128_load_state_neon(state); uint8x16_t msg; + preload_sbox(); + while (size >= AEGIS_BLOCK_SIZE) { uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; @@ -120,6 +183,8 @@ void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, struct aegis128_state st = aegis128_load_state_neon(state); uint8x16_t msg; + preload_sbox(); + while (size >= AEGIS_BLOCK_SIZE) { msg = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4]; st = aegis128_update_neon(st, msg); diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c index c1c0a168..751f9c19 100644 --- a/crypto/aegis128-neon.c +++ b/crypto/aegis128-neon.c @@ -14,9 +14,15 @@ void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src, void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src, unsigned int size); +int aegis128_have_aes_insn __ro_after_init; + bool crypto_aegis128_have_simd(void) { - return cpu_have_feature(cpu_feature(AES)); + if (cpu_have_feature(cpu_feature(AES))) { + aegis128_have_aes_insn = 1; + return true; + } + return IS_ENABLED(CONFIG_ARM64); } void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) -- cgit v1.2.3 From e43050c598f759993e21e64b0ad939fcf9eaec77 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:00:43 +0300 Subject: crypto: des/3des_ede - add new helpers to verify keys The recently added helper routine to perform key strength validation of triple DES keys is slightly inadequate, since it comes in two versions, neither of which are highly useful for anything other than skciphers (and many drivers still use the older blkcipher interfaces). So let's add a new helper and, considering that this is a helper function that is only intended to be used by crypto code itself, put it in a new des.h header under crypto/internal. While at it, implement a similar helper for single DES, so that we can start replacing the pattern of calling des_ekey() into a temp buffer that occurs in many drivers in drivers/crypto. 
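As an illustration only (no such driver is touched here), a typical accelerator setkey then shrinks to the pattern below; mydrv_ctx and the surrounding driver are hypothetical, while crypto_des3_ede_verify_key() is the new helper as used by the follow-up patches:

    static int mydrv_des3_setkey(struct crypto_tfm *tfm, const u8 *key,
    				 unsigned int keylen)
    {
    	struct mydrv_ctx *ctx = crypto_tfm_ctx(tfm);	/* hypothetical */
    	int err;

    	/* rejects K1 == K2 or K2 == K3 (RFC 2451) before the key is used */
    	err = crypto_des3_ede_verify_key(tfm, key);
    	if (err)
    		return err;

    	memcpy(ctx->key, key, keylen);	/* the hardware expands the key */
    	return 0;
    }
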
Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/des_generic.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'crypto') diff --git a/crypto/des_generic.c b/crypto/des_generic.c index dc085514..c4d8ecda 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -841,19 +841,6 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) d[1] = cpu_to_le32(L); } -/* - * RFC2451: - * - * For DES-EDE3, there is no known need to reject weak or - * complementation keys. Any weakness is obviated by the use of - * multiple keys. - * - * However, if the first two or last two independent 64-bit keys are - * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the - * same as DES. Implementers MUST reject keys that exhibit this - * property. - * - */ int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, unsigned int keylen) { -- cgit v1.2.3 From 52c062b6bb825a5232b221f3aefef7561b2fafc5 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:07 +0300 Subject: crypto: 3des - move verification out of exported routine In preparation of moving the shared key expansion routine into the DES library, move the verification done by __des3_ede_setkey() into its callers. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/des_generic.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/des_generic.c b/crypto/des_generic.c index c4d8ecda..f15ae766 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -846,10 +846,6 @@ int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, { int err; - err = __des3_verify_key(flags, key); - if (unlikely(err)) - return err; - des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; des_ekey(expkey, key); @@ -862,8 +858,12 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; u32 *expkey = dctx->expkey; + int err; + + err = crypto_des3_ede_verify_key(tfm, key); + if (err) + return err; return __des3_ede_setkey(expkey, flags, key, keylen); } -- cgit v1.2.3 From a041b0253c4c8efcd7984e27a82fa6fa751a58e9 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:09 +0300 Subject: crypto: des - split off DES library from generic DES cipher driver Another one for the cipher museum: split off DES core processing into a separate module so other drivers (mostly for crypto accelerators) can reuse the code without pulling in the generic DES cipher itself. This will also permit the cipher interface to be made private to the crypto API itself once we move the only user in the kernel (CIFS) to this library interface. Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 +- crypto/des_generic.c | 917 +++------------------------------------------------ 2 files changed, 49 insertions(+), 876 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 455a3354..42a17fe9 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1306,9 +1306,13 @@ config CRYPTO_CAST6_AVX_X86_64 This module provides the Cast6 cipher algorithm that processes eight blocks parallel using the AVX instruction set. 
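+# Promptless library symbol: it is only built when another option selects
+# it, so accelerator drivers can reuse the DES routines without pulling in
+# the generic cipher.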
+config CRYPTO_LIB_DES + tristate + config CRYPTO_DES tristate "DES and Triple DES EDE cipher algorithms" select CRYPTO_ALGAPI + select CRYPTO_LIB_DES help DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). @@ -1316,7 +1320,7 @@ config CRYPTO_DES_SPARC64 tristate "DES and Triple DES EDE cipher algorithms (SPARC64)" depends on SPARC64 select CRYPTO_ALGAPI - select CRYPTO_DES + select CRYPTO_LIB_DES help DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3), optimized using SPARC64 crypto opcodes. @@ -1325,7 +1329,7 @@ config CRYPTO_DES3_EDE_X86_64 tristate "Triple DES EDE cipher algorithm (x86-64)" depends on X86 && 64BIT select CRYPTO_BLKCIPHER - select CRYPTO_DES + select CRYPTO_LIB_DES help Triple DES EDE (FIPS 46-3) algorithm. diff --git a/crypto/des_generic.c b/crypto/des_generic.c index f15ae766..e021a321 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -13,832 +13,42 @@ #include #include #include -#include -#include - -#define ROL(x, r) ((x) = rol32((x), (r))) -#define ROR(x, r) ((x) = ror32((x), (r))) - -struct des_ctx { - u32 expkey[DES_EXPKEY_WORDS]; -}; - -struct des3_ede_ctx { - u32 expkey[DES3_EDE_EXPKEY_WORDS]; -}; - -/* Lookup tables for key expansion */ - -static const u8 pc1[256] = { - 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14, - 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54, - 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16, - 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56, - 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c, - 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c, - 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e, - 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e, - 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34, - 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74, - 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36, - 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76, - 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c, - 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c, - 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e, - 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e, - 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94, - 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4, - 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96, - 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6, - 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c, - 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc, - 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e, - 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde, - 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4, - 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4, - 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6, - 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6, - 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc, - 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc, - 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe, - 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe -}; - -static const u8 rs[256] = { - 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82, - 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86, - 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a, - 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e, - 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92, - 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96, - 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a, - 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e, - 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2, - 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6, - 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa, - 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae, - 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2, - 0x34, 
0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6, - 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba, - 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe, - 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2, - 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6, - 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca, - 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce, - 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2, - 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6, - 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda, - 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde, - 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2, - 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6, - 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea, - 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee, - 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2, - 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6, - 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa, - 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe -}; - -static const u32 pc2[1024] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00040000, 0x00000000, 0x04000000, 0x00100000, - 0x00400000, 0x00000008, 0x00000800, 0x40000000, - 0x00440000, 0x00000008, 0x04000800, 0x40100000, - 0x00000400, 0x00000020, 0x08000000, 0x00000100, - 0x00040400, 0x00000020, 0x0c000000, 0x00100100, - 0x00400400, 0x00000028, 0x08000800, 0x40000100, - 0x00440400, 0x00000028, 0x0c000800, 0x40100100, - 0x80000000, 0x00000010, 0x00000000, 0x00800000, - 0x80040000, 0x00000010, 0x04000000, 0x00900000, - 0x80400000, 0x00000018, 0x00000800, 0x40800000, - 0x80440000, 0x00000018, 0x04000800, 0x40900000, - 0x80000400, 0x00000030, 0x08000000, 0x00800100, - 0x80040400, 0x00000030, 0x0c000000, 0x00900100, - 0x80400400, 0x00000038, 0x08000800, 0x40800100, - 0x80440400, 0x00000038, 0x0c000800, 0x40900100, - 0x10000000, 0x00000000, 0x00200000, 0x00001000, - 0x10040000, 0x00000000, 0x04200000, 0x00101000, - 0x10400000, 0x00000008, 0x00200800, 0x40001000, - 0x10440000, 0x00000008, 0x04200800, 0x40101000, - 0x10000400, 0x00000020, 0x08200000, 0x00001100, - 0x10040400, 0x00000020, 0x0c200000, 0x00101100, - 0x10400400, 0x00000028, 0x08200800, 0x40001100, - 0x10440400, 0x00000028, 0x0c200800, 0x40101100, - 0x90000000, 0x00000010, 0x00200000, 0x00801000, - 0x90040000, 0x00000010, 0x04200000, 0x00901000, - 0x90400000, 0x00000018, 0x00200800, 0x40801000, - 0x90440000, 0x00000018, 0x04200800, 0x40901000, - 0x90000400, 0x00000030, 0x08200000, 0x00801100, - 0x90040400, 0x00000030, 0x0c200000, 0x00901100, - 0x90400400, 0x00000038, 0x08200800, 0x40801100, - 0x90440400, 0x00000038, 0x0c200800, 0x40901100, - 0x00000200, 0x00080000, 0x00000000, 0x00000004, - 0x00040200, 0x00080000, 0x04000000, 0x00100004, - 0x00400200, 0x00080008, 0x00000800, 0x40000004, - 0x00440200, 0x00080008, 0x04000800, 0x40100004, - 0x00000600, 0x00080020, 0x08000000, 0x00000104, - 0x00040600, 0x00080020, 0x0c000000, 0x00100104, - 0x00400600, 0x00080028, 0x08000800, 0x40000104, - 0x00440600, 0x00080028, 0x0c000800, 0x40100104, - 0x80000200, 0x00080010, 0x00000000, 0x00800004, - 0x80040200, 0x00080010, 0x04000000, 0x00900004, - 0x80400200, 0x00080018, 0x00000800, 0x40800004, - 0x80440200, 0x00080018, 0x04000800, 0x40900004, - 0x80000600, 0x00080030, 0x08000000, 0x00800104, - 0x80040600, 0x00080030, 0x0c000000, 0x00900104, - 0x80400600, 0x00080038, 0x08000800, 0x40800104, - 0x80440600, 0x00080038, 0x0c000800, 0x40900104, - 0x10000200, 0x00080000, 0x00200000, 0x00001004, - 0x10040200, 0x00080000, 0x04200000, 0x00101004, - 0x10400200, 0x00080008, 0x00200800, 0x40001004, - 0x10440200, 0x00080008, 
0x04200800, 0x40101004, - 0x10000600, 0x00080020, 0x08200000, 0x00001104, - 0x10040600, 0x00080020, 0x0c200000, 0x00101104, - 0x10400600, 0x00080028, 0x08200800, 0x40001104, - 0x10440600, 0x00080028, 0x0c200800, 0x40101104, - 0x90000200, 0x00080010, 0x00200000, 0x00801004, - 0x90040200, 0x00080010, 0x04200000, 0x00901004, - 0x90400200, 0x00080018, 0x00200800, 0x40801004, - 0x90440200, 0x00080018, 0x04200800, 0x40901004, - 0x90000600, 0x00080030, 0x08200000, 0x00801104, - 0x90040600, 0x00080030, 0x0c200000, 0x00901104, - 0x90400600, 0x00080038, 0x08200800, 0x40801104, - 0x90440600, 0x00080038, 0x0c200800, 0x40901104, - 0x00000002, 0x00002000, 0x20000000, 0x00000001, - 0x00040002, 0x00002000, 0x24000000, 0x00100001, - 0x00400002, 0x00002008, 0x20000800, 0x40000001, - 0x00440002, 0x00002008, 0x24000800, 0x40100001, - 0x00000402, 0x00002020, 0x28000000, 0x00000101, - 0x00040402, 0x00002020, 0x2c000000, 0x00100101, - 0x00400402, 0x00002028, 0x28000800, 0x40000101, - 0x00440402, 0x00002028, 0x2c000800, 0x40100101, - 0x80000002, 0x00002010, 0x20000000, 0x00800001, - 0x80040002, 0x00002010, 0x24000000, 0x00900001, - 0x80400002, 0x00002018, 0x20000800, 0x40800001, - 0x80440002, 0x00002018, 0x24000800, 0x40900001, - 0x80000402, 0x00002030, 0x28000000, 0x00800101, - 0x80040402, 0x00002030, 0x2c000000, 0x00900101, - 0x80400402, 0x00002038, 0x28000800, 0x40800101, - 0x80440402, 0x00002038, 0x2c000800, 0x40900101, - 0x10000002, 0x00002000, 0x20200000, 0x00001001, - 0x10040002, 0x00002000, 0x24200000, 0x00101001, - 0x10400002, 0x00002008, 0x20200800, 0x40001001, - 0x10440002, 0x00002008, 0x24200800, 0x40101001, - 0x10000402, 0x00002020, 0x28200000, 0x00001101, - 0x10040402, 0x00002020, 0x2c200000, 0x00101101, - 0x10400402, 0x00002028, 0x28200800, 0x40001101, - 0x10440402, 0x00002028, 0x2c200800, 0x40101101, - 0x90000002, 0x00002010, 0x20200000, 0x00801001, - 0x90040002, 0x00002010, 0x24200000, 0x00901001, - 0x90400002, 0x00002018, 0x20200800, 0x40801001, - 0x90440002, 0x00002018, 0x24200800, 0x40901001, - 0x90000402, 0x00002030, 0x28200000, 0x00801101, - 0x90040402, 0x00002030, 0x2c200000, 0x00901101, - 0x90400402, 0x00002038, 0x28200800, 0x40801101, - 0x90440402, 0x00002038, 0x2c200800, 0x40901101, - 0x00000202, 0x00082000, 0x20000000, 0x00000005, - 0x00040202, 0x00082000, 0x24000000, 0x00100005, - 0x00400202, 0x00082008, 0x20000800, 0x40000005, - 0x00440202, 0x00082008, 0x24000800, 0x40100005, - 0x00000602, 0x00082020, 0x28000000, 0x00000105, - 0x00040602, 0x00082020, 0x2c000000, 0x00100105, - 0x00400602, 0x00082028, 0x28000800, 0x40000105, - 0x00440602, 0x00082028, 0x2c000800, 0x40100105, - 0x80000202, 0x00082010, 0x20000000, 0x00800005, - 0x80040202, 0x00082010, 0x24000000, 0x00900005, - 0x80400202, 0x00082018, 0x20000800, 0x40800005, - 0x80440202, 0x00082018, 0x24000800, 0x40900005, - 0x80000602, 0x00082030, 0x28000000, 0x00800105, - 0x80040602, 0x00082030, 0x2c000000, 0x00900105, - 0x80400602, 0x00082038, 0x28000800, 0x40800105, - 0x80440602, 0x00082038, 0x2c000800, 0x40900105, - 0x10000202, 0x00082000, 0x20200000, 0x00001005, - 0x10040202, 0x00082000, 0x24200000, 0x00101005, - 0x10400202, 0x00082008, 0x20200800, 0x40001005, - 0x10440202, 0x00082008, 0x24200800, 0x40101005, - 0x10000602, 0x00082020, 0x28200000, 0x00001105, - 0x10040602, 0x00082020, 0x2c200000, 0x00101105, - 0x10400602, 0x00082028, 0x28200800, 0x40001105, - 0x10440602, 0x00082028, 0x2c200800, 0x40101105, - 0x90000202, 0x00082010, 0x20200000, 0x00801005, - 0x90040202, 0x00082010, 0x24200000, 0x00901005, - 0x90400202, 0x00082018, 
0x20200800, 0x40801005, - 0x90440202, 0x00082018, 0x24200800, 0x40901005, - 0x90000602, 0x00082030, 0x28200000, 0x00801105, - 0x90040602, 0x00082030, 0x2c200000, 0x00901105, - 0x90400602, 0x00082038, 0x28200800, 0x40801105, - 0x90440602, 0x00082038, 0x2c200800, 0x40901105, - - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000008, 0x00080000, 0x10000000, - 0x02000000, 0x00000000, 0x00000080, 0x00001000, - 0x02000000, 0x00000008, 0x00080080, 0x10001000, - 0x00004000, 0x00000000, 0x00000040, 0x00040000, - 0x00004000, 0x00000008, 0x00080040, 0x10040000, - 0x02004000, 0x00000000, 0x000000c0, 0x00041000, - 0x02004000, 0x00000008, 0x000800c0, 0x10041000, - 0x00020000, 0x00008000, 0x08000000, 0x00200000, - 0x00020000, 0x00008008, 0x08080000, 0x10200000, - 0x02020000, 0x00008000, 0x08000080, 0x00201000, - 0x02020000, 0x00008008, 0x08080080, 0x10201000, - 0x00024000, 0x00008000, 0x08000040, 0x00240000, - 0x00024000, 0x00008008, 0x08080040, 0x10240000, - 0x02024000, 0x00008000, 0x080000c0, 0x00241000, - 0x02024000, 0x00008008, 0x080800c0, 0x10241000, - 0x00000000, 0x01000000, 0x00002000, 0x00000020, - 0x00000000, 0x01000008, 0x00082000, 0x10000020, - 0x02000000, 0x01000000, 0x00002080, 0x00001020, - 0x02000000, 0x01000008, 0x00082080, 0x10001020, - 0x00004000, 0x01000000, 0x00002040, 0x00040020, - 0x00004000, 0x01000008, 0x00082040, 0x10040020, - 0x02004000, 0x01000000, 0x000020c0, 0x00041020, - 0x02004000, 0x01000008, 0x000820c0, 0x10041020, - 0x00020000, 0x01008000, 0x08002000, 0x00200020, - 0x00020000, 0x01008008, 0x08082000, 0x10200020, - 0x02020000, 0x01008000, 0x08002080, 0x00201020, - 0x02020000, 0x01008008, 0x08082080, 0x10201020, - 0x00024000, 0x01008000, 0x08002040, 0x00240020, - 0x00024000, 0x01008008, 0x08082040, 0x10240020, - 0x02024000, 0x01008000, 0x080020c0, 0x00241020, - 0x02024000, 0x01008008, 0x080820c0, 0x10241020, - 0x00000400, 0x04000000, 0x00100000, 0x00000004, - 0x00000400, 0x04000008, 0x00180000, 0x10000004, - 0x02000400, 0x04000000, 0x00100080, 0x00001004, - 0x02000400, 0x04000008, 0x00180080, 0x10001004, - 0x00004400, 0x04000000, 0x00100040, 0x00040004, - 0x00004400, 0x04000008, 0x00180040, 0x10040004, - 0x02004400, 0x04000000, 0x001000c0, 0x00041004, - 0x02004400, 0x04000008, 0x001800c0, 0x10041004, - 0x00020400, 0x04008000, 0x08100000, 0x00200004, - 0x00020400, 0x04008008, 0x08180000, 0x10200004, - 0x02020400, 0x04008000, 0x08100080, 0x00201004, - 0x02020400, 0x04008008, 0x08180080, 0x10201004, - 0x00024400, 0x04008000, 0x08100040, 0x00240004, - 0x00024400, 0x04008008, 0x08180040, 0x10240004, - 0x02024400, 0x04008000, 0x081000c0, 0x00241004, - 0x02024400, 0x04008008, 0x081800c0, 0x10241004, - 0x00000400, 0x05000000, 0x00102000, 0x00000024, - 0x00000400, 0x05000008, 0x00182000, 0x10000024, - 0x02000400, 0x05000000, 0x00102080, 0x00001024, - 0x02000400, 0x05000008, 0x00182080, 0x10001024, - 0x00004400, 0x05000000, 0x00102040, 0x00040024, - 0x00004400, 0x05000008, 0x00182040, 0x10040024, - 0x02004400, 0x05000000, 0x001020c0, 0x00041024, - 0x02004400, 0x05000008, 0x001820c0, 0x10041024, - 0x00020400, 0x05008000, 0x08102000, 0x00200024, - 0x00020400, 0x05008008, 0x08182000, 0x10200024, - 0x02020400, 0x05008000, 0x08102080, 0x00201024, - 0x02020400, 0x05008008, 0x08182080, 0x10201024, - 0x00024400, 0x05008000, 0x08102040, 0x00240024, - 0x00024400, 0x05008008, 0x08182040, 0x10240024, - 0x02024400, 0x05008000, 0x081020c0, 0x00241024, - 0x02024400, 0x05008008, 0x081820c0, 0x10241024, - 0x00000800, 0x00010000, 0x20000000, 0x00000010, - 0x00000800, 0x00010008, 
0x20080000, 0x10000010, - 0x02000800, 0x00010000, 0x20000080, 0x00001010, - 0x02000800, 0x00010008, 0x20080080, 0x10001010, - 0x00004800, 0x00010000, 0x20000040, 0x00040010, - 0x00004800, 0x00010008, 0x20080040, 0x10040010, - 0x02004800, 0x00010000, 0x200000c0, 0x00041010, - 0x02004800, 0x00010008, 0x200800c0, 0x10041010, - 0x00020800, 0x00018000, 0x28000000, 0x00200010, - 0x00020800, 0x00018008, 0x28080000, 0x10200010, - 0x02020800, 0x00018000, 0x28000080, 0x00201010, - 0x02020800, 0x00018008, 0x28080080, 0x10201010, - 0x00024800, 0x00018000, 0x28000040, 0x00240010, - 0x00024800, 0x00018008, 0x28080040, 0x10240010, - 0x02024800, 0x00018000, 0x280000c0, 0x00241010, - 0x02024800, 0x00018008, 0x280800c0, 0x10241010, - 0x00000800, 0x01010000, 0x20002000, 0x00000030, - 0x00000800, 0x01010008, 0x20082000, 0x10000030, - 0x02000800, 0x01010000, 0x20002080, 0x00001030, - 0x02000800, 0x01010008, 0x20082080, 0x10001030, - 0x00004800, 0x01010000, 0x20002040, 0x00040030, - 0x00004800, 0x01010008, 0x20082040, 0x10040030, - 0x02004800, 0x01010000, 0x200020c0, 0x00041030, - 0x02004800, 0x01010008, 0x200820c0, 0x10041030, - 0x00020800, 0x01018000, 0x28002000, 0x00200030, - 0x00020800, 0x01018008, 0x28082000, 0x10200030, - 0x02020800, 0x01018000, 0x28002080, 0x00201030, - 0x02020800, 0x01018008, 0x28082080, 0x10201030, - 0x00024800, 0x01018000, 0x28002040, 0x00240030, - 0x00024800, 0x01018008, 0x28082040, 0x10240030, - 0x02024800, 0x01018000, 0x280020c0, 0x00241030, - 0x02024800, 0x01018008, 0x280820c0, 0x10241030, - 0x00000c00, 0x04010000, 0x20100000, 0x00000014, - 0x00000c00, 0x04010008, 0x20180000, 0x10000014, - 0x02000c00, 0x04010000, 0x20100080, 0x00001014, - 0x02000c00, 0x04010008, 0x20180080, 0x10001014, - 0x00004c00, 0x04010000, 0x20100040, 0x00040014, - 0x00004c00, 0x04010008, 0x20180040, 0x10040014, - 0x02004c00, 0x04010000, 0x201000c0, 0x00041014, - 0x02004c00, 0x04010008, 0x201800c0, 0x10041014, - 0x00020c00, 0x04018000, 0x28100000, 0x00200014, - 0x00020c00, 0x04018008, 0x28180000, 0x10200014, - 0x02020c00, 0x04018000, 0x28100080, 0x00201014, - 0x02020c00, 0x04018008, 0x28180080, 0x10201014, - 0x00024c00, 0x04018000, 0x28100040, 0x00240014, - 0x00024c00, 0x04018008, 0x28180040, 0x10240014, - 0x02024c00, 0x04018000, 0x281000c0, 0x00241014, - 0x02024c00, 0x04018008, 0x281800c0, 0x10241014, - 0x00000c00, 0x05010000, 0x20102000, 0x00000034, - 0x00000c00, 0x05010008, 0x20182000, 0x10000034, - 0x02000c00, 0x05010000, 0x20102080, 0x00001034, - 0x02000c00, 0x05010008, 0x20182080, 0x10001034, - 0x00004c00, 0x05010000, 0x20102040, 0x00040034, - 0x00004c00, 0x05010008, 0x20182040, 0x10040034, - 0x02004c00, 0x05010000, 0x201020c0, 0x00041034, - 0x02004c00, 0x05010008, 0x201820c0, 0x10041034, - 0x00020c00, 0x05018000, 0x28102000, 0x00200034, - 0x00020c00, 0x05018008, 0x28182000, 0x10200034, - 0x02020c00, 0x05018000, 0x28102080, 0x00201034, - 0x02020c00, 0x05018008, 0x28182080, 0x10201034, - 0x00024c00, 0x05018000, 0x28102040, 0x00240034, - 0x00024c00, 0x05018008, 0x28182040, 0x10240034, - 0x02024c00, 0x05018000, 0x281020c0, 0x00241034, - 0x02024c00, 0x05018008, 0x281820c0, 0x10241034 -}; - -/* S-box lookup tables */ - -static const u32 S1[64] = { - 0x01010400, 0x00000000, 0x00010000, 0x01010404, - 0x01010004, 0x00010404, 0x00000004, 0x00010000, - 0x00000400, 0x01010400, 0x01010404, 0x00000400, - 0x01000404, 0x01010004, 0x01000000, 0x00000004, - 0x00000404, 0x01000400, 0x01000400, 0x00010400, - 0x00010400, 0x01010000, 0x01010000, 0x01000404, - 0x00010004, 0x01000004, 0x01000004, 0x00010004, - 0x00000000, 
0x00000404, 0x00010404, 0x01000000, - 0x00010000, 0x01010404, 0x00000004, 0x01010000, - 0x01010400, 0x01000000, 0x01000000, 0x00000400, - 0x01010004, 0x00010000, 0x00010400, 0x01000004, - 0x00000400, 0x00000004, 0x01000404, 0x00010404, - 0x01010404, 0x00010004, 0x01010000, 0x01000404, - 0x01000004, 0x00000404, 0x00010404, 0x01010400, - 0x00000404, 0x01000400, 0x01000400, 0x00000000, - 0x00010004, 0x00010400, 0x00000000, 0x01010004 -}; - -static const u32 S2[64] = { - 0x80108020, 0x80008000, 0x00008000, 0x00108020, - 0x00100000, 0x00000020, 0x80100020, 0x80008020, - 0x80000020, 0x80108020, 0x80108000, 0x80000000, - 0x80008000, 0x00100000, 0x00000020, 0x80100020, - 0x00108000, 0x00100020, 0x80008020, 0x00000000, - 0x80000000, 0x00008000, 0x00108020, 0x80100000, - 0x00100020, 0x80000020, 0x00000000, 0x00108000, - 0x00008020, 0x80108000, 0x80100000, 0x00008020, - 0x00000000, 0x00108020, 0x80100020, 0x00100000, - 0x80008020, 0x80100000, 0x80108000, 0x00008000, - 0x80100000, 0x80008000, 0x00000020, 0x80108020, - 0x00108020, 0x00000020, 0x00008000, 0x80000000, - 0x00008020, 0x80108000, 0x00100000, 0x80000020, - 0x00100020, 0x80008020, 0x80000020, 0x00100020, - 0x00108000, 0x00000000, 0x80008000, 0x00008020, - 0x80000000, 0x80100020, 0x80108020, 0x00108000 -}; - -static const u32 S3[64] = { - 0x00000208, 0x08020200, 0x00000000, 0x08020008, - 0x08000200, 0x00000000, 0x00020208, 0x08000200, - 0x00020008, 0x08000008, 0x08000008, 0x00020000, - 0x08020208, 0x00020008, 0x08020000, 0x00000208, - 0x08000000, 0x00000008, 0x08020200, 0x00000200, - 0x00020200, 0x08020000, 0x08020008, 0x00020208, - 0x08000208, 0x00020200, 0x00020000, 0x08000208, - 0x00000008, 0x08020208, 0x00000200, 0x08000000, - 0x08020200, 0x08000000, 0x00020008, 0x00000208, - 0x00020000, 0x08020200, 0x08000200, 0x00000000, - 0x00000200, 0x00020008, 0x08020208, 0x08000200, - 0x08000008, 0x00000200, 0x00000000, 0x08020008, - 0x08000208, 0x00020000, 0x08000000, 0x08020208, - 0x00000008, 0x00020208, 0x00020200, 0x08000008, - 0x08020000, 0x08000208, 0x00000208, 0x08020000, - 0x00020208, 0x00000008, 0x08020008, 0x00020200 -}; - -static const u32 S4[64] = { - 0x00802001, 0x00002081, 0x00002081, 0x00000080, - 0x00802080, 0x00800081, 0x00800001, 0x00002001, - 0x00000000, 0x00802000, 0x00802000, 0x00802081, - 0x00000081, 0x00000000, 0x00800080, 0x00800001, - 0x00000001, 0x00002000, 0x00800000, 0x00802001, - 0x00000080, 0x00800000, 0x00002001, 0x00002080, - 0x00800081, 0x00000001, 0x00002080, 0x00800080, - 0x00002000, 0x00802080, 0x00802081, 0x00000081, - 0x00800080, 0x00800001, 0x00802000, 0x00802081, - 0x00000081, 0x00000000, 0x00000000, 0x00802000, - 0x00002080, 0x00800080, 0x00800081, 0x00000001, - 0x00802001, 0x00002081, 0x00002081, 0x00000080, - 0x00802081, 0x00000081, 0x00000001, 0x00002000, - 0x00800001, 0x00002001, 0x00802080, 0x00800081, - 0x00002001, 0x00002080, 0x00800000, 0x00802001, - 0x00000080, 0x00800000, 0x00002000, 0x00802080 -}; - -static const u32 S5[64] = { - 0x00000100, 0x02080100, 0x02080000, 0x42000100, - 0x00080000, 0x00000100, 0x40000000, 0x02080000, - 0x40080100, 0x00080000, 0x02000100, 0x40080100, - 0x42000100, 0x42080000, 0x00080100, 0x40000000, - 0x02000000, 0x40080000, 0x40080000, 0x00000000, - 0x40000100, 0x42080100, 0x42080100, 0x02000100, - 0x42080000, 0x40000100, 0x00000000, 0x42000000, - 0x02080100, 0x02000000, 0x42000000, 0x00080100, - 0x00080000, 0x42000100, 0x00000100, 0x02000000, - 0x40000000, 0x02080000, 0x42000100, 0x40080100, - 0x02000100, 0x40000000, 0x42080000, 0x02080100, - 0x40080100, 0x00000100, 
0x02000000, 0x42080000, - 0x42080100, 0x00080100, 0x42000000, 0x42080100, - 0x02080000, 0x00000000, 0x40080000, 0x42000000, - 0x00080100, 0x02000100, 0x40000100, 0x00080000, - 0x00000000, 0x40080000, 0x02080100, 0x40000100 -}; - -static const u32 S6[64] = { - 0x20000010, 0x20400000, 0x00004000, 0x20404010, - 0x20400000, 0x00000010, 0x20404010, 0x00400000, - 0x20004000, 0x00404010, 0x00400000, 0x20000010, - 0x00400010, 0x20004000, 0x20000000, 0x00004010, - 0x00000000, 0x00400010, 0x20004010, 0x00004000, - 0x00404000, 0x20004010, 0x00000010, 0x20400010, - 0x20400010, 0x00000000, 0x00404010, 0x20404000, - 0x00004010, 0x00404000, 0x20404000, 0x20000000, - 0x20004000, 0x00000010, 0x20400010, 0x00404000, - 0x20404010, 0x00400000, 0x00004010, 0x20000010, - 0x00400000, 0x20004000, 0x20000000, 0x00004010, - 0x20000010, 0x20404010, 0x00404000, 0x20400000, - 0x00404010, 0x20404000, 0x00000000, 0x20400010, - 0x00000010, 0x00004000, 0x20400000, 0x00404010, - 0x00004000, 0x00400010, 0x20004010, 0x00000000, - 0x20404000, 0x20000000, 0x00400010, 0x20004010 -}; - -static const u32 S7[64] = { - 0x00200000, 0x04200002, 0x04000802, 0x00000000, - 0x00000800, 0x04000802, 0x00200802, 0x04200800, - 0x04200802, 0x00200000, 0x00000000, 0x04000002, - 0x00000002, 0x04000000, 0x04200002, 0x00000802, - 0x04000800, 0x00200802, 0x00200002, 0x04000800, - 0x04000002, 0x04200000, 0x04200800, 0x00200002, - 0x04200000, 0x00000800, 0x00000802, 0x04200802, - 0x00200800, 0x00000002, 0x04000000, 0x00200800, - 0x04000000, 0x00200800, 0x00200000, 0x04000802, - 0x04000802, 0x04200002, 0x04200002, 0x00000002, - 0x00200002, 0x04000000, 0x04000800, 0x00200000, - 0x04200800, 0x00000802, 0x00200802, 0x04200800, - 0x00000802, 0x04000002, 0x04200802, 0x04200000, - 0x00200800, 0x00000000, 0x00000002, 0x04200802, - 0x00000000, 0x00200802, 0x04200000, 0x00000800, - 0x04000002, 0x04000800, 0x00000800, 0x00200002 -}; - -static const u32 S8[64] = { - 0x10001040, 0x00001000, 0x00040000, 0x10041040, - 0x10000000, 0x10001040, 0x00000040, 0x10000000, - 0x00040040, 0x10040000, 0x10041040, 0x00041000, - 0x10041000, 0x00041040, 0x00001000, 0x00000040, - 0x10040000, 0x10000040, 0x10001000, 0x00001040, - 0x00041000, 0x00040040, 0x10040040, 0x10041000, - 0x00001040, 0x00000000, 0x00000000, 0x10040040, - 0x10000040, 0x10001000, 0x00041040, 0x00040000, - 0x00041040, 0x00040000, 0x10041000, 0x00001000, - 0x00000040, 0x10040040, 0x00001000, 0x00041040, - 0x10001000, 0x00000040, 0x10000040, 0x10040000, - 0x10040040, 0x10000000, 0x00040000, 0x10001040, - 0x00000000, 0x10041040, 0x00040040, 0x10000040, - 0x10040000, 0x10001000, 0x10001040, 0x00000000, - 0x10041040, 0x00041000, 0x00041000, 0x00001040, - 0x00001040, 0x00040040, 0x10000000, 0x10041000 -}; - -/* Encryption components: IP, FP, and round function */ - -#define IP(L, R, T) \ - ROL(R, 4); \ - T = L; \ - L ^= R; \ - L &= 0xf0f0f0f0; \ - R ^= L; \ - L ^= T; \ - ROL(R, 12); \ - T = L; \ - L ^= R; \ - L &= 0xffff0000; \ - R ^= L; \ - L ^= T; \ - ROR(R, 14); \ - T = L; \ - L ^= R; \ - L &= 0xcccccccc; \ - R ^= L; \ - L ^= T; \ - ROL(R, 6); \ - T = L; \ - L ^= R; \ - L &= 0xff00ff00; \ - R ^= L; \ - L ^= T; \ - ROR(R, 7); \ - T = L; \ - L ^= R; \ - L &= 0xaaaaaaaa; \ - R ^= L; \ - L ^= T; \ - ROL(L, 1); - -#define FP(L, R, T) \ - ROR(L, 1); \ - T = L; \ - L ^= R; \ - L &= 0xaaaaaaaa; \ - R ^= L; \ - L ^= T; \ - ROL(R, 7); \ - T = L; \ - L ^= R; \ - L &= 0xff00ff00; \ - R ^= L; \ - L ^= T; \ - ROR(R, 6); \ - T = L; \ - L ^= R; \ - L &= 0xcccccccc; \ - R ^= L; \ - L ^= T; \ - ROL(R, 14); \ - T = L; \ - L ^= R; 
\ - L &= 0xffff0000; \ - R ^= L; \ - L ^= T; \ - ROR(R, 12); \ - T = L; \ - L ^= R; \ - L &= 0xf0f0f0f0; \ - R ^= L; \ - L ^= T; \ - ROR(R, 4); - -#define ROUND(L, R, A, B, K, d) \ - B = K[0]; A = K[1]; K += d; \ - B ^= R; A ^= R; \ - B &= 0x3f3f3f3f; ROR(A, 4); \ - L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \ - L ^= S6[0xff & (B >> 8)]; B >>= 16; \ - L ^= S7[0xff & A]; \ - L ^= S5[0xff & (A >> 8)]; A >>= 16; \ - L ^= S4[0xff & B]; \ - L ^= S2[0xff & (B >> 8)]; \ - L ^= S3[0xff & A]; \ - L ^= S1[0xff & (A >> 8)]; - -/* - * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved - * tables of 128 elements. One set is for C_i and the other for D_i, while - * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i. - * - * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i - * or D_i in bits 7-1 (bit 0 being the least significant). - */ - -#define T1(x) pt[2 * (x) + 0] -#define T2(x) pt[2 * (x) + 1] -#define T3(x) pt[2 * (x) + 2] -#define T4(x) pt[2 * (x) + 3] - -#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) - -/* - * Encryption key expansion - * - * RFC2451: Weak key checks SHOULD be performed. - * - * FIPS 74: - * - * Keys having duals are keys which produce all zeros, all ones, or - * alternating zero-one patterns in the C and D registers after Permuted - * Choice 1 has operated on the key. - * - */ -unsigned long des_ekey(u32 *pe, const u8 *k) -{ - /* K&R: long is at least 32 bits */ - unsigned long a, b, c, d, w; - const u32 *pt = pc2; - - d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; - c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; - b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; - a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; - - pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; - pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; - pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; - pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); - - /* Check if first half is weak */ - w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); - - /* Skip to next table set */ - pt += 512; - - d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; - c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; - b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; - a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; - - /* Check if second half is weak */ - w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); - - pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; - pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = 
rs[c]; b = rs[b]; - pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; - pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; - pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); - - /* Fixup: 2413 5768 -> 1357 2468 */ - for (d = 0; d < 16; ++d) { - a = pe[2 * d]; - b = pe[2 * d + 1]; - c = a ^ b; - c &= 0xffff0000; - a ^= c; - b ^= c; - ROL(b, 18); - pe[2 * d] = a; - pe[2 * d + 1] = b; - } - - /* Zero if weak key */ - return w; -} -EXPORT_SYMBOL_GPL(des_ekey); - -/* - * Decryption key expansion - * - * No weak key checking is performed, as this is only used by triple DES - * - */ -static void dkey(u32 *pe, const u8 *k) -{ - /* K&R: long is at least 32 bits */ - unsigned long a, b, c, d; - const u32 *pt = pc2; - - d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; - c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; - b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; - a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; - - pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; - pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; - pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; - pe[15 * 2] = DES_PC2(b, c, d, a); - - /* Skip to next table set */ - pt += 512; - - d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; - c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; - b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; - a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; - - pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; - pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; - pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; - pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; - pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; - pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; - pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; - pe[15 * 2 + 1] = DES_PC2(b, c, d, a); - - /* Fixup: 2413 5768 -> 1357 2468 */ - 
for (d = 0; d < 16; ++d) { - a = pe[2 * d]; - b = pe[2 * d + 1]; - c = a ^ b; - c &= 0xffff0000; - a ^= c; - b ^= c; - ROL(b, 18); - pe[2 * d] = a; - pe[2 * d + 1] = b; - } -} +#include static int des_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des_ctx *dctx = crypto_tfm_ctx(tfm); - u32 *flags = &tfm->crt_flags; - u32 tmp[DES_EXPKEY_WORDS]; - int ret; - - /* Expand to tmp */ - ret = des_ekey(tmp, key); + int err; - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { - *flags |= CRYPTO_TFM_RES_WEAK_KEY; - return -EINVAL; + err = des_expand_key(dctx, key, keylen); + if (err == -ENOKEY) { + if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) + err = -EINVAL; + else + err = 0; } - /* Copy to output */ - memcpy(dctx->expkey, tmp, sizeof(dctx->expkey)); - - return 0; + if (err) { + memset(dctx, 0, sizeof(*dctx)); + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + } + return err; } -static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +static void crypto_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct des_ctx *ctx = crypto_tfm_ctx(tfm); - const u32 *K = ctx->expkey; - const __le32 *s = (const __le32 *)src; - __le32 *d = (__le32 *)dst; - u32 L, R, A, B; - int i; - - L = le32_to_cpu(s[0]); - R = le32_to_cpu(s[1]); - - IP(L, R, A); - for (i = 0; i < 8; i++) { - ROUND(L, R, A, B, K, 2); - ROUND(R, L, A, B, K, 2); - } - FP(R, L, A); + const struct des_ctx *dctx = crypto_tfm_ctx(tfm); - d[0] = cpu_to_le32(R); - d[1] = cpu_to_le32(L); + des_encrypt(dctx, dst, src); } -static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +static void crypto_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct des_ctx *ctx = crypto_tfm_ctx(tfm); - const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2; - const __le32 *s = (const __le32 *)src; - __le32 *d = (__le32 *)dst; - u32 L, R, A, B; - int i; - - L = le32_to_cpu(s[0]); - R = le32_to_cpu(s[1]); + const struct des_ctx *dctx = crypto_tfm_ctx(tfm); - IP(L, R, A); - for (i = 0; i < 8; i++) { - ROUND(L, R, A, B, K, -2); - ROUND(R, L, A, B, K, -2); - } - FP(R, L, A); - - d[0] = cpu_to_le32(R); - d[1] = cpu_to_le32(L); + des_decrypt(dctx, dst, src); } int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, @@ -858,76 +68,37 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); - u32 *expkey = dctx->expkey; int err; - err = crypto_des3_ede_verify_key(tfm, key); - if (err) - return err; + err = des3_ede_expand_key(dctx, key, keylen); + if (err == -ENOKEY) { + if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) + err = -EINVAL; + else + err = 0; + } - return __des3_ede_setkey(expkey, flags, key, keylen); + if (err) { + memset(dctx, 0, sizeof(*dctx)); + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + } + return err; } -static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +static void crypto_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, + const u8 *src) { - struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); - const u32 *K = dctx->expkey; - const __le32 *s = (const __le32 *)src; - __le32 *d = (__le32 *)dst; - u32 L, R, A, B; - int i; - - L = le32_to_cpu(s[0]); - R = le32_to_cpu(s[1]); + const struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); - IP(L, R, A); - for (i = 0; i < 8; i++) { - ROUND(L, R, A, B, K, 2); - ROUND(R, L, A, B, K, 2); - } - for (i = 0; i < 8; i++) { - ROUND(R, L, A, B, K, 2); - ROUND(L, 
R, A, B, K, 2); - ROUND(R, L, A, B, K, 2); - } - for (i = 0; i < 8; i++) { - ROUND(R, L, A, B, K, 2); - ROUND(L, R, A, B, K, 2); - } - for (i = 0; i < 8; i++) { - ROUND(L, R, A, B, K, 2); - ROUND(R, L, A, B, K, 2); - } - FP(R, L, A); - - d[0] = cpu_to_le32(R); - d[1] = cpu_to_le32(L); + des3_ede_encrypt(dctx, dst, src); } -static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +static void crypto_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, + const u8 *src) { - struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); - const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; - const __le32 *s = (const __le32 *)src; - __le32 *d = (__le32 *)dst; - u32 L, R, A, B; - int i; - - L = le32_to_cpu(s[0]); - R = le32_to_cpu(s[1]); - - IP(L, R, A); - for (i = 0; i < 8; i++) { - ROUND(L, R, A, B, K, -2); - ROUND(R, L, A, B, K, -2); - } - for (i = 0; i < 8; i++) { - ROUND(R, L, A, B, K, -2); - ROUND(L, R, A, B, K, -2); - } - for (i = 0; i < 8; i++) { - ROUND(L, R, A, B, K, -2); - ROUND(R, L, A, B, K, -2); - } - FP(R, L, A); + const struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); - d[0] = cpu_to_le32(R); - d[1] = cpu_to_le32(L); + des3_ede_decrypt(dctx, dst, src); } static struct crypto_alg des_algs[2] = { { @@ -938,13 +109,12 @@ static struct crypto_alg des_algs[2] = { { .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des_ctx), .cra_module = THIS_MODULE, - .cra_alignmask = 3, .cra_u = { .cipher = { .cia_min_keysize = DES_KEY_SIZE, .cia_max_keysize = DES_KEY_SIZE, .cia_setkey = des_setkey, - .cia_encrypt = des_encrypt, - .cia_decrypt = des_decrypt } } + .cia_encrypt = crypto_des_encrypt, + .cia_decrypt = crypto_des_decrypt } } }, { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-generic", @@ -953,13 +123,12 @@ static struct crypto_alg des_algs[2] = { { .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_ctx), .cra_module = THIS_MODULE, - .cra_alignmask = 3, .cra_u = { .cipher = { .cia_min_keysize = DES3_EDE_KEY_SIZE, .cia_max_keysize = DES3_EDE_KEY_SIZE, .cia_setkey = des3_ede_setkey, - .cia_encrypt = des3_ede_encrypt, - .cia_decrypt = des3_ede_decrypt } } + .cia_encrypt = crypto_des3_ede_encrypt, + .cia_decrypt = crypto_des3_ede_decrypt } } } }; static int __init des_generic_mod_init(void) -- cgit v1.2.3 From d570091ef4c0c5096f898f9ce3cff964c96dd2ae Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 15 Aug 2019 12:01:11 +0300 Subject: crypto: des - remove now unused __des3_ede_setkey() Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/des_generic.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'crypto') diff --git a/crypto/des_generic.c b/crypto/des_generic.c index e021a321..6e13a4a2 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -51,19 +51,6 @@ static void crypto_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) des_decrypt(dctx, dst, src); } -int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, - unsigned int keylen) -{ - int err; - - des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; - dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; - des_ekey(expkey, key); - - return 0; -} -EXPORT_SYMBOL_GPL(__des3_ede_setkey); - static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { -- cgit v1.2.3 From dc1ce1fd5002bbca4004e757b6ff857499fd90e7 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 17 Aug 2019 16:24:30 +0200 Subject: crypto: sha256_generic - Fix some coding style issues Add a bunch of missing spaces after commas and around operators.
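For instance, one round of sha256_transform is reformatted like this (the before/after pair is taken from the diff below and is shown here purely to illustrate the spacing rules; there is no functional change):
Before: t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
After: t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;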
Note the main goal of this is to make sha256_transform and its helpers identical in formatting to the duplicate implementation in lib/sha256.c, so that "diff -u" can be used to compare them to prove that no functional changes are made when further patches in this series consolidate the 2 implementations into 1. Signed-off-by: Hans de Goede Signed-off-by: Herbert Xu --- crypto/sha256_generic.c | 268 ++++++++++++++++++++++++------------------------ 1 file changed, 134 insertions(+), 134 deletions(-) (limited to 'crypto') diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index b7502a96..dac930ca 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -48,10 +48,10 @@ static inline u32 Maj(u32 x, u32 y, u32 z) return (x & y) | (z & (x | y)); } -#define e0(x) (ror32(x, 2) ^ ror32(x,13) ^ ror32(x,22)) -#define e1(x) (ror32(x, 6) ^ ror32(x,11) ^ ror32(x,25)) -#define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3)) -#define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10)) +#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) +#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) +#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) +#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) static inline void LOAD_OP(int I, u32 *W, const u8 *input) { @@ -78,145 +78,145 @@ static void sha256_transform(u32 *state, const u8 *input) BLEND_OP(i, W); /* load the state into our registers */ - a=state[0]; b=state[1]; c=state[2]; d=state[3]; - e=state[4]; f=state[5]; g=state[6]; h=state[7]; + a = state[0]; b = state[1]; c = state[2]; d = state[3]; + e = state[4]; f = state[5]; g = state[6]; h = state[7]; /* now iterate */ - t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0x3956c25b + W[ 4]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) +
0x550c7dc3 + W[11]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0x72be5d74 + W[12]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0x240ca1cc + W[19]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0x2de92c6f + W[20]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0xbf597fc7 + W[27]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0xc6e00bf3 + W[28]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + 
W[31]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0x53380d13 + W[35]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0x650a7354 + W[36]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0xc76c51a3 + W[43]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0xd192e819 + W[44]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, 
c, d) + 0xc76c51a3 + W[43]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0x34b0bcb5 + W[51]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0x391c0cb3 + W[52]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; + t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56]; - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57]; - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58]; - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + 0x8cc70208 + W[59]; - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + 0x90befffa + W[60]; - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61]; - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62]; - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63]; - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; + t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56]; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; + t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57]; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; + t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58]; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; + t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59]; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; + t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60]; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; + t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; + t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62]; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + 
t2; + t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63]; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; -- cgit v1.2.3 From 3a4db3b2cce206ad17b4c80ed9f2f71eef23f9d9 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 17 Aug 2019 16:24:33 +0200 Subject: crypto: sha256 - Make lib/crypto/sha256.c suitable for generic use Before this commit lib/crypto/sha256.c has only been used in the s390 and x86 purgatory code; make it suitable for generic use: * Export interesting symbols * Add -D__DISABLE_EXPORTS to CFLAGS_sha256.o for purgatory builds to avoid the exports for the purgatory builds * Add it to lib/crypto/Makefile and crypto/Kconfig Signed-off-by: Hans de Goede Signed-off-by: Herbert Xu --- crypto/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 42a17fe9..e96b321b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -849,6 +849,9 @@ config CRYPTO_SHA1_PPC_SPE SHA-1 secure hash standard (DFIPS 180-4) implemented using powerpc SPE SIMD instruction set. +config CRYPTO_LIB_SHA256 + tristate + config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_HASH -- cgit v1.2.3 From 736f76fe5199d7338e00785481d59bbe88d86eda Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 17 Aug 2019 16:24:35 +0200 Subject: crypto: sha256_generic - Switch to the generic lib/crypto/sha256.c lib code Drop the duplicate generic sha256 (and sha224) implementation from crypto/sha256_generic.c and use the implementation from lib/crypto/sha256.c instead. "diff -u lib/crypto/sha256.c sha256_generic.c" shows that the core sha256_transform function from both implementations is identical and the other code is functionally identical too. Suggested-by: Eric Biggers Signed-off-by: Hans de Goede Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/sha256_generic.c | 225 ++++-------------------------------------------- 2 files changed, 19 insertions(+), 207 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index e96b321b..ad86463d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -855,6 +855,7 @@ config CRYPTO_LIB_SHA256 config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" select CRYPTO_HASH + select CRYPTO_LIB_SHA256 help SHA256 secure hash standard (DFIPS 180-2). diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index dac930ca..eafd10f9 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Cryptographic API. - * - * SHA-256, as specified in - * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf - * - * SHA-256 code by Jean-Luc Cooke .
+ * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c * * Copyright (c) Jean-Luc Cooke * Copyright (c) Andrew McDonald @@ -18,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -38,229 +34,44 @@ const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { }; EXPORT_SYMBOL_GPL(sha256_zero_message_hash); -static inline u32 Ch(u32 x, u32 y, u32 z) -{ - return z ^ (x & (y ^ z)); -} - -static inline u32 Maj(u32 x, u32 y, u32 z) +static int crypto_sha256_init(struct shash_desc *desc) { - return (x & y) | (z & (x | y)); + return sha256_init(shash_desc_ctx(desc)); } -#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) -#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) -#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) -#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) - -static inline void LOAD_OP(int I, u32 *W, const u8 *input) +static int crypto_sha224_init(struct shash_desc *desc) { - W[I] = get_unaligned_be32((__u32 *)input + I); -} - -static inline void BLEND_OP(int I, u32 *W) -{ - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; -} - -static void sha256_transform(u32 *state, const u8 *input) -{ - u32 a, b, c, d, e, f, g, h, t1, t2; - u32 W[64]; - int i; - - /* load the input */ - for (i = 0; i < 16; i++) - LOAD_OP(i, W, input); - - /* now blend */ - for (i = 16; i < 64; i++) - BLEND_OP(i, W); - - /* load the state into our registers */ - a = state[0]; b = state[1]; c = state[2]; d = state[3]; - e = state[4]; f = state[5]; g = state[6]; h = state[7]; - - /* now iterate */ - t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) 
+ Ch(b, c, d) + 0x240ca1cc + W[19]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, 
b) + 0x4ed8aa4a + W[53]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - state[0] += a; state[1] += b; state[2] += c; state[3] += d; - state[4] += e; state[5] += f; state[6] += g; state[7] += h; - - /* clear any sensitive info... */ - a = b = c = d = e = f = g = h = t1 = t2 = 0; - memzero_explicit(W, 64 * sizeof(u32)); -} - -static void sha256_generic_block_fn(struct sha256_state *sst, u8 const *src, - int blocks) -{ - while (blocks--) { - sha256_transform(sst->state, src); - src += SHA256_BLOCK_SIZE; - } + return sha224_init(shash_desc_ctx(desc)); } int crypto_sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha256_base_do_update(desc, data, len, sha256_generic_block_fn); + return sha256_update(shash_desc_ctx(desc), data, len); } EXPORT_SYMBOL(crypto_sha256_update); -static int sha256_final(struct shash_desc *desc, u8 *out) +static int crypto_sha256_final(struct shash_desc *desc, u8 *out) { - sha256_base_do_finalize(desc, sha256_generic_block_fn); - return sha256_base_finish(desc, out); + if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) + return sha224_final(shash_desc_ctx(desc), out); + else + return sha256_final(shash_desc_ctx(desc), out); } int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) { - sha256_base_do_update(desc, data, len, sha256_generic_block_fn); - return sha256_final(desc, hash); + sha256_update(shash_desc_ctx(desc), data, len); + return crypto_sha256_final(desc, hash); } EXPORT_SYMBOL(crypto_sha256_finup); static struct shash_alg sha256_algs[2] = { { .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, + .init = crypto_sha256_init, .update = crypto_sha256_update, - .final = sha256_final, + .final = crypto_sha256_final, .finup = crypto_sha256_finup, .descsize = sizeof(struct sha256_state), .base = { @@ -272,9 +83,9 @@ static struct shash_alg sha256_algs[2] = { { } }, { .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, + .init = crypto_sha224_init, .update = crypto_sha256_update, - .final = sha256_final, + .final = crypto_sha256_final, .finup = crypto_sha256_finup, .descsize = sizeof(struct sha256_state), .base = { -- cgit v1.2.3 From af774e14399ab5e75c52469246310e457da4d657 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 19 Aug 2019 17:15:00 +0300 Subject: crypto: arm64/aegis128 - use explicit vector load for permute vectors When building the new aegis128 NEON code in big endian mode, Clang complains about the const uint8x16_t 
permute vectors in the following way: crypto/aegis128-neon-inner.c:58:40: warning: vector initializers are not compatible with NEON intrinsics in big endian mode [-Wnonportable-vector-initialization] static const uint8x16_t shift_rows = { ^ crypto/aegis128-neon-inner.c:58:40: note: consider using vld1q_u8() to initialize a vector from memory, or vcombine_u8(vcreate_u8(), vcreate_u8()) to initialize from integer constants Since the same issue applies to the uint8x16x4_t loads of the AES Sbox, update those references as well. However, since GCC does not implement the vld1q_u8_x4() intrinsic, switch from IS_ENABLED() to a preprocessor conditional to conditionally include this code. Reported-by: Nathan Chancellor Signed-off-by: Ard Biesheuvel Tested-by: Nathan Chancellor Signed-off-by: Herbert Xu --- crypto/aegis128-neon-inner.c | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'crypto') diff --git a/crypto/aegis128-neon-inner.c b/crypto/aegis128-neon-inner.c index ed55568a..f05310ca 100644 --- a/crypto/aegis128-neon-inner.c +++ b/crypto/aegis128-neon-inner.c @@ -26,7 +26,7 @@ struct aegis128_state { uint8x16_t v[5]; }; -extern const uint8x16x4_t crypto_aes_sbox[]; +extern const uint8_t crypto_aes_sbox[]; static struct aegis128_state aegis128_load_state_neon(const void *state) { @@ -55,39 +55,39 @@ uint8x16_t aegis_aes_round(uint8x16_t w) #ifdef CONFIG_ARM64 if (!__builtin_expect(aegis128_have_aes_insn, 1)) { - static const uint8x16_t shift_rows = { + static const uint8_t shift_rows[] = { 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3, 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb, }; - static const uint8x16_t ror32by8 = { + static const uint8_t ror32by8[] = { 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4, 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc, }; uint8x16_t v; // shift rows - w = vqtbl1q_u8(w, shift_rows); + w = vqtbl1q_u8(w, vld1q_u8(shift_rows)); // sub bytes - if (!IS_ENABLED(CONFIG_CC_IS_GCC)) { - v = vqtbl4q_u8(crypto_aes_sbox[0], w); - v = vqtbx4q_u8(v, crypto_aes_sbox[1], w - 0x40); - v = vqtbx4q_u8(v, crypto_aes_sbox[2], w - 0x80); - v = vqtbx4q_u8(v, crypto_aes_sbox[3], w - 0xc0); - } else { - asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w)); - w -= 0x40; - asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w)); - w -= 0x40; - asm("tbx %0.16b, {v24.16b-v27.16b}, %1.16b" : "+w"(v) : "w"(w)); - w -= 0x40; - asm("tbx %0.16b, {v28.16b-v31.16b}, %1.16b" : "+w"(v) : "w"(w)); - } +#ifndef CONFIG_CC_IS_GCC + v = vqtbl4q_u8(vld1q_u8_x4(crypto_aes_sbox), w); + v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x40), w - 0x40); + v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x80), w - 0x80); + v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0xc0), w - 0xc0); +#else + asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w)); + w -= 0x40; + asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w)); + w -= 0x40; + asm("tbx %0.16b, {v24.16b-v27.16b}, %1.16b" : "+w"(v) : "w"(w)); + w -= 0x40; + asm("tbx %0.16b, {v28.16b-v31.16b}, %1.16b" : "+w"(v) : "w"(w)); +#endif // mix columns w = (v << 1) ^ (uint8x16_t)(((int8x16_t)v >> 7) & 0x1b); w ^= (uint8x16_t)vrev32q_u16((uint16x8_t)v); - w ^= vqtbl1q_u8(v ^ w, ror32by8); + w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8)); return w; } -- cgit v1.2.3 From 0928d3188ce21d88d1c386c1244a191d7e3670e3 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 19 Aug 2019 17:17:34 +0300 Subject: crypto: essiv - add tests for essiv in cbc(aes)+sha256 mode Add a test vector for the ESSIV mode that is 
the most widely used, i.e., using cbc(aes) and sha256, in both skcipher and AEAD modes (the latter is used by tcrypt to encapsulate the authenc template or h/w instantiations of the same) Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 9 + crypto/testmgr.c | 14 ++ crypto/testmgr.h | 497 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 520 insertions(+) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index c578ccd9..83ad0b1f 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2327,6 +2327,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) 0, speed_template_32); break; + case 220: + test_acipher_speed("essiv(cbc(aes),sha256)", + ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_acipher_speed("essiv(cbc(aes),sha256)", + DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + break; + case 221: test_aead_speed("aegis128", ENCRYPT, sec, NULL, 0, 16, 8, speed_template_16); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index d990eba7..c39e39e5 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4544,6 +4544,20 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .akcipher = __VECS(ecrdsa_tv_template) } + }, { + .alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)", + .test = alg_test_aead, + .fips_allowed = 1, + .suite = { + .aead = __VECS(essiv_hmac_sha256_aes_cbc_tv_temp) + } + }, { + .alg = "essiv(cbc(aes),sha256)", + .test = alg_test_skcipher, + .fips_allowed = 1, + .suite = { + .cipher = __VECS(essiv_aes_cbc_tv_template) + } }, { .alg = "gcm(aes)", .generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 154052d0..ef7d21f3 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -31070,4 +31070,501 @@ static const struct comp_testvec zstd_decomp_tv_template[] = { "functions.", }, }; + +/* based on aes_cbc_tv_template */ +static const struct cipher_testvec essiv_aes_cbc_tv_template[] = { + { + .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", + .klen = 16, + .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "Single block msg", + .ctext = "\xfa\x59\xe7\x5f\x41\x56\x65\xc3" + "\x36\xca\x6b\x72\x10\x9f\x8c\xd4", + .len = 16, + }, { + .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" + "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", + .klen = 16, + .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .ctext = "\xc8\x59\x9a\xfe\x79\xe6\x7b\x20" + "\x06\x7d\x55\x0a\x5e\xc7\xb5\xa7" + "\x0b\x9c\x80\xd2\x15\xa1\xb8\x6d" + "\xc6\xab\x7b\x65\xd9\xfd\x88\xeb", + .len = 32, + }, { + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x96\x6d\xa9\x7a\x42\xe6\x01\xc7" + "\x17\xfc\xa7\x41\xd3\x38\x0b\xe5" + "\x51\x48\xf7\x7e\x5e\x26\xa9\xfe" + "\x45\x72\x1c\xd9\xde\xab\xf3\x4d" + "\x39\x47\xc5\x4f\x97\x3a\x55\x63" 
+ "\x80\x29\x64\x4c\x33\xe8\x21\x8a" + "\x6a\xef\x6b\x6a\x8f\x43\xc0\xcb" + "\xf0\xf3\x6e\x74\x54\x44\x92\x44", + .len = 64, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x24\x52\xf1\x48\x74\xd0\xa7\x93" + "\x75\x9b\x63\x46\xc0\x1c\x1e\x17" + "\x4d\xdc\x5b\x3a\x27\x93\x2a\x63" + "\xf7\xf1\xc7\xb3\x54\x56\x5b\x50" + "\xa3\x31\xa5\x8b\xd6\xfd\xb6\x3c" + "\x8b\xf6\xf2\x45\x05\x0c\xc8\xbb" + "\x32\x0b\x26\x1c\xe9\x8b\x02\xc0" + "\xb2\x6f\x37\xa7\x5b\xa8\xa9\x42", + .len = 64, + }, { + .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" + "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" + "\xBE\xE1\x04\x27\xE1\x04\x27\x4A" + "\x6D\x90\x4A\x6D\x90\xB3\xD6\xF9", + .klen = 32, + .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" + "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" + "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" + "\x1D\x86\xEF\x58\xE4\x4D\xB6\x1F" + "\xAB\x14\x7D\x09\x72\xDB\x44\xD0" + "\x39\xA2\x0B\x97\x00\x69\xF5\x5E" + "\xC7\x30\xBC\x25\x8E\x1A\x83\xEC" + "\x55\xE1\x4A\xB3\x1C\xA8\x11\x7A" + "\x06\x6F\xD8\x41\xCD\x36\x9F\x08" + "\x94\xFD\x66\xF2\x5B\xC4\x2D\xB9" + "\x22\x8B\x17\x80\xE9\x52\xDE\x47" + "\xB0\x19\xA5\x0E\x77\x03\x6C\xD5" + "\x3E\xCA\x33\x9C\x05\x91\xFA\x63" + "\xEF\x58\xC1\x2A\xB6\x1F\x88\x14" + "\x7D\xE6\x4F\xDB\x44\xAD\x16\xA2" + "\x0B\x74\x00\x69\xD2\x3B\xC7\x30" + "\x99\x02\x8E\xF7\x60\xEC\x55\xBE" + "\x27\xB3\x1C\x85\x11\x7A\xE3\x4C" + "\xD8\x41\xAA\x13\x9F\x08\x71\xFD" + "\x66\xCF\x38\xC4\x2D\x96\x22\x8B" + "\xF4\x5D\xE9\x52\xBB\x24\xB0\x19" + "\x82\x0E\x77\xE0\x49\xD5\x3E\xA7" + "\x10\x9C\x05\x6E\xFA\x63\xCC\x35" + "\xC1\x2A\x93\x1F\x88\xF1\x5A\xE6" + "\x4F\xB8\x21\xAD\x16\x7F\x0B\x74" + "\xDD\x46\xD2\x3B\xA4\x0D\x99\x02" + "\x6B\xF7\x60\xC9\x32\xBE\x27\x90" + "\x1C\x85\xEE\x57\xE3\x4C\xB5\x1E" + "\xAA\x13\x7C\x08\x71\xDA\x43\xCF" + "\x38\xA1\x0A\x96\xFF\x68\xF4\x5D" + "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB" + "\x54\xE0\x49\xB2\x1B\xA7\x10\x79" + "\x05\x6E\xD7\x40\xCC\x35\x9E\x07" + "\x93\xFC\x65\xF1\x5A\xC3\x2C\xB8" + "\x21\x8A\x16\x7F\xE8\x51\xDD\x46" + "\xAF\x18\xA4\x0D\x76\x02\x6B\xD4" + "\x3D\xC9\x32\x9B\x04\x90\xF9\x62" + "\xEE\x57\xC0\x29\xB5\x1E\x87\x13" + "\x7C\xE5\x4E\xDA\x43\xAC\x15\xA1" + "\x0A\x73\xFF\x68\xD1\x3A\xC6\x2F" + "\x98\x01\x8D\xF6\x5F\xEB\x54\xBD" + "\x26\xB2\x1B\x84\x10\x79\xE2\x4B" + "\xD7\x40\xA9\x12\x9E\x07\x70\xFC" + "\x65\xCE\x37\xC3\x2C\x95\x21\x8A" + "\xF3\x5C\xE8\x51\xBA\x23\xAF\x18" + "\x81\x0D\x76\xDF\x48\xD4\x3D\xA6" + "\x0F\x9B\x04\x6D\xF9\x62\xCB\x34" + "\xC0\x29\x92\x1E\x87\xF0\x59\xE5" + "\x4E\xB7\x20\xAC\x15\x7E\x0A\x73" + "\xDC\x45\xD1\x3A\xA3\x0C\x98\x01" + "\x6A\xF6\x5F\xC8\x31\xBD\x26\x8F" + "\x1B\x84\xED\x56\xE2\x4B\xB4\x1D" + "\xA9\x12\x7B\x07\x70\xD9\x42\xCE" + "\x37\xA0\x09\x95\xFE\x67\xF3\x5C" + "\xC5\x2E\xBA\x23\x8C\x18\x81\xEA" + "\x53\xDF\x48\xB1\x1A\xA6\x0F\x78" + "\x04\x6D\xD6\x3F\xCB\x34\x9D\x06" + "\x92\xFB\x64\xF0\x59\xC2\x2B\xB7" + "\x20\x89\x15\x7E\xE7\x50\xDC\x45" + "\xAE\x17\xA3\x0C\x75\x01\x6A\xD3" + "\x3C\xC8\x31\x9A\x03\x8F\xF8\x61" + 
"\xED\x56\xBF\x28\xB4\x1D\x86\x12", + .ctext = "\x97\x7f\x69\x0f\x0f\x34\xa6\x33" + "\x66\x49\x7e\xd0\x4d\x1b\xc9\x64" + "\xf9\x61\x95\x98\x11\x00\x88\xf8" + "\x2e\x88\x01\x0f\x2b\xe1\xae\x3e" + "\xfe\xd6\x47\x30\x11\x68\x7d\x99" + "\xad\x69\x6a\xe8\x41\x5f\x1e\x16" + "\x00\x3a\x47\xdf\x8e\x7d\x23\x1c" + "\x19\x5b\x32\x76\x60\x03\x05\xc1" + "\xa0\xff\xcf\xcc\x74\x39\x46\x63" + "\xfe\x5f\xa6\x35\xa7\xb4\xc1\xf9" + "\x4b\x5e\x38\xcc\x8c\xc1\xa2\xcf" + "\x9a\xc3\xae\x55\x42\x46\x93\xd9" + "\xbd\x22\xd3\x8a\x19\x96\xc3\xb3" + "\x7d\x03\x18\xf9\x45\x09\x9c\xc8" + "\x90\xf3\x22\xb3\x25\x83\x9a\x75" + "\xbb\x04\x48\x97\x3a\x63\x08\x04" + "\xa0\x69\xf6\x52\xd4\x89\x93\x69" + "\xb4\x33\xa2\x16\x58\xec\x4b\x26" + "\x76\x54\x10\x0b\x6e\x53\x1e\xbc" + "\x16\x18\x42\xb1\xb1\xd3\x4b\xda" + "\x06\x9f\x8b\x77\xf7\xab\xd6\xed" + "\xa3\x1d\x90\xda\x49\x38\x20\xb8" + "\x6c\xee\xae\x3e\xae\x6c\x03\xb8" + "\x0b\xed\xc8\xaa\x0e\xc5\x1f\x90" + "\x60\xe2\xec\x1b\x76\xd0\xcf\xda" + "\x29\x1b\xb8\x5a\xbc\xf4\xba\x13" + "\x91\xa6\xcb\x83\x3f\xeb\xe9\x7b" + "\x03\xba\x40\x9e\xe6\x7a\xb2\x4a" + "\x73\x49\xfc\xed\xfb\x55\xa4\x24" + "\xc7\xa4\xd7\x4b\xf5\xf7\x16\x62" + "\x80\xd3\x19\x31\x52\x25\xa8\x69" + "\xda\x9a\x87\xf5\xf2\xee\x5d\x61" + "\xc1\x12\x72\x3e\x52\x26\x45\x3a" + "\xd8\x9d\x57\xfa\x14\xe2\x9b\x2f" + "\xd4\xaa\x5e\x31\xf4\x84\x89\xa4" + "\xe3\x0e\xb0\x58\x41\x75\x6a\xcb" + "\x30\x01\x98\x90\x15\x80\xf5\x27" + "\x92\x13\x81\xf0\x1c\x1e\xfc\xb1" + "\x33\xf7\x63\xb0\x67\xec\x2e\x5c" + "\x85\xe3\x5b\xd0\x43\x8a\xb8\x5f" + "\x44\x9f\xec\x19\xc9\x8f\xde\xdf" + "\x79\xef\xf8\xee\x14\x87\xb3\x34" + "\x76\x00\x3a\x9b\xc7\xed\xb1\x3d" + "\xef\x07\xb0\xe4\xfd\x68\x9e\xeb" + "\xc2\xb4\x1a\x85\x9a\x7d\x11\x88" + "\xf8\xab\x43\x55\x2b\x8a\x4f\x60" + "\x85\x9a\xf4\xba\xae\x48\x81\xeb" + "\x93\x07\x97\x9e\xde\x2a\xfc\x4e" + "\x31\xde\xaa\x44\xf7\x2a\xc3\xee" + "\x60\xa2\x98\x2c\x0a\x88\x50\xc5" + "\x6d\x89\xd3\xe4\xb6\xa7\xf4\xb0" + "\xcf\x0e\x89\xe3\x5e\x8f\x82\xf4" + "\x9d\xd1\xa9\x51\x50\x8a\xd2\x18" + "\x07\xb2\xaa\x3b\x7f\x58\x9b\xf4" + "\xb7\x24\x39\xd3\x66\x2f\x1e\xc0" + "\x11\xa3\x56\x56\x2a\x10\x73\xbc" + "\xe1\x23\xbf\xa9\x37\x07\x9c\xc3" + "\xb2\xc9\xa8\x1c\x5b\x5c\x58\xa4" + "\x77\x02\x26\xad\xc3\x40\x11\x53" + "\x93\x68\x72\xde\x05\x8b\x10\xbc" + "\xa6\xd4\x1b\xd9\x27\xd8\x16\x12" + "\x61\x2b\x31\x2a\x44\x87\x96\x58", + .len = 496, + }, +}; + +/* based on hmac_sha256_aes_cbc_tv_temp */ +static const struct aead_testvec essiv_hmac_sha256_aes_cbc_tv_temp[] = { + { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x06\xa9\x21\x40\x36\xb8\xa1\x5b" + "\x51\x2e\x03\xd5\x34\x12\x00\x06", + .klen = 8 + 32 + 16, + .iv = "\xb3\x0c\x5a\x11\x41\xad\xc1\x04" + "\xbc\x1e\x7e\x35\xb0\x5d\x78\x29", + .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" + "\xb4\x22\xda\x80\x2c\x9f\xac\x41", + .alen = 16, + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + "\x27\x08\x94\x2d\xbe\x77\x18\x1a" + "\xcc\xde\x2d\x6a\xae\xf1\x0b\xcc" + "\x38\x06\x38\x51\xb4\xb8\xf3\x5b" + "\x5c\x34\xa6\xa3\x6e\x0b\x05\xe5" + "\x6a\x6d\x44\xaa\x26\xa8\x44\xa5", + .clen = 16 + 32, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta 
length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" + "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", + .klen = 8 + 32 + 16, + .iv = "\x56\xe8\x14\xa5\x74\x18\x75\x13" + "\x2f\x79\xe7\xc8\x65\xe3\x48\x45", + .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" + "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", + .alen = 16, + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" + "\x75\x86\x60\x2d\x25\x3c\xff\xf9" + "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" + "\xf5\x33\x53\xf3\x68\x85\x2a\x99" + "\x0e\x06\x58\x8f\xba\xf6\x06\xda" + "\x49\x69\x0d\x5b\xd4\x36\x06\x62" + "\x35\x5e\x54\x58\x53\x4d\xdf\xbf", + .clen = 32 + 32, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x6c\x3e\xa0\x47\x76\x30\xce\x21" + "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd", + .klen = 8 + 32 + 16, + .iv = "\x1f\x6b\xfb\xd6\x6b\x72\x2f\xc9" + "\xb6\x9f\x8c\x10\xa8\x96\x15\x64", + .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" + "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", + .alen = 16, + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" + "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" + "\x50\x69\x39\x27\x67\x72\xf8\xd5" + "\x02\x1c\x19\x21\x6b\xad\x52\x5c" + "\x85\x79\x69\x5d\x83\xba\x26\x84" + "\x68\xb9\x3e\x90\x38\xa0\x88\x01" + "\xe7\xc6\xce\x10\x31\x2f\x9b\x1d" + "\x24\x78\xfb\xbe\x02\xe0\x4f\x40" + "\x10\xbd\xaa\xc6\xa7\x79\xe0\x1a", + .clen = 48 + 32, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x56\xe4\x7a\x38\xc5\x59\x89\x74" + "\xbc\x46\x90\x3d\xba\x29\x03\x49", + .klen = 8 + 32 + 16, + .iv = "\x13\xe5\xf2\xef\x61\x97\x59\x35" + "\x9b\x36\x84\x46\x4e\x63\xd1\x41", + .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" + "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", + .alen = 16, + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", + .plen = 64, + .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" + "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" + "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" + "\x35\x90\x7a\xa6\x32\xc3\xff\xdf" + "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad" + "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d" + "\x49\xa5\x3e\x87\xf4\xc3\xda\x55" + "\x7a\x1b\xd4\x3c\xdb\x17\x95\xe2" + "\xe0\x93\xec\xc9\x9f\xf7\xce\xd8" + "\x3f\x54\xe2\x49\x39\xe3\x71\x25" + "\x2b\x6c\xe9\x5d\xec\xec\x2b\x64", + .clen 
= 64 + 32, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x10" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x90\xd3\x82\xb4\x10\xee\xba\x7a" + "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf", + .klen = 8 + 32 + 16, + .iv = "\xe4\x13\xa1\x15\xe9\x6b\xb8\x23" + "\x81\x7a\x94\x29\xab\xfd\xd2\x2c", + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" + "\xe9\x6e\x8c\x08\xab\x46\x57\x63" + "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", + .alen = 24, + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + "\xa9\x45\x3e\x19\x4e\x12\x08\x49" + "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" + "\x33\x00\x13\xb4\x89\x8d\xc8\x56" + "\xa4\x69\x9e\x52\x3a\x55\xdb\x08" + "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52" + "\x77\x5b\x07\xd1\xdb\x34\xed\x9c" + "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a" + "\xa2\x69\xad\xd0\x47\xad\x2d\x59" + "\x13\xac\x19\xb7\xcf\xba\xd4\xa6" + "\xbb\xd4\x0f\xbe\xa3\x3b\x4c\xb8" + "\x3a\xd2\xe1\x03\x86\xa5\x59\xb7" + "\x73\xc3\x46\x20\x2c\xb1\xef\x68" + "\xbb\x8a\x32\x7e\x12\x8c\x69\xcf", + .clen = 80 + 32, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x18" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 8 + 32 + 24, + .iv = "\x49\xca\x41\xc9\x6b\xbf\x6c\x98" + "\x38\x2f\xa7\x3d\x4d\x80\x49\xb0", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .alen = 16, + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" + "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" + "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" + "\x57\x1b\x24\x20\x12\xfb\x7a\xe0" + "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0" + "\x08\xb0\xe2\x79\x88\x59\x88\x81" + "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd" + "\x2f\xee\x5f\xdb\x66\xfe\x79\x09" + "\x61\x81\x31\xea\x5b\x3d\x8e\xfb" + "\xca\x71\x85\x93\xf7\x85\x55\x8b" + "\x7a\xe4\x94\xca\x8b\xba\x19\x33", + .clen = 64 + 32, + }, { +#ifdef __LITTLE_ENDIAN + .key = "\x08\x00" /* rta length */ + "\x01\x00" /* rta type */ +#else + .key = "\x00\x08" /* rta length */ + "\x00\x01" /* rta type */ +#endif + "\x00\x00\x00\x20" /* enc key length */ + "\x11\x22\x33\x44\x55\x66\x77\x88" + "\x99\xaa\xbb\xcc\xdd\xee\xff\x11" + "\x22\x33\x44\x55\x66\x77\x88\x99" + "\xaa\xbb\xcc\xdd\xee\xff\x11\x22" + "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + 
"\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 8 + 32 + 32, + .iv = "\xdf\xab\xf2\x7c\xdc\xe0\x33\x4c" + "\xf9\x75\xaf\xf9\x2f\x60\x3a\x9b", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .alen = 16, + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" + "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" + "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" + "\x39\xf2\x33\x69\xa9\xd9\xba\xcf" + "\xa5\x30\xe2\x63\x04\x23\x14\x61" + "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" + "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b" + "\x24\x29\xed\xc2\x31\x49\xdb\xb1" + "\x8f\x74\xbd\x17\x92\x03\xbe\x8f" + "\xf3\x61\xde\x1c\xe9\xdb\xcd\xd0" + "\xcc\xce\xe9\x85\x57\xcf\x6f\x5f", + .clen = 64 + 32, + }, +}; + #endif /* _CRYPTO_TESTMGR_H */ -- cgit v1.2.3 From 355b1364a1ab6f16119ad98edae6af8b86c4d2c2 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 22 Aug 2019 22:41:38 +0800 Subject: crypto: aegis128 - Fix -Wunused-const-variable warning crypto/aegis.h:27:32: warning: crypto_aegis_const defined but not used [-Wunused-const-variable=] crypto_aegis_const is only used in aegis128-core.c, just move the definition over there. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Herbert Xu --- crypto/aegis.h | 11 ----------- crypto/aegis128-core.c | 11 +++++++++++ 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'crypto') diff --git a/crypto/aegis.h b/crypto/aegis.h index 4d56a85a..6920ebe7 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -24,17 +24,6 @@ union aegis_block { #define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block)) #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN) -static const union aegis_block crypto_aegis_const[2] = { - { .words64 = { - cpu_to_le64(U64_C(0x0d08050302010100)), - cpu_to_le64(U64_C(0x6279e99059372215)), - } }, - { .words64 = { - cpu_to_le64(U64_C(0xf12fc26d55183ddb)), - cpu_to_le64(U64_C(0xdd28b57342311120)), - } }, -}; - static __always_inline void crypto_aegis_block_xor(union aegis_block *dst, const union aegis_block *src) { diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index fa69e999..80e73611 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -45,6 +45,17 @@ struct aegis128_ops { static bool have_simd; +static const union aegis_block crypto_aegis_const[2] = { + { .words64 = { + cpu_to_le64(U64_C(0x0d08050302010100)), + cpu_to_le64(U64_C(0x6279e99059372215)), + } }, + { .words64 = { + cpu_to_le64(U64_C(0xf12fc26d55183ddb)), + cpu_to_le64(U64_C(0xdd28b57342311120)), + } }, +}; + static bool aegis128_do_simd(void) { #ifdef CONFIG_CRYPTO_AEGIS128_SIMD -- cgit v1.2.3 From 9dfce227a2bafcdbe9e873338c6713fc6f75b824 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 1 Sep 2019 22:35:31 +0200 Subject: crypto: sha256 - Merge crypto/sha256.h into crypto/sha.h The generic sha256 implementation from lib/crypto/sha256.c uses data structs defined in crypto/sha.h, so lets move the function prototypes there too. 
Signed-off-by: Hans de Goede Signed-off-by: Herbert Xu --- crypto/sha256_generic.c | 1 - 1 file changed, 1 deletion(-) (limited to 'crypto') diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index eafd10f9..f2d7095d 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From bc139413f5536fea6b8b93d5935e357f3f3ac3a8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 6 Sep 2019 13:13:06 +1000 Subject: crypto: skcipher - Unmap pages after an external error skcipher_walk_done may be called with an error by internal or external callers. For those internal callers we shouldn't unmap pages but for external callers we must unmap any pages that are in use. This patch distinguishes between the two cases by checking whether walk->nbytes is zero or not. For internal callers, we now set walk->nbytes to zero prior to the call. For external callers, walk->nbytes has always been non-zero (as zero is used to indicate the termination of a walk). Reported-by: Ard Biesheuvel Fixes: 61cea2599a20 ("[CRYPTO] cipher: Added block cipher type") Cc: Signed-off-by: Herbert Xu Tested-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/skcipher.c | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'crypto') diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 5d836fc3..22753c1c 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -90,7 +90,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) +static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; @@ -98,19 +98,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); + return 0; } int skcipher_walk_done(struct skcipher_walk *walk, int err) { - unsigned int n; /* bytes processed */ - bool more; + unsigned int n = walk->nbytes; + unsigned int nbytes = 0; - if (unlikely(err < 0)) + if (!n) goto finish; - n = walk->nbytes - err; - walk->total -= n; - more = (walk->total != 0); + if (likely(err >= 0)) { + n -= err; + nbytes = walk->total - n; + } if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | SKCIPHER_WALK_SLOW | @@ -126,7 +128,7 @@ unmap_src: memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { - if (err) { + if (err > 0) { /* * Didn't process all bytes. Either the algorithm is * broken, or this was the last step and it turned out @@ -134,27 +136,29 @@ unmap_src: * the algorithm requires it. */ err = -EINVAL; - goto finish; - } - skcipher_done_slow(walk, n); - goto already_advanced; + nbytes = 0; + } else + n = skcipher_done_slow(walk, n); } + if (err > 0) + err = 0; + + walk->total = nbytes; + walk->nbytes = 0; + scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); -already_advanced: - scatterwalk_done(&walk->in, 0, more); - scatterwalk_done(&walk->out, 1, more); + scatterwalk_done(&walk->in, 0, nbytes); + scatterwalk_done(&walk->out, 1, nbytes); - if (more) { + if (nbytes) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? 
CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } - err = 0; -finish: - walk->nbytes = 0; +finish: /* Short-circuit for the common/fast path. */ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; -- cgit v1.2.3 From 562a16de26bdc99de1baea432a317d7b0a51fafc Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Thu, 5 Sep 2019 21:40:21 -0400 Subject: padata: allocate workqueue internally Move workqueue allocation inside of padata to prepare for further changes to how padata uses workqueues. Guarantees the workqueue is created with max_active=1, which padata relies on to work correctly. No functional change. Signed-off-by: Daniel Jordan Acked-by: Steffen Klassert Cc: Herbert Xu Cc: Jonathan Corbet Cc: Lai Jiangshan Cc: Peter Zijlstra Cc: Tejun Heo Cc: linux-crypto@vger.kernel.org Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 0edf5b54..d6729306 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -20,7 +20,6 @@ struct padata_pcrypt { struct padata_instance *pinst; - struct workqueue_struct *wq; /* * Cpumask for callback CPUs. It should be @@ -397,14 +396,9 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, get_online_cpus(); - pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, - 1, name); - if (!pcrypt->wq) - goto err; - - pcrypt->pinst = padata_alloc_possible(pcrypt->wq); + pcrypt->pinst = padata_alloc_possible(name); if (!pcrypt->pinst) - goto err_destroy_workqueue; + goto err; mask = kmalloc(sizeof(*mask), GFP_KERNEL); if (!mask) @@ -437,8 +431,6 @@ err_free_cpumask: kfree(mask); err_free_padata: padata_free(pcrypt->pinst); -err_destroy_workqueue: - destroy_workqueue(pcrypt->wq); err: put_online_cpus(); @@ -452,7 +444,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) padata_stop(pcrypt->pinst); padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); - destroy_workqueue(pcrypt->wq); padata_free(pcrypt->pinst); } -- cgit v1.2.3 From 254ffbd9085d3349168a25aff2278d375740751c Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Thu, 5 Sep 2019 21:40:24 -0400 Subject: padata: make padata_do_parallel find alternate callback CPU padata_do_parallel currently returns -EINVAL if the callback CPU isn't in the callback cpumask. pcrypt tries to prevent this situation by keeping its own callback cpumask in sync with padata's and checks that the callback CPU it passes to padata is valid. Make padata handle this instead. padata_do_parallel now takes a pointer to the callback CPU and updates it for the caller if an alternate CPU is used. Overall behavior in terms of which callback CPUs are chosen stays the same. Prepares for removal of the padata cpumask notifier in pcrypt, which will fix a lockdep complaint about nested acquisition of the CPU hotplug lock later in the series. 
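In sketch form, the new contract (mirrored by the pcrypt hunks below) is: the caller passes a pointer to its preferred callback CPU, and padata_do_parallel rewrites it if it had to select an alternate CPU from the serial cpumask:

	/* caller side, as in pcrypt_aead_encrypt() below */
	err = padata_do_parallel(pencrypt.pinst, padata, &ctx->cb_cpu);
	if (!err)
		return -EINPROGRESS;	/* queued; serial callback runs on ctx->cb_cpu */
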
Signed-off-by: Daniel Jordan Acked-by: Steffen Klassert Cc: Herbert Xu Cc: Lai Jiangshan Cc: Peter Zijlstra Cc: Tejun Heo Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 33 ++------------------------------- 1 file changed, 2 insertions(+), 31 deletions(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index d6729306..efca962a 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -57,35 +57,6 @@ struct pcrypt_aead_ctx { unsigned int cb_cpu; }; -static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, - struct padata_pcrypt *pcrypt) -{ - unsigned int cpu_index, cpu, i; - struct pcrypt_cpumask *cpumask; - - cpu = *cb_cpu; - - rcu_read_lock_bh(); - cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); - if (cpumask_test_cpu(cpu, cpumask->mask)) - goto out; - - if (!cpumask_weight(cpumask->mask)) - goto out; - - cpu_index = cpu % cpumask_weight(cpumask->mask); - - cpu = cpumask_first(cpumask->mask); - for (i = 0; i < cpu_index; i++) - cpu = cpumask_next(cpu, cpumask->mask); - - *cb_cpu = cpu; - -out: - rcu_read_unlock_bh(); - return padata_do_parallel(pcrypt->pinst, padata, cpu); -} - static int pcrypt_aead_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { @@ -157,7 +128,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); + err = padata_do_parallel(pencrypt.pinst, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; @@ -199,7 +170,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); + err = padata_do_parallel(pdecrypt.pinst, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; -- cgit v1.2.3 From bd386d43070c947c33b9ca0e84463deca5e81c27 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Thu, 5 Sep 2019 21:40:25 -0400 Subject: crypto: pcrypt - remove padata cpumask notifier Now that padata_do_parallel takes care of finding an alternate callback CPU, there's no need for pcrypt's callback cpumask, so remove it and the notifier callback that keeps it in sync. Signed-off-by: Daniel Jordan Acked-by: Steffen Klassert Cc: Herbert Xu Cc: Lai Jiangshan Cc: Peter Zijlstra Cc: Tejun Heo Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 125 ++++++++------------------------------------------------ 1 file changed, 18 insertions(+), 107 deletions(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index efca962a..2ec36e6a 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -18,33 +18,8 @@ #include #include -struct padata_pcrypt { - struct padata_instance *pinst; - - /* - * Cpumask for callback CPUs. It should be - * equal to serial cpumask of corresponding padata instance, - * so it is updated when padata notifies us about serial - * cpumask change. - * - * cb_cpumask is protected by RCU. This fact prevents us from - * using cpumask_var_t directly because the actual type of - * cpumsak_var_t depends on kernel configuration(particularly on - * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration - * cpumask_var_t may be either a pointer to the struct cpumask - * or a variable allocated on the stack. Thus we can not safely use - * cpumask_var_t with RCU operations such as rcu_assign_pointer or - * rcu_dereference. 
So cpumask_var_t is wrapped with struct - * pcrypt_cpumask which makes possible to use it with RCU. - */ - struct pcrypt_cpumask { - cpumask_var_t mask; - } *cb_cpumask; - struct notifier_block nblock; -}; - -static struct padata_pcrypt pencrypt; -static struct padata_pcrypt pdecrypt; +static struct padata_instance *pencrypt; +static struct padata_instance *pdecrypt; static struct kset *pcrypt_kset; struct pcrypt_instance_ctx { @@ -128,7 +103,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = padata_do_parallel(pencrypt.pinst, padata, &ctx->cb_cpu); + err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; @@ -170,7 +145,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); - err = padata_do_parallel(pdecrypt.pinst, padata, &ctx->cb_cpu); + err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; @@ -317,36 +292,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) return -EINVAL; } -static int pcrypt_cpumask_change_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - struct padata_pcrypt *pcrypt; - struct pcrypt_cpumask *new_mask, *old_mask; - struct padata_cpumask *cpumask = (struct padata_cpumask *)data; - - if (!(val & PADATA_CPU_SERIAL)) - return 0; - - pcrypt = container_of(self, struct padata_pcrypt, nblock); - new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); - if (!new_mask) - return -ENOMEM; - if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { - kfree(new_mask); - return -ENOMEM; - } - - old_mask = pcrypt->cb_cpumask; - - cpumask_copy(new_mask->mask, cpumask->cbcpu); - rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); - synchronize_rcu(); - - free_cpumask_var(old_mask->mask); - kfree(old_mask); - return 0; -} - static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) { int ret; @@ -359,63 +304,29 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) return ret; } -static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, - const char *name) +static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - struct pcrypt_cpumask *mask; get_online_cpus(); - pcrypt->pinst = padata_alloc_possible(name); - if (!pcrypt->pinst) - goto err; - - mask = kmalloc(sizeof(*mask), GFP_KERNEL); - if (!mask) - goto err_free_padata; - if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { - kfree(mask); - goto err_free_padata; - } - - cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask); - rcu_assign_pointer(pcrypt->cb_cpumask, mask); - - pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; - ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); - if (ret) - goto err_free_cpumask; + *pinst = padata_alloc_possible(name); + if (!*pinst) + return ret; - ret = pcrypt_sysfs_add(pcrypt->pinst, name); + ret = pcrypt_sysfs_add(*pinst, name); if (ret) - goto err_unregister_notifier; + padata_free(*pinst); put_online_cpus(); - return ret; - -err_unregister_notifier: - padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); -err_free_cpumask: - free_cpumask_var(mask->mask); - kfree(mask); -err_free_padata: - padata_free(pcrypt->pinst); -err: - put_online_cpus(); - return ret; } -static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) +static void pcrypt_fini_padata(struct padata_instance *pinst) { - 
free_cpumask_var(pcrypt->cb_cpumask->mask); - kfree(pcrypt->cb_cpumask); - - padata_stop(pcrypt->pinst); - padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); - padata_free(pcrypt->pinst); + padata_stop(pinst); + padata_free(pinst); } static struct crypto_template pcrypt_tmpl = { @@ -440,13 +351,13 @@ static int __init pcrypt_init(void) if (err) goto err_deinit_pencrypt; - padata_start(pencrypt.pinst); - padata_start(pdecrypt.pinst); + padata_start(pencrypt); + padata_start(pdecrypt); return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: - pcrypt_fini_padata(&pencrypt); + pcrypt_fini_padata(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: @@ -455,8 +366,8 @@ err: static void __exit pcrypt_exit(void) { - pcrypt_fini_padata(&pencrypt); - pcrypt_fini_padata(&pdecrypt); + pcrypt_fini_padata(pencrypt); + pcrypt_fini_padata(pdecrypt); kset_unregister(pcrypt_kset); crypto_unregister_template(&pcrypt_tmpl); -- cgit v1.2.3 From 24a9f0e4cb52070c48602e9e6d31470dd98ccdcf Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Thu, 5 Sep 2019 21:40:26 -0400 Subject: padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible With pcrypt's cpumask no longer used, take the CPU hotplug lock inside padata_alloc_possible. Useful later in the series for avoiding nested acquisition of the CPU hotplug lock in padata when padata_alloc_possible is allocating an unbound workqueue. Without this patch, this nested acquisition would happen later in the series: pcrypt_init_padata get_online_cpus alloc_padata_possible alloc_padata alloc_workqueue(WQ_UNBOUND) // later in the series alloc_and_link_pwqs apply_wqattrs_lock get_online_cpus // recursive rwsem acquisition Signed-off-by: Daniel Jordan Acked-by: Steffen Klassert Cc: Herbert Xu Cc: Lai Jiangshan Cc: Peter Zijlstra Cc: Tejun Heo Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 2ec36e6a..543792e0 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -308,8 +308,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - get_online_cpus(); - *pinst = padata_alloc_possible(name); if (!*pinst) return ret; @@ -318,8 +316,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) if (ret) padata_free(*pinst); - put_online_cpus(); - return ret; } -- cgit v1.2.3
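
Net effect of the three pcrypt patches above: with the callback cpumask, its notifier, and the explicit hotplug locking gone, the setup helper reduces to roughly the following (a sketch obtained by applying the hunks above in order, not an authoritative copy of the tree):

	static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
	{
		int ret = -ENOMEM;

		/* padata_alloc_possible now takes the CPU hotplug lock itself */
		*pinst = padata_alloc_possible(name);
		if (!*pinst)
			return ret;

		ret = pcrypt_sysfs_add(*pinst, name);
		if (ret)
			padata_free(*pinst);

		return ret;
	}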