author      John Johansen <john.johansen@canonical.com>    2018-05-02 00:38:52 -0700
committer   John Johansen <john.johansen@canonical.com>    2018-05-02 00:38:52 -0700
commit      db5c6566ee03e097b4b7f7e207189c0906e9d1aa (patch)
tree        fb78e31032dcca2fcb0e9123cf11be12728f99c5
parent      21864b027d847a6d91903a5ba219770403ba8aad (diff)
parent      ef734472771a62ae9f901367a40a89382a991917 (diff)
download    linux-crypto-db5c6566ee03e097b4b7f7e207189c0906e9d1aa.tar.gz
            linux-crypto-db5c6566ee03e097b4b7f7e207189c0906e9d1aa.zip
Merge tag 'v4.17-rc3' into apparmor-next
Linux v4.17-rc3

Merge in v4.17 for LSM updates

Signed-off-by: John Johansen <john.johansen@canonical.com>
-rw-r--r--  crypto/.gitignore                         |    1
-rw-r--r--  crypto/Kconfig                            |  143
-rw-r--r--  crypto/Makefile                           |   19
-rw-r--r--  crypto/ablk_helper.c                      |  153
-rw-r--r--  crypto/aead.c                             |   19
-rw-r--r--  crypto/af_alg.c                           |   30
-rw-r--r--  crypto/ahash.c                            |   58
-rw-r--r--  crypto/algapi.c                           |   21
-rw-r--r--  crypto/algif_aead.c                       |   15
-rw-r--r--  crypto/algif_hash.c                       |   52
-rw-r--r--  crypto/algif_skcipher.c                   |   60
-rw-r--r--  crypto/api.c                              |   45
-rw-r--r--  crypto/asymmetric_keys/.gitignore         |    1
-rw-r--r--  crypto/asymmetric_keys/Makefile           |   31
-rw-r--r--  crypto/asymmetric_keys/mscode_parser.c    |    2
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c     |    2
-rw-r--r--  crypto/asymmetric_keys/pkcs7_trust.c      |    1
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c     |   12
-rw-r--r--  crypto/asymmetric_keys/public_key.c       |    4
-rw-r--r--  crypto/asymmetric_keys/restrict.c         |   21
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c |    4
-rw-r--r--  crypto/authenc.c                          |    4
-rw-r--r--  crypto/authencesn.c                       |    4
-rw-r--r--  crypto/blkcipher.c                        |    1
-rw-r--r--  crypto/camellia_generic.c                 |    3
-rw-r--r--  crypto/cast5_generic.c                    |    3
-rw-r--r--  crypto/cast6_generic.c                    |    3
-rw-r--r--  crypto/cfb.c                              |  353
-rw-r--r--  crypto/chacha20_generic.c                 |   33
-rw-r--r--  crypto/crc32_generic.c                    |    1
-rw-r--r--  crypto/crc32c_generic.c                   |    1
-rw-r--r--  crypto/cryptd.c                           |   17
-rw-r--r--  crypto/crypto_engine.c                    |  301
-rw-r--r--  crypto/crypto_user.c                      |    6
-rw-r--r--  crypto/drbg.c                             |    2
-rw-r--r--  crypto/ecc.c                              |   25
-rw-r--r--  crypto/ecdh.c                             |   23
-rw-r--r--  crypto/echainiv.c                         |    5
-rw-r--r--  crypto/gcm.c                              |    4
-rw-r--r--  crypto/gf128mul.c                         |    2
-rw-r--r--  crypto/ghash-generic.c                    |    6
-rw-r--r--  crypto/internal.h                         |    9
-rw-r--r--  crypto/keywrap.c                          |    4
-rw-r--r--  crypto/lrw.c                              |  154
-rw-r--r--  crypto/mcryptd.c                          |   45
-rw-r--r--  crypto/md4.c                              |   17
-rw-r--r--  crypto/md5.c                              |   17
-rw-r--r--  crypto/poly1305_generic.c                 |   27
-rw-r--r--  crypto/proc.c                             |    2
-rw-r--r--  crypto/rsa-pkcs1pad.c                     |    2
-rw-r--r--  crypto/rsa_helper.c                       |    4
-rw-r--r--  crypto/salsa20_generic.c                  |  240
-rw-r--r--  crypto/scompress.c                        |   51
-rw-r--r--  crypto/seqiv.c                            |    5
-rw-r--r--  crypto/sha3_generic.c                     |  364
-rw-r--r--  crypto/shash.c                            |   25
-rw-r--r--  crypto/simd.c                             |   54
-rw-r--r--  crypto/skcipher.c                         |   30
-rw-r--r--  crypto/sm4_generic.c                      |  244
-rw-r--r--  crypto/speck.c                            |  307
-rw-r--r--  crypto/tcrypt.c                           | 1086
-rw-r--r--  crypto/testmgr.c                          |   86
-rw-r--r--  crypto/testmgr.h                          | 2432
-rw-r--r--  crypto/twofish_common.c                   |    5
-rw-r--r--  crypto/twofish_generic.c                  |    5
-rw-r--r--  crypto/xcbc.c                             |    3
-rw-r--r--  crypto/xts.c                              |   72
67 files changed, 5352 insertions, 1429 deletions
diff --git a/crypto/.gitignore b/crypto/.gitignore
deleted file mode 100644
index ee328374..00000000
--- a/crypto/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-asn1.[ch]
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f7911963..76e8c88c 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -106,6 +106,7 @@ config CRYPTO_KPP
config CRYPTO_ACOMP2
tristate
select CRYPTO_ALGAPI2
+ select SGL_ALLOC
config CRYPTO_ACOMP
tristate
@@ -130,7 +131,7 @@ config CRYPTO_DH
config CRYPTO_ECDH
tristate "ECDH algorithm"
- select CRYTPO_KPP
+ select CRYPTO_KPP
select CRYPTO_RNG_DEFAULT
help
Generic implementation of the ECDH algorithm
@@ -244,10 +245,6 @@ config CRYPTO_TEST
help
Quick & dirty crypto test module.
-config CRYPTO_ABLK_HELPER
- tristate
- select CRYPTO_CRYPTD
-
config CRYPTO_SIMD
tristate
select CRYPTO_CRYPTD
@@ -323,6 +320,14 @@ config CRYPTO_CBC
CBC: Cipher Block Chaining mode
This block cipher algorithm is required for IPSec.
+config CRYPTO_CFB
+ tristate "CFB support"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ help
+ CFB: Cipher FeedBack mode
+ This block cipher algorithm is required for TPM2 Cryptography.
+
config CRYPTO_CTR
tristate "CTR support"
select CRYPTO_BLKCIPHER
@@ -495,6 +500,15 @@ config CRYPTO_CRC32_PCLMUL
which will enable any routine to use the CRC-32-IEEE 802.3 checksum
and gain better performance as compared with the table implementation.
+config CRYPTO_CRC32_MIPS
+ tristate "CRC32c and CRC32 CRC algorithm (MIPS)"
+ depends on MIPS_CRC_SUPPORT
+ select CRYPTO_HASH
+ help
+ CRC32c and CRC32 CRC algorithms implemented using mips crypto
+ instructions, when available.
+
+
config CRYPTO_CRCT10DIF
tristate "CRCT10DIF algorithm"
select CRYPTO_HASH
@@ -1113,7 +1127,7 @@ config CRYPTO_BLOWFISH_COMMON
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_BLOWFISH_COMMON
help
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
@@ -1144,10 +1158,8 @@ config CRYPTO_CAMELLIA_X86_64
tristate "Camellia cipher algorithm (x86_64)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64).
@@ -1163,12 +1175,10 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_BLKCIPHER
select CRYPTO_CAMELLIA_X86_64
- select CRYPTO_LRW
+ select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_SIMD
select CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64/AES-NI/AVX).
@@ -1185,14 +1195,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
depends on X86 && 64BIT
depends on CRYPTO
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_CAMELLIA_X86_64
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
@@ -1237,11 +1240,10 @@ config CRYPTO_CAST5
config CRYPTO_CAST5_AVX_X86_64
tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_CAST_COMMON
+ select CRYPTO_BLKCIPHER
select CRYPTO_CAST5
+ select CRYPTO_CAST_COMMON
+ select CRYPTO_SIMD
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
@@ -1260,13 +1262,11 @@ config CRYPTO_CAST6
config CRYPTO_CAST6_AVX_X86_64
tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_CAST_COMMON
+ select CRYPTO_BLKCIPHER
select CRYPTO_CAST6
- select CRYPTO_LRW
+ select CRYPTO_CAST_COMMON
+ select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_SIMD
select CRYPTO_XTS
help
The CAST6 encryption algorithm (synonymous with CAST-256) is
@@ -1293,7 +1293,7 @@ config CRYPTO_DES_SPARC64
config CRYPTO_DES3_EDE_X86_64
tristate "Triple DES EDE cipher algorithm (x86-64)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_DES
help
Triple DES EDE (FIPS 46-3) algorithm.
@@ -1339,6 +1339,7 @@ config CRYPTO_SALSA20_586
tristate "Salsa20 stream cipher algorithm (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
@@ -1352,6 +1353,7 @@ config CRYPTO_SALSA20_X86_64
tristate "Salsa20 stream cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
@@ -1419,13 +1421,10 @@ config CRYPTO_SERPENT
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
- select CRYPTO_LRW
- select CRYPTO_XTS
+ select CRYPTO_SIMD
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1441,13 +1440,10 @@ config CRYPTO_SERPENT_SSE2_X86_64
config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
depends on X86 && !64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
- select CRYPTO_LRW
- select CRYPTO_XTS
+ select CRYPTO_SIMD
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1463,12 +1459,10 @@ config CRYPTO_SERPENT_SSE2_586
config CRYPTO_SERPENT_AVX_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
select CRYPTO_SERPENT
- select CRYPTO_LRW
+ select CRYPTO_SIMD
select CRYPTO_XTS
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1485,14 +1479,7 @@ config CRYPTO_SERPENT_AVX_X86_64
config CRYPTO_SERPENT_AVX2_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX2)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
- select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_SERPENT
select CRYPTO_SERPENT_AVX_X86_64
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1505,6 +1492,45 @@ config CRYPTO_SERPENT_AVX2_X86_64
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
+config CRYPTO_SM4
+ tristate "SM4 cipher algorithm"
+ select CRYPTO_ALGAPI
+ help
+ SM4 cipher algorithms (OSCCA GB/T 32907-2016).
+
+ SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+ Organization of State Commercial Administration of China (OSCCA)
+ as an authorized cryptographic algorithms for the use within China.
+
+ SMS4 was originally created for use in protecting wireless
+ networks, and is mandated in the Chinese National Standard for
+ Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure)
+ (GB.15629.11-2003).
+
+ The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
+ standardized through TC 260 of the Standardization Administration
+ of the People's Republic of China (SAC).
+
+ The input, output, and key of SMS4 are each 128 bits.
+
+ See also: <https://eprint.iacr.org/2008/329.pdf>
+
+ If unsure, say N.
+
+config CRYPTO_SPECK
+ tristate "Speck cipher algorithm"
+ select CRYPTO_ALGAPI
+ help
+ Speck is a lightweight block cipher that is tuned for optimal
+ performance in software (rather than hardware).
+
+ Speck may not be as secure as AES, and should only be used on systems
+ where AES is not fast enough.
+
+ See also: <https://eprint.iacr.org/2013/404.pdf>
+
+ If unsure, say N.
+
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
@@ -1578,12 +1604,10 @@ config CRYPTO_TWOFISH_X86_64
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_GLUE_HELPER_X86
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Twofish cipher algorithm (x86_64, 3-way parallel).
@@ -1601,15 +1625,12 @@ config CRYPTO_TWOFISH_X86_64_3WAY
config CRYPTO_TWOFISH_AVX_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX)"
depends on X86 && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_CRYPTD
- select CRYPTO_ABLK_HELPER
+ select CRYPTO_BLKCIPHER
select CRYPTO_GLUE_HELPER_X86
+ select CRYPTO_SIMD
select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64
select CRYPTO_TWOFISH_X86_64_3WAY
- select CRYPTO_LRW
- select CRYPTO_XTS
help
Twofish cipher algorithm (x86_64/AVX).
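
A pattern repeats throughout the Kconfig hunks above: the x86 glue modules drop CRYPTO_ALGAPI/CRYPTO_CRYPTD/CRYPTO_ABLK_HELPER in favour of CRYPTO_BLKCIPHER plus CRYPTO_SIMD, and stop force-selecting CRYPTO_LRW/CRYPTO_XTS. On the C side, such glue typically registers a SIMD wrapper around an internal skcipher via the crypto_simd helpers. A hedged sketch of that registration shape (every "mycipher" name is a placeholder, not from this patch):

/* Hedged sketch: wrapping an internal skcipher with the simd helper.
 * The inner "__cbc-mycipher-avx" algorithm must already be registered
 * with CRYPTO_ALG_INTERNAL set; simd_skcipher_create_compat() exposes
 * it as an async "cbc(mycipher)" that defers to cryptd when SIMD is
 * unusable, replacing the removed ablk_helper machinery.
 */
#include <crypto/internal/simd.h>
#include <linux/module.h>

static struct simd_skcipher_alg *mycipher_simd;

static int __init mycipher_glue_init(void)
{
	struct simd_skcipher_alg *simd;

	simd = simd_skcipher_create_compat("cbc(mycipher)",
					   "cbc-mycipher-avx",
					   "__cbc-mycipher-avx");
	if (IS_ERR(simd))
		return PTR_ERR(simd);

	mycipher_simd = simd;
	return 0;
}

static void __exit mycipher_glue_exit(void)
{
	simd_skcipher_free(mycipher_simd);
}

module_init(mycipher_glue_init);
module_exit(mycipher_glue_exit);
MODULE_LICENSE("GPL");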
diff --git a/crypto/Makefile b/crypto/Makefile
index d674884b..3a5f0161 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -35,14 +35,12 @@ dh_generic-y := dh.o
dh_generic-y += dh_helper.o
obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
-$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
-$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
-$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
-clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
-clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
-
-rsa_generic-y := rsapubkey-asn1.o
-rsa_generic-y += rsaprivkey-asn1.o
+$(obj)/rsapubkey.asn1.o: $(obj)/rsapubkey.asn1.c $(obj)/rsapubkey.asn1.h
+$(obj)/rsaprivkey.asn1.o: $(obj)/rsaprivkey.asn1.c $(obj)/rsaprivkey.asn1.h
+$(obj)/rsa_helper.o: $(obj)/rsapubkey.asn1.h $(obj)/rsaprivkey.asn1.h
+
+rsa_generic-y := rsapubkey.asn1.o
+rsa_generic-y += rsaprivkey.asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
@@ -78,6 +76,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_CFB) += cfb.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_CTS) += cts.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
@@ -99,6 +98,8 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
+CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
+obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
@@ -109,6 +110,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
+obj-$(CONFIG_CRYPTO_SPECK) += speck.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -148,6 +150,5 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
-obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
crypto_simd-y := simd.o
obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
deleted file mode 100644
index 1441f07d..00000000
--- a/crypto/ablk_helper.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Shared async block cipher helpers
- *
- * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
- *
- * Based on aesni-intel_glue.c by:
- * Copyright (C) 2008, Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <crypto/algapi.h>
-#include <crypto/cryptd.h>
-#include <crypto/ablk_helper.h>
-#include <asm/simd.h>
-
-int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
- int err;
-
- crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
- & CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(child, key, key_len);
- crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
- & CRYPTO_TFM_RES_MASK);
- return err;
-}
-EXPORT_SYMBOL_GPL(ablk_set_key);
-
-int __ablk_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct blkcipher_desc desc;
-
- desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
- desc.info = req->info;
- desc.flags = 0;
-
- return crypto_blkcipher_crt(desc.tfm)->encrypt(
- &desc, req->dst, req->src, req->nbytes);
-}
-EXPORT_SYMBOL_GPL(__ablk_encrypt);
-
-int ablk_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!may_use_simd() ||
- (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
- struct ablkcipher_request *cryptd_req =
- ablkcipher_request_ctx(req);
-
- *cryptd_req = *req;
- ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
-
- return crypto_ablkcipher_encrypt(cryptd_req);
- } else {
- return __ablk_encrypt(req);
- }
-}
-EXPORT_SYMBOL_GPL(ablk_encrypt);
-
-int ablk_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-
- if (!may_use_simd() ||
- (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
- struct ablkcipher_request *cryptd_req =
- ablkcipher_request_ctx(req);
-
- *cryptd_req = *req;
- ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
-
- return crypto_ablkcipher_decrypt(cryptd_req);
- } else {
- struct blkcipher_desc desc;
-
- desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
- desc.info = req->info;
- desc.flags = 0;
-
- return crypto_blkcipher_crt(desc.tfm)->decrypt(
- &desc, req->dst, req->src, req->nbytes);
- }
-}
-EXPORT_SYMBOL_GPL(ablk_decrypt);
-
-void ablk_exit(struct crypto_tfm *tfm)
-{
- struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_free_ablkcipher(ctx->cryptd_tfm);
-}
-EXPORT_SYMBOL_GPL(ablk_exit);
-
-int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
-{
- struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
- struct cryptd_ablkcipher *cryptd_tfm;
-
- cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- ctx->cryptd_tfm = cryptd_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(&cryptd_tfm->base);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ablk_init_common);
-
-int ablk_init(struct crypto_tfm *tfm)
-{
- char drv_name[CRYPTO_MAX_ALG_NAME];
-
- snprintf(drv_name, sizeof(drv_name), "__driver-%s",
- crypto_tfm_alg_driver_name(tfm));
-
- return ablk_init_common(tfm, drv_name);
-}
-EXPORT_SYMBOL_GPL(ablk_init);
-
-MODULE_LICENSE("GPL");
diff --git a/crypto/aead.c b/crypto/aead.c
index f794b30a..60b3bbe9 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -54,11 +54,18 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen)
{
unsigned long alignmask = crypto_aead_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return setkey_unaligned(tfm, key, keylen);
+ err = setkey_unaligned(tfm, key, keylen);
+ else
+ err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setkey);
@@ -93,6 +100,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
+ crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+
aead->authsize = alg->maxauthsize;
if (alg->exit)
@@ -295,7 +304,7 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher2();
+ ctx->sknull = crypto_get_default_null_skcipher();
err = PTR_ERR(ctx->sknull);
if (IS_ERR(ctx->sknull))
goto out;
@@ -315,7 +324,7 @@ out:
return err;
drop_null:
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -325,7 +334,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 35d4dcea..7846c0c2 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- const u32 forbidden = CRYPTO_ALG_INTERNAL;
+ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -164,6 +164,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < sizeof(*sa))
return -EINVAL;
+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);
- private = type->bind(sa->salg_name,
- sa->salg_feat & ~forbidden,
- sa->salg_mask & ~forbidden);
+ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
@@ -733,9 +735,9 @@ void af_alg_wmem_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
- POLLRDNORM |
- POLLRDBAND);
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+ EPOLLRDNORM |
+ EPOLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
@@ -798,9 +800,9 @@ void af_alg_data_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLRDNORM |
- POLLRDBAND);
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+ EPOLLRDNORM |
+ EPOLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
@@ -1062,22 +1064,22 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb);
/**
* af_alg_poll - poll system call handler
*/
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
if (!ctx->more || ctx->used)
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (af_alg_writable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
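
Two user-visible effects of the af_alg.c hunk above: bind() now rejects any salg_feat/salg_mask bits other than CRYPTO_ALG_KERN_DRIVER_ONLY with EINVAL, and the poll path reports events as __poll_t/EPOLL* values. A hedged userspace sketch of a conforming bind (sockaddr_alg layout from <linux/if_alg.h>; the algorithm choice is illustrative):

/* Hedged sketch: binding an AF_ALG socket. Portable callers leave
 * salg_feat and salg_mask zero, since any bit outside
 * CRYPTO_ALG_KERN_DRIVER_ONLY now fails bind() with EINVAL.
 */
#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",		/* the af_alg type name */
		.salg_name   = "sha256",	/* cra_name or cra_driver_name */
		/* .salg_feat / .salg_mask left 0: no flag games allowed */
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("AF_ALG");
		return 1;
	}
	puts("bound sha256");
	close(tfmfd);
	return 0;
}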
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3a35d67d..a64c1431 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (nbytes && walk->offset & alignmask && !err) {
walk->offset = ALIGN(walk->offset, alignmask + 1);
- walk->data += walk->offset;
-
nbytes = min(nbytes,
((unsigned int)(PAGE_SIZE)) - walk->offset);
walk->entrylen -= nbytes;
- return nbytes;
+ if (nbytes) {
+ walk->data += walk->offset;
+ return nbytes;
+ }
}
if (walk->flags & CRYPTO_ALG_ASYNC)
@@ -193,11 +194,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return ahash_setkey_unaligned(tfm, key, keylen);
+ err = ahash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = tfm->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return tfm->setkey(tfm, key, keylen);
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
@@ -368,7 +376,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -434,25 +447,12 @@ static int ahash_def_finup(struct ahash_request *req)
return ahash_def_finup_finish1(req, err);
}
-static int ahash_no_export(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int ahash_no_import(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
- hash->has_setkey = false;
- hash->export = ahash_no_export;
- hash->import = ahash_no_import;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);
@@ -462,15 +462,14 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->final = alg->final;
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
+ hash->export = alg->export;
+ hash->import = alg->import;
if (alg->setkey) {
hash->setkey = alg->setkey;
- hash->has_setkey = true;
+ if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
}
- if (alg->export)
- hash->export = alg->export;
- if (alg->import)
- hash->import = alg->import;
return 0;
}
@@ -649,5 +648,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+ struct crypto_alg *alg = &halg->base;
+
+ if (alg->cra_type != &crypto_ahash_type)
+ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+ return __crypto_ahash_alg(alg)->setkey != NULL;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
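
The ahash.c hunk replaces per-subsystem has_setkey bookkeeping with the CRYPTO_TFM_NEED_KEY tfm flag: init_tfm sets it for keyed algorithms lacking CRYPTO_ALG_OPTIONAL_KEY, a successful setkey clears it, and crypto_ahash_digest() fails with -ENOKEY while it is set. A hedged in-kernel sketch of a caller that satisfies the flag (standard crypto_wait helpers assumed; data must live in linearly-mapped memory for the scatterlist):

/* Hedged sketch: one-shot HMAC digest via the ahash API. Without the
 * setkey call, crypto_ahash_digest() now returns -ENOKEY rather than
 * running an unkeyed transform.
 */
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int hmac_digest(const u8 *key, unsigned int keylen,
		       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen); /* clears NEED_KEY */
	if (err)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}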
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 9a636f96..2a0271b5 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -62,7 +62,7 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_priority < 0)
return -EINVAL;
- atomic_set(&alg->cra_refcnt, 1);
+ refcount_set(&alg->cra_refcnt, 1);
return crypto_set_driver_name(alg);
}
@@ -123,7 +123,6 @@ static void crypto_remove_instance(struct crypto_instance *inst,
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
@@ -236,7 +235,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (!larval->adult)
goto free_larval;
- atomic_set(&larval->alg.cra_refcnt, 1);
+ refcount_set(&larval->alg.cra_refcnt, 1);
memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
CRYPTO_MAX_ALG_NAME);
larval->alg.cra_priority = alg->cra_priority;
@@ -392,7 +391,6 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
alg->cra_flags |= CRYPTO_ALG_DEAD;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(alg, list, NULL);
@@ -411,7 +409,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
if (ret)
return ret;
- BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+ BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -470,7 +468,6 @@ int crypto_register_template(struct crypto_template *tmpl)
}
list_add(&tmpl->list, &crypto_template_list);
- crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
err = 0;
out:
up_write(&crypto_alg_sem);
@@ -497,12 +494,10 @@ void crypto_unregister_template(struct crypto_template *tmpl)
BUG_ON(err);
}
- crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
-
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, n, list, list) {
- BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
+ BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1);
crypto_free_instance(inst);
}
crypto_remove_final(&users);
@@ -548,9 +543,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
- if (unlikely(!crypto_mod_get(&inst->alg)))
- return -EAGAIN;
-
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(&inst->alg);
@@ -568,14 +560,9 @@ unlock:
goto err;
crypto_wait_for_test(larval);
-
- /* Remove instance if test failed */
- if (!(inst->alg.cra_flags & CRYPTO_ALG_TESTED))
- crypto_unregister_instance(inst);
err = 0;
err:
- crypto_mod_put(&inst->alg);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e9885a35..4b07edd5 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,6 @@
struct aead_tfm {
struct crypto_aead *aead;
- bool has_key;
struct crypto_skcipher *null_tfm;
};
@@ -398,7 +397,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -491,7 +490,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
return ERR_CAST(aead);
}
- null_tfm = crypto_get_default_null_skcipher2();
+ null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
@@ -509,7 +508,7 @@ static void aead_release(void *private)
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
kfree(tfm);
}
@@ -523,12 +522,8 @@ static int aead_setauthsize(void *private, unsigned int authsize)
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
struct aead_tfm *tfm = private;
- int err;
-
- err = crypto_aead_setkey(tfm->aead, key, keylen);
- tfm->has_key = !err;
- return err;
+ return crypto_aead_setkey(tfm->aead, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -589,7 +584,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 76d2e716..6c9b1927 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,11 +34,6 @@ struct hash_ctx {
struct ahash_request req;
};
-struct algif_hash_tfm {
- struct crypto_ahash *hash;
- bool has_key;
-};
-
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
@@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct algif_hash_tfm *tfm;
+ struct crypto_ahash *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = {
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- struct algif_hash_tfm *tfm;
- struct crypto_ahash *hash;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- hash = crypto_alloc_ahash(name, type, mask);
- if (IS_ERR(hash)) {
- kfree(tfm);
- return ERR_CAST(hash);
- }
-
- tfm->hash = hash;
-
- return tfm;
+ return crypto_alloc_ahash(name, type, mask);
}
static void hash_release(void *private)
{
- struct algif_hash_tfm *tfm = private;
-
- crypto_free_ahash(tfm->hash);
- kfree(tfm);
+ crypto_free_ahash(private);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct algif_hash_tfm *tfm = private;
- int err;
-
- err = crypto_ahash_setkey(tfm->hash, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_ahash_setkey(private, key, keylen);
}
static void hash_sock_destruct(struct sock *sk)
@@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk)
static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
- struct hash_ctx *ctx;
+ struct crypto_ahash *tfm = private;
struct alg_sock *ask = alg_sk(sk);
- struct algif_hash_tfm *tfm = private;
- struct crypto_ahash *hash = tfm->hash;
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ struct hash_ctx *ctx;
+ unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, hash);
+ ahash_request_set_tfm(&ctx->req, tfm);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
@@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
static int hash_accept_parent(void *private, struct sock *sk)
{
- struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *tfm = private;
- if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return hash_accept_parent_nokey(private, sk);
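
With algif_hash now reading CRYPTO_TFM_NEED_KEY straight off the tfm, accept() on a bound keyed-hash socket fails with ENOKEY until ALG_SET_KEY has been issued. A hedged userspace sketch (demo key, error handling elided for brevity):

/* Hedged sketch: HMAC over AF_ALG. The setsockopt() is mandatory; skip
 * it and accept() below fails with ENOKEY.
 */
#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "hmac(sha256)",
	};
	static const unsigned char key[32] = { 0x0b };	/* demo key */
	unsigned char digest[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Clears the tfm's NEED_KEY state; without it, ENOKEY. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	opfd = accept(tfmfd, NULL, 0);
	write(opfd, "hello", 5);
	read(opfd, digest, sizeof(digest));

	for (int i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	close(opfd);
	close(tfmfd);
	return 0;
}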
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index c5c47b68..c4e885df 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -38,11 +38,6 @@
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_tfm {
- struct crypto_skcipher *skcipher;
- bool has_key;
-};
-
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
@@ -50,8 +45,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
@@ -65,8 +59,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned int bs = crypto_skcipher_blocksize(tfm);
struct af_alg_async_req *areq;
int err = 0;
@@ -193,7 +186,6 @@ out:
return ret;
}
-
static struct proto_ops algif_skcipher_ops = {
.family = PF_ALG,
@@ -221,7 +213,7 @@ static int skcipher_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct skcipher_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -235,7 +227,7 @@ static int skcipher_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -314,41 +306,17 @@ static struct proto_ops algif_skcipher_ops_nokey = {
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- struct skcipher_tfm *tfm;
- struct crypto_skcipher *skcipher;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- skcipher = crypto_alloc_skcipher(name, type, mask);
- if (IS_ERR(skcipher)) {
- kfree(tfm);
- return ERR_CAST(skcipher);
- }
-
- tfm->skcipher = skcipher;
-
- return tfm;
+ return crypto_alloc_skcipher(name, type, mask);
}
static void skcipher_release(void *private)
{
- struct skcipher_tfm *tfm = private;
-
- crypto_free_skcipher(tfm->skcipher);
- kfree(tfm);
+ crypto_free_skcipher(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct skcipher_tfm *tfm = private;
- int err;
-
- err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_skcipher_setkey(private, key, keylen);
}
static void skcipher_sock_destruct(struct sock *sk)
@@ -357,8 +325,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
@@ -370,22 +337,21 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct skcipher_tfm *tfm = private;
- struct crypto_skcipher *skcipher = tfm->skcipher;
+ struct crypto_skcipher *tfm = private;
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
@@ -405,9 +371,9 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
static int skcipher_accept_parent(void *private, struct sock *sk)
{
- struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *tfm = private;
- if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return skcipher_accept_parent_nokey(private, sk);
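
algif_skcipher gets the same treatment: the bind private pointer is now the bare crypto_skcipher, and the NEED_KEY flag gates accept(). For reference, a hedged userspace sketch of a one-block cbc(aes) encryption over such a socket (hard-coded key and zero IV are demo-only):

/* Hedged sketch: skcipher over AF_ALG. The operation and IV travel as
 * control messages on the first sendmsg(); ciphertext comes back via
 * read().
 */
#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	static const unsigned char key[16] = "0123456789abcdef";
	unsigned char pt[16] = "single block!!!", ct[16];
	char cbuf[CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivm;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
	ivm = (void *)CMSG_DATA(cmsg);
	ivm->ivlen = 16;
	memset(ivm->iv, 0, 16);		/* all-zero IV, demo only */

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	for (int i = 0; i < 16; i++)
		printf("%02x", ct[i]);
	putchar('\n');
	close(opfd);
	close(tfmfd);
	return 0;
}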
diff --git a/crypto/api.c b/crypto/api.c
index 2a2479d1..0ee632bb 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (IS_ERR(larval))
return ERR_CAST(larval);
- atomic_set(&larval->alg.cra_refcnt, 2);
+ refcount_set(&larval->alg.cra_refcnt, 2);
down_write(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
@@ -193,19 +193,32 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
return alg;
}
-struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
+static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
+ u32 test = 0;
+
+ if (!((type | mask) & CRYPTO_ALG_TESTED))
+ test |= CRYPTO_ALG_TESTED;
down_read(&crypto_alg_sem);
- alg = __crypto_alg_lookup(name, type, mask);
+ alg = __crypto_alg_lookup(name, type | test, mask | test);
+ if (!alg && test) {
+ alg = __crypto_alg_lookup(name, type, mask);
+ if (alg && !crypto_is_larval(alg)) {
+ /* Test failed */
+ crypto_mod_put(alg);
+ alg = ERR_PTR(-ELIBBAD);
+ }
+ }
up_read(&crypto_alg_sem);
return alg;
}
-EXPORT_SYMBOL_GPL(crypto_alg_lookup);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
+static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
@@ -226,12 +239,13 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
alg = crypto_alg_lookup(name, type, mask);
}
- if (alg)
- return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
+ if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
+ alg = crypto_larval_wait(alg);
+ else if (!alg)
+ alg = crypto_larval_add(name, type, mask);
- return crypto_larval_add(name, type, mask);
+ return alg;
}
-EXPORT_SYMBOL_GPL(crypto_larval_lookup);
int crypto_probing_notify(unsigned long val, void *v)
{
@@ -253,11 +267,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
struct crypto_alg *larval;
int ok;
- if (!((type | mask) & CRYPTO_ALG_TESTED)) {
- type |= CRYPTO_ALG_TESTED;
- mask |= CRYPTO_ALG_TESTED;
- }
-
/*
* If the internal flag is set for a cipher, require a caller to
* to invoke the cipher with the internal flag to use that cipher.
@@ -485,20 +494,14 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
{
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
- crypto_alg_mod_lookup;
-
if (frontend) {
type &= frontend->maskclear;
mask &= frontend->maskclear;
type |= frontend->type;
mask |= frontend->maskset;
-
- if (frontend->lookup)
- lookup = frontend->lookup;
}
- return lookup(alg_name, type, mask);
+ return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);
diff --git a/crypto/asymmetric_keys/.gitignore b/crypto/asymmetric_keys/.gitignore
deleted file mode 100644
index ee328374..00000000
--- a/crypto/asymmetric_keys/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-asn1.[ch]
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index 4719aad5..d4b2e1b2 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -17,35 +17,30 @@ obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o
#
obj-$(CONFIG_X509_CERTIFICATE_PARSER) += x509_key_parser.o
x509_key_parser-y := \
- x509-asn1.o \
- x509_akid-asn1.o \
+ x509.asn1.o \
+ x509_akid.asn1.o \
x509_cert_parser.o \
x509_public_key.o
$(obj)/x509_cert_parser.o: \
- $(obj)/x509-asn1.h \
- $(obj)/x509_akid-asn1.h
+ $(obj)/x509.asn1.h \
+ $(obj)/x509_akid.asn1.h
-$(obj)/x509-asn1.o: $(obj)/x509-asn1.c $(obj)/x509-asn1.h
-$(obj)/x509_akid-asn1.o: $(obj)/x509_akid-asn1.c $(obj)/x509_akid-asn1.h
-
-clean-files += x509-asn1.c x509-asn1.h
-clean-files += x509_akid-asn1.c x509_akid-asn1.h
+$(obj)/x509.asn1.o: $(obj)/x509.asn1.c $(obj)/x509.asn1.h
+$(obj)/x509_akid.asn1.o: $(obj)/x509_akid.asn1.c $(obj)/x509_akid.asn1.h
#
# PKCS#7 message handling
#
obj-$(CONFIG_PKCS7_MESSAGE_PARSER) += pkcs7_message.o
pkcs7_message-y := \
- pkcs7-asn1.o \
+ pkcs7.asn1.o \
pkcs7_parser.o \
pkcs7_trust.o \
pkcs7_verify.o
-$(obj)/pkcs7_parser.o: $(obj)/pkcs7-asn1.h
-$(obj)/pkcs7-asn1.o: $(obj)/pkcs7-asn1.c $(obj)/pkcs7-asn1.h
-
-clean-files += pkcs7-asn1.c pkcs7-asn1.h
+$(obj)/pkcs7_parser.o: $(obj)/pkcs7.asn1.h
+$(obj)/pkcs7.asn1.o: $(obj)/pkcs7.asn1.c $(obj)/pkcs7.asn1.h
#
# PKCS#7 parser testing key
@@ -62,9 +57,7 @@ obj-$(CONFIG_SIGNED_PE_FILE_VERIFICATION) += verify_signed_pefile.o
verify_signed_pefile-y := \
verify_pefile.o \
mscode_parser.o \
- mscode-asn1.o
-
-$(obj)/mscode_parser.o: $(obj)/mscode-asn1.h $(obj)/mscode-asn1.h
-$(obj)/mscode-asn1.o: $(obj)/mscode-asn1.c $(obj)/mscode-asn1.h
+ mscode.asn1.o
-clean-files += mscode-asn1.c mscode-asn1.h
+$(obj)/mscode_parser.o: $(obj)/mscode.asn1.h $(obj)/mscode.asn1.h
+$(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 9492e1c2..83d2e9b3 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -16,7 +16,7 @@
#include <linux/oid_registry.h>
#include <crypto/pkcs7.h>
#include "verify_pefile.h"
-#include "mscode-asn1.h"
+#include "mscode.asn1.h"
/*
* Parse a Microsoft Individual Code Signing blob
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index a6dcaa65..0f134162 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -18,7 +18,7 @@
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "pkcs7_parser.h"
-#include "pkcs7-asn1.h"
+#include "pkcs7.asn1.h"
MODULE_DESCRIPTION("PKCS#7 parser");
MODULE_AUTHOR("Red Hat, Inc.");
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 1f4e25f1..598906b1 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
pr_devel("sinfo %u: Direct signer is key %x\n",
sinfo->index, key_serial(key));
x509 = NULL;
+ sig = sinfo->sig;
goto matched;
}
if (PTR_ERR(key) != -ENOKEY)
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 39e6de0c..97c77f66 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -270,7 +270,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
sinfo->index);
return 0;
}
- ret = public_key_verify_signature(p->pub, p->sig);
+ ret = public_key_verify_signature(p->pub, x509->sig);
if (ret < 0)
return ret;
x509->signer = p;
@@ -366,8 +366,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
*
* (*) -EBADMSG if some part of the message was invalid, or:
*
- * (*) 0 if no signature chains were found to be blacklisted or to contain
- * unsupported crypto, or:
+ * (*) 0 if a signature chain passed verification, or:
*
* (*) -EKEYREJECTED if a blacklisted key was encountered, or:
*
@@ -423,8 +422,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7,
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
ret = pkcs7_verify_one(pkcs7, sinfo);
- if (sinfo->blacklisted && actual_ret == -ENOPKG)
- actual_ret = -EKEYREJECTED;
+ if (sinfo->blacklisted) {
+ if (actual_ret == -ENOPKG)
+ actual_ret = -EKEYREJECTED;
+ continue;
+ }
if (ret < 0) {
if (ret == -ENOPKG) {
sinfo->unsupported_crypto = true;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index de996586..e929fe1e 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -79,9 +79,11 @@ int public_key_verify_signature(const struct public_key *pkey,
BUG_ON(!pkey);
BUG_ON(!sig);
- BUG_ON(!sig->digest);
BUG_ON(!sig->s);
+ if (!sig->digest)
+ return -ENOPKG;
+
alg_name = sig->pkey_algo;
if (strcmp(sig->pkey_algo, "rsa") == 0) {
/* The data wangled by the RSA algorithm is typically padded
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 86fb6850..7c93c772 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup);
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
* matching parent certificate in the trusted list, -EKEYREJECTED if the
- * signature check fails or the key is blacklisted and some other error if
- * there is a matching certificate but the signature check cannot be performed.
+ * signature check fails or the key is blacklisted, -ENOPKG if the signature
+ * uses unsupported crypto, or some other error if there is a matching
+ * certificate but the signature check cannot be performed.
*/
int restrict_link_by_signature(struct key *dest_keyring,
const struct key_type *type,
@@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring,
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
+ if (!sig)
+ return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1])
return -ENOKEY;
@@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring,
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
+ if (!sig)
+ return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1])
return -ENOKEY;
@@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring,
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
- * -EKEYREJECTED if the signature check fails, and some other error if
- * there is a matching certificate but the signature check cannot be
- * performed.
+ * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+ * unsupported crypto, or some other error if there is a matching certificate
+ * but the signature check cannot be performed.
*/
int restrict_link_by_key_or_keyring(struct key *dest_keyring,
const struct key_type *type,
@@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
- * -EKEYREJECTED if the signature check fails, and some other error if
- * there is a matching certificate but the signature check cannot be
- * performed.
+ * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+ * unsupported crypto, or some other error if there is a matching certificate
+ * but the signature check cannot be performed.
*/
int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
const struct key_type *type,
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index ce2df8c9..7d81e6bb 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -17,8 +17,8 @@
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "x509_parser.h"
-#include "x509-asn1.h"
-#include "x509_akid-asn1.h"
+#include "x509.asn1.h"
+#include "x509_akid.asn1.h"
struct x509_parse_context {
struct x509_certificate *cert; /* Certificate being constructed */
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 875470b0..d3d6d72f 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -329,7 +329,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -363,7 +363,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0cf5fefd..15f91ddd 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -352,7 +352,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -389,7 +389,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6c43a0a1..01c0d4aa 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -18,7 +18,6 @@
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index a02286bf..32ddd483 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index df5c7262..66169c17 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -16,8 +16,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 058c8d75..c8e5ec69 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -13,8 +13,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
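
The next diff introduces crypto/cfb.c. As its header comment explains, CFB builds a keystream by block-encrypting the IV and then each previous ciphertext block, XORing it into the data, so decryption also needs only the cipher's encrypt direction. A hedged standalone sketch of that full-block construction, mirroring crypto_cfb_encrypt_segment()/crypto_cfb_decrypt_segment() below (toy_encrypt() is a stand-in, not a real cipher):

/* Hedged sketch of full-block CFB. Only the mode logic is the point;
 * toy_encrypt() is a placeholder for a real block cipher's encrypt.
 */
#include <stdint.h>
#include <string.h>

#define BSIZE 16

static void toy_encrypt(const uint8_t in[BSIZE], uint8_t out[BSIZE])
{
	for (int i = 0; i < BSIZE; i++)	/* NOT a real cipher */
		out[i] = (uint8_t)((in[i] ^ 0xA5) + i);
}

static void cfb_encrypt(uint8_t *buf, size_t nblocks, uint8_t iv[BSIZE])
{
	uint8_t stream[BSIZE];

	while (nblocks--) {
		toy_encrypt(iv, stream);		/* pad = E(iv) */
		for (int i = 0; i < BSIZE; i++)
			buf[i] ^= stream[i];		/* ct = pt ^ pad */
		memcpy(iv, buf, BSIZE);			/* next iv = ct */
		buf += BSIZE;
	}
}

static void cfb_decrypt(uint8_t *buf, size_t nblocks, uint8_t iv[BSIZE])
{
	uint8_t stream[BSIZE], ct[BSIZE];

	while (nblocks--) {
		toy_encrypt(iv, stream);		/* same pad: E only */
		memcpy(ct, buf, BSIZE);			/* save ct first */
		for (int i = 0; i < BSIZE; i++)
			buf[i] ^= stream[i];		/* pt = ct ^ pad */
		memcpy(iv, ct, BSIZE);			/* next iv = ct */
		buf += BSIZE;
	}
}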
diff --git a/crypto/cfb.c b/crypto/cfb.c
new file mode 100644
index 00000000..94ee39be
--- /dev/null
+++ b/crypto/cfb.c
@@ -0,0 +1,353 @@
+//SPDX-License-Identifier: GPL-2.0
+/*
+ * CFB: Cipher FeedBack mode
+ *
+ * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
+ *
+ * CFB is a stream cipher mode which is layered on to a block
+ * encryption scheme. It works very much like a one time pad where
+ * the pad is generated initially from the encrypted IV and then
+ * subsequently from the encrypted previous block of ciphertext. The
+ * pad is XOR'd into the plain text to get the final ciphertext.
+ *
+ * The scheme of CFB is best described by wikipedia:
+ *
+ * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
+ *
+ * Note that since the pad for both encryption and decryption is
+ * generated by an encryption operation, CFB never uses the block
+ * decryption function.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+struct crypto_cfb_ctx {
+ struct crypto_cipher *child;
+};
+
+static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *child = ctx->child;
+
+ return crypto_cipher_blocksize(child);
+}
+
+static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
+ const u8 *src, u8 *dst)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_cipher_encrypt_one(ctx->child, dst, src);
+}
+
+/* final encrypt and decrypt is the same */
+static void crypto_cfb_final(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ u8 tmp[bsize + alignmask];
+ u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+ unsigned int nbytes = walk->nbytes;
+
+ crypto_cfb_encrypt_one(tfm, iv, stream);
+ crypto_xor_cpy(dst, stream, src, nbytes);
+}
+
+static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, dst);
+ crypto_xor(dst, src, bsize);
+ memcpy(iv, dst, bsize);
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+ u8 tmp[bsize];
+
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, tmp);
+ crypto_xor(src, tmp, bsize);
+ iv = src;
+
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
+ unsigned int bsize = crypto_cfb_bsize(tfm);
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes >= bsize) {
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ err = crypto_cfb_encrypt_inplace(&walk, tfm);
+ else
+ err = crypto_cfb_encrypt_segment(&walk, tfm);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ if (walk.nbytes) {
+ crypto_cfb_final(&walk, tfm);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
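+	/* P[i] = C[i] XOR E(C[i-1]); the keystream is computed into dst first */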
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, dst);
+		crypto_xor(dst, src, bsize);
+ iv = src;
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+ u8 tmp[bsize];
+
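+	/* stash each ciphertext block in iv before it is overwritten with plaintext */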
+ do {
+ crypto_cfb_encrypt_one(tfm, iv, tmp);
+ memcpy(iv, src, bsize);
+ crypto_xor(src, tmp, bsize);
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
+ struct crypto_skcipher *tfm)
+{
+ if (walk->src.virt.addr == walk->dst.virt.addr)
+ return crypto_cfb_decrypt_inplace(walk, tfm);
+ else
+ return crypto_cfb_decrypt_segment(walk, tfm);
+}
+
+static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err;
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int crypto_cfb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_walk walk;
+ const unsigned int bsize = crypto_cfb_bsize(tfm);
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes >= bsize) {
+ err = crypto_cfb_decrypt_blocks(&walk, tfm);
+ err = skcipher_walk_done(&walk, err);
+ }
+
+ if (walk.nbytes) {
+ crypto_cfb_final(&walk, tfm);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *cipher;
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_cipher(ctx->child);
+}
+
+static void crypto_cfb_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct crypto_spawn *spawn;
+ struct crypto_alg *alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ goto err_free_inst;
+
+ mask = CRYPTO_ALG_TYPE_MASK |
+ crypto_requires_off(algt->type, algt->mask,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+ err = PTR_ERR(alg);
+ if (IS_ERR(alg))
+ goto err_free_inst;
+
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
+ if (err)
+ goto err_drop_spawn;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+	/* we're a stream cipher, independent of the underlying cipher's cra_blocksize */
+ inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
+
+ inst->alg.init = crypto_cfb_init_tfm;
+ inst->alg.exit = crypto_cfb_exit_tfm;
+
+ inst->alg.setkey = crypto_cfb_setkey;
+ inst->alg.encrypt = crypto_cfb_encrypt;
+ inst->alg.decrypt = crypto_cfb_decrypt;
+
+ inst->free = crypto_cfb_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
+ kfree(inst);
+ goto out;
+}
+
+static struct crypto_template crypto_cfb_tmpl = {
+ .name = "cfb",
+ .create = crypto_cfb_create,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_cfb_module_init(void)
+{
+ return crypto_register_template(&crypto_cfb_tmpl);
+}
+
+static void __exit crypto_cfb_module_exit(void)
+{
+ crypto_unregister_template(&crypto_cfb_tmpl);
+}
+
+module_init(crypto_cfb_module_init);
+module_exit(crypto_cfb_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CFB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("cfb");
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 4a45fa48..e451c3cb 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -9,44 +9,38 @@
* (at your option) any later version.
*/
+#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
- u8 stream[CHACHA20_BLOCK_SIZE];
+ u32 stream[CHACHA20_BLOCK_WORDS];
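+	/* the keystream is buffered as u32 words, as chacha20_block() now produces */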
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
+ crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, bytes);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
}
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
{
- static const char constant[16] = "expand 32-byte k";
-
- state[0] = le32_to_cpuvp(constant + 0);
- state[1] = le32_to_cpuvp(constant + 4);
- state[2] = le32_to_cpuvp(constant + 8);
- state[3] = le32_to_cpuvp(constant + 12);
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
state[4] = ctx->key[0];
state[5] = ctx->key[1];
state[6] = ctx->key[2];
@@ -55,10 +49,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
state[9] = ctx->key[5];
state[10] = ctx->key[6];
state[11] = ctx->key[7];
- state[12] = le32_to_cpuvp(iv + 0);
- state[13] = le32_to_cpuvp(iv + 4);
- state[14] = le32_to_cpuvp(iv + 8);
- state[15] = le32_to_cpuvp(iv + 12);
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_init);
@@ -72,7 +66,7 @@ int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32));
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
@@ -111,7 +105,6 @@ static struct skcipher_alg alg = {
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_alignmask = sizeof(u32) - 1,
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA20_KEY_SIZE,
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index aa2a25fc..718cbce8 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 4c0a0e27..37232039 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -146,6 +146,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct chksum_ctx),
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index bd43cf5b..addca7ba 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -32,7 +32,9 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#define CRYPTD_MAX_CPU_QLEN 1000
+static unsigned int cryptd_max_cpu_qlen = 1000;
+module_param(cryptd_max_cpu_qlen, uint, 0);
+MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max queue depth");
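+/* e.g. "modprobe cryptd cryptd_max_cpu_qlen=500", or boot with
+ * "cryptd.cryptd_max_cpu_qlen=500" when cryptd is built in */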
struct cryptd_cpu_queue {
struct crypto_queue queue;
@@ -116,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
}
+ pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
}
@@ -893,10 +896,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
@@ -911,7 +913,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
- inst->alg.setkey = cryptd_hash_setkey;
+ if (crypto_shash_alg_has_setkey(salg))
+ inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
@@ -1372,7 +1375,7 @@ static int __init cryptd_init(void)
{
int err;
- err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
+ err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
if (err)
return err;
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 61e7c4e0..992e8d8d 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -15,13 +15,50 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
-#include <crypto/internal/hash.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
/**
+ * crypto_finalize_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+static void crypto_finalize_request(struct crypto_engine *engine,
+ struct crypto_async_request *req, int err)
+{
+ unsigned long flags;
+ bool finalize_cur_req = false;
+ int ret;
+ struct crypto_engine_ctx *enginectx;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == req)
+ finalize_cur_req = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (finalize_cur_req) {
+ enginectx = crypto_tfm_ctx(req->tfm);
+ if (engine->cur_req_prepared &&
+ enginectx->op.unprepare_request) {
+ ret = enginectx->op.unprepare_request(engine, req);
+ if (ret)
+ dev_err(engine->dev, "failed to unprepare request\n");
+ }
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ engine->cur_req = NULL;
+ engine->cur_req_prepared = false;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
+
+ req->complete(req, err);
+
+ kthread_queue_work(engine->kworker, &engine->pump_requests);
+}
+
+/**
* crypto_pump_requests - dequeue one request from engine queue to process
* @engine: the hardware engine
* @in_kthread: true if we are in the context of the request pump thread
@@ -34,11 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
- struct ahash_request *hreq;
- struct ablkcipher_request *breq;
unsigned long flags;
bool was_busy = false;
- int ret, rtype;
+ int ret;
+ struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -94,7 +130,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
- rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
/* Until here we get the request need to be encrypted successfully */
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
@@ -104,57 +139,31 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
}
- switch (rtype) {
- case CRYPTO_ALG_TYPE_AHASH:
- hreq = ahash_request_cast(engine->cur_req);
- if (engine->prepare_hash_request) {
- ret = engine->prepare_hash_request(engine, hreq);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err;
- }
- engine->cur_req_prepared = true;
- }
- ret = engine->hash_one_request(engine, hreq);
- if (ret) {
- dev_err(engine->dev, "failed to hash one request from queue\n");
- goto req_err;
- }
- return;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- breq = ablkcipher_request_cast(engine->cur_req);
- if (engine->prepare_cipher_request) {
- ret = engine->prepare_cipher_request(engine, breq);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err;
- }
- engine->cur_req_prepared = true;
- }
- ret = engine->cipher_one_request(engine, breq);
+ enginectx = crypto_tfm_ctx(async_req->tfm);
+
+ if (enginectx->op.prepare_request) {
+ ret = enginectx->op.prepare_request(engine, async_req);
if (ret) {
- dev_err(engine->dev, "failed to cipher one request from queue\n");
+ dev_err(engine->dev, "failed to prepare request: %d\n",
+ ret);
goto req_err;
}
- return;
- default:
- dev_err(engine->dev, "failed to prepare request of unknown type\n");
- return;
+ engine->cur_req_prepared = true;
+ }
+ if (!enginectx->op.do_one_request) {
+ dev_err(engine->dev, "failed to do request\n");
+ ret = -EINVAL;
+ goto req_err;
}
+ ret = enginectx->op.do_one_request(engine, async_req);
+ if (ret) {
+ dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
+ goto req_err;
+ }
+ return;
req_err:
- switch (rtype) {
- case CRYPTO_ALG_TYPE_AHASH:
- hreq = ahash_request_cast(engine->cur_req);
- crypto_finalize_hash_request(engine, hreq, ret);
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- breq = ablkcipher_request_cast(engine->cur_req);
- crypto_finalize_cipher_request(engine, breq, ret);
- break;
- }
+ crypto_finalize_request(engine, async_req, ret);
return;
out:
@@ -170,13 +179,12 @@ static void crypto_pump_work(struct kthread_work *work)
}
/**
- * crypto_transfer_cipher_request - transfer the new request into the
- * enginequeue
+ * crypto_transfer_request - transfer the new request into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req,
+static int crypto_transfer_request(struct crypto_engine *engine,
+ struct crypto_async_request *req,
bool need_pump)
{
unsigned long flags;
@@ -189,7 +197,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
return -ESHUTDOWN;
}
- ret = ablkcipher_enqueue_request(&engine->queue, req);
+ ret = crypto_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -197,102 +205,131 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
/**
- * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * crypto_transfer_request_to_engine - transfer one request to list
* into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+ struct crypto_async_request *req)
{
- return crypto_transfer_cipher_request(engine, req, true);
+ return crypto_transfer_request(engine, req, true);
}
-EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
/**
- * crypto_transfer_hash_request - transfer the new request into the
- * enginequeue
+ * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
+ * into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
+ * TODO: Remove this function when skcipher conversion is finished
*/
-int crypto_transfer_hash_request(struct crypto_engine *engine,
- struct ahash_request *req, bool need_pump)
+int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&engine->queue_lock, flags);
-
- if (!engine->running) {
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- return -ESHUTDOWN;
- }
-
- ret = ahash_enqueue_request(&engine->queue, req);
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
- if (!engine->busy && need_pump)
- kthread_queue_work(engine->kworker, &engine->pump_requests);
+/**
+ * crypto_transfer_aead_request_to_engine - transfer one aead_request
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request to be queued into the engine
+ */
+int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
+ struct aead_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- return ret;
+/**
+ * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request to be queued into the engine
+ */
+int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
+ struct akcipher_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
}
-EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
- * crypto_transfer_hash_request_to_engine - transfer one request to list
- * into the engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one ahash_request
+ * into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req)
{
- return crypto_transfer_hash_request(engine, req, true);
+ return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
- * crypto_finalize_cipher_request - finalize one request if the request is done
+ * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request to be queued into the engine
+ */
+int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
+ struct skcipher_request *req)
+{
+ return crypto_transfer_request_to_engine(engine, &req->base);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
+
+/**
+ * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
+ * the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
+ * TODO: Remove this function when skcipher conversion is finished
*/
-void crypto_finalize_cipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err)
+void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err)
{
- unsigned long flags;
- bool finalize_cur_req = false;
- int ret;
-
- spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == &req->base)
- finalize_cur_req = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
-
- if (finalize_cur_req) {
- if (engine->cur_req_prepared &&
- engine->unprepare_cipher_request) {
- ret = engine->unprepare_cipher_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->cur_req = NULL;
- engine->cur_req_prepared = false;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- }
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
- req->base.complete(&req->base, err);
+/**
+ * crypto_finalize_aead_request - finalize one aead_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+void crypto_finalize_aead_request(struct crypto_engine *engine,
+ struct aead_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
- kthread_queue_work(engine->kworker, &engine->pump_requests);
+/**
+ * crypto_finalize_akcipher_request - finalize one akcipher_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+ struct akcipher_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
}
-EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
- * crypto_finalize_hash_request - finalize one request if the request is done
+ * crypto_finalize_hash_request - finalize one ahash_request if
+ * the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
@@ -300,35 +337,25 @@ EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err)
{
- unsigned long flags;
- bool finalize_cur_req = false;
- int ret;
-
- spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == &req->base)
- finalize_cur_req = true;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
-
- if (finalize_cur_req) {
- if (engine->cur_req_prepared &&
- engine->unprepare_hash_request) {
- ret = engine->unprepare_hash_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- spin_lock_irqsave(&engine->queue_lock, flags);
- engine->cur_req = NULL;
- engine->cur_req_prepared = false;
- spin_unlock_irqrestore(&engine->queue_lock, flags);
- }
-
- req->base.complete(&req->base, err);
-
- kthread_queue_work(engine->kworker, &engine->pump_requests);
+ return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
+ * crypto_finalize_skcipher_request - finalize one skcipher_request if
+ * the request is done
+ * @engine: the hardware engine
+ * @req: the request to be finalized
+ * @err: error number
+ */
+void crypto_finalize_skcipher_request(struct crypto_engine *engine,
+ struct skcipher_request *req, int err)
+{
+ return crypto_finalize_request(engine, &req->base, err);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
+
+/**
* crypto_engine_start - start the hardware engine
* @engine: the hardware engine need to be started
*
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 0dbe2be7..0e89b545 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -169,7 +169,7 @@ static int crypto_report_one(struct crypto_alg *alg,
ualg->cru_type = 0;
ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
- ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
@@ -271,7 +271,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
return -ENOENT;
err = -ENOMEM;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
goto drop_alg;
@@ -387,7 +387,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
goto drop_alg;
err = -EBUSY;
- if (atomic_read(&alg->cra_refcnt) > 2)
+ if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
err = crypto_unregister_instance((struct crypto_instance *)alg);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 4faa2781..466a112a 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
if (!drbg)
return;
kzfree(drbg->Vbuf);
+ drbg->Vbuf = NULL;
drbg->V = NULL;
kzfree(drbg->Cbuf);
+ drbg->Cbuf = NULL;
drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 633a9bcd..9c066b5a 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
- err = -EFAULT;
+ return -EFAULT;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
@@ -1025,9 +1025,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
{
int ret = 0;
struct ecc_point *product, *pk;
- u64 priv[ndigits];
- u64 rand_z[ndigits];
- unsigned int nbytes;
+ u64 *priv, *rand_z;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !public_key || !curve) {
@@ -1035,14 +1033,22 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto out;
}
- nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ priv = kmalloc_array(ndigits, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
- get_random_bytes(rand_z, nbytes);
+ rand_z = kmalloc_array(ndigits, sizeof(*rand_z), GFP_KERNEL);
+ if (!rand_z) {
+ ret = -ENOMEM;
+ goto kfree_out;
+ }
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
- goto out;
+ goto kfree_out;
}
product = ecc_alloc_point(ndigits);
@@ -1051,6 +1057,8 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto err_alloc_product;
}
+ get_random_bytes(rand_z, ndigits << ECC_DIGITS_TO_BYTES_SHIFT);
+
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
@@ -1065,6 +1073,9 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_free_point(product);
err_alloc_product:
ecc_free_point(pk);
+kfree_out:
+ kzfree(priv);
+ kzfree(rand_z);
out:
return ret;
}
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 3aca0933..d2ec33f0 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -89,12 +89,19 @@ static int ecdh_compute_value(struct kpp_request *req)
if (!shared_secret)
goto free_pubkey;
- copied = sg_copy_to_buffer(req->src, 1, public_key,
- public_key_sz);
- if (copied != public_key_sz) {
- ret = -EINVAL;
+	/* from here on, any failure means invalid parameters */
+	ret = -EINVAL;
+
+	/* the public key is exactly two coordinates (one curve point) */
+ if (public_key_sz != req->src_len)
+ goto free_all;
+
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ public_key_sz),
+ public_key, public_key_sz);
+ if (copied != public_key_sz)
goto free_all;
- }
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
ctx->private_key, public_key,
@@ -111,7 +118,11 @@ static int ecdh_compute_value(struct kpp_request *req)
if (ret < 0)
goto free_all;
- copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
+	/* the caller may want fewer bytes than we computed */
+ nbytes = min_t(size_t, nbytes, req->dst_len);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
+ nbytes),
+ buf, nbytes);
if (copied != nbytes)
ret = -EINVAL;
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index e3d889b1..45819e60 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -118,8 +118,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -127,9 +125,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 8589681f..0ad879e1 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1101,7 +1101,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
@@ -1129,7 +1129,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 24e60195..a4b1c026 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
-
- /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
u64 _tt = gf128mul_table_be[a >> 56];
r->a = cpu_to_le64((a << 8) | (b >> 56));
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 12ad3e3a..1bffb3f7 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -56,9 +56,6 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -111,9 +108,6 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
diff --git a/crypto/internal.h b/crypto/internal.h
index f0732042..9a3f3993 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -30,9 +30,6 @@
enum {
CRYPTO_MSG_ALG_REQUEST,
CRYPTO_MSG_ALG_REGISTER,
- CRYPTO_MSG_ALG_UNREGISTER,
- CRYPTO_MSG_TMPL_REGISTER,
- CRYPTO_MSG_TMPL_UNREGISTER,
};
struct crypto_instance;
@@ -70,7 +67,6 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
}
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
-struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
@@ -78,7 +74,6 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
@@ -106,13 +101,13 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
- atomic_inc(&alg->cra_refcnt);
+ refcount_inc(&alg->cra_refcnt);
return alg;
}
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
+ if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
alg->cra_destroy(alg);
}
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 744e3513..ec5c6a08 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -188,7 +188,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
}
/* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
+ if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
ret = -EBADMSG;
memzero_explicit(&block, sizeof(struct crypto_kw_block));
@@ -221,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* Place the predefined IV into block A -- for encrypt, the caller
* does not need to provide an IV, but he needs to fetch the final IV.
*/
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
+ block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
diff --git a/crypto/lrw.c b/crypto/lrw.c
index cbbd7c50..954a7064 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -28,13 +28,31 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-#include <crypto/lrw.h>
#define LRW_BUFFER_SIZE 128u
+#define LRW_BLOCK_SIZE 16
+
struct priv {
struct crypto_skcipher *child;
- struct lrw_table_ctx table;
+
+ /*
+	 * optimizes multiplying a random (non-incrementing, as at the
+	 * start of a new sector) value with key2; we could also have
+	 * used 4k optimization tables or no optimization at all. In the
+	 * latter case we would have to store key2 here.
+ */
+ struct gf128mul_64k *table;
+
+ /*
+ * stores:
+ * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+ * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+ * key2*{ 0,0,...1,1,1,1,1 }, etc
+ * needed for optimized multiplication of incrementing values
+ * with key2
+ */
+ be128 mulinc[128];
};
struct rctx {
@@ -65,11 +83,25 @@ static inline void setbit128_bbe(void *b, int bit)
), b);
}
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
+ struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_skcipher *child = ctx->child;
+ int err, bsize = LRW_BLOCK_SIZE;
+ const u8 *tweak = key + keylen - bsize;
be128 tmp = { 0 };
int i;
+ crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(child, key, keylen - bsize);
+ crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
+
if (ctx->table)
gf128mul_free_64k(ctx->table);
@@ -87,34 +119,6 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
return 0;
}
-EXPORT_SYMBOL_GPL(lrw_init_table);
-
-void lrw_free_table(struct lrw_table_ctx *ctx)
-{
- if (ctx->table)
- gf128mul_free_64k(ctx->table);
-}
-EXPORT_SYMBOL_GPL(lrw_free_table);
-
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct priv *ctx = crypto_skcipher_ctx(parent);
- struct crypto_skcipher *child = ctx->child;
- int err, bsize = LRW_BLOCK_SIZE;
- const u8 *tweak = key + keylen - bsize;
-
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen - bsize);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->table, tweak);
-}
static inline void inc(be128 *iv)
{
@@ -238,7 +242,7 @@ static int pre_crypt(struct skcipher_request *req)
/* T <- I*Key2, using the optimization
* discussed in the specification */
be128_xor(&rctx->t, &rctx->t,
- &ctx->table.mulinc[get_index128(iv)]);
+ &ctx->mulinc[get_index128(iv)]);
inc(iv);
} while ((avail -= bs) >= bs);
@@ -301,7 +305,7 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
memcpy(&rctx->t, req->iv, sizeof(rctx->t));
/* T <- I*Key2 */
- gf128mul_64k_bbe(&rctx->t, ctx->table.table);
+ gf128mul_64k_bbe(&rctx->t, ctx->table);
return 0;
}
@@ -313,7 +317,7 @@ static void exit_crypt(struct skcipher_request *req)
rctx->left = 0;
if (rctx->ext)
- kfree(rctx->ext);
+ kzfree(rctx->ext);
}
static int do_encrypt(struct skcipher_request *req, int err)
@@ -416,85 +420,6 @@ static int decrypt(struct skcipher_request *req)
return do_decrypt(req, init_crypt(req, decrypt_done));
}
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
- struct scatterlist *ssrc, unsigned int nbytes,
- struct lrw_crypt_req *req)
-{
- const unsigned int bsize = LRW_BLOCK_SIZE;
- const unsigned int max_blks = req->tbuflen / bsize;
- struct lrw_table_ctx *ctx = req->table_ctx;
- struct blkcipher_walk walk;
- unsigned int nblocks;
- be128 *iv, *src, *dst, *t;
- be128 *t_buf = req->tbuf;
- int err, i;
-
- BUG_ON(max_blks < 1);
-
- blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
- nbytes = walk.nbytes;
- if (!nbytes)
- return err;
-
- nblocks = min(walk.nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
-
- /* calculate first value of T */
- iv = (be128 *)walk.iv;
- t_buf[0] = *iv;
-
- /* T <- I*Key2 */
- gf128mul_64k_bbe(&t_buf[0], ctx->table);
-
- i = 0;
- goto first;
-
- for (;;) {
- do {
- for (i = 0; i < nblocks; i++) {
- /* T <- I*Key2, using the optimization
- * discussed in the specification */
- be128_xor(&t_buf[i], t,
- &ctx->mulinc[get_index128(iv)]);
- inc(iv);
-first:
- t = &t_buf[i];
-
- /* PP <- T xor P */
- be128_xor(dst + i, t, src + i);
- }
-
- /* CC <- E(Key2,PP) */
- req->crypt_fn(req->crypt_ctx, (u8 *)dst,
- nblocks * bsize);
-
- /* C <- T xor CC */
- for (i = 0; i < nblocks; i++)
- be128_xor(dst + i, dst + i, &t_buf[i]);
-
- src += nblocks;
- dst += nblocks;
- nbytes -= nblocks * bsize;
- nblocks = min(nbytes / bsize, max_blks);
- } while (nblocks > 0);
-
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- if (!nbytes)
- break;
-
- nblocks = min(nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(lrw_crypt);
-
static int init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
@@ -518,7 +443,8 @@ static void exit_tfm(struct crypto_skcipher *tfm)
{
struct priv *ctx = crypto_skcipher_ctx(tfm);
- lrw_free_table(&ctx->table);
+ if (ctx->table)
+ gf128mul_free_64k(ctx->table);
crypto_free_skcipher(ctx->child);
}
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index eca04d37..f1415214 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>
-#include <linux/hardirq.h>
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
@@ -368,7 +367,7 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
goto out;
rctx->out = req->result;
- err = ahash_mcryptd_update(&rctx->areq);
+ err = crypto_ahash_update(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
goto out;
@@ -395,7 +394,7 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
goto out;
rctx->out = req->result;
- err = ahash_mcryptd_final(&rctx->areq);
+ err = crypto_ahash_final(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
goto out;
@@ -421,7 +420,7 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
if (unlikely(err == -EINPROGRESS))
goto out;
rctx->out = req->result;
- err = ahash_mcryptd_finup(&rctx->areq);
+ err = crypto_ahash_finup(&rctx->areq);
if (err) {
req->base.complete = rctx->complete;
@@ -456,7 +455,7 @@ static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
rctx->complete, req_async);
rctx->out = req->result;
- err = ahash_mcryptd_digest(desc);
+ err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
out:
local_bh_disable();
@@ -517,10 +516,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = halg->digestsize;
inst->alg.halg.statesize = halg->statesize;
@@ -535,7 +533,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = mcryptd_hash_finup_enqueue;
inst->alg.export = mcryptd_hash_export;
inst->alg.import = mcryptd_hash_import;
- inst->alg.setkey = mcryptd_hash_setkey;
+ if (crypto_hash_alg_has_setkey(halg))
+ inst->alg.setkey = mcryptd_hash_setkey;
inst->alg.digest = mcryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
@@ -613,32 +612,6 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-int ahash_mcryptd_digest(struct ahash_request *desc)
-{
- return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
-}
-
-int ahash_mcryptd_update(struct ahash_request *desc)
-{
- /* alignment is to be done by multi-buffer crypto algorithm if needed */
-
- return crypto_ahash_update(desc);
-}
-
-int ahash_mcryptd_finup(struct ahash_request *desc)
-{
- /* alignment is to be done by multi-buffer crypto algorithm if needed */
-
- return crypto_ahash_finup(desc);
-}
-
-int ahash_mcryptd_final(struct ahash_request *desc)
-{
- /* alignment is to be done by multi-buffer crypto algorithm if needed */
-
- return crypto_ahash_final(desc);
-}
-
struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
diff --git a/crypto/md4.c b/crypto/md4.c
index 3515af42..810fefb0 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -64,23 +64,6 @@ static inline u32 H(u32 x, u32 y, u32 z)
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
-/* XXX: this stuff can be optimized */
-static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __le32_to_cpus(buf);
- buf++;
- }
-}
-
-static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __cpu_to_le32s(buf);
- buf++;
- }
-}
-
static void md4_transform(u32 *hash, u32 const *in)
{
u32 a, b, c, d;
diff --git a/crypto/md5.c b/crypto/md5.c
index f7ae1a48..f776ef43 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -32,23 +32,6 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
-/* XXX: this stuff can be optimized */
-static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __le32_to_cpus(buf);
- buf++;
- }
-}
-
-static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
-{
- while (words--) {
- __cpu_to_le32s(buf);
- buf++;
- }
-}
-
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b1c2d57d..b7a3a061 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
}
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- /* Poly1305 requires a unique key for each tag, which implies that
- * we can't set it on the tfm that gets accessed by multiple users
- * simultaneously. Instead we expect the key as the first 32 bytes in
- * the update() call. */
- return -ENOTSUPP;
-}
-EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
-
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
@@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
dctx->s[3] = get_unaligned_le32(key + 12);
}
+/*
+ * Poly1305 requires a unique key for each tag, which implies that we can't set
+ * it on the tfm that gets accessed by multiple users simultaneously. Instead we
+ * expect the key as the first 32 bytes in the update() call.
+ */
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen)
{
@@ -210,7 +204,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- __le32 *mac = (__le32 *)dst;
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u32 mask;
@@ -267,10 +260,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
h3 = (h3 >> 18) | (h4 << 8);
/* mac = (h + s) % (2^128) */
- f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
- f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
- f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
- f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
+ f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0);
+ f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12);
return 0;
}
@@ -281,14 +274,12 @@ static struct shash_alg poly1305_alg = {
.init = crypto_poly1305_init,
.update = crypto_poly1305_update,
.final = crypto_poly1305_final,
- .setkey = crypto_poly1305_setkey,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_alignmask = sizeof(u32) - 1,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/crypto/proc.c b/crypto/proc.c
index 2cc10c96..822fcef6 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -46,7 +46,7 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
seq_printf(m, "priority : %d\n", alg->cra_priority);
- seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
+ seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt));
seq_printf(m, "selftest : %s\n",
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 2908f93c..9893dbfc 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -192,7 +192,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
if (likely(!pad_len))
goto out;
- out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
+ out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
err = -ENOMEM;
if (!out_buf)
goto out;
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index cad395d7..efc78fe7 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -15,8 +15,8 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <crypto/internal/rsa.h>
-#include "rsapubkey-asn1.h"
-#include "rsaprivkey-asn1.h"
+#include "rsapubkey.asn1.h"
+#include "rsaprivkey.asn1.h"
int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index d7da0eea..5074006a 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -19,49 +19,19 @@
*
*/
-#include <linux/init.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/salsa20.h>
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <crypto/algapi.h>
-#include <asm/byteorder.h>
-#define SALSA20_IV_SIZE 8U
-#define SALSA20_MIN_KEY_SIZE 16U
-#define SALSA20_MAX_KEY_SIZE 32U
-
-/*
- * Start of code taken from D. J. Bernstein's reference implementation.
- * With some modifications and optimizations made to suit our needs.
- */
-
-/*
-salsa20-ref.c version 20051118
-D. J. Bernstein
-Public domain.
-*/
-
-#define U32TO8_LITTLE(p, v) \
- { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \
- (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
-#define U8TO32_LITTLE(p) \
- (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
- ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
-
-struct salsa20_ctx
-{
- u32 input[16];
-};
-
-static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
+static void salsa20_block(u32 *state, __le32 *stream)
{
u32 x[16];
int i;
- memcpy(x, input, sizeof(x));
- for (i = 20; i > 0; i -= 2) {
+ memcpy(x, state, sizeof(x));
+
+ for (i = 0; i < 20; i += 2) {
x[ 4] ^= rol32((x[ 0] + x[12]), 7);
x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
x[12] ^= rol32((x[ 8] + x[ 4]), 13);
@@ -95,145 +65,137 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
x[14] ^= rol32((x[13] + x[12]), 13);
x[15] ^= rol32((x[14] + x[13]), 18);
}
- for (i = 0; i < 16; ++i)
- x[i] += input[i];
- for (i = 0; i < 16; ++i)
- U32TO8_LITTLE(output + 4 * i,x[i]);
-}
-static const char sigma[16] = "expand 32-byte k";
-static const char tau[16] = "expand 16-byte k";
+ for (i = 0; i < 16; i++)
+ stream[i] = cpu_to_le32(x[i] + state[i]);
+
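+	/* words 8 and 9 hold the 64-bit block counter; carry into the high word */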
+ if (++state[8] == 0)
+ state[9]++;
+}
-static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
+static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
{
- const char *constants;
+ __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
- ctx->input[1] = U8TO32_LITTLE(k + 0);
- ctx->input[2] = U8TO32_LITTLE(k + 4);
- ctx->input[3] = U8TO32_LITTLE(k + 8);
- ctx->input[4] = U8TO32_LITTLE(k + 12);
- if (kbytes == 32) { /* recommended */
- k += 16;
- constants = sigma;
- } else { /* kbytes == 16 */
- constants = tau;
+ if (dst != src)
+ memcpy(dst, src, bytes);
+
+ while (bytes >= SALSA20_BLOCK_SIZE) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+ bytes -= SALSA20_BLOCK_SIZE;
+ dst += SALSA20_BLOCK_SIZE;
+ }
+ if (bytes) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
- ctx->input[11] = U8TO32_LITTLE(k + 0);
- ctx->input[12] = U8TO32_LITTLE(k + 4);
- ctx->input[13] = U8TO32_LITTLE(k + 8);
- ctx->input[14] = U8TO32_LITTLE(k + 12);
- ctx->input[0] = U8TO32_LITTLE(constants + 0);
- ctx->input[5] = U8TO32_LITTLE(constants + 4);
- ctx->input[10] = U8TO32_LITTLE(constants + 8);
- ctx->input[15] = U8TO32_LITTLE(constants + 12);
}
-static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv)
{
- ctx->input[6] = U8TO32_LITTLE(iv + 0);
- ctx->input[7] = U8TO32_LITTLE(iv + 4);
- ctx->input[8] = 0;
- ctx->input[9] = 0;
+ memcpy(state, ctx->initial_state, sizeof(ctx->initial_state));
+ state[6] = get_unaligned_le32(iv + 0);
+ state[7] = get_unaligned_le32(iv + 4);
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_init);
-static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
- const u8 *src, unsigned int bytes)
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
{
- u8 buf[64];
-
- if (dst != src)
- memcpy(dst, src, bytes);
-
- while (bytes) {
- salsa20_wordtobyte(buf, ctx->input);
-
- ctx->input[8]++;
- if (!ctx->input[8])
- ctx->input[9]++;
+ static const char sigma[16] = "expand 32-byte k";
+ static const char tau[16] = "expand 16-byte k";
+ struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const char *constants;
- if (bytes <= 64) {
- crypto_xor(dst, buf, bytes);
- return;
- }
+ if (keysize != SALSA20_MIN_KEY_SIZE &&
+ keysize != SALSA20_MAX_KEY_SIZE)
+ return -EINVAL;
- crypto_xor(dst, buf, 64);
- bytes -= 64;
- dst += 64;
+ ctx->initial_state[1] = get_unaligned_le32(key + 0);
+ ctx->initial_state[2] = get_unaligned_le32(key + 4);
+ ctx->initial_state[3] = get_unaligned_le32(key + 8);
+ ctx->initial_state[4] = get_unaligned_le32(key + 12);
+ if (keysize == 32) { /* recommended */
+ key += 16;
+ constants = sigma;
+ } else { /* keysize == 16 */
+ constants = tau;
}
-}
-
-/*
- * End of code taken from D. J. Bernstein's reference implementation.
- */
+ ctx->initial_state[11] = get_unaligned_le32(key + 0);
+ ctx->initial_state[12] = get_unaligned_le32(key + 4);
+ ctx->initial_state[13] = get_unaligned_le32(key + 8);
+ ctx->initial_state[14] = get_unaligned_le32(key + 12);
+ ctx->initial_state[0] = get_unaligned_le32(constants + 0);
+ ctx->initial_state[5] = get_unaligned_le32(constants + 4);
+ ctx->initial_state[10] = get_unaligned_le32(constants + 8);
+ ctx->initial_state[15] = get_unaligned_le32(constants + 12);
+
+ /* space for the nonce; it will be overridden for each request */
+ ctx->initial_state[6] = 0;
+ ctx->initial_state[7] = 0;
+
+ /* initial block number */
+ ctx->initial_state[8] = 0;
+ ctx->initial_state[9] = 0;
-static int setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize)
-{
- struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
- salsa20_keysetup(ctx, key, keysize);
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_setkey);
-static int encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int salsa20_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ u32 state[16];
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 64);
+ err = skcipher_walk_virt(&walk, req, true);
- salsa20_ivsetup(ctx, walk.iv);
+ crypto_salsa20_init(state, ctx, walk.iv);
- while (walk.nbytes >= 64) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr,
- walk.nbytes - (walk.nbytes % 64));
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- if (walk.nbytes) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr, walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "salsa20",
- .cra_driver_name = "salsa20-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_type = &crypto_blkcipher_type,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct salsa20_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .setkey = setkey,
- .encrypt = encrypt,
- .decrypt = encrypt,
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- }
- }
+static struct skcipher_alg alg = {
+ .base.cra_name = "salsa20",
+ .base.cra_driver_name = "salsa20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct salsa20_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = SALSA20_MIN_KEY_SIZE,
+ .max_keysize = SALSA20_MAX_KEY_SIZE,
+ .ivsize = SALSA20_IV_SIZE,
+ .chunksize = SALSA20_BLOCK_SIZE,
+ .setkey = crypto_salsa20_setkey,
+ .encrypt = salsa20_crypt,
+ .decrypt = salsa20_crypt,
};
static int __init salsa20_generic_mod_init(void)
{
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit salsa20_generic_mod_fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(salsa20_generic_mod_init);
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 2075e2c4..968bbcf6 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -140,53 +140,6 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static void crypto_scomp_sg_free(struct scatterlist *sgl)
-{
- int i, n;
- struct page *page;
-
- if (!sgl)
- return;
-
- n = sg_nents(sgl);
- for_each_sg(sgl, sgl, n, i) {
- page = sg_page(sgl);
- if (page)
- __free_page(page);
- }
-
- kfree(sgl);
-}
-
-static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
-{
- struct scatterlist *sgl;
- struct page *page;
- int i, n;
-
- n = ((size - 1) >> PAGE_SHIFT) + 1;
-
- sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
- if (!sgl)
- return NULL;
-
- sg_init_table(sgl, n);
-
- for (i = 0; i < n; i++) {
- page = alloc_page(gfp);
- if (!page)
- goto err;
- sg_set_page(sgl + i, page, PAGE_SIZE, 0);
- }
-
- return sgl;
-
-err:
- sg_mark_end(sgl + i);
- crypto_scomp_sg_free(sgl);
- return NULL;
-}
-
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
@@ -220,7 +173,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
scratch_dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
- req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
+ req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
if (!req->dst)
goto out;
}
@@ -274,7 +227,7 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = crypto_scomp_sg_free;
+ crt->dst_free = sgl_free;
crt->reqsize = sizeof(void *);
return 0;
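The two one-line replacements above retire roughly fifty lines of hand-rolled scatterlist allocation in favour of the generic sgl_alloc()/sgl_free() helpers, which is why CRYPTO_ACOMP2 now selects SGL_ALLOC in Kconfig. A hedged sketch of the allocation side as this file now uses it; example_fill_dst() is a made-up wrapper:

/*
 * Allocate a page-backed destination list for an acomp request, as
 * scomp_acomp_comp_decomp() now does. sgl_alloc()'s third argument
 * can return the entry count; passing NULL ignores it. sgl_free()
 * later releases both the pages and the sg table in one call.
 */
static int example_fill_dst(struct acomp_req *req, unsigned int dlen)
{
	req->dst = sgl_alloc(dlen, GFP_ATOMIC, NULL);
	if (!req->dst)
		return -ENOMEM;
	return 0;
}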
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 570b7d1a..39dbf2f7 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -144,8 +144,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -153,9 +151,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize != sizeof(u64))
goto free_inst;
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7e8ed962..264ec12c 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -5,6 +5,7 @@
* http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -17,11 +18,23 @@
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha3.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
-#define KECCAK_ROUNDS 24
+/*
+ * On some 32-bit architectures (h8300), GCC ends up using
+ * over 1 KB of stack if we inline the round calculation into the loop
+ * in keccakf(). On the other hand, on 64-bit architectures with plenty
+ * of [64-bit wide] general purpose registers, not inlining it severely
+ * hurts performance. So let's use 64-bitness as a heuristic to decide
+ * whether to inline or not.
+ */
+#ifdef CONFIG_64BIT
+#define SHA3_INLINE inline
+#else
+#define SHA3_INLINE noinline
+#endif
-#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
+#define KECCAK_ROUNDS 24
static const u64 keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
@@ -34,100 +47,137 @@ static const u64 keccakf_rndc[24] = {
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
-static const int keccakf_rotc[24] = {
- 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
- 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
-};
-
-static const int keccakf_piln[24] = {
- 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
- 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
-};
-
/* update the state with given number of rounds */
-static void keccakf(u64 st[25])
+static SHA3_INLINE void keccakf_round(u64 st[25])
{
- int i, j, round;
- u64 t, bc[5];
-
- for (round = 0; round < KECCAK_ROUNDS; round++) {
-
- /* Theta */
- for (i = 0; i < 5; i++)
- bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
- ^ st[i + 20];
-
- for (i = 0; i < 5; i++) {
- t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
- for (j = 0; j < 25; j += 5)
- st[j + i] ^= t;
- }
-
- /* Rho Pi */
- t = st[1];
- for (i = 0; i < 24; i++) {
- j = keccakf_piln[i];
- bc[0] = st[j];
- st[j] = ROTL64(t, keccakf_rotc[i]);
- t = bc[0];
- }
+ u64 t[5], tt, bc[5];
+
+ /* Theta */
+ bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+ bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+ bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+ bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+ bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+ t[0] = bc[4] ^ rol64(bc[1], 1);
+ t[1] = bc[0] ^ rol64(bc[2], 1);
+ t[2] = bc[1] ^ rol64(bc[3], 1);
+ t[3] = bc[2] ^ rol64(bc[4], 1);
+ t[4] = bc[3] ^ rol64(bc[0], 1);
+
+ st[0] ^= t[0];
+
+ /* Rho Pi */
+ tt = st[1];
+ st[ 1] = rol64(st[ 6] ^ t[1], 44);
+ st[ 6] = rol64(st[ 9] ^ t[4], 20);
+ st[ 9] = rol64(st[22] ^ t[2], 61);
+ st[22] = rol64(st[14] ^ t[4], 39);
+ st[14] = rol64(st[20] ^ t[0], 18);
+ st[20] = rol64(st[ 2] ^ t[2], 62);
+ st[ 2] = rol64(st[12] ^ t[2], 43);
+ st[12] = rol64(st[13] ^ t[3], 25);
+ st[13] = rol64(st[19] ^ t[4], 8);
+ st[19] = rol64(st[23] ^ t[3], 56);
+ st[23] = rol64(st[15] ^ t[0], 41);
+ st[15] = rol64(st[ 4] ^ t[4], 27);
+ st[ 4] = rol64(st[24] ^ t[4], 14);
+ st[24] = rol64(st[21] ^ t[1], 2);
+ st[21] = rol64(st[ 8] ^ t[3], 55);
+ st[ 8] = rol64(st[16] ^ t[1], 45);
+ st[16] = rol64(st[ 5] ^ t[0], 36);
+ st[ 5] = rol64(st[ 3] ^ t[3], 28);
+ st[ 3] = rol64(st[18] ^ t[3], 21);
+ st[18] = rol64(st[17] ^ t[2], 15);
+ st[17] = rol64(st[11] ^ t[1], 10);
+ st[11] = rol64(st[ 7] ^ t[2], 6);
+ st[ 7] = rol64(st[10] ^ t[0], 3);
+ st[10] = rol64( tt ^ t[1], 1);
+
+ /* Chi */
+ bc[ 0] = ~st[ 1] & st[ 2];
+ bc[ 1] = ~st[ 2] & st[ 3];
+ bc[ 2] = ~st[ 3] & st[ 4];
+ bc[ 3] = ~st[ 4] & st[ 0];
+ bc[ 4] = ~st[ 0] & st[ 1];
+ st[ 0] ^= bc[ 0];
+ st[ 1] ^= bc[ 1];
+ st[ 2] ^= bc[ 2];
+ st[ 3] ^= bc[ 3];
+ st[ 4] ^= bc[ 4];
+
+ bc[ 0] = ~st[ 6] & st[ 7];
+ bc[ 1] = ~st[ 7] & st[ 8];
+ bc[ 2] = ~st[ 8] & st[ 9];
+ bc[ 3] = ~st[ 9] & st[ 5];
+ bc[ 4] = ~st[ 5] & st[ 6];
+ st[ 5] ^= bc[ 0];
+ st[ 6] ^= bc[ 1];
+ st[ 7] ^= bc[ 2];
+ st[ 8] ^= bc[ 3];
+ st[ 9] ^= bc[ 4];
+
+ bc[ 0] = ~st[11] & st[12];
+ bc[ 1] = ~st[12] & st[13];
+ bc[ 2] = ~st[13] & st[14];
+ bc[ 3] = ~st[14] & st[10];
+ bc[ 4] = ~st[10] & st[11];
+ st[10] ^= bc[ 0];
+ st[11] ^= bc[ 1];
+ st[12] ^= bc[ 2];
+ st[13] ^= bc[ 3];
+ st[14] ^= bc[ 4];
+
+ bc[ 0] = ~st[16] & st[17];
+ bc[ 1] = ~st[17] & st[18];
+ bc[ 2] = ~st[18] & st[19];
+ bc[ 3] = ~st[19] & st[15];
+ bc[ 4] = ~st[15] & st[16];
+ st[15] ^= bc[ 0];
+ st[16] ^= bc[ 1];
+ st[17] ^= bc[ 2];
+ st[18] ^= bc[ 3];
+ st[19] ^= bc[ 4];
+
+ bc[ 0] = ~st[21] & st[22];
+ bc[ 1] = ~st[22] & st[23];
+ bc[ 2] = ~st[23] & st[24];
+ bc[ 3] = ~st[24] & st[20];
+ bc[ 4] = ~st[20] & st[21];
+ st[20] ^= bc[ 0];
+ st[21] ^= bc[ 1];
+ st[22] ^= bc[ 2];
+ st[23] ^= bc[ 3];
+ st[24] ^= bc[ 4];
+}
- /* Chi */
- for (j = 0; j < 25; j += 5) {
- for (i = 0; i < 5; i++)
- bc[i] = st[j + i];
- for (i = 0; i < 5; i++)
- st[j + i] ^= (~bc[(i + 1) % 5]) &
- bc[(i + 2) % 5];
- }
+static void __optimize("O3") keccakf(u64 st[25])
+{
+ int round;
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+ keccakf_round(st);
/* Iota */
st[0] ^= keccakf_rndc[round];
}
}
-static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
-{
- memset(sctx, 0, sizeof(*sctx));
- sctx->md_len = digest_sz;
- sctx->rsiz = 200 - 2 * digest_sz;
- sctx->rsizw = sctx->rsiz / 8;
-}
-
-static int sha3_224_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_224_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_256_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_256_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_384_init(struct shash_desc *desc)
+int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- sha3_init(sctx, SHA3_384_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_512_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
+ sctx->rsiz = 200 - 2 * digest_size;
+ sctx->rsizw = sctx->rsiz / 8;
+ sctx->partial = 0;
- sha3_init(sctx, SHA3_512_DIGEST_SIZE);
+ memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_init);
-static int sha3_update(struct shash_desc *desc, const u8 *data,
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
@@ -149,7 +199,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int i;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) src)[i];
+ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
keccakf(sctx->st);
done += sctx->rsiz;
@@ -163,125 +213,89 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_update);
-static int sha3_final(struct shash_desc *desc, u8 *out)
+int crypto_sha3_final(struct shash_desc *desc, u8 *out)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int i, inlen = sctx->partial;
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ __le64 *digest = (__le64 *)out;
sctx->buf[inlen++] = 0x06;
memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
sctx->buf[sctx->rsiz - 1] |= 0x80;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
+ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
keccakf(sctx->st);
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] = cpu_to_le64(sctx->st[i]);
+ for (i = 0; i < digest_size / 8; i++)
+ put_unaligned_le64(sctx->st[i], digest++);
- memcpy(out, sctx->st, sctx->md_len);
+ if (digest_size & 4)
+ put_unaligned_le32(sctx->st[i], (__le32 *)digest);
memset(sctx, 0, sizeof(*sctx));
return 0;
}
-
-static struct shash_alg sha3_224 = {
- .digestsize = SHA3_224_DIGEST_SIZE,
- .init = sha3_224_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-224",
- .cra_driver_name = "sha3-224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_256 = {
- .digestsize = SHA3_256_DIGEST_SIZE,
- .init = sha3_256_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-256",
- .cra_driver_name = "sha3-256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_384 = {
- .digestsize = SHA3_384_DIGEST_SIZE,
- .init = sha3_384_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-384",
- .cra_driver_name = "sha3-384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_384_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_512 = {
- .digestsize = SHA3_512_DIGEST_SIZE,
- .init = sha3_512_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-512",
- .cra_driver_name = "sha3-512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
+EXPORT_SYMBOL(crypto_sha3_final);
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
static int __init sha3_generic_mod_init(void)
{
- int ret;
-
- ret = crypto_register_shash(&sha3_224);
- if (ret < 0)
- goto err_out;
- ret = crypto_register_shash(&sha3_256);
- if (ret < 0)
- goto err_out_224;
- ret = crypto_register_shash(&sha3_384);
- if (ret < 0)
- goto err_out_256;
- ret = crypto_register_shash(&sha3_512);
- if (ret < 0)
- goto err_out_384;
-
- return 0;
-
-err_out_384:
- crypto_unregister_shash(&sha3_384);
-err_out_256:
- crypto_unregister_shash(&sha3_256);
-err_out_224:
- crypto_unregister_shash(&sha3_224);
-err_out:
- return ret;
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static void __exit sha3_generic_mod_fini(void)
{
- crypto_unregister_shash(&sha3_224);
- crypto_unregister_shash(&sha3_256);
- crypto_unregister_shash(&sha3_384);
- crypto_unregister_shash(&sha3_512);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
module_init(sha3_generic_mod_init);
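Replacing the ((u64 *)src)[i] casts with get_unaligned_le64() fixes two latent problems at once: the buffer no longer needs 8-byte alignment, and the byte order is explicitly little-endian rather than whatever the CPU happens to use. A self-contained user-space illustration; load_le64() is a portable stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Portable equivalent of get_unaligned_le64(): assemble the value
 * byte by byte, so neither alignment nor host endianness matters.
 */
static uint64_t load_le64(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	unsigned char buf[9] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };

	/*
	 * buf + 1 is misaligned for a u64; a direct cast like the old
	 * ((u64 *)src)[i] would be undefined behaviour (and a trap on
	 * strict-alignment CPUs), while this load is always safe.
	 * Prints 0807060504030201.
	 */
	printf("%016llx\n", (unsigned long long)load_le64(buf + 1));
	return 0;
}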
diff --git a/crypto/shash.c b/crypto/shash.c
index e849d3ee..5d732c6b 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return shash_setkey_unaligned(tfm, key, keylen);
+ err = shash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = shash->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return shash->setkey(tfm, key, keylen);
+ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
@@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
@@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
- crt->has_setkey = alg->setkey != shash_no_setkey;
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
if (alg->export)
crt->export = shash_async_export;
@@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+
+ hash->descsize = alg->descsize;
+
+ if (crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
- hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
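Taken together, the shash hunks above introduce the CRYPTO_TFM_NEED_KEY protocol: crypto_shash_init_tfm() sets the flag for keyed algorithms lacking CRYPTO_ALG_OPTIONAL_KEY, a successful setkey clears it, and digest refuses to run with -ENOKEY while it is still set. A hedged caller-side sketch of the resulting contract (uses <crypto/hash.h>; not a real driver):

static int example_keyed_digest(struct crypto_shash *tfm,
				const u8 *key, unsigned int keylen,
				const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;

	/* clears CRYPTO_TFM_NEED_KEY on success */
	err = crypto_shash_setkey(tfm, key, keylen);
	if (err)
		return err;

	/* would now return -ENOKEY had setkey been skipped */
	return crypto_shash_digest(desc, data, len, out);
}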
diff --git a/crypto/simd.c b/crypto/simd.c
index 88203370..ea7240be 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -223,4 +221,54 @@ void simd_skcipher_free(struct simd_skcipher_alg *salg)
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);
+int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs)
+{
+ int err;
+ int i;
+ const char *algname;
+ const char *drvname;
+ const char *basename;
+ struct simd_skcipher_alg *simd;
+
+ err = crypto_register_skciphers(algs, count);
+ if (err)
+ return err;
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
+ WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
+ algname = algs[i].base.cra_name + 2;
+ drvname = algs[i].base.cra_driver_name + 2;
+ basename = algs[i].base.cra_driver_name;
+ simd = simd_skcipher_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto err_unregister;
+ simd_algs[i] = simd;
+ }
+ return 0;
+
+err_unregister:
+ simd_unregister_skciphers(algs, count, simd_algs);
+ return err;
+}
+EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
+
+void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
+ struct simd_skcipher_alg **simd_algs)
+{
+ int i;
+
+ crypto_unregister_skciphers(algs, count);
+
+ for (i = 0; i < count; i++) {
+ if (simd_algs[i]) {
+ simd_skcipher_free(simd_algs[i]);
+ simd_algs[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
+
MODULE_LICENSE("GPL");
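simd_register_skciphers_compat() pairs each internal "__"-prefixed skcipher with a SIMD wrapper and unwinds everything on failure. A hypothetical driver-side usage sketch; example_algs and its contents are assumptions, only the two helper calls come from this file:

/* inner algs must be named "__..." (the WARN_ONs above enforce it) */
static struct skcipher_alg example_algs[2];	/* fields elided */
static struct simd_skcipher_alg *example_simd_algs[2];

static int __init example_init(void)
{
	return simd_register_skciphers_compat(example_algs,
					      ARRAY_SIZE(example_algs),
					      example_simd_algs);
}

static void __exit example_exit(void)
{
	simd_unregister_skciphers(example_algs, ARRAY_SIZE(example_algs),
				  example_simd_algs);
}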
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 11af5fd6..0fe2a292 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -598,8 +598,11 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
@@ -674,6 +677,9 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -692,8 +698,11 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
crypto_skcipher_set_flags(tfm,
crypto_ablkcipher_get_flags(ablkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
@@ -767,6 +776,9 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
sizeof(struct ablkcipher_request);
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -796,6 +808,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ int err;
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -803,9 +816,15 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if ((unsigned long)key & alignmask)
- return skcipher_setkey_unaligned(tfm, key, keylen);
+ err = skcipher_setkey_unaligned(tfm, key, keylen);
+ else
+ err = cipher->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return cipher->setkey(tfm, key, keylen);
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
@@ -834,6 +853,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
new file mode 100644
index 00000000..f537a276
--- /dev/null
+++ b/crypto/sm4_generic.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * SM4 Cipher Algorithm.
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * All rights reserved.
+ */
+
+#include <crypto/sm4.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+static const u32 fk[4] = {
+ 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
+};
+
+static const u8 sbox[256] = {
+ 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+ 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+ 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+ 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+ 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+ 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+ 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+ 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+ 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+ 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+ 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+ 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+ 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+ 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+ 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+ 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+ 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+ 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+ 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+ 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+ 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+ 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+ 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+ 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+ 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+ 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+ 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+ 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+ 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+ 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+ 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+ 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
+};
+
+static const u32 ck[] = {
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
+
+static u32 sm4_t_non_lin_sub(u32 x)
+{
+ int i;
+ u8 *b = (u8 *)&x;
+
+ for (i = 0; i < 4; ++i)
+ b[i] = sbox[b[i]];
+
+ return x;
+}
+
+static u32 sm4_key_lin_sub(u32 x)
+{
+ return x ^ rol32(x, 13) ^ rol32(x, 23);
+}
+
+static u32 sm4_enc_lin_sub(u32 x)
+{
+ return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
+}
+
+static u32 sm4_key_sub(u32 x)
+{
+ return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static u32 sm4_enc_sub(u32 x)
+{
+ return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static u32 sm4_round(const u32 *x, const u32 rk)
+{
+ return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
+}
+
+/**
+ * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size is
+ * supplied.
+ */
+int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ u32 rk[4], t;
+ const u32 *key = (u32 *)in_key;
+ int i;
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < 4; ++i)
+ rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
+
+ for (i = 0; i < 32; ++i) {
+ t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
+ ctx->rkey_enc[i] = t;
+ rk[0] = rk[1];
+ rk[1] = rk[2];
+ rk[2] = rk[3];
+ rk[3] = t;
+ }
+
+ for (i = 0; i < 32; ++i)
+ ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
+
+/**
+ * crypto_sm4_set_key - Set the SM4 key.
+ * @tfm: The %crypto_tfm that is used in the context.
+ * @in_key: The input key.
+ * @key_len: The size of the key.
+ *
+ * Returns 0 on success; on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in
+ * @tfm is set. The function uses crypto_sm4_expand_key() to expand the key.
+ * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
+ * retrieved with crypto_tfm_ctx().
+ */
+int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *flags = &tfm->crt_flags;
+ int ret;
+
+ ret = crypto_sm4_expand_key(ctx, in_key, key_len);
+ if (!ret)
+ return 0;
+
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
+
+static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
+{
+ u32 x[4], i, t;
+
+ for (i = 0; i < 4; ++i)
+ x[i] = get_unaligned_be32(&in[i]);
+
+ for (i = 0; i < 32; ++i) {
+ t = sm4_round(x, rk[i]);
+ x[0] = x[1];
+ x[1] = x[2];
+ x[2] = x[3];
+ x[3] = t;
+ }
+
+ for (i = 0; i < 4; ++i)
+ put_unaligned_be32(x[3 - i], &out[i]);
+}
+
+/* encrypt a block of text */
+
+static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
+}
+
+/* decrypt a block of text */
+
+static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
+}
+
+static struct crypto_alg sm4_alg = {
+ .cra_name = "sm4",
+ .cra_driver_name = "sm4-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_sm4_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = SM4_KEY_SIZE,
+ .cia_max_keysize = SM4_KEY_SIZE,
+ .cia_setkey = crypto_sm4_set_key,
+ .cia_encrypt = sm4_encrypt,
+ .cia_decrypt = sm4_decrypt
+ }
+ }
+};
+
+static int __init sm4_init(void)
+{
+ return crypto_register_alg(&sm4_alg);
+}
+
+static void __exit sm4_fini(void)
+{
+ crypto_unregister_alg(&sm4_alg);
+}
+
+module_init(sm4_init);
+module_exit(sm4_fini);
+
+MODULE_DESCRIPTION("SM4 Cipher Algorithm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sm4");
+MODULE_ALIAS_CRYPTO("sm4-generic");
diff --git a/crypto/speck.c b/crypto/speck.c
new file mode 100644
index 00000000..58aa9f7f
--- /dev/null
+++ b/crypto/speck.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Speck: a lightweight block cipher
+ *
+ * Copyright (c) 2018 Google, Inc
+ *
+ * Speck has 10 variants, including 5 block sizes. For now we only implement
+ * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
+ * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
+ * and a key size of K bits. The Speck128 variants are believed to be the most
+ * secure variants, and they use the same block size and key sizes as AES. The
+ * Speck64 variants are less secure, but on 32-bit processors are usually
+ * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
+ * secure and/or not as well suited for implementation on either 32-bit or
+ * 64-bit processors, so are omitted.
+ *
+ * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
+ * https://eprint.iacr.org/2013/404.pdf
+ *
+ * In a correspondence, the Speck designers have also clarified that the words
+ * should be interpreted in little-endian format, and the words should be
+ * ordered such that the first word of each block is 'y' rather than 'x', and
+ * the first key word (rather than the last) becomes the first round key.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/speck.h>
+#include <linux/bitops.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+/* Speck128 */
+
+static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
+{
+ *x = ror64(*x, 8);
+ *x += *y;
+ *x ^= k;
+ *y = rol64(*y, 3);
+ *y ^= *x;
+}
+
+static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
+{
+ *y ^= *x;
+ *y = ror64(*y, 3);
+ *x ^= k;
+ *x -= *y;
+ *x = rol64(*x, 8);
+}
+
+void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u64 y = get_unaligned_le64(in);
+ u64 x = get_unaligned_le64(in + 8);
+ int i;
+
+ for (i = 0; i < ctx->nrounds; i++)
+ speck128_round(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le64(y, out);
+ put_unaligned_le64(x, out + 8);
+}
+EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
+
+static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u64 y = get_unaligned_le64(in);
+ u64 x = get_unaligned_le64(in + 8);
+ int i;
+
+ for (i = ctx->nrounds - 1; i >= 0; i--)
+ speck128_unround(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le64(y, out);
+ put_unaligned_le64(x, out + 8);
+}
+EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
+
+static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ u64 l[3];
+ u64 k;
+ int i;
+
+ switch (keylen) {
+ case SPECK128_128_KEY_SIZE:
+ k = get_unaligned_le64(key);
+ l[0] = get_unaligned_le64(key + 8);
+ ctx->nrounds = SPECK128_128_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck128_round(&l[0], &k, i);
+ }
+ break;
+ case SPECK128_192_KEY_SIZE:
+ k = get_unaligned_le64(key);
+ l[0] = get_unaligned_le64(key + 8);
+ l[1] = get_unaligned_le64(key + 16);
+ ctx->nrounds = SPECK128_192_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck128_round(&l[i % 2], &k, i);
+ }
+ break;
+ case SPECK128_256_KEY_SIZE:
+ k = get_unaligned_le64(key);
+ l[0] = get_unaligned_le64(key + 8);
+ l[1] = get_unaligned_le64(key + 16);
+ l[2] = get_unaligned_le64(key + 24);
+ ctx->nrounds = SPECK128_256_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck128_round(&l[i % 3], &k, i);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
+
+static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
+}
+
+/* Speck64 */
+
+static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
+{
+ *x = ror32(*x, 8);
+ *x += *y;
+ *x ^= k;
+ *y = rol32(*y, 3);
+ *y ^= *x;
+}
+
+static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
+{
+ *y ^= *x;
+ *y = ror32(*y, 3);
+ *x ^= k;
+ *x -= *y;
+ *x = rol32(*x, 8);
+}
+
+void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u32 y = get_unaligned_le32(in);
+ u32 x = get_unaligned_le32(in + 4);
+ int i;
+
+ for (i = 0; i < ctx->nrounds; i++)
+ speck64_round(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le32(y, out);
+ put_unaligned_le32(x, out + 4);
+}
+EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
+
+static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
+ u8 *out, const u8 *in)
+{
+ u32 y = get_unaligned_le32(in);
+ u32 x = get_unaligned_le32(in + 4);
+ int i;
+
+ for (i = ctx->nrounds - 1; i >= 0; i--)
+ speck64_unround(&x, &y, ctx->round_keys[i]);
+
+ put_unaligned_le32(y, out);
+ put_unaligned_le32(x, out + 4);
+}
+EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
+
+static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
+}
+
+int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ u32 l[3];
+ u32 k;
+ int i;
+
+ switch (keylen) {
+ case SPECK64_96_KEY_SIZE:
+ k = get_unaligned_le32(key);
+ l[0] = get_unaligned_le32(key + 4);
+ l[1] = get_unaligned_le32(key + 8);
+ ctx->nrounds = SPECK64_96_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck64_round(&l[i % 2], &k, i);
+ }
+ break;
+ case SPECK64_128_KEY_SIZE:
+ k = get_unaligned_le32(key);
+ l[0] = get_unaligned_le32(key + 4);
+ l[1] = get_unaligned_le32(key + 8);
+ l[2] = get_unaligned_le32(key + 12);
+ ctx->nrounds = SPECK64_128_NROUNDS;
+ for (i = 0; i < ctx->nrounds; i++) {
+ ctx->round_keys[i] = k;
+ speck64_round(&l[i % 3], &k, i);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
+
+static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
+}
+
+/* Algorithm definitions */
+
+static struct crypto_alg speck_algs[] = {
+ {
+ .cra_name = "speck128",
+ .cra_driver_name = "speck128-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SPECK128_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = SPECK128_128_KEY_SIZE,
+ .cia_max_keysize = SPECK128_256_KEY_SIZE,
+ .cia_setkey = speck128_setkey,
+ .cia_encrypt = speck128_encrypt,
+ .cia_decrypt = speck128_decrypt
+ }
+ }
+ }, {
+ .cra_name = "speck64",
+ .cra_driver_name = "speck64-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = SPECK64_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = SPECK64_96_KEY_SIZE,
+ .cia_max_keysize = SPECK64_128_KEY_SIZE,
+ .cia_setkey = speck64_setkey,
+ .cia_encrypt = speck64_encrypt,
+ .cia_decrypt = speck64_decrypt
+ }
+ }
+ }
+};
+
+static int __init speck_module_init(void)
+{
+ return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+static void __exit speck_module_exit(void)
+{
+ crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
+}
+
+module_init(speck_module_init);
+module_exit(speck_module_exit);
+
+MODULE_DESCRIPTION("Speck block cipher (generic)");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("speck128");
+MODULE_ALIAS_CRYPTO("speck128-generic");
+MODULE_ALIAS_CRYPTO("speck64");
+MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9267cbdb..51fe7c87 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -67,6 +67,7 @@ static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
+static u32 num_mb = 8;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
@@ -79,6 +80,66 @@ static char *check[] = {
NULL
};
+static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
+static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
+
+#define XBUFSIZE 8
+#define MAX_IVLEN 32
+
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++) {
+ buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf[i])
+ goto err_free_buf;
+ }
+
+ return 0;
+
+err_free_buf:
+ while (i-- > 0)
+ free_page((unsigned long)buf[i]);
+
+ return -ENOMEM;
+}
+
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++)
+ free_page((unsigned long)buf[i]);
+}
+
+static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
+ unsigned int buflen, const void *assoc,
+ unsigned int aad_size)
+{
+ int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
+ int k, rem;
+
+ if (np > XBUFSIZE) {
+ rem = PAGE_SIZE;
+ np = XBUFSIZE;
+ } else {
+ rem = buflen % PAGE_SIZE;
+ }
+
+ sg_init_table(sg, np + 1);
+
+ sg_set_buf(&sg[0], assoc, aad_size);
+
+ if (rem)
+ np--;
+ for (k = 0; k < np; k++)
+ sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
+
+ if (rem)
+ sg_set_buf(&sg[k + 1], xbuf[k], rem);
+}
+
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -86,6 +147,298 @@ static inline int do_one_aead_op(struct aead_request *req, int ret)
return crypto_wait_req(ret, wait);
}
+struct test_mb_aead_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct scatterlist sgout[XBUFSIZE];
+ struct aead_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ char *axbuf[XBUFSIZE];
+};
+
+static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_aead_encrypt(data[i].req);
+ else
+ rc[i] = crypto_aead_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_aead_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_aead_speed(const char *algo, int enc, int secs,
+ struct aead_speed_template *template,
+ unsigned int tcount, u8 authsize,
+ unsigned int aad_size, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_aead_data *data;
+ struct crypto_aead *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ void *assoc;
+ u32 *b_size;
+ char *iv;
+ int ret;
+
+ if (aad_size >= PAGE_SIZE) {
+ pr_err("associated data length (%u) too big\n", aad_size);
+ return;
+ }
+
+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+ if (!iv)
+ return;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_iv;
+
+ tfm = crypto_alloc_aead(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, authsize);
+ if (ret) {
+ pr_err("failed to set authsize %u on %s\n", authsize, algo);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].axbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].axbuf);
+ goto out_free_xbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xoutbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xoutbuf);
+ goto out_free_axbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ aead_request_free(data[i].req);
+ goto out_free_xoutbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ crypto_init_wait(&data[i].wait);
+ aead_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_aead, tfm), e);
+
+ i = 0;
+ do {
+ b_size = aead_sizes;
+ do {
+ if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ authsize + *b_size,
+ XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_aead_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_aead_ivsize(tfm);
+ if (iv_len)
+ memset(iv, 0xff, iv_len);
+
+ /* Now setup per request stuff, i.e. buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_aead_data *cur = &data[j];
+
+ assoc = cur->axbuf[0];
+ memset(assoc, 0xff, aad_size);
+
+ sg_init_aead(cur->sg, cur->xbuf,
+ *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
+
+ sg_init_aead(cur->sgout, cur->xoutbuf,
+ *b_size + (enc ? authsize : 0),
+ assoc, aad_size);
+
+ aead_request_set_ad(cur->req, aad_size);
+
+ if (!enc) {
+
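+ /*
+ * For decryption we need a proper auth so
+ * we do the encryption path once with buffers
+ * reversed (input <-> output) to calculate it
+ */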
+ aead_request_set_crypt(cur->req,
+ cur->sgout,
+ cur->sg,
+ *b_size, iv);
+ ret = crypto_aead_encrypt(cur->req);
+ ret = do_one_aead_op(cur->req, ret);
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
+
+ aead_request_set_crypt(cur->req, cur->sg,
+ cur->sgout, *b_size +
+ (enc ? 0 : authsize),
+ iv);
+
+ }
+
+ if (secs)
+ ret = test_mb_aead_jiffies(data, enc, *b_size,
+ secs, num_mb);
+ else
+ ret = test_mb_aead_cycles(data, enc, *b_size,
+ num_mb);
+
+ if (ret) {
+ pr_err("%s() failed return code=%d\n", e, ret);
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ aead_request_free(data[i].req);
+out_free_xoutbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xoutbuf);
+out_free_axbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].axbuf);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_aead(tfm);
+out_free_data:
+ kfree(data);
+out_free_iv:
+ kfree(iv);
+}
+
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int secs)
{
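do_mult_aead_op() above, and the do_mult_ahash_op()/do_mult_acipher_op() helpers later in this file, all share one fire-then-wait shape: issue num_mb asynchronous requests back to back, then resolve each through crypto_wait_req(), keeping the last error. Distilled below, with example_data and example_issue() as placeholders:

/*
 * -EINPROGRESS/-EBUSY returns are resolved by crypto_wait_req();
 * synchronous results pass through it unchanged.
 */
static int do_mult_op(struct example_data *data, u32 num_mb)
{
	int i, rc[num_mb], err = 0;

	for (i = 0; i < num_mb; i++)		/* fire */
		rc[i] = example_issue(data[i].req);

	for (i = 0; i < num_mb; i++) {		/* wait */
		rc[i] = crypto_wait_req(rc[i], &data[i].wait);
		if (rc[i])
			err = rc[i];
	}
	return err;
}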
@@ -151,60 +504,6 @@ out:
return ret;
}
-static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
-static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
-
-#define XBUFSIZE 8
-#define MAX_IVLEN 32
-
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++) {
- buf[i] = (void *)__get_free_page(GFP_KERNEL);
- if (!buf[i])
- goto err_free_buf;
- }
-
- return 0;
-
-err_free_buf:
- while (i-- > 0)
- free_page((unsigned long)buf[i]);
-
- return -ENOMEM;
-}
-
-static void testmgr_free_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++)
- free_page((unsigned long)buf[i]);
-}
-
-static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
- unsigned int buflen)
-{
- int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
- int k, rem;
-
- if (np > XBUFSIZE) {
- rem = PAGE_SIZE;
- np = XBUFSIZE;
- } else {
- rem = buflen % PAGE_SIZE;
- }
-
- sg_init_table(sg, np + 1);
- np--;
- for (k = 0; k < np; k++)
- sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
-
- sg_set_buf(&sg[k + 1], xbuf[k], rem);
-}
-
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
@@ -316,19 +615,37 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out;
}
- sg_init_aead(sg, xbuf,
- *b_size + (enc ? 0 : authsize));
+ sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
sg_init_aead(sgout, xoutbuf,
- *b_size + (enc ? authsize : 0));
+ *b_size + (enc ? authsize : 0), assoc,
+ aad_size);
+
+ aead_request_set_ad(req, aad_size);
- sg_set_buf(&sg[0], assoc, aad_size);
- sg_set_buf(&sgout[0], assoc, aad_size);
+ if (!enc) {
+
+ /*
+ * For decryption we need a proper auth so
+ * we do the encryption path once with buffers
+ * reversed (input <-> output) to calculate it
+ */
+ aead_request_set_crypt(req, sgout, sg,
+ *b_size, iv);
+ ret = do_one_aead_op(req,
+ crypto_aead_encrypt(req));
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
aead_request_set_crypt(req, sg, sgout,
*b_size + (enc ? 0 : authsize),
iv);
- aead_request_set_ad(req, aad_size);
if (secs)
ret = test_aead_jiffies(req, enc, *b_size,
@@ -381,24 +698,98 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
}
struct test_mb_ahash_data {
- struct scatterlist sg[TVMEMSIZE];
+ struct scatterlist sg[XBUFSIZE];
char result[64];
struct ahash_request *req;
struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
-static void test_mb_ahash_speed(const char *algo, unsigned int sec,
- struct hash_speed *speed)
+static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++)
+ rc[i] = crypto_ahash_digest(data[i].req);
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
+ u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_ahash_op(data, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed, u32 num_mb)
{
struct test_mb_ahash_data *data;
struct crypto_ahash *tfm;
- unsigned long start, end;
- unsigned long cycles;
unsigned int i, j, k;
int ret;
- data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
if (!data)
return;
@@ -409,7 +800,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
goto free_data;
}
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < num_mb; ++i) {
if (testmgr_alloc_buf(data[i].xbuf))
goto out;
@@ -424,7 +815,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
ahash_request_set_callback(data[i].req, 0, crypto_req_done,
&data[i].wait);
- test_hash_sg_init(data[i].sg);
+
+ sg_init_table(data[i].sg, XBUFSIZE);
+ for (j = 0; j < XBUFSIZE; j++) {
+ sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+ memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+ }
}
pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
@@ -435,16 +831,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (speed[i].blen != speed[i].plen)
continue;
- if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ speed[i].blen, XBUFSIZE * PAGE_SIZE);
goto out;
}
if (speed[i].klen)
crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
- for (k = 0; k < 8; k++)
+ for (k = 0; k < num_mb; k++)
ahash_request_set_crypt(data[k].req, data[k].sg,
data[k].result, speed[i].blen);
@@ -453,34 +849,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- start = get_cycles();
-
- for (k = 0; k < 8; k++) {
- ret = crypto_ahash_digest(data[k].req);
- if (ret == -EINPROGRESS) {
- ret = 0;
- continue;
- }
-
- if (ret)
- break;
-
- crypto_req_done(&data[k].req->base, 0);
- }
-
- for (j = 0; j < k; j++) {
- struct crypto_wait *wait = &data[j].wait;
- int wait_ret;
+ if (secs)
+ ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+ num_mb);
+ else
+ ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- wait_ret = crypto_wait_req(-EINPROGRESS, wait);
- if (wait_ret)
- ret = wait_ret;
- }
-
- end = get_cycles();
- cycles = end - start;
- pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
- cycles, cycles / (8 * speed[i].blen));
if (ret) {
pr_err("At least one hashing failed ret=%d\n", ret);
@@ -489,10 +863,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
}
out:
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
ahash_request_free(data[k].req);
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
testmgr_free_buf(data[k].xbuf);
crypto_free_ahash(tfm);
@@ -736,6 +1110,254 @@ static void test_hash_speed(const char *algo, unsigned int secs,
return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
+struct test_mb_skcipher_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+};
+
+static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_skcipher_encrypt(data[i].req);
+ else
+ rc[i] = crypto_skcipher_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_skcipher_data *data;
+ struct crypto_skcipher *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ u32 *b_size;
+ char iv[128];
+ int ret;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ tfm = crypto_alloc_skcipher(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ skcipher_request_free(data[i].req);
+ goto out_free_xbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ skcipher_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ crypto_init_wait(&data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_skcipher, tfm), e);
+
+ i = 0;
+ do {
+ b_size = block_sizes;
+ do {
+ if (*b_size > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ *b_size, XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_skcipher_clear_flags(tfm, ~0);
+
+ ret = crypto_skcipher_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_skcipher_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_skcipher_ivsize(tfm);
+ if (iv_len)
+ memset(&iv, 0xff, iv_len);
+
+ /* Now setup per request stuff, i.e. buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_skcipher_data *cur = &data[j];
+ unsigned int k = *b_size;
+ unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
+ unsigned int p = 0;
+
+ sg_init_table(cur->sg, pages);
+
+ while (k > PAGE_SIZE) {
+ sg_set_buf(cur->sg + p, cur->xbuf[p],
+ PAGE_SIZE);
+ memset(cur->xbuf[p], 0xff, PAGE_SIZE);
+ p++;
+ k -= PAGE_SIZE;
+ }
+
+ sg_set_buf(cur->sg + p, cur->xbuf[p], k);
+ memset(cur->xbuf[p], 0xff, k);
+
+ skcipher_request_set_crypt(cur->req, cur->sg,
+ cur->sg, *b_size,
+ iv);
+ }
+
+ if (secs)
+ ret = test_mb_acipher_jiffies(data, enc,
+ *b_size, secs,
+ num_mb);
+ else
+ ret = test_mb_acipher_cycles(data, enc,
+ *b_size, num_mb);
+
+ if (ret) {
+ pr_err("%s() failed flags=%x\n", e,
+ crypto_skcipher_get_flags(tfm));
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ skcipher_request_free(data[i].req);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_skcipher(tfm);
+out_free_data:
+ kfree(data);
+}
+
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -1361,6 +1983,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
case 190:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
break;
+ case 191:
+ ret += tcrypt_test("ecb(sm4)");
+ break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
@@ -1557,16 +2182,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
+ test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_20);
+ test_aead_speed("gcm(aes)", DECRYPT, sec,
+ NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
+ test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
+ test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec,
+ NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
@@ -1574,6 +2207,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
+ case 215:
+ test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ break;
+
+ case 216:
+ test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ break;
+
+ case 217:
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1778,19 +2438,23 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
if (mode > 400 && mode < 500) break;
/* fall through */
case 422:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 423:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 424:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 425:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 499:
@@ -2008,6 +2672,218 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_8_32);
break;
+ case 600:
+ test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ break;
+
+ case 601:
+ test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ break;
+
+ case 602:
+ test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 603:
+ test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 604:
+ test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ break;
+
+ case 605:
+ test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 606:
+ test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ break;
+
+ case 607:
+ test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 608:
+ test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 609:
+ test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ break;
+
case 1000:
test_available();
break;
@@ -2069,6 +2945,8 @@ module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
+module_param(num_mb, uint, 0000);
+MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
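For reference, tcrypt runs its tests at module load time and then intentionally refuses to load, so the new multibuffer modes are driven by something like "modprobe tcrypt mode=600 sec=1 num_mb=16". That invocation is illustrative; mode, sec and num_mb are the module parameters defined above.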
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29d7020b..af4a01c5 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -177,6 +177,18 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
+static int ahash_guard_result(char *result, char c, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (result[i] != c)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
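ahash_guard_result() underpins a simple sentinel-check pattern used throughout the hunks below: fill req->result with a known byte, run an operation that must not produce output (init, update, export, import), then verify every byte is still the sentinel. A self-contained sketch of the pattern follows; the names (guard_check, fake_update, SENTINEL) are illustrative, not kernel API.

```c
#include <stdio.h>
#include <string.h>

#define DIGEST_SIZE 32
#define SENTINEL 0x01

/* Return 0 if buf still holds only the sentinel byte, -1 otherwise. */
static int guard_check(const unsigned char *buf, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (buf[i] != SENTINEL)
			return -1;
	return 0;
}

/* A well-behaved "update" step: consumes input, never touches result. */
static void fake_update(unsigned char *result)
{
	(void)result;
}

int main(void)
{
	unsigned char result[DIGEST_SIZE];

	memset(result, SENTINEL, sizeof(result));	/* arm the guard */
	fake_update(result);
	if (guard_check(result, sizeof(result)))
		fprintf(stderr, "update wrote to result prematurely\n");
	else
		printf("result untouched, as required\n");
	return 0;
}
```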
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
@@ -185,7 +197,8 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
- const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ int digestsize = crypto_ahash_digestsize(tfm);
req = *preq;
statesize = crypto_ahash_statesize(
@@ -196,12 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
goto out_nostate;
}
memcpy(state + statesize, guard, sizeof(guard));
+ memset(result, 1, digestsize);
ret = crypto_ahash_export(req, state);
WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alg: hash: Failed to export() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, export used req->result for %s\n",
+ algo);
+ goto out;
+ }
ahash_request_free(req);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -221,6 +241,12 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, import used req->result for %s\n",
+ algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
@@ -316,18 +342,31 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out;
}
} else {
+ memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: update failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
@@ -2962,6 +3001,33 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ecb(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(sm4_enc_tv_template),
+ .dec = __VECS(sm4_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "ecb(speck128)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck128_enc_tv_template),
+ .dec = __VECS(speck128_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "ecb(speck64)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck64_enc_tv_template),
+ .dec = __VECS(speck64_dec_tv_template)
+ }
+ }
+ }, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
@@ -3519,6 +3585,24 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "xts(speck128)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck128_xts_enc_tv_template),
+ .dec = __VECS(speck128_xts_dec_tv_template)
+ }
+ }
+ }, {
+ .alg = "xts(speck64)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = {
+ .enc = __VECS(speck64_xts_enc_tv_template),
+ .dec = __VECS(speck64_xts_dec_tv_template)
+ }
+ }
+ }, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index a714b629..004c0a0f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -548,7 +548,7 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
{
.key =
- "\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
+ "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
"\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28"
"\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67"
"\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d"
@@ -597,8 +597,8 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
"\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a"
"\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda"
"\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46"
- "\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30"
- "\x02\x01\x30",
+ "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00"
+ "\x02\x01\x00",
.key_len = 804,
/*
* m is SHA256 hash of following message:
@@ -1052,6 +1052,142 @@ static const struct hash_testvec sha3_224_tv_template[] = {
"\xc9\xfd\x55\x74\x49\x44\x79\xba"
"\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
"\xd0\xfc\xce\x33",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26"
+ "\xc3\x88\x20\x71\x15\x06\xe8\x2d"
+ "\xa3\x92\x44\xab\x3e\xe7\xff\x86"
+ "\xb6\x79\x10\x72",
},
};
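For context: testmgr's np/tap fields describe how a hash test vector is scattered across a multi-entry scatterlist, np being the number of pieces and tap the byte length of each piece, so the tap entries must sum to psize (here 28 + 28 = 56 for the existing second vector). The 1023-byte vector added above carries no np/tap and is hashed as a single piece.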
@@ -1077,6 +1213,142 @@ static const struct hash_testvec sha3_256_tv_template[] = {
"\x49\x10\x03\x76\xa8\x23\x5e\x2c"
"\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
"\xdb\x32\xdd\x97\x49\x6d\x33\x76",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71"
+ "\xf7\xfa\x80\xf5\xea\x11\x03\xb1"
+ "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7"
+ "\x8a\x97\xbb\xf2\x07\x08\x38\x30",
},
};
@@ -1109,6 +1381,144 @@ static const struct hash_testvec sha3_384_tv_template[] = {
"\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
"\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
"\x9e\xef\x51\xac\xd0\x65\x7c\x22",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71"
+ "\xcf\xca\x30\x85\x9b\xc1\x25\xc7"
+ "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b"
+ "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51"
+ "\xd2\x21\xae\x2d\xc7\xae\x8c\x40"
+ "\xb9\xe6\x56\x48\x03\xcd\x88\x6b",
},
};
@@ -1147,6 +1557,146 @@ static const struct hash_testvec sha3_512_tv_template[] = {
"\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
"\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde"
+ "\xf0\xc6\x42\x17\xd7\xb2\x26\x47"
+ "\x90\x28\xa6\x84\xe8\x49\x7a\x86"
+ "\xd6\xb8\x9e\xf8\x07\x59\x21\x03"
+ "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0"
+ "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b"
+ "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41"
+ "\x32\xc2\x8e\x50\x91\x86\x90\xfb",
},
};
@@ -1494,6 +2044,265 @@ static const struct hash_testvec crct10dif_tv_template[] = {
.digest = (u8 *)(u16 []){ 0x44c6 },
.np = 4,
.tap = { 1, 255, 57, 6 },
+ }, {
+ .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
+ "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
+ "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
+ "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
+ "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
+ "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
+ "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
+ "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
+ "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
+ "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
+ "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
+ "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
+ "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
+ "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
+ "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
+ "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
+ "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
+ "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
+ "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
+ "\x58\xef\x63\xfa\x91\x05\x9c\x33"
+ "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
+ "\x19\xb0\x47\xde\x52\xe9\x80\x17"
+ "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
+ "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
+ "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
+ "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
+ "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
+ "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
+ "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
+ "\x86\x1d\x91\x28\xbf\x33\xca\x61"
+ "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
+ "\x47\xde\x75\x0c\x80\x17\xae\x22"
+ "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
+ "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
+ "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
+ "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
+ "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
+ "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
+ "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
+ "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
+ "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
+ "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
+ "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
+ "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
+ "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
+ "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
+ "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
+ "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
+ "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
+ "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
+ "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
+ "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
+ "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
+ "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
+ "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
+ "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
+ "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
+ "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
+ "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
+ "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
+ "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
+ "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
+ "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
+ "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
+ "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
+ "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
+ "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
+ "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
+ "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
+ "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
+ "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
+ "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
+ "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
+ "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
+ "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
+ "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
+ "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
+ "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
+ "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
+ "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
+ "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
+ "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
+ "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
+ "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
+ "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
+ "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
+ "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
+ "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
+ "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
+ "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
+ "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
+ "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
+ "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
+ "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
+ "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
+ "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
+ "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
+ "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
+ "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
+ "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
+ "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
+ "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
+ "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
+ "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
+ "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
+ "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
+ "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
+ "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
+ "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
+ "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
+ "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
+ "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
+ "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
+ "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
+ "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
+ "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
+ "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
+ "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
+ "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
+ "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
+ "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
+ "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
+ "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
+ "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
+ "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
+ "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
+ "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
+ "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
+ "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
+ "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
+ "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
+ "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
+ "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
+ "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
+ "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
+ "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
+ "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
+ "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
+ "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
+ "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
+ "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
+ "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
+ "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
+ "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
+ "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
+ "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
+ "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
+ "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
+ "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
+ "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
+ "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
+ "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
+ "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
+ "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
+ "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
+ "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
+ "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
+ "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
+ "\x47\xde\x52\xe9\x80\x17\x8b\x22"
+ "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
+ "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
+ "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
+ "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
+ "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
+ "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
+ "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
+ "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
+ "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
+ "\x75\x0c\x80\x17\xae\x22\xb9\x50"
+ "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
+ "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
+ "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
+ "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
+ "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
+ "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
+ "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
+ "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
+ "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
+ "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
+ "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
+ "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
+ "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
+ "\x48\xdf\x53\xea\x81\x18\x8c\x23"
+ "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
+ "\x09\xa0\x37\xce\x42\xd9\x70\x07"
+ "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
+ "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
+ "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
+ "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
+ "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
+ "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
+ "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
+ "\x76\x0d\x81\x18\xaf\x23\xba\x51"
+ "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
+ "\x37\xce\x65\xfc\x70\x07\x9e\x12"
+ "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
+ "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
+ "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
+ "\xff\x73\x0a\xa1\x15\xac\x43\xda"
+ "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
+ "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
+ "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
+ "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
+ "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
+ "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
+ "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
+ "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
+ "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
+ "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
+ "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
+ "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
+ "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
+ "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
+ "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
+ "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
+ "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
+ "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
+ "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
+ "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
+ "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
+ "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
+ "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
+ "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
+ "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
+ "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
+ "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
+ "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
+ "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
+ "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
+ "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
+ "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
+ "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
+ "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
+ "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
+ "\xef\x86\x1d\x91\x28\xbf\x33\xca"
+ "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
+ "\xd3\x47\xde\x75\x0c\x80\x17\xae"
+ "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
+ "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
+ "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
+ "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
+ "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
+ "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
+ "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
+ "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
+ "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
+ "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
+ "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
+ "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
+ "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
+ "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
+ "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
+ "\x67\xfe\x95\x09\xa0\x37\xce\x42"
+ "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
+ "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
+ "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
+ .psize = 2048,
+ .digest = (u8 *)(u16 []){ 0x23ca },
}
};
@@ -13773,6 +14582,1623 @@ static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
},
};
+/*
+ * SM4 test vectors taken from the draft RFC
+ * https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016
+ */
+
+static const struct cipher_testvec sm4_enc_tv_template[] = {
+ { /* SM4 Appendix A: Example Calculations. Example 1. */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .ilen = 16,
+ .result = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
+ "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
+ .rlen = 16,
+ }, { /*
+ * SM4 Appendix A: Example Calculations.
+ * Last 10 iterations of Example 2.
+ */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\x99\x4a\xc3\xe7\xc3\x57\x89\x6a"
+ "\x81\xfc\xa8\x0e\x38\x3e\xef\x80"
+ "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x01\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\x0c\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x00\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x06\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x00\x19\x0e\x16\x02\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1",
+ .ilen = 160,
+ .result = "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x01\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\x0c\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x00\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x06\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x00\x19\x0e\x16\x02\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1"
+ "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
+ "\x04\x02\xf8\x04\xc3\x3d\x3f\x66",
+ .rlen = 160
+ }
+};
+
+static const struct cipher_testvec sm4_dec_tv_template[] = {
+ { /* SM4 Appendix A: Example Calculations. Example 1. */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
+ "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
+ .ilen = 16,
+ .result = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .rlen = 16,
+ }, { /*
+ * SM4 Appendix A: Example Calculations.
+ * Last 10 iterations of Example 2.
+ */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .input = "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x01\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\x0c\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x00\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x06\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x00\x19\x0e\x16\x02\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1"
+ "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
+ "\x04\x02\xf8\x04\xc3\x3d\x3f\x66",
+ .ilen = 160,
+ .result = "\x99\x4a\xc3\xe7\xc3\x57\x89\x6a"
+ "\x81\xfc\xa8\x0e\x38\x3e\xef\x80"
+ "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
+ "\xf0\xf1\x30\x4c\x01\x27\x5a\x8f"
+ "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
+ "\xad\x57\x15\xab\x31\x5d\x0c\xef"
+ "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
+ "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
+ "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
+ "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
+ "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
+ "\xa5\x37\x00\xbe\xe7\x7b\x48\xfb"
+ "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
+ "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
+ "\x88\xa6\x6e\x06\x93\xca\x43\xa5"
+ "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
+ "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
+ "\xed\xce\x00\x19\x0e\x16\x02\x6e"
+ "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
+ "\x31\x51\xec\x47\xc3\x51\x83\xc1",
+ .rlen = 160
+ }
+};
+
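A quick way to check the first SM4 vector above from userspace is the AF_ALG socket interface. Below is a minimal sketch, assuming a kernel with CONFIG_CRYPTO_USER_API_SKCIPHER and the generic SM4 cipher available; error handling is omitted for brevity, and the key, plaintext and expected ciphertext are copied from Example 1 above.

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "ecb(sm4)",
	};
	/* Key, plaintext and expected ciphertext from SM4 Example 1. */
	static const unsigned char key[16] =
		"\x01\x23\x45\x67\x89\xAB\xCD\xEF\xFE\xDC\xBA\x98\x76\x54\x32\x10";
	static const unsigned char pt[16] =
		"\x01\x23\x45\x67\x89\xAB\xCD\xEF\xFE\xDC\xBA\x98\x76\x54\x32\x10";
	static const unsigned char expect[16] =
		"\x68\x1E\xDF\x34\xD2\x06\x96\x5E\x86\xB3\xE9\x4F\x53\x6E\x42\x46";
	unsigned char ct[16];
	union {	/* keep the control buffer aligned for CMSG_* */
		char buf[CMSG_SPACE(sizeof(__u32))];
		struct cmsghdr align;
	} cbuf = { { 0 } };
	struct iovec iov = { .iov_base = (void *)pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf.buf, .msg_controllen = sizeof(cbuf.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	int tfmfd, opfd;

	/* Request an encryption operation via the ancillary message. */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));
	printf("ecb(sm4): %s\n", memcmp(ct, expect, 16) ? "MISMATCH" : "ok");
	return 0;
}
```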
+/*
+ * Speck test vectors taken from the original paper:
+ * "The Simon and Speck Families of Lightweight Block Ciphers"
+ * https://eprint.iacr.org/2013/404.pdf
+ *
+ * Note that the paper does not make the byte and word order clear, but the
+ * authors confirmed that the intended orders are little-endian byte order
+ * and (y, x) word order. Equivalently, the printed test vectors, when
+ * looking at only the bytes (ignoring the whitespace that divides them into
+ * words), are backwards: the left-most byte is actually the one with the
+ * highest memory address, while the right-most byte is actually the one with
+ * the lowest memory address.
+ */
+
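As a concrete check of the ordering described above: the Speck128/128 plaintext printed in the paper as 6c61766975716520 7469206564616d20, read byte by byte from right to left, gives 20 6d 61 64 65 20 69 74 20 65 71 75 69 76 61 6c, which is exactly the .input of the first vector below (the ASCII text " made it equival").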
+static const struct cipher_testvec speck128_enc_tv_template[] = {
+ { /* Speck128/128 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .klen = 16,
+ .input = "\x20\x6d\x61\x64\x65\x20\x69\x74"
+ "\x20\x65\x71\x75\x69\x76\x61\x6c",
+ .ilen = 16,
+ .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
+ "\x65\x32\x78\x79\x51\x98\x5d\xa6",
+ .rlen = 16,
+ }, { /* Speck128/192 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ .klen = 24,
+ .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
+ "\x68\x69\x65\x66\x20\x48\x61\x72",
+ .ilen = 16,
+ .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
+ "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
+ .rlen = 16,
+ }, { /* Speck128/256 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .klen = 32,
+ .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
+ "\x49\x6e\x20\x74\x68\x6f\x73\x65",
+ .ilen = 16,
+ .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
+ "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
+ .rlen = 16,
+ },
+};
+
+static const struct cipher_testvec speck128_dec_tv_template[] = {
+ { /* Speck128/128 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .klen = 16,
+ .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
+ "\x65\x32\x78\x79\x51\x98\x5d\xa6",
+ .ilen = 16,
+ .result = "\x20\x6d\x61\x64\x65\x20\x69\x74"
+ "\x20\x65\x71\x75\x69\x76\x61\x6c",
+ .rlen = 16,
+ }, { /* Speck128/192 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ .klen = 24,
+ .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
+ "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
+ .ilen = 16,
+ .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
+ "\x68\x69\x65\x66\x20\x48\x61\x72",
+ .rlen = 16,
+ }, { /* Speck128/256 */
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .klen = 32,
+ .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
+ "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
+ .ilen = 16,
+ .result = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
+ "\x49\x6e\x20\x74\x68\x6f\x73\x65",
+ .rlen = 16,
+ },
+};
+
+/*
+ * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
+ * result recomputed with Speck128 as the cipher
+ */
+
+static const struct cipher_testvec speck128_xts_enc_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ilen = 32,
+ .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
+ "\x3b\x99\x4a\x64\x74\x77\xac\xed"
+ "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
+ "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
+ "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
+ "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
+ "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
+ "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
+ "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
+ "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
+ "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
+ "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
+ "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
+ "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
+ "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
+ "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
+ "\x19\xc5\x58\x84\x63\xb9\x12\x68"
+ "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
+ "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
+ "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
+ "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
+ "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
+ "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
+ "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
+ "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
+ "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
+ "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
+ "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
+ "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
+ "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
+ "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
+ "\xa5\x20\xac\x24\x1c\x73\x59\x73"
+ "\x58\x61\x3a\x87\x58\xb3\x20\x56"
+ "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
+ "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
+ "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
+ "\x09\x35\x71\x50\x65\xac\x92\xe3"
+ "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
+ "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
+ "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
+ "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
+ "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
+ "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
+ "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
+ "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
+ "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
+ "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
+ "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
+ "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
+ "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
+ "\x87\x30\xac\xd5\xea\x73\x49\x10"
+ "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
+ "\x66\x02\x35\x3d\x60\x06\x36\x4f"
+ "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
+ "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
+ "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
+ "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
+ "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
+ "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
+ "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
+ "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
+ "\x51\x66\x02\x69\x04\x97\x36\xd4"
+ "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
+ "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
+ "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
+ "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
+ "\x03\xe7\x05\x39\xf5\x05\x26\xee"
+ "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
+ "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
+ "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
+ "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
+ "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
+ "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95"
+ "\x02\x88\x41\x97\x16\x93\x99\x37"
+ "\x51\x05\x82\x09\x74\x94\x45\x92",
+ .klen = 64,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
+ "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
+ "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
+ "\x92\x99\xde\xd3\x76\xed\xcd\x63"
+ "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
+ "\x79\x36\x31\x19\x62\xae\x10\x7e"
+ "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
+ "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
+ "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
+ "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
+ "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
+ "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
+ "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
+ "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
+ "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
+ "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
+ "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
+ "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
+ "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
+ "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
+ "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
+ "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
+ "\xac\xa5\xd0\x38\xef\x19\x56\x53"
+ "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
+ "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
+ "\xed\x22\x34\x1c\x5d\xed\x17\x06"
+ "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
+ "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
+ "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
+ "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
+ "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
+ "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
+ "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
+ "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
+ "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
+ "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
+ "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
+ "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
+ "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
+ "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
+ "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
+ "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
+ "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
+ "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
+ "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
+ "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
+ "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
+ "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
+ "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
+ "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
+ "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
+ "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
+ "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
+ "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
+ "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
+ "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
+ "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
+ "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
+ "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
+ "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
+ "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
+ "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
+ "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
+ "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
+static const struct cipher_testvec speck128_xts_dec_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
+ "\x3b\x99\x4a\x64\x74\x77\xac\xed"
+ "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
+ "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
+ .ilen = 32,
+ .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
+ "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
+ "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
+ "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 32,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
+ "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
+ "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
+ "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
+ "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
+ "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
+ "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
+ "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
+ "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
+ "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
+ "\x19\xc5\x58\x84\x63\xb9\x12\x68"
+ "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
+ "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
+ "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
+ "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
+ "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
+ "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
+ "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
+ "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
+ "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
+ "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
+ "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
+ "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
+ "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
+ "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
+ "\xa5\x20\xac\x24\x1c\x73\x59\x73"
+ "\x58\x61\x3a\x87\x58\xb3\x20\x56"
+ "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
+ "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
+ "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
+ "\x09\x35\x71\x50\x65\xac\x92\xe3"
+ "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
+ "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
+ "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
+ "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
+ "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
+ "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
+ "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
+ "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
+ "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
+ "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
+ "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
+ "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
+ "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
+ "\x87\x30\xac\xd5\xea\x73\x49\x10"
+ "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
+ "\x66\x02\x35\x3d\x60\x06\x36\x4f"
+ "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
+ "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
+ "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
+ "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
+ "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
+ "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
+ "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
+ "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
+ "\x51\x66\x02\x69\x04\x97\x36\xd4"
+ "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
+ "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
+ "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
+ "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
+ "\x03\xe7\x05\x39\xf5\x05\x26\xee"
+ "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
+ "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
+ "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
+ "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
+ "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
+ "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27"
+ "\x31\x41\x59\x26\x53\x58\x97\x93"
+ "\x23\x84\x62\x64\x33\x83\x27\x95"
+ "\x02\x88\x41\x97\x16\x93\x99\x37"
+ "\x51\x05\x82\x09\x74\x94\x45\x92",
+ .klen = 64,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
+ "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
+ "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
+ "\x92\x99\xde\xd3\x76\xed\xcd\x63"
+ "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
+ "\x79\x36\x31\x19\x62\xae\x10\x7e"
+ "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
+ "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
+ "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
+ "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
+ "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
+ "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
+ "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
+ "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
+ "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
+ "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
+ "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
+ "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
+ "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
+ "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
+ "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
+ "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
+ "\xac\xa5\xd0\x38\xef\x19\x56\x53"
+ "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
+ "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
+ "\xed\x22\x34\x1c\x5d\xed\x17\x06"
+ "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
+ "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
+ "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
+ "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
+ "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
+ "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
+ "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
+ "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
+ "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
+ "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
+ "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
+ "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
+ "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
+ "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
+ "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
+ "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
+ "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
+ "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
+ "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
+ "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
+ "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
+ "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
+ "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
+ "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
+ "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
+ "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
+ "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
+ "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
+ "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
+ "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
+ "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
+ "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
+ "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
+ "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
+ "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
+ "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
+ "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
+ "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
+static const struct cipher_testvec speck64_enc_tv_template[] = {
+ { /* Speck64/96 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13",
+ .klen = 12,
+ .input = "\x65\x61\x6e\x73\x20\x46\x61\x74",
+ .ilen = 8,
+ .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
+ .rlen = 8,
+ }, { /* Speck64/128 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13\x18\x19\x1a\x1b",
+ .klen = 16,
+ .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
+ .ilen = 8,
+ .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
+ .rlen = 8,
+ },
+};
+
+static const struct cipher_testvec speck64_dec_tv_template[] = {
+ { /* Speck64/96 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13",
+ .klen = 12,
+ .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
+ .ilen = 8,
+ .result = "\x65\x61\x6e\x73\x20\x46\x61\x74",
+ .rlen = 8,
+ }, { /* Speck64/128 */
+ .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
+ "\x10\x11\x12\x13\x18\x19\x1a\x1b",
+ .klen = 16,
+ .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
+ .ilen = 8,
+ .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
+ .rlen = 8,
+ },
+};
+
+/*
+ * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the
+ * results recomputed using Speck64 as the cipher, and the key lengths adjusted
+ */
+
+static const struct cipher_testvec speck64_xts_enc_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ilen = 32,
+ .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
+ "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
+ "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
+ "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
+ "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
+ "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
+ "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .ilen = 32,
+ .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
+ "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
+ "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
+ "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
+ "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
+ "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
+ "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
+ "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
+ "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
+ "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
+ "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
+ "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
+ "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
+ "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
+ "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
+ "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
+ "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
+ "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
+ "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
+ "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
+ "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
+ "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
+ "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
+ "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
+ "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
+ "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
+ "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
+ "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
+ "\x07\xff\xf3\x72\x74\x48\xb5\x40"
+ "\x50\xb5\xdd\x90\x43\x31\x18\x15"
+ "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
+ "\x29\x93\x90\x8b\xda\x07\xf0\x35"
+ "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
+ "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
+ "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
+ "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
+ "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
+ "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
+ "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
+ "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
+ "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
+ "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
+ "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
+ "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
+ "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
+ "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
+ "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
+ "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
+ "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
+ "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
+ "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
+ "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
+ "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
+ "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
+ "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
+ "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
+ "\x59\xda\xee\x1a\x22\x18\xda\x0d"
+ "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
+ "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
+ "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
+ "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
+ "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
+ "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
+ "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
+ "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
+ "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
+ "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27",
+ .klen = 32,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .ilen = 512,
+ .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
+ "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
+ "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
+ "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
+ "\x78\x16\xea\x80\xdb\x33\x75\x94"
+ "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
+ "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
+ "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
+ "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
+ "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
+ "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
+ "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
+ "\x84\x5e\x46\xed\x20\x89\xb6\x44"
+ "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
+ "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
+ "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
+ "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
+ "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
+ "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
+ "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
+ "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
+ "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
+ "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
+ "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
+ "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
+ "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
+ "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
+ "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
+ "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
+ "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
+ "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
+ "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
+ "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
+ "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
+ "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
+ "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
+ "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
+ "\x52\x7f\x29\x51\x94\x20\x67\x3c"
+ "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
+ "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
+ "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
+ "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
+ "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
+ "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
+ "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
+ "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
+ "\x65\x53\x0f\x41\x11\xbd\x98\x99"
+ "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
+ "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
+ "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
+ "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
+ "\x63\x51\x34\x9d\x96\x12\xae\xce"
+ "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
+ "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
+ "\x54\x27\x74\xbb\x10\x86\x57\x46"
+ "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
+ "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
+ "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
+ "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
+ "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
+ "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
+ "\x9b\x63\x76\x32\x2f\x19\x72\x10"
+ "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
+ "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
+static const struct cipher_testvec speck64_xts_dec_tv_template[] = {
+ {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
+ "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
+ "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
+ "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
+ .ilen = 32,
+ .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .rlen = 32,
+ }, {
+ .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x11\x11\x11\x11\x11\x11\x11\x11"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
+ "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
+ "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
+ "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+ "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+ "\x22\x22\x22\x22\x22\x22\x22\x22",
+ .klen = 24,
+ .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
+ "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
+ "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
+ "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
+ .ilen = 32,
+ .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44"
+ "\x44\x44\x44\x44\x44\x44\x44\x44",
+ .rlen = 32,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x31\x41\x59\x26\x53\x58\x97\x93",
+ .klen = 24,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
+ "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
+ "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
+ "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
+ "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
+ "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
+ "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
+ "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
+ "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
+ "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
+ "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
+ "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
+ "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
+ "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
+ "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
+ "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
+ "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
+ "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
+ "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
+ "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
+ "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
+ "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
+ "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
+ "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
+ "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
+ "\x07\xff\xf3\x72\x74\x48\xb5\x40"
+ "\x50\xb5\xdd\x90\x43\x31\x18\x15"
+ "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
+ "\x29\x93\x90\x8b\xda\x07\xf0\x35"
+ "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
+ "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
+ "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
+ "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
+ "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
+ "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
+ "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
+ "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
+ "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
+ "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
+ "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
+ "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
+ "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
+ "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
+ "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
+ "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
+ "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
+ "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
+ "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
+ "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
+ "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
+ "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
+ "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
+ "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
+ "\x59\xda\xee\x1a\x22\x18\xda\x0d"
+ "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
+ "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
+ "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
+ "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
+ "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
+ "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
+ "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
+ "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
+ "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
+ "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ }, {
+ .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
+ "\x23\x53\x60\x28\x74\x71\x35\x26"
+ "\x62\x49\x77\x57\x24\x70\x93\x69"
+ "\x99\x59\x57\x49\x66\x96\x76\x27",
+ .klen = 32,
+ .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
+ "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
+ "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
+ "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
+ "\x78\x16\xea\x80\xdb\x33\x75\x94"
+ "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
+ "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
+ "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
+ "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
+ "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
+ "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
+ "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
+ "\x84\x5e\x46\xed\x20\x89\xb6\x44"
+ "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
+ "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
+ "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
+ "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
+ "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
+ "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
+ "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
+ "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
+ "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
+ "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
+ "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
+ "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
+ "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
+ "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
+ "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
+ "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
+ "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
+ "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
+ "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
+ "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
+ "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
+ "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
+ "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
+ "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
+ "\x52\x7f\x29\x51\x94\x20\x67\x3c"
+ "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
+ "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
+ "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
+ "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
+ "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
+ "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
+ "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
+ "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
+ "\x65\x53\x0f\x41\x11\xbd\x98\x99"
+ "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
+ "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
+ "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
+ "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
+ "\x63\x51\x34\x9d\x96\x12\xae\xce"
+ "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
+ "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
+ "\x54\x27\x74\xbb\x10\x86\x57\x46"
+ "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
+ "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
+ "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
+ "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
+ "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
+ "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
+ "\x9b\x63\x76\x32\x2f\x19\x72\x10"
+ "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
+ "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
+ .ilen = 512,
+ .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+ "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67"
+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77"
+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .rlen = 512,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 512 - 20, 4, 16 },
+ }
+};
+
/* Cast6 test vectors from RFC 2612 */
static const struct cipher_testvec cast6_enc_tv_template[] = {
{
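The single-block Speck vectors above match the appendix of the Speck paper
("The SIMON and SPECK Families of Lightweight Block Ciphers"), with testmgr's
.key/.input/.result byte strings loading little-endian into 64-bit words. As a
minimal userspace sketch following the paper's reference description -- not
the kernel's crypto/speck.c, though that driver uses the same round structure
-- Speck128/128 can be checked like this:

	#include <stdint.h>
	#include <stdio.h>

	#define ROR64(x, r) (((x) >> (r)) | ((x) << (64 - (r))))
	#define ROL64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))

	/*
	 * Speck128/128: 32 rounds over two 64-bit words with rotations
	 * 8 and 3. The key schedule reuses the round function, so the
	 * round keys are generated on the fly.
	 */
	static void speck128_128_encrypt(uint64_t ct[2], const uint64_t pt[2],
					 const uint64_t key[2])
	{
		uint64_t y = pt[0], x = pt[1];   /* [0] = low word */
		uint64_t a = key[0], b = key[1]; /* a = first round key */
		uint64_t i;

		for (i = 0; i < 32; i++) {
			x = (ROR64(x, 8) + y) ^ a;  /* data round, key a */
			y = ROL64(y, 3) ^ x;
			b = (ROR64(b, 8) + a) ^ i;  /* derive next round key */
			a = ROL64(a, 3) ^ b;
		}
		ct[0] = y;
		ct[1] = x;
	}

	int main(void)
	{
		/* Key bytes 00..0f and plaintext " made it equival",
		 * loaded as little-endian 64-bit words. */
		const uint64_t key[2] = { 0x0706050403020100ULL,
					  0x0f0e0d0c0b0a0908ULL };
		const uint64_t pt[2]  = { 0x7469206564616d20ULL,
					  0x6c61766975716520ULL };
		uint64_t ct[2];

		speck128_128_encrypt(ct, pt, key);
		/* Expect a65d985179783265 7860fedf5c570d18, i.e. the byte
		 * string 18 0d 57 5c ... 51 98 5d a6 in the templates. */
		printf("%016llx %016llx\n", (unsigned long long)ct[1],
		       (unsigned long long)ct[0]);
		return 0;
	}

The XTS templates, as their comments note, instead reuse the AES-XTS keys,
IVs, and plaintexts with the ciphertexts regenerated under Speck as the
underlying cipher.
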
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index 5f62c4f9..f3a0dd25 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -24,9 +24,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index ebf7a3ef..07e62433 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -23,9 +23,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index df90b332..25c75af5 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author:
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
diff --git a/crypto/xts.c b/crypto/xts.c
index f317c48b..12284183 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -357,78 +357,6 @@ static int decrypt(struct skcipher_request *req)
return do_decrypt(req, init_crypt(req, decrypt_done));
}
-int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
- struct scatterlist *ssrc, unsigned int nbytes,
- struct xts_crypt_req *req)
-{
- const unsigned int bsize = XTS_BLOCK_SIZE;
- const unsigned int max_blks = req->tbuflen / bsize;
- struct blkcipher_walk walk;
- unsigned int nblocks;
- le128 *src, *dst, *t;
- le128 *t_buf = req->tbuf;
- int err, i;
-
- BUG_ON(max_blks < 1);
-
- blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
- nbytes = walk.nbytes;
- if (!nbytes)
- return err;
-
- nblocks = min(nbytes / bsize, max_blks);
- src = (le128 *)walk.src.virt.addr;
- dst = (le128 *)walk.dst.virt.addr;
-
- /* calculate first value of T */
- req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
-
- i = 0;
- goto first;
-
- for (;;) {
- do {
- for (i = 0; i < nblocks; i++) {
- gf128mul_x_ble(&t_buf[i], t);
-first:
- t = &t_buf[i];
-
- /* PP <- T xor P */
- le128_xor(dst + i, t, src + i);
- }
-
- /* CC <- E(Key2,PP) */
- req->crypt_fn(req->crypt_ctx, (u8 *)dst,
- nblocks * bsize);
-
- /* C <- T xor CC */
- for (i = 0; i < nblocks; i++)
- le128_xor(dst + i, dst + i, &t_buf[i]);
-
- src += nblocks;
- dst += nblocks;
- nbytes -= nblocks * bsize;
- nblocks = min(nbytes / bsize, max_blks);
- } while (nblocks > 0);
-
- *(le128 *)walk.iv = *t;
-
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- if (!nbytes)
- break;
-
- nblocks = min(nbytes / bsize, max_blks);
- src = (le128 *)walk.src.virt.addr;
- dst = (le128 *)walk.dst.virt.addr;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xts_crypt);
-
static int init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
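
The removed xts_crypt() was the legacy blkcipher-based XTS walk helper (note
the EXPORT_SYMBOL_GPL going away with it). Its inline comments give the whole
per-block recipe: PP = T xor P, CC = E(Key2, PP), C = T xor CC, with the tweak
T seeded from tweak_fn(IV) and advanced one block at a time by
gf128mul_x_ble(). That tweak step is multiplication by x in GF(2^128) in XTS's
little-endian convention; a minimal sketch, assuming a hypothetical two-halves
struct in place of the kernel's le128 (the real helper lives in
include/crypto/gf128mul.h):

	#include <stdint.h>

	/* 128-bit tweak as two host-order halves: lo = bytes 0-7,
	 * hi = bytes 8-15 of the little-endian tweak block. */
	struct u128_le {
		uint64_t lo;
		uint64_t hi;
	};

	/*
	 * Multiply the tweak by x: shift the 128-bit little-endian value
	 * left one bit; if bit 127 falls off the top, reduce with the XTS
	 * polynomial x^128 + x^7 + x^2 + x + 1 by folding 0x87 back into
	 * the low byte.
	 */
	static void xts_mul_x(struct u128_le *t)
	{
		uint64_t carry = t->hi >> 63;

		t->hi = (t->hi << 1) | (t->lo >> 63);
		t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);
	}

So in the removed loop, t_buf[0] holds E(Key2, IV) and t_buf[i] is that value
doubled i times; block i of a sector is processed under the tweak
x^i * E(Key2, IV).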