From cb8a91c0f77afc0b101ebd9c33c521c505883448 Mon Sep 17 00:00:00 2001 From: Lu Jialin Date: Mon, 4 Sep 2023 13:33:41 +0000 Subject: crypto: pcrypt - Fix hungtask for PADATA_RESET We found a hungtask bug in test_aead_vec_cfg as follows: INFO: task cryptomgr_test:391009 blocked for more than 120 seconds. "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. Call trace: __switch_to+0x98/0xe0 __schedule+0x6c4/0xf40 schedule+0xd8/0x1b4 schedule_timeout+0x474/0x560 wait_for_common+0x368/0x4e0 wait_for_completion+0x20/0x30 wait_for_completion+0x20/0x30 test_aead_vec_cfg+0xab4/0xd50 test_aead+0x144/0x1f0 alg_test_aead+0xd8/0x1e0 alg_test+0x634/0x890 cryptomgr_test+0x40/0x70 kthread+0x1e0/0x220 ret_from_fork+0x10/0x18 Kernel panic - not syncing: hung_task: blocked tasks For padata_do_parallel, when the return err is 0 or -EBUSY, it will call wait_for_completion(&wait->completion) in test_aead_vec_cfg. In normal case, aead_request_complete() will be called in pcrypt_aead_serial and the return err is 0 for padata_do_parallel. But, when pinst->flags is PADATA_RESET, the return err is -EBUSY for padata_do_parallel, and it won't call aead_request_complete(). Therefore, test_aead_vec_cfg will hung at wait_for_completion(&wait->completion), which will cause hungtask. The problem comes as following: (padata_do_parallel) | rcu_read_lock_bh(); | err = -EINVAL; | (padata_replace) | pinst->flags |= PADATA_RESET; err = -EBUSY | if (pinst->flags & PADATA_RESET) | rcu_read_unlock_bh() | return err In order to resolve the problem, we replace the return err -EBUSY with -EAGAIN, which means parallel_data is changing, and the caller should call it again. v3: remove retry and just change the return err. v2: introduce padata_try_do_parallel() in pcrypt_aead_encrypt and pcrypt_aead_decrypt to solve the hungtask. Signed-off-by: Lu Jialin Signed-off-by: Guo Zihua Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8c1d0ca4..d0d954fe 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req) err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; + if (err == -EBUSY) + return -EAGAIN; return err; } @@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req) err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; + if (err == -EBUSY) + return -EAGAIN; return err; } -- cgit v1.2.3 From cc314f64447a9958f7450a1b517d186a96f61f9c Mon Sep 17 00:00:00 2001 From: Li zeming Date: Thu, 14 Sep 2023 02:17:27 +0800 Subject: crypto: api - Remove unnecessary NULL initialisation tfm is assigned first, so it does not need to initialize the assignment. Signed-off-by: Li zeming Signed-off-by: Herbert Xu --- crypto/api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/api.c b/crypto/api.c index b9cc0c90..7f402107 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -389,7 +389,7 @@ EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type, u32 mask, gfp_t gfp) { - struct crypto_tfm *tfm = NULL; + struct crypto_tfm *tfm; unsigned int tfm_size; int err = -ENOMEM; -- cgit v1.2.3 From d1716903f396037a401cfd6f7fe2cd5189baf7cd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:21 +0800 Subject: crypto: aead - Add crypto_has_aead Add the helper crypto_has_aead. 
This is meant to replace the existing use of crypto_has_alg to locate AEAD algorithms. Signed-off-by: Herbert Xu --- crypto/aead.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crypto/aead.c b/crypto/aead.c index d5ba204e..54906633 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -269,6 +269,12 @@ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_alloc_aead); +int crypto_has_aead(const char *alg_name, u32 type, u32 mask) +{ + return crypto_type_has_alg(alg_name, &crypto_aead_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_has_aead); + static int aead_prepare_alg(struct aead_alg *alg) { struct crypto_istat_aead *istat = aead_get_stat(alg); -- cgit v1.2.3 From a5a57bd35085781d022c62a7bc5f930fb2abfada Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:23 +0800 Subject: crypto: hash - Hide CRYPTO_ALG_TYPE_AHASH_MASK Move the macro CRYPTO_ALG_TYPE_AHASH_MASK out of linux/crypto.h and into crypto/ahash.c so that it's not visible to users of the Crypto API. Also remove the unused CRYPTO_ALG_TYPE_HASH_MASK macro. Signed-off-by: Herbert Xu --- crypto/ahash.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/ahash.c b/crypto/ahash.c index 709ef094..213bb3e9 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -21,6 +21,8 @@ #include "hash.h" +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e + static const struct crypto_type crypto_ahash_type; struct ahash_request_priv { -- cgit v1.2.3 From 17155679d03d12a05a059e73fb81ea74b95e249c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:24 +0800 Subject: crypto: skcipher - Add lskcipher Add a new API type lskcipher designed for taking straight kernel pointers instead of SG lists. Its relationship to skcipher will be analogous to that between shash and ahash. Signed-off-by: Herbert Xu --- crypto/Makefile | 6 +- crypto/cryptd.c | 2 +- crypto/lskcipher.c | 594 +++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/skcipher.c | 75 +++++-- crypto/skcipher.h | 30 +++ 5 files changed, 688 insertions(+), 19 deletions(-) create mode 100644 crypto/lskcipher.c create mode 100644 crypto/skcipher.h diff --git a/crypto/Makefile b/crypto/Makefile index 953a7e10..5ac6876f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -16,7 +16,11 @@ obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o obj-$(CONFIG_CRYPTO_AEAD2) += aead.o obj-$(CONFIG_CRYPTO_GENIV) += geniv.o -obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o +crypto_skcipher-y += lskcipher.o +crypto_skcipher-y += skcipher.o + +obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o + obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o diff --git a/crypto/cryptd.c b/crypto/cryptd.c index bbcc368b..194a92d6 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -929,7 +929,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: return cryptd_create_skcipher(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_HASH: return cryptd_create_hash(tmpl, tb, algt, &queue); diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c new file mode 100644 index 00000000..3343c6d9 --- /dev/null +++ b/crypto/lskcipher.c @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linear symmetric key cipher operations. + * + * Generic encrypt/decrypt wrapper for ciphers. 
+ * + * Copyright (c) 2023 Herbert Xu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "skcipher.h" + +static inline struct crypto_lskcipher *__crypto_lskcipher_cast( + struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_lskcipher, base); +} + +static inline struct lskcipher_alg *__crypto_lskcipher_alg( + struct crypto_alg *alg) +{ + return container_of(alg, struct lskcipher_alg, co.base); +} + +static inline struct crypto_istat_cipher *lskcipher_get_stat( + struct lskcipher_alg *alg) +{ + return skcipher_get_stat_common(&alg->co); +} + +static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err) +{ + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err) + atomic64_inc(&istat->err_cnt); + + return err; +} + +static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm); + u8 *buffer, *alignbuffer; + unsigned long absize; + int ret; + + absize = keylen + alignmask; + buffer = kmalloc(absize, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + memcpy(alignbuffer, key, keylen); + ret = cipher->setkey(tfm, alignbuffer, keylen); + kfree_sensitive(buffer); + return ret; +} + +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm); + + if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize) + return -EINVAL; + + if ((unsigned long)key & alignmask) + return lskcipher_setkey_unaligned(tfm, key, keylen); + else + return cipher->setkey(tfm, key, keylen); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey); + +static int crypto_lskcipher_crypt_unaligned( + struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len, + u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final)) +{ + unsigned ivsize = crypto_lskcipher_ivsize(tfm); + unsigned bs = crypto_lskcipher_blocksize(tfm); + unsigned cs = crypto_lskcipher_chunksize(tfm); + int err; + u8 *tiv; + u8 *p; + + BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE || + MAX_CIPHER_ALIGNMASK >= PAGE_SIZE); + + tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (!tiv) + return -ENOMEM; + + memcpy(tiv, iv, ivsize); + + p = kmalloc(PAGE_SIZE, GFP_ATOMIC); + err = -ENOMEM; + if (!p) + goto out; + + while (len >= bs) { + unsigned chunk = min((unsigned)PAGE_SIZE, len); + int err; + + if (chunk > cs) + chunk &= ~(cs - 1); + + memcpy(p, src, chunk); + err = crypt(tfm, p, p, chunk, tiv, true); + if (err) + goto out; + + memcpy(dst, p, chunk); + src += chunk; + dst += chunk; + len -= chunk; + } + + err = len ? 
-EINVAL : 0; + +out: + memcpy(iv, tiv, ivsize); + kfree_sensitive(p); + kfree_sensitive(tiv); + return err; +} + +static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + bool final)) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + int ret; + + if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & + alignmask) { + ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, + crypt); + goto out; + } + + ret = crypt(tfm, src, dst, len, iv, true); + +out: + return crypto_lskcipher_errstat(alg, ret); +} + +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(len, &istat->encrypt_tlen); + } + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt); + +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->decrypt_cnt); + atomic64_add(len, &istat->decrypt_tlen); + } + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); + +int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); + + return crypto_lskcipher_setkey(*ctx, key, keylen); +} + +static int crypto_lskcipher_crypt_sg(struct skcipher_request *req, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + bool final)) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct crypto_lskcipher *tfm = *ctx; + struct skcipher_walk walk; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, walk.iv, walk.nbytes == walk.total); + err = skcipher_walk_done(&walk, err); + } + + return err; +} + +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->encrypt); +} + +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->decrypt); +} + +static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + alg->exit(skcipher); +} + +static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = 
__crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + if (alg->exit) + skcipher->base.exit = crypto_lskcipher_exit_tfm; + + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_lskcipher_free_instance(struct crypto_instance *inst) +{ + struct lskcipher_instance *skcipher = + container_of(inst, struct lskcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void __maybe_unused crypto_lskcipher_show( + struct seq_file *m, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + + seq_printf(m, "type : lskcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize); +} + +static int __maybe_unused crypto_lskcipher_report( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_report_blkcipher rblkcipher; + + memset(&rblkcipher, 0, sizeof(rblkcipher)); + + strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type)); + strscpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->co.min_keysize; + rblkcipher.max_keysize = skcipher->co.max_keysize; + rblkcipher.ivsize = skcipher->co.ivsize; + + return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(rblkcipher), &rblkcipher); +} + +static int __maybe_unused crypto_lskcipher_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_istat_cipher *istat; + struct crypto_stat_cipher rcipher; + + istat = lskcipher_get_stat(skcipher); + + memset(&rcipher, 0, sizeof(rcipher)); + + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); + + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); +} + +static const struct crypto_type crypto_lskcipher_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_lskcipher_init_tfm, + .free = crypto_lskcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_lskcipher_show, +#endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) + .report = crypto_lskcipher_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_lskcipher_report_stat, +#endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_LSKCIPHER, + .tfmsize = offsetof(struct crypto_lskcipher, base), +}; + +static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_lskcipher *skcipher; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type); + if (IS_ERR(skcipher)) { + crypto_mod_put(calg); + return PTR_ERR(skcipher); + } + + 
*ctx = skcipher; + tfm->exit = crypto_lskcipher_exit_tfm_sg; + + return 0; +} + +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_lskcipher_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_lskcipher); + +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher); + +static int lskcipher_prepare_alg(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->co.chunksize & (alg->co.chunksize - 1)) + return -EINVAL; + + base->cra_type = &crypto_lskcipher_type; + base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER; + + return 0; +} + +int crypto_register_lskcipher(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = lskcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_lskcipher); + +void crypto_unregister_lskcipher(struct lskcipher_alg *alg) +{ + crypto_unregister_alg(&alg->co.base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher); + +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_lskcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_lskciphers); + +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers); + +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst) +{ + int err; + + if (WARN_ON(!inst->free)) + return -EINVAL; + + err = lskcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(lskcipher_register_instance); + +static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm); + + crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_lskcipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher_spawn *spawn; + struct crypto_lskcipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_lskcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + +static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +static void lskcipher_free_instance_simple(struct lskcipher_instance *inst) +{ + crypto_drop_lskcipher(lskcipher_instance_ctx(inst)); + kfree(inst); +} + +/** + * lskcipher_alloc_instance_simple - allocate instance of 
simple block cipher + * + * Allocate an lskcipher_instance for a simple block cipher mode of operation, + * e.g. cbc or ecb. The instance context will have just a single crypto_spawn, + * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize, + * alignmask, and priority are set from the underlying cipher but can be + * overridden if needed. The tfm context defaults to + * struct crypto_lskcipher *, and default ->setkey(), ->init(), and + * ->exit() methods are installed. + * + * @tmpl: the template being instantiated + * @tb: the template parameters + * + * Return: a pointer to the new instance, or an ERR_PTR(). The caller still + * needs to register the instance. + */ +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + struct lskcipher_instance *inst; + struct crypto_lskcipher_spawn *spawn; + struct lskcipher_alg *cipher_alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = lskcipher_instance_ctx(inst); + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + &cipher_alg->co.base); + if (err) + goto err_free_inst; + + /* Don't allow nesting. */ + err = -ELOOP; + if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) + goto err_free_inst; + + err = -EINVAL; + if (cipher_alg->co.ivsize) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority; + inst->alg.co.min_keysize = cipher_alg->co.min_keysize; + inst->alg.co.max_keysize = cipher_alg->co.max_keysize; + inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize; + + /* Use struct crypto_lskcipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *); + inst->alg.setkey = lskcipher_setkey_simple; + inst->alg.init = lskcipher_init_tfm_simple; + inst->alg.exit = lskcipher_exit_tfm_simple; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple(inst); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7b275716..b9496dc8 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -24,8 +24,9 @@ #include #include #include +#include "skcipher.h" -#include "internal.h" +#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e enum { SKCIPHER_WALK_PHYS = 1 << 0, @@ -43,6 +44,8 @@ struct skcipher_walk_buffer { u8 buffer[]; }; +static const struct crypto_type crypto_skcipher_type; + static int skcipher_walk_next(struct skcipher_walk *walk); static inline void skcipher_map_src(struct skcipher_walk *walk) @@ -89,11 +92,7 @@ static inline struct skcipher_alg *__crypto_skcipher_alg( static inline struct crypto_istat_cipher *skcipher_get_stat( struct skcipher_alg *alg) { -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif + return skcipher_get_stat_common(&alg->co); } static inline int crypto_skcipher_errstat(struct 
skcipher_alg *alg, int err) @@ -468,6 +467,7 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); walk->total = req->cryptlen; walk->nbytes = 0; @@ -485,10 +485,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, SKCIPHER_WALK_SLEEP : 0; walk->blocksize = crypto_skcipher_blocksize(tfm); - walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); + if (alg->co.base.cra_type != &crypto_skcipher_type) + walk->stride = alg->co.chunksize; + else + walk->stride = alg->walksize; + return skcipher_walk_first(walk); } @@ -616,6 +620,11 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned long alignmask = crypto_skcipher_alignmask(tfm); int err; + if (cipher->co.base.cra_type != &crypto_skcipher_type) { + err = crypto_lskcipher_setkey_sg(tfm, key, keylen); + goto out; + } + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) return -EINVAL; @@ -624,6 +633,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); +out: if (unlikely(err)) { skcipher_set_needkey(tfm); return err; @@ -649,6 +659,8 @@ int crypto_skcipher_encrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_encrypt_sg(req); else ret = alg->encrypt(req); @@ -671,6 +683,8 @@ int crypto_skcipher_decrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_decrypt_sg(req); else ret = alg->decrypt(req); @@ -693,6 +707,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher_set_needkey(skcipher); + if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) + return crypto_init_lskcipher_ops_sg(tfm); + if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; @@ -702,6 +719,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) +{ + if (alg->cra_type != &crypto_skcipher_type) + return sizeof(struct crypto_lskcipher *); + + return crypto_alg_extsize(alg); +} + static void crypto_skcipher_free_instance(struct crypto_instance *inst) { struct skcipher_instance *skcipher = @@ -770,7 +795,7 @@ static int __maybe_unused crypto_skcipher_report_stat( } static const struct crypto_type crypto_skcipher_type = { - .extsize = crypto_alg_extsize, + .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS @@ -783,7 +808,7 @@ static const struct crypto_type crypto_skcipher_type = { .report_stat = crypto_skcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; @@ -834,23 +859,18 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_skcipher); -static int skcipher_prepare_alg(struct skcipher_alg *alg) +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) { - struct crypto_istat_cipher *istat = skcipher_get_stat(alg); + 
struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg); struct crypto_alg *base = &alg->base; - if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || - alg->walksize > PAGE_SIZE / 8) + if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8) return -EINVAL; if (!alg->chunksize) alg->chunksize = base->cra_blocksize; - if (!alg->walksize) - alg->walksize = alg->chunksize; - base->cra_type = &crypto_skcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; - base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) memset(istat, 0, sizeof(*istat)); @@ -858,6 +878,27 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg) return 0; } +static int skcipher_prepare_alg(struct skcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->walksize > PAGE_SIZE / 8) + return -EINVAL; + + if (!alg->walksize) + alg->walksize = alg->chunksize; + + base->cra_type = &crypto_skcipher_type; + base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; + + return 0; +} + int crypto_register_skcipher(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; diff --git a/crypto/skcipher.h b/crypto/skcipher.h new file mode 100644 index 00000000..6f1295f0 --- /dev/null +++ b/crypto/skcipher.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * Copyright (c) 2023 Herbert Xu + */ +#ifndef _LOCAL_CRYPTO_SKCIPHER_H +#define _LOCAL_CRYPTO_SKCIPHER_H + +#include +#include "internal.h" + +static inline struct crypto_istat_cipher *skcipher_get_stat_common( + struct skcipher_alg_common *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + return &alg->stat; +#else + return NULL; +#endif +} + +int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen); +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); +int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm); +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg); + +#endif /* _LOCAL_CRYPTO_SKCIPHER_H */ -- cgit v1.2.3 From 6781a99f1919d0d70c42f3d92fe13469ed6b6d58 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:25 +0800 Subject: crypto: lskcipher - Add compatibility wrapper around ECB As an aid to the transition from cipher algorithm implementations to lskcipher, add a temporary wrapper when creating simple lskcipher templates by using ecb(X) instead of X if an lskcipher implementation of X cannot be found. This can be reverted once all cipher implementations have switched over to lskcipher. 
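For illustration, the fallback amounts to retrying the spawn grab with the requested name wrapped in "ecb(...)" when the bare lskcipher lookup fails; a rough sketch of that pattern follows (error paths trimmed, mirroring the hunk below rather than adding new behaviour):

	err = crypto_grab_lskcipher(spawn, lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);
	if (err == -ENOENT && memcmp(tmpl->name, "ecb", 4)) {
		char ecb_name[CRYPTO_MAX_ALG_NAME];

		/* No plain lskcipher "X" registered: try "ecb(X)" instead. */
		if (snprintf(ecb_name, sizeof(ecb_name), "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			return ERR_PTR(-ENAMETOOLONG);

		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

When the fallback path is taken, the "ecb(...)" wrapper is stripped again while constructing cra_name, so the resulting instance keeps the usual "template(cipher)" naming.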
Signed-off-by: Herbert Xu --- crypto/lskcipher.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index 3343c6d9..9be3c04b 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -536,13 +536,19 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( u32 mask; struct lskcipher_instance *inst; struct crypto_lskcipher_spawn *spawn; + char ecb_name[CRYPTO_MAX_ALG_NAME]; struct lskcipher_alg *cipher_alg; + const char *cipher_name; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); if (err) return ERR_PTR(err); + cipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(cipher_name)) + return ERR_CAST(cipher_name); + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return ERR_PTR(-ENOMEM); @@ -550,9 +556,23 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( spawn = lskcipher_instance_ctx(inst); err = crypto_grab_lskcipher(spawn, lskcipher_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, mask); + cipher_name, 0, mask); + + ecb_name[0] = 0; + if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) { + err = -ENAMETOOLONG; + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + cipher_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + ecb_name, 0, mask); + } + if (err) goto err_free_inst; + cipher_alg = crypto_lskcipher_spawn_alg(spawn); err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, @@ -560,10 +580,37 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( if (err) goto err_free_inst; - /* Don't allow nesting. */ - err = -ELOOP; - if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) - goto err_free_inst; + if (ecb_name[0]) { + int len; + + len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4], + sizeof(ecb_name)); + if (len < 2) + goto err_free_inst; + + if (ecb_name[len - 1] != ')') + goto err_free_inst; + + ecb_name[len - 1] = 0; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, ecb_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (strcmp(ecb_name, cipher_name) && + snprintf(inst->alg.co.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, cipher_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + } else { + /* Don't allow nesting. */ + err = -ELOOP; + if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) + goto err_free_inst; + } err = -EINVAL; if (cipher_alg->co.ivsize) -- cgit v1.2.3 From 9b9a6f4429328ebd7861c726ae2fa3e378edd30f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:26 +0800 Subject: crypto: testmgr - Add support for lskcipher algorithms Test lskcipher algorithms using the same logic as cipher algorithms. 
Signed-off-by: Herbert Xu --- crypto/testmgr.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 216878c8..aed4a6bf 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5945,6 +5945,25 @@ test_done: return rc; notest: + if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_LSKCIPHER) { + char nalg[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >= + sizeof(nalg)) + goto notest2; + + i = alg_find_test(nalg); + if (i < 0) + goto notest2; + + if (fips_enabled && !alg_test_descs[i].fips_allowed) + goto non_fips_alg; + + rc = alg_test_skcipher(alg_test_descs + i, driver, type, mask); + goto test_done; + } + +notest2: printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); if (type & CRYPTO_ALG_FIPS_INTERNAL) -- cgit v1.2.3 From 1e9b30de045178372b06773a77bd4d9986e648ca Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:27 +0800 Subject: crypto: ecb - Convert from skcipher to lskcipher This patch adds two different implementations of ECB. First of all an lskcipher wrapper around existing ciphers is introduced as a temporary transition aid. Secondly a permanent lskcipher template is also added. It's simply a wrapper around the underlying lskcipher algorithm. Signed-off-by: Herbert Xu --- crypto/ecb.c | 190 ++++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 156 insertions(+), 34 deletions(-) diff --git a/crypto/ecb.c b/crypto/ecb.c index 71fbb054..cc7625d1 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -5,75 +5,196 @@ * Copyright (c) 2006 Herbert Xu */ -#include #include #include #include #include #include #include +#include -static int crypto_ecb_crypt(struct skcipher_request *req, - struct crypto_cipher *cipher, +static int crypto_ecb_crypt(struct crypto_cipher *cipher, const u8 *src, + u8 *dst, unsigned nbytes, bool final, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { const unsigned int bsize = crypto_cipher_blocksize(cipher); - struct skcipher_walk walk; - unsigned int nbytes; - int err; - - err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes) != 0) { - const u8 *src = walk.src.virt.addr; - u8 *dst = walk.dst.virt.addr; + while (nbytes >= bsize) { + fn(crypto_cipher_tfm(cipher), dst, src); - do { - fn(crypto_cipher_tfm(cipher), dst, src); + src += bsize; + dst += bsize; - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); - - err = skcipher_walk_done(&walk, nbytes); + nbytes -= bsize; } - return err; + return nbytes && final ? 
-EINVAL : nbytes; } -static int crypto_ecb_encrypt(struct skcipher_request *req) +static int crypto_ecb_encrypt2(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; - return crypto_ecb_crypt(req, cipher, + return crypto_ecb_crypt(cipher, src, dst, len, final, crypto_cipher_alg(cipher)->cia_encrypt); } -static int crypto_ecb_decrypt(struct skcipher_request *req) +static int crypto_ecb_decrypt2(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; - return crypto_ecb_crypt(req, cipher, + return crypto_ecb_crypt(cipher, src, dst, len, final, crypto_cipher_alg(cipher)->cia_decrypt); } -static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) +static int lskcipher_setkey_simple2(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher *cipher = *ctx; + + crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_cipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple2(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_cipher_spawn *spawn; + struct crypto_cipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + +static void lskcipher_exit_tfm_simple2(struct crypto_lskcipher *tfm) +{ + struct crypto_cipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_cipher(*ctx); +} + +static void lskcipher_free_instance_simple2(struct lskcipher_instance *inst) +{ + crypto_drop_cipher(lskcipher_instance_ctx(inst)); + kfree(inst); +} + +static struct lskcipher_instance *lskcipher_alloc_instance_simple2( + struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_cipher_spawn *spawn; + struct lskcipher_instance *inst; + struct crypto_alg *cipher_alg; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + spawn = lskcipher_instance_ctx(inst); + + err = crypto_grab_cipher(spawn, lskcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + cipher_alg = crypto_spawn_cipher_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + cipher_alg); + if (err) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple2; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->cra_priority; + inst->alg.co.min_keysize = cipher_alg->cra_cipher.cia_min_keysize; + inst->alg.co.max_keysize 
= cipher_alg->cra_cipher.cia_max_keysize; + inst->alg.co.ivsize = cipher_alg->cra_blocksize; + + /* Use struct crypto_cipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_cipher *); + inst->alg.setkey = lskcipher_setkey_simple2; + inst->alg.init = lskcipher_init_tfm_simple2; + inst->alg.exit = lskcipher_exit_tfm_simple2; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple2(inst); + return ERR_PTR(err); +} + +static int crypto_ecb_create2(struct crypto_template *tmpl, struct rtattr **tb) { - struct skcipher_instance *inst; + struct lskcipher_instance *inst; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb); + inst = lskcipher_alloc_instance_simple2(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); - inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */ + /* ECB mode doesn't take an IV */ + inst->alg.co.ivsize = 0; + + inst->alg.encrypt = crypto_ecb_encrypt2; + inst->alg.decrypt = crypto_ecb_decrypt2; + + err = lskcipher_register_instance(tmpl, inst); + if (err) + inst->free(inst); + + return err; +} + +static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_lskcipher_spawn *spawn; + struct lskcipher_alg *cipher_alg; + struct lskcipher_instance *inst; + int err; + + inst = lskcipher_alloc_instance_simple(tmpl, tb); + if (IS_ERR(inst)) { + err = crypto_ecb_create2(tmpl, tb); + return err; + } + + spawn = lskcipher_instance_ctx(inst); + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + /* ECB mode doesn't take an IV */ + inst->alg.co.ivsize = 0; + if (cipher_alg->co.ivsize) + return -EINVAL; - inst->alg.encrypt = crypto_ecb_encrypt; - inst->alg.decrypt = crypto_ecb_decrypt; + inst->alg.co.base.cra_ctxsize = cipher_alg->co.base.cra_ctxsize; + inst->alg.setkey = cipher_alg->setkey; + inst->alg.encrypt = cipher_alg->encrypt; + inst->alg.decrypt = cipher_alg->decrypt; + inst->alg.init = cipher_alg->init; + inst->alg.exit = cipher_alg->exit; - err = skcipher_register_instance(tmpl, inst); + err = lskcipher_register_instance(tmpl, inst); if (err) inst->free(inst); @@ -102,3 +223,4 @@ module_exit(crypto_ecb_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ECB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ecb"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); -- cgit v1.2.3 From 614b521c6bc7adebd0f4bfda137c2d0a41f60f3b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:28 +0800 Subject: crypto: cbc - Convert from skcipher to lskcipher Replace the existing skcipher CBC template with an lskcipher version. 
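Since the lskcipher flavour of the template works on linear buffers rather than scatterlists, a caller no longer needs a request object at all. A minimal usage sketch, assuming the lskcipher API introduced earlier in this series (the helper name and the key/buffer parameters are placeholders, not part of this patch):

	/* Sketch only: one-shot CBC encryption over a flat, block-aligned
	 * buffer; the iv buffer is updated in place so further calls can
	 * continue the chain.
	 */
	static int cbc_encrypt_buf(const u8 *key, unsigned int keylen,
				   const u8 *src, u8 *dst, unsigned int len,
				   u8 *iv)
	{
		struct crypto_lskcipher *tfm;
		int err;

		tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_lskcipher_setkey(tfm, key, keylen);
		if (!err)
			err = crypto_lskcipher_encrypt(tfm, src, dst, len, iv);

		crypto_free_lskcipher(tfm);
		return err;
	}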
Signed-off-by: Herbert Xu --- crypto/cbc.c | 163 ++++++++++++++++++++++------------------------------------- 1 file changed, 61 insertions(+), 102 deletions(-) diff --git a/crypto/cbc.c b/crypto/cbc.c index 6c03e96b..28345b8d 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -5,8 +5,6 @@ * Copyright (c) 2006-2016 Herbert Xu */ -#include -#include #include #include #include @@ -14,99 +12,71 @@ #include #include -static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, unsigned nbytes, + u8 *iv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_encrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); - do { + for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) { crypto_xor(iv, src, bsize); - fn(tfm, dst, iv); + crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL); memcpy(iv, dst, bsize); - - src += bsize; - dst += bsize; - } while ((nbytes -= bsize) >= bsize); + } return nbytes; } -static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm, + u8 *src, unsigned nbytes, u8 *oiv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_encrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); + u8 *iv = oiv; + + if (nbytes < bsize) + goto out; do { crypto_xor(src, iv, bsize); - fn(tfm, src, src); + crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL); iv = src; src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); + memcpy(oiv, iv, bsize); +out: return nbytes; } -static int crypto_cbc_encrypt(struct skcipher_request *req) +static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - int err; + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher *cipher = *ctx; + int rem; - err = skcipher_walk_virt(&walk, req, false); + if (src == dst) + rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv); + else + rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv); - while (walk.nbytes) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cbc_encrypt_inplace(&walk, skcipher); - else - err = crypto_cbc_encrypt_segment(&walk, skcipher); - err = skcipher_walk_done(&walk, err); - } - - return err; + return rem && final ? 
-EINVAL : rem; } -static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, unsigned nbytes, + u8 *oiv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - u8 *iv = walk->iv; - - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_decrypt; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); + const u8 *iv = oiv; + + if (nbytes < bsize) + goto out; do { - fn(tfm, dst, src); + crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL); crypto_xor(dst, iv, bsize); iv = src; @@ -114,83 +84,72 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk, dst += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); + memcpy(oiv, iv, bsize); +out: return nbytes; } -static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk, - struct crypto_skcipher *skcipher) +static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm, + u8 *src, unsigned nbytes, u8 *iv) { - unsigned int bsize = crypto_skcipher_blocksize(skcipher); - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); - unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + unsigned int bsize = crypto_lskcipher_blocksize(tfm); u8 last_iv[MAX_CIPHER_BLOCKSIZE]; - struct crypto_cipher *cipher; - struct crypto_tfm *tfm; - cipher = skcipher_cipher_simple(skcipher); - tfm = crypto_cipher_tfm(cipher); - fn = crypto_cipher_alg(cipher)->cia_decrypt; + if (nbytes < bsize) + goto out; /* Start of the last block. */ src += nbytes - (nbytes & (bsize - 1)) - bsize; memcpy(last_iv, src, bsize); for (;;) { - fn(tfm, src, src); + crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL); if ((nbytes -= bsize) < bsize) break; crypto_xor(src, src - bsize, bsize); src -= bsize; } - crypto_xor(src, walk->iv, bsize); - memcpy(walk->iv, last_iv, bsize); + crypto_xor(src, iv, bsize); + memcpy(iv, last_iv, bsize); +out: return nbytes; } -static int crypto_cbc_decrypt(struct skcipher_request *req) +static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final) { - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct skcipher_walk walk; - int err; + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher *cipher = *ctx; + int rem; - err = skcipher_walk_virt(&walk, req, false); + if (src == dst) + rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv); + else + rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv); - while (walk.nbytes) { - if (walk.src.virt.addr == walk.dst.virt.addr) - err = crypto_cbc_decrypt_inplace(&walk, skcipher); - else - err = crypto_cbc_decrypt_segment(&walk, skcipher); - err = skcipher_walk_done(&walk, err); - } - - return err; + return rem && final ? 
-EINVAL : rem; } static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct skcipher_instance *inst; - struct crypto_alg *alg; + struct lskcipher_instance *inst; int err; - inst = skcipher_alloc_instance_simple(tmpl, tb); + inst = lskcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); - alg = skcipher_ialg_simple(inst); - err = -EINVAL; - if (!is_power_of_2(alg->cra_blocksize)) + if (!is_power_of_2(inst->alg.co.base.cra_blocksize)) goto out_free_inst; inst->alg.encrypt = crypto_cbc_encrypt; inst->alg.decrypt = crypto_cbc_decrypt; - err = skcipher_register_instance(tmpl, inst); + err = lskcipher_register_instance(tmpl, inst); if (err) { out_free_inst: inst->free(inst); -- cgit v1.2.3 From f0be2a5192f007dd2ff2442e9701f5d55fb106a2 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 21 Sep 2023 13:48:11 +0200 Subject: crypto: jitter - add RCT/APT support for different OSRs The oversampling rate (OSR) value specifies the heuristically implied entropy in the recorded data - H_submitter = 1/osr. A different entropy estimate implies a different APT/RCT cutoff value. This change adds support for OSRs 1 through 15. This OSR can be selected by the caller of the Jitter RNG. For this patch, the caller still uses one hard-coded OSR. A subsequent patch allows this value to be configured. In addition, the power-up self test is adjusted as follows: * It allows the caller to provide an oversampling rate that should be tested with - commonly it should be the same as used for the actual runtime operation. This makes the power-up testing therefore consistent with the runtime operation. * It calls now jent_measure_jitter (i.e. collects the full entropy that can possibly be harvested by the Jitter RNG) instead of only jent_condition_data (which only returns the entropy harvested from the conditioning component). This should now alleviate reports where the Jitter RNG initialization thinks there is too little entropy. * The power-up test now solely relies on the (enhanced) APT and RCT test that is used as a health test at runtime. The code allowing the different OSRs as well as the power-up test changes are present in the user space version of the Jitter RNG 3.4.1 and thus was already in production use for some time. 
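As a concrete illustration of the scaling (values taken from the change below): with H = 1/osr, the SP800-90B cutoff C = ceil(-log2(alpha)/H) gives 30 * osr for the intermittent RCT failure (alpha = 2^-30) and 60 * osr for the permanent one (alpha = 2^-60), while the APT cutoffs are taken from precomputed lookup tables derived from the binomial bound described in the code comments. For osr = 1 this reproduces the previous hard-coded values (RCT 30/60, APT 325/355); for osr = 3 the RCT cutoffs become 90/180 and the APT cutoffs 459/479. A minimal sketch of the RCT side:

	/* Sketch: repetition count test cutoffs as a function of the
	 * oversampling rate; the APT equivalents are table lookups.
	 */
	static unsigned int jent_rct_cutoff(unsigned int osr, int permanent)
	{
		return (permanent ? 60 : 30) * osr;
	}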
Reported-by "Ospan, Abylay" Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 4 +- crypto/jitterentropy.c | 233 ++++++++++++++++++++++--------------------- crypto/jitterentropy.h | 3 +- 3 files changed, 123 insertions(+), 117 deletions(-) diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 7d1463a1..1de730f9 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -245,7 +245,7 @@ static int jent_kcapi_init(struct crypto_tfm *tfm) crypto_shash_init(sdesc); rng->sdesc = sdesc; - rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc); + rng->entropy_collector = jent_entropy_collector_alloc(0, 0, sdesc); if (!rng->entropy_collector) { ret = -ENOMEM; goto err; @@ -334,7 +334,7 @@ static int __init jent_mod_init(void) desc->tfm = tfm; crypto_shash_init(desc); - ret = jent_entropy_init(desc); + ret = jent_entropy_init(0, 0, desc); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index fe9c233e..1b99ffaa 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -72,6 +72,8 @@ struct rand_data { __u64 prev_time; /* SENSITIVE Previous time stamp */ __u64 last_delta; /* SENSITIVE stuck test */ __s64 last_delta2; /* SENSITIVE stuck test */ + + unsigned int flags; /* Flags used to initialize */ unsigned int osr; /* Oversample rate */ #define JENT_MEMORY_BLOCKS 64 #define JENT_MEMORY_BLOCKSIZE 32 @@ -88,16 +90,9 @@ struct rand_data { /* Repetition Count Test */ unsigned int rct_count; /* Number of stuck values */ - /* Intermittent health test failure threshold of 2^-30 */ - /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */ - /* However, our RCT implementation starts at 1, so we subtract 1 here. */ -#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */ -#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ - /* Permanent health test failure threshold of 2^-60 */ - /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */ - /* However, our RCT implementation starts at 1, so we subtract 1 here. */ -#define JENT_RCT_CUTOFF_PERMANENT (61 - 1) -#define JENT_APT_CUTOFF_PERMANENT 355 + /* Adaptive Proportion Test cutoff values */ + unsigned int apt_cutoff; /* Intermittent health test failure */ + unsigned int apt_cutoff_permanent; /* Permanent health test failure */ #define JENT_APT_WINDOW_SIZE 512 /* Data window size */ /* LSB of time stamp to process */ #define JENT_APT_LSB 16 @@ -122,6 +117,9 @@ struct rand_data { * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. */ #define JENT_EHEALTH 9 /* Health test failed during initialization */ +#define JENT_ERCT 10 /* RCT failed during initialization */ +#define JENT_EHASH 11 /* Hash self test failed */ +#define JENT_EMEM 12 /* Can't allocate memory for initialization */ /* * The output n bits can receive more than n bits of min entropy, of course, @@ -147,6 +145,48 @@ struct rand_data { * This test complies with SP800-90B section 4.4.2. ***************************************************************************/ +/* + * See the SP 800-90B comment #10b for the corrected cutoff for the SP 800-90B + * APT. + * http://www.untruth.org/~josh/sp80090b/UL%20SP800-90B-final%20comments%20v1.9%2020191212.pdf + * In in the syntax of R, this is C = 2 + qbinom(1 − 2^(−30), 511, 2^(-1/osr)). 
+ * (The original formula wasn't correct because the first symbol must + * necessarily have been observed, so there is no chance of observing 0 of these + * symbols.) + * + * For the alpha < 2^-53, R cannot be used as it uses a float data type without + * arbitrary precision. A SageMath script is used to calculate those cutoff + * values. + * + * For any value above 14, this yields the maximal allowable value of 512 + * (by FIPS 140-2 IG 7.19 Resolution # 16, we cannot choose a cutoff value that + * renders the test unable to fail). + */ +static const unsigned int jent_apt_cutoff_lookup[15] = { + 325, 422, 459, 477, 488, 494, 499, 502, + 505, 507, 508, 509, 510, 511, 512 }; +static const unsigned int jent_apt_cutoff_permanent_lookup[15] = { + 355, 447, 479, 494, 502, 507, 510, 512, + 512, 512, 512, 512, 512, 512, 512 }; +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +static void jent_apt_init(struct rand_data *ec, unsigned int osr) +{ + /* + * Establish the apt_cutoff based on the presumed entropy rate of + * 1/osr. + */ + if (osr >= ARRAY_SIZE(jent_apt_cutoff_lookup)) { + ec->apt_cutoff = jent_apt_cutoff_lookup[ + ARRAY_SIZE(jent_apt_cutoff_lookup) - 1]; + ec->apt_cutoff_permanent = jent_apt_cutoff_permanent_lookup[ + ARRAY_SIZE(jent_apt_cutoff_permanent_lookup) - 1]; + } else { + ec->apt_cutoff = jent_apt_cutoff_lookup[osr - 1]; + ec->apt_cutoff_permanent = + jent_apt_cutoff_permanent_lookup[osr - 1]; + } +} /* * Reset the APT counter * @@ -187,12 +227,12 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) /* APT health test failure detection */ static int jent_apt_permanent_failure(struct rand_data *ec) { - return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0; + return (ec->apt_count >= ec->apt_cutoff_permanent) ? 1 : 0; } static int jent_apt_failure(struct rand_data *ec) { - return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0; + return (ec->apt_count >= ec->apt_cutoff) ? 1 : 0; } /*************************************************************************** @@ -275,15 +315,28 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) return 0; } -/* RCT health test failure detection */ +/* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. + * In addition, we require an entropy value H of 1/osr as this is the minimum + * entropy required to provide full entropy. + * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr deltas for + * inserting them into the entropy pool which should then have (close to) + * DATA_SIZE_BITS bits of entropy in the conditioned output. + * + * Note, ec->rct_count (which equals to value B in the pseudo code of SP800-90B + * section 4.4.1) starts with zero. Hence we need to subtract one from the + * cutoff value as calculated following SP800-90B. Thus + * C = ceil(-log_2(alpha)/H) = 30*osr or 60*osr. + */ static int jent_rct_permanent_failure(struct rand_data *ec) { - return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0; + return (ec->rct_count >= (60 * ec->osr)) ? 1 : 0; } static int jent_rct_failure(struct rand_data *ec) { - return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0; + return (ec->rct_count >= (30 * ec->osr)) ? 
1 : 0; } /* Report of health test failures */ @@ -448,7 +501,7 @@ static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) * * @return result of stuck test */ -static int jent_measure_jitter(struct rand_data *ec) +static int jent_measure_jitter(struct rand_data *ec, __u64 *ret_current_delta) { __u64 time = 0; __u64 current_delta = 0; @@ -472,6 +525,10 @@ static int jent_measure_jitter(struct rand_data *ec) if (jent_condition_data(ec, current_delta, stuck)) stuck = 1; + /* return the raw entropy value */ + if (ret_current_delta) + *ret_current_delta = current_delta; + return stuck; } @@ -489,11 +546,11 @@ static void jent_gen_entropy(struct rand_data *ec) safety_factor = JENT_ENTROPY_SAFETY_FACTOR; /* priming of the ->prev_time value */ - jent_measure_jitter(ec); + jent_measure_jitter(ec, NULL); while (!jent_health_failure(ec)) { /* If a stuck measurement is received, repeat measurement */ - if (jent_measure_jitter(ec)) + if (jent_measure_jitter(ec, NULL)) continue; /* @@ -554,7 +611,8 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, * Perform startup health tests and return permanent * error if it fails. */ - if (jent_entropy_init(ec->hash_state)) + if (jent_entropy_init(ec->osr, ec->flags, + ec->hash_state)) return -3; return -2; @@ -604,11 +662,15 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, /* verify and set the oversampling rate */ if (osr == 0) - osr = 1; /* minimum sampling rate is 1 */ + osr = 1; /* H_submitter = 1 / osr */ entropy_collector->osr = osr; + entropy_collector->flags = flags; entropy_collector->hash_state = hash_state; + /* Initialize the APT */ + jent_apt_init(entropy_collector, osr); + /* fill the data pad with non-zero values */ jent_gen_entropy(entropy_collector); @@ -622,20 +684,14 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector) jent_zfree(entropy_collector); } -int jent_entropy_init(void *hash_state) +int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state) { - int i; - __u64 delta_sum = 0; - __u64 old_delta = 0; - unsigned int nonstuck = 0; - int time_backwards = 0; - int count_mod = 0; - int count_stuck = 0; - struct rand_data ec = { 0 }; - - /* Required for RCT */ - ec.osr = 1; - ec.hash_state = hash_state; + struct rand_data *ec; + int i, time_backwards = 0, ret = 0; + + ec = jent_entropy_collector_alloc(osr, flags, hash_state); + if (!ec) + return JENT_EMEM; /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. 
These @@ -664,31 +720,28 @@ int jent_entropy_init(void *hash_state) #define TESTLOOPCOUNT 1024 #define CLEARCACHE 100 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { - __u64 time = 0; - __u64 time2 = 0; - __u64 delta = 0; - unsigned int lowdelta = 0; - int stuck; + __u64 start_time = 0, end_time = 0, delta = 0; /* Invoke core entropy collection logic */ - jent_get_nstime(&time); - ec.prev_time = time; - jent_condition_data(&ec, time, 0); - jent_get_nstime(&time2); + jent_measure_jitter(ec, &delta); + end_time = ec->prev_time; + start_time = ec->prev_time - delta; /* test whether timer works */ - if (!time || !time2) - return JENT_ENOTIME; - delta = jent_delta(time, time2); + if (!start_time || !end_time) { + ret = JENT_ENOTIME; + goto out; + } + /* * test whether timer is fine grained enough to provide * delta even when called shortly after each other -- this * implies that we also have a high resolution timer */ - if (!delta) - return JENT_ECOARSETIME; - - stuck = jent_stuck(&ec, delta); + if (!delta || (end_time == start_time)) { + ret = JENT_ECOARSETIME; + goto out; + } /* * up to here we did not modify any variable that will be @@ -700,49 +753,9 @@ int jent_entropy_init(void *hash_state) if (i < CLEARCACHE) continue; - if (stuck) - count_stuck++; - else { - nonstuck++; - - /* - * Ensure that the APT succeeded. - * - * With the check below that count_stuck must be less - * than 10% of the overall generated raw entropy values - * it is guaranteed that the APT is invoked at - * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times. - */ - if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { - jent_apt_reset(&ec, - delta & JENT_APT_WORD_MASK); - } - } - - /* Validate health test result */ - if (jent_health_failure(&ec)) - return JENT_EHEALTH; - /* test whether we have an increasing timer */ - if (!(time2 > time)) + if (!(end_time > start_time)) time_backwards++; - - /* use 32 bit value to ensure compilation on 32 bit arches */ - lowdelta = time2 - time; - if (!(lowdelta % 100)) - count_mod++; - - /* - * ensure that we have a varying delta timer which is necessary - * for the calculation of entropy -- perform this check - * only after the first loop is executed as we need to prime - * the old_data value - */ - if (delta > old_delta) - delta_sum += (delta - old_delta); - else - delta_sum += (old_delta - delta); - old_delta = delta; } /* @@ -752,31 +765,23 @@ int jent_entropy_init(void *hash_state) * should not fail. The value of 3 should cover the NTP case being * performed during our test run. */ - if (time_backwards > 3) - return JENT_ENOMONOTONIC; - - /* - * Variations of deltas of time must on average be larger - * than 1 to ensure the entropy estimation - * implied with 1 is preserved - */ - if ((delta_sum) <= 1) - return JENT_EVARVAR; + if (time_backwards > 3) { + ret = JENT_ENOMONOTONIC; + goto out; + } - /* - * Ensure that we have variations in the time stamp below 10 for at - * least 10% of all checks -- on some platforms, the counter increments - * in multiples of 100, but not always - */ - if ((TESTLOOPCOUNT/10 * 9) < count_mod) - return JENT_ECOARSETIME; + /* Did we encounter a health test failure? */ + if (jent_rct_failure(ec)) { + ret = JENT_ERCT; + goto out; + } + if (jent_apt_failure(ec)) { + ret = JENT_EHEALTH; + goto out; + } - /* - * If we have more than 90% stuck results, then this Jitter RNG is - * likely to not work well. 
- */ - if ((TESTLOOPCOUNT/10 * 9) < count_stuck) - return JENT_ESTUCK; +out: + jent_entropy_collector_free(ec); - return 0; + return ret; } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index 4c92176e..626c6228 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -9,7 +9,8 @@ extern int jent_hash_time(void *hash_state, __u64 time, u8 *addtl, int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len); struct rand_data; -extern int jent_entropy_init(void *hash_state); +extern int jent_entropy_init(unsigned int osr, unsigned int flags, + void *hash_state); extern int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len); -- cgit v1.2.3 From 0df5253004914f3f52b582ac6ceff2abb9f999c1 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 21 Sep 2023 13:48:33 +0200 Subject: crypto: jitter - Allow configuration of memory size The memory size consumed by the Jitter RNG is one contributing factor in the amount of entropy that is gathered. As the amount of entropy directly correlates with the distance of the memory from the CPU, the caches that are possibly present on a given system have an impact on the collected entropy. Thus, the kernel compile time should offer a means to configure the amount of memory used by the Jitter RNG. Although this option could be turned into a runtime option (e.g. a kernel command line option), it should remain a compile time option as otherwise adminsitrators who may not have performed an entropy assessment may select a value that is inappropriate. The default value selected by the configuration is identical to the current Jitter RNG value. Thus, the patch should not lead to any change in the Jitter RNG behavior. To accommodate larger memory buffers, kvzalloc / kvfree is used. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 43 +++++++++++++++++++++++++++++++++++++++++++ crypto/jitterentropy-kcapi.c | 11 +++++++++++ crypto/jitterentropy.c | 16 +++++++++------- crypto/jitterentropy.h | 2 ++ 4 files changed, 65 insertions(+), 7 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 650b1b36..00c827d9 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1296,6 +1296,49 @@ config CRYPTO_JITTERENTROPY See https://www.chronox.de/jent.html +choice + prompt "CPU Jitter RNG Memory Size" + default CRYPTO_JITTERENTROPY_MEMSIZE_2 + depends on CRYPTO_JITTERENTROPY + help + The Jitter RNG measures the execution time of memory accesses. + Multiple consecutive memory accesses are performed. If the memory + size fits into a cache (e.g. L1), only the memory access timing + to that cache is measured. The closer the cache is to the CPU + the less variations are measured and thus the less entropy is + obtained. Thus, if the memory size fits into the L1 cache, the + obtained entropy is less than if the memory size fits within + L1 + L2, which in turn is less if the memory fits into + L1 + L2 + L3. Thus, by selecting a different memory size, + the entropy rate produced by the Jitter RNG can be modified. 
+ + config CRYPTO_JITTERENTROPY_MEMSIZE_2 + bool "2048 Bytes (default)" + + config CRYPTO_JITTERENTROPY_MEMSIZE_128 + bool "128 kBytes" + + config CRYPTO_JITTERENTROPY_MEMSIZE_1024 + bool "1024 kBytes" + + config CRYPTO_JITTERENTROPY_MEMSIZE_8192 + bool "8192 kBytes" +endchoice + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS + int + default 64 if CRYPTO_JITTERENTROPY_MEMSIZE_2 + default 512 if CRYPTO_JITTERENTROPY_MEMSIZE_128 + default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 + default 4096 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 + +config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE + int + default 32 if CRYPTO_JITTERENTROPY_MEMSIZE_2 + default 256 if CRYPTO_JITTERENTROPY_MEMSIZE_128 + default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 + default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 + config CRYPTO_JITTERENTROPY_TESTINTERFACE bool "CPU Jitter RNG Test Interface" depends on CRYPTO_JITTERENTROPY diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 1de730f9..a8e7bbd2 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -54,6 +54,17 @@ * Helper function ***************************************************************************/ +void *jent_kvzalloc(unsigned int len) +{ + return kvzalloc(len, GFP_KERNEL); +} + +void jent_kvzfree(void *ptr, unsigned int len) +{ + memzero_explicit(ptr, len); + kvfree(ptr); +} + void *jent_zalloc(unsigned int len) { return kzalloc(len, GFP_KERNEL); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 1b99ffaa..18bbe2b8 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -75,10 +75,10 @@ struct rand_data { unsigned int flags; /* Flags used to initialize */ unsigned int osr; /* Oversample rate */ -#define JENT_MEMORY_BLOCKS 64 -#define JENT_MEMORY_BLOCKSIZE 32 #define JENT_MEMORY_ACCESSLOOPS 128 -#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE) +#define JENT_MEMORY_SIZE \ + (CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS * \ + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE) unsigned char *mem; /* Memory access location with size of * memblocks * memblocksize */ unsigned int memlocation; /* Pointer to byte in *mem */ @@ -650,13 +650,15 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, /* Allocate memory for adding variations based on memory * access */ - entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE); + entropy_collector->mem = jent_kvzalloc(JENT_MEMORY_SIZE); if (!entropy_collector->mem) { jent_zfree(entropy_collector); return NULL; } - entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE; - entropy_collector->memblocks = JENT_MEMORY_BLOCKS; + entropy_collector->memblocksize = + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE; + entropy_collector->memblocks = + CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS; entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS; } @@ -679,7 +681,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, void jent_entropy_collector_free(struct rand_data *entropy_collector) { - jent_zfree(entropy_collector->mem); + jent_kvzfree(entropy_collector->mem, JENT_MEMORY_SIZE); entropy_collector->mem = NULL; jent_zfree(entropy_collector); } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index 626c6228..e31661ee 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later +extern void *jent_kvzalloc(unsigned int len); +extern void jent_kvzfree(void *ptr, unsigned int len); extern void *jent_zalloc(unsigned int len); extern void 
jent_zfree(void *ptr); extern void jent_get_nstime(__u64 *out); -- cgit v1.2.3 From d12465f517d24e1dbed244fcfdc67f89631abd19 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 21 Sep 2023 13:48:59 +0200 Subject: crypto: jitter - Allow configuration of oversampling rate The oversampling rate used by the Jitter RNG allows the configuration of the heuristically implied entropy in one timing measurement. This entropy rate is (1 / OSR) bits of entropy per time stamp. Considering that the Jitter RNG now support APT/RCT health tests for different OSRs, allow this value to be configured at compile time to support systems with limited amount of entropy in their timer. The allowed range of OSR values complies with the APT/RCT cutoff health test values which range from 1 through 15. The default value of the OSR selection support is left at 1 which is the current default. Thus, the addition of the configuration support does not alter the default Jitter RNG behavior. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 17 +++++++++++++++++ crypto/jitterentropy-kcapi.c | 6 ++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 00c827d9..ed931dde 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1339,6 +1339,23 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE default 1024 if CRYPTO_JITTERENTROPY_MEMSIZE_1024 default 2048 if CRYPTO_JITTERENTROPY_MEMSIZE_8192 +config CRYPTO_JITTERENTROPY_OSR + int "CPU Jitter RNG Oversampling Rate" + range 1 15 + default 1 + depends on CRYPTO_JITTERENTROPY + help + The Jitter RNG allows the specification of an oversampling rate (OSR). + The Jitter RNG operation requires a fixed amount of timing + measurements to produce one output block of random numbers. The + OSR value is multiplied with the amount of timing measurements to + generate one output block. Thus, the timing measurement is oversampled + by the OSR factor. The oversampling allows the Jitter RNG to operate + on hardware whose timers deliver limited amount of entropy (e.g. + the timer is coarse) by setting the OSR to a higher value. The + trade-off, however, is that the Jitter RNG now requires more time + to generate random numbers. + config CRYPTO_JITTERENTROPY_TESTINTERFACE bool "CPU Jitter RNG Test Interface" depends on CRYPTO_JITTERENTROPY diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index a8e7bbd2..0c675222 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -256,7 +256,9 @@ static int jent_kcapi_init(struct crypto_tfm *tfm) crypto_shash_init(sdesc); rng->sdesc = sdesc; - rng->entropy_collector = jent_entropy_collector_alloc(0, 0, sdesc); + rng->entropy_collector = + jent_entropy_collector_alloc(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, + sdesc); if (!rng->entropy_collector) { ret = -ENOMEM; goto err; @@ -345,7 +347,7 @@ static int __init jent_mod_init(void) desc->tfm = tfm; crypto_shash_init(desc); - ret = jent_entropy_init(0, 0, desc); + ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { -- cgit v1.2.3 From 7f456eec227b9762e6307e49485c0c776b1b816b Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sat, 23 Sep 2023 12:08:06 +0200 Subject: crypto: engine - Make crypto_engine_exit() return void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All callers ignore the return value, so simplify by not providing one. 
Note that crypto_engine_exit() is typically called in a device driver's remove path (or the error path in probe), where errors cannot be handled anyhow. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 108d9d55..e60a0eb6 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -552,20 +552,16 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); /** * crypto_engine_exit - free the resources of hardware engine when exit * @engine: the hardware engine need to be freed - * - * Return 0 for success. */ -int crypto_engine_exit(struct crypto_engine *engine) +void crypto_engine_exit(struct crypto_engine *engine) { int ret; ret = crypto_engine_stop(engine); if (ret) - return ret; + return; kthread_destroy_worker(engine->kworker); - - return 0; } EXPORT_SYMBOL_GPL(crypto_engine_exit); -- cgit v1.2.3 From 2b2a4f67c38122906546d9ea411bf1fcef20636b Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Tue, 26 Sep 2023 11:46:41 +0200 Subject: X.509: Add missing IMPLICIT annotations to AKID ASN.1 module The ASN.1 module in RFC 5280 appendix A.1 uses EXPLICIT TAGS whereas the one in appendix A.2 uses IMPLICIT TAGS. The kernel's simplified asn1_compiler.c always uses EXPLICIT TAGS, hence definitions from appendix A.2 need to be annotated as IMPLICIT for the compiler to generate RFC-compliant code. In particular, GeneralName is defined in appendix A.2: GeneralName ::= CHOICE { otherName [0] OtherName, ... dNSName [2] IA5String, x400Address [3] ORAddress, directoryName [4] Name, ... } Because appendix A.2 uses IMPLICIT TAGS, the IA5String tag (0x16) of a dNSName is not rendered. Instead, the string directly succeeds the [2] tag (0x82). Likewise, the SEQUENCE tag (0x30) of an OtherName is not rendered. Instead, only the constituents of the SEQUENCE are rendered: An OID tag (0x06), a [0] tag (0xa0) and an ANY tag. That's three consecutive tags instead of a single encompassing tag. The situation is different for x400Address and directoryName choices: They reference ORAddress and Name, which are defined in appendix A.1, therefore use EXPLICIT TAGS. The AKID ASN.1 module is missing several IMPLICIT annotations, hence isn't RFC-compliant. In the unlikely event that an AKID contains other elements beside a directoryName, users may see parse errors. Add the missing annotations but do not tag this commit for stable as I am not aware of any issue reports. Fixes are only eligible for stable if they're "obviously correct" and with ASN.1 there's no such thing. 
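To make the tagging difference concrete, here is a small illustration that is not part of the patch; the byte values assume a hypothetical dNSName of "a.com":

/* Hypothetical example: DER encoding of GeneralName dNSName "a.com". */
static const u8 dnsname_implicit[] = {
	0x82, 0x05, 'a', '.', 'c', 'o', 'm',             /* [2] replaces the IA5String tag (0x16) */
};
static const u8 dnsname_explicit[] = {
	0xa2, 0x07, 0x16, 0x05, 'a', '.', 'c', 'o', 'm', /* [2] wraps a complete IA5String TLV */
};

Certificates on the wire use the first form, so a parser generated from EXPLICIT-style definitions may fail to parse such names, which is why the definitions below gain IMPLICIT annotations.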
Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/x509_akid.asn1 | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1 index 1a33231a..c7818ff4 100644 --- a/crypto/asymmetric_keys/x509_akid.asn1 +++ b/crypto/asymmetric_keys/x509_akid.asn1 @@ -14,15 +14,15 @@ CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial }) GeneralNames ::= SEQUENCE OF GeneralName GeneralName ::= CHOICE { - otherName [0] ANY, - rfc822Name [1] IA5String, - dNSName [2] IA5String, + otherName [0] IMPLICIT OtherName, + rfc822Name [1] IMPLICIT IA5String, + dNSName [2] IMPLICIT IA5String, x400Address [3] ANY, directoryName [4] Name ({ x509_akid_note_name }), - ediPartyName [5] ANY, - uniformResourceIdentifier [6] IA5String, - iPAddress [7] OCTET STRING, - registeredID [8] OBJECT IDENTIFIER + ediPartyName [5] IMPLICIT EDIPartyName, + uniformResourceIdentifier [6] IMPLICIT IA5String, + iPAddress [7] IMPLICIT OCTET STRING, + registeredID [8] IMPLICIT OBJECT IDENTIFIER } Name ::= SEQUENCE OF RelativeDistinguishedName @@ -33,3 +33,13 @@ AttributeValueAssertion ::= SEQUENCE { attributeType OBJECT IDENTIFIER ({ x509_note_OID }), attributeValue ANY ({ x509_extract_name_segment }) } + +OtherName ::= SEQUENCE { + type-id OBJECT IDENTIFIER, + value [0] ANY + } + +EDIPartyName ::= SEQUENCE { + nameAssigner [0] ANY OPTIONAL, + partyName [1] ANY + } -- cgit v1.2.3 From aed3ded87f1c467e1193ae2df1e79b06d909fed5 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Mon, 2 Oct 2023 00:57:15 +0100 Subject: crypto: pkcs7 - remove md4 md5 x.509 support Remove support for md4 md5 hash and signatures in x.509 certificate parsers, pkcs7 signature parser, authenticode parser. All of these are insecure or broken, and everyone has long time ago migrated to alternative hash implementations. Also remove md2 & md3 oids which have already didn't have support. This is also likely the last user of md4 in the kernel, and thus crypto/md4.c and related tests in tcrypt & testmgr can likely be removed. Other users such as cifs smbfs ext modpost sumversions have their own internal implementation as needed. 
Signed-off-by: Dimitri John Ledkov Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 6 ------ crypto/asymmetric_keys/pkcs7_parser.c | 6 ------ crypto/asymmetric_keys/x509_cert_parser.c | 6 ------ 3 files changed, 18 deletions(-) diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 839591ad..690405eb 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,12 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_md4: - ctx->digest_algo = "md4"; - break; - case OID_md5: - ctx->digest_algo = "md5"; - break; case OID_sha1: ctx->digest_algo = "sha1"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 277482bb..cf4caab9 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,12 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_md4: - ctx->sinfo->sig->hash_algo = "md4"; - break; - case OID_md5: - ctx->sinfo->sig->hash_algo = "md5"; - break; case OID_sha1: ctx->sinfo->sig->hash_algo = "sha1"; break; diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 0a7049b4..2c309286 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -195,15 +195,9 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, pr_debug("PubKey Algo: %u\n", ctx->last_oid); switch (ctx->last_oid) { - case OID_md2WithRSAEncryption: - case OID_md3WithRSAEncryption: default: return -ENOPKG; /* Unsupported combination */ - case OID_md4WithRSAEncryption: - ctx->cert->sig->hash_algo = "md4"; - goto rsa_pkcs1; - case OID_sha1WithRSAEncryption: ctx->cert->sig->hash_algo = "sha1"; goto rsa_pkcs1; -- cgit v1.2.3 From 17b539462a493336163f7a7ce012eb6bd31d9132 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Aug 2023 17:56:25 +0800 Subject: crypto: deflate - Remove zlib-deflate Remove the implementation of zlib-deflate because it is completely unused in the kernel. 
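For illustration only (not part of this patch), a hypothetical caller that still asked for the removed algorithm would now fail at allocation time, while the raw "deflate" algorithm is unaffected:

/* Hypothetical caller sketch; after this change the lookup is expected to fail. */
struct crypto_comp *tfm;

tfm = crypto_alloc_comp("zlib-deflate", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);	/* algorithm is no longer registered */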
Signed-off-by: Herbert Xu Reviewed-by: Ard Biesheuvel --- crypto/deflate.c | 61 ++++++++++++++++---------------------------------------- 1 file changed, 17 insertions(+), 44 deletions(-) diff --git a/crypto/deflate.c b/crypto/deflate.c index b2a46f6d..6e31e0db 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -39,24 +39,20 @@ struct deflate_ctx { struct z_stream_s decomp_stream; }; -static int deflate_comp_init(struct deflate_ctx *ctx, int format) +static int deflate_comp_init(struct deflate_ctx *ctx) { int ret = 0; struct z_stream_s *stream = &ctx->comp_stream; stream->workspace = vzalloc(zlib_deflate_workspacesize( - MAX_WBITS, MAX_MEM_LEVEL)); + -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL)); if (!stream->workspace) { ret = -ENOMEM; goto out; } - if (format) - ret = zlib_deflateInit(stream, 3); - else - ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, - -DEFLATE_DEF_WINBITS, - DEFLATE_DEF_MEMLEVEL, - Z_DEFAULT_STRATEGY); + ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, + -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + Z_DEFAULT_STRATEGY); if (ret != Z_OK) { ret = -EINVAL; goto out_free; @@ -68,7 +64,7 @@ out_free: goto out; } -static int deflate_decomp_init(struct deflate_ctx *ctx, int format) +static int deflate_decomp_init(struct deflate_ctx *ctx) { int ret = 0; struct z_stream_s *stream = &ctx->decomp_stream; @@ -78,10 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx, int format) ret = -ENOMEM; goto out; } - if (format) - ret = zlib_inflateInit(stream); - else - ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); + ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); if (ret != Z_OK) { ret = -EINVAL; goto out_free; @@ -105,21 +98,21 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx) vfree(ctx->decomp_stream.workspace); } -static int __deflate_init(void *ctx, int format) +static int __deflate_init(void *ctx) { int ret; - ret = deflate_comp_init(ctx, format); + ret = deflate_comp_init(ctx); if (ret) goto out; - ret = deflate_decomp_init(ctx, format); + ret = deflate_decomp_init(ctx); if (ret) deflate_comp_exit(ctx); out: return ret; } -static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) +static void *deflate_alloc_ctx(struct crypto_scomp *tfm) { struct deflate_ctx *ctx; int ret; @@ -128,7 +121,7 @@ static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) if (!ctx) return ERR_PTR(-ENOMEM); - ret = __deflate_init(ctx, format); + ret = __deflate_init(ctx); if (ret) { kfree(ctx); return ERR_PTR(ret); @@ -137,21 +130,11 @@ static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format) return ctx; } -static void *deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - return gen_deflate_alloc_ctx(tfm, 0); -} - -static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm) -{ - return gen_deflate_alloc_ctx(tfm, 1); -} - static int deflate_init(struct crypto_tfm *tfm) { struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); - return __deflate_init(ctx, 0); + return __deflate_init(ctx); } static void __deflate_exit(void *ctx) @@ -286,7 +269,7 @@ static struct crypto_alg alg = { .coa_decompress = deflate_decompress } } }; -static struct scomp_alg scomp[] = { { +static struct scomp_alg scomp = { .alloc_ctx = deflate_alloc_ctx, .free_ctx = deflate_free_ctx, .compress = deflate_scompress, @@ -296,17 +279,7 @@ static struct scomp_alg scomp[] = { { .cra_driver_name = "deflate-scomp", .cra_module = THIS_MODULE, } -}, { - .alloc_ctx = zlib_deflate_alloc_ctx, - .free_ctx = deflate_free_ctx, - .compress = 
deflate_scompress, - .decompress = deflate_sdecompress, - .base = { - .cra_name = "zlib-deflate", - .cra_driver_name = "zlib-deflate-scomp", - .cra_module = THIS_MODULE, - } -} }; +}; static int __init deflate_mod_init(void) { @@ -316,7 +289,7 @@ static int __init deflate_mod_init(void) if (ret) return ret; - ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp)); + ret = crypto_register_scomp(&scomp); if (ret) { crypto_unregister_alg(&alg); return ret; @@ -328,7 +301,7 @@ static int __init deflate_mod_init(void) static void __exit deflate_mod_fini(void) { crypto_unregister_alg(&alg); - crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp)); + crypto_unregister_scomp(&scomp); } subsys_initcall(deflate_mod_init); -- cgit v1.2.3 From dbc9bbe564cea72b96994490dcf5409d4019617e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Aug 2023 17:57:06 +0800 Subject: crypto: testmgr - Remove zlib-deflate Remove zlib-deflate test vectors as it no longer exists in the kernel. Signed-off-by: Herbert Xu Reviewed-by: Ard Biesheuvel --- crypto/testmgr.c | 10 -------- crypto/testmgr.h | 75 -------------------------------------------------------- 2 files changed, 85 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index aed4a6bf..bd4952e4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5772,16 +5772,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(xxhash64_tv_template) } - }, { - .alg = "zlib-deflate", - .test = alg_test_comp, - .fips_allowed = 1, - .suite = { - .comp = { - .comp = __VECS(zlib_deflate_comp_tv_template), - .decomp = __VECS(zlib_deflate_decomp_tv_template) - } - } }, { .alg = "zstd", .test = alg_test_comp, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 5ca7a412..0cd6e060 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -35754,81 +35754,6 @@ static const struct comp_testvec deflate_decomp_tv_template[] = { }, }; -static const struct comp_testvec zlib_deflate_comp_tv_template[] = { - { - .inlen = 70, - .outlen = 44, - .input = "Join us now and share the software " - "Join us now and share the software ", - .output = "\x78\x5e\xf3\xca\xcf\xcc\x53\x28" - "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc" - "\x4b\x51\x28\xce\x48\x2c\x4a\x55" - "\x28\xc9\x48\x55\x28\xce\x4f\x2b" - "\x29\x07\x71\xbc\x08\x2b\x01\x00" - "\x7c\x65\x19\x3d", - }, { - .inlen = 191, - .outlen = 129, - .input = "This document describes a compression method based on the DEFLATE" - "compression algorithm. 
This document defines the application of " - "the DEFLATE algorithm to the IP Payload Compression Protocol.", - .output = "\x78\x5e\x5d\xce\x41\x0a\xc3\x30" - "\x0c\x04\xc0\xaf\xec\x0b\xf2\x87" - "\xd2\xa6\x50\xe8\xc1\x07\x7f\x40" - "\xb1\x95\x5a\x60\x5b\xc6\x56\x0f" - "\xfd\x7d\x93\x1e\x42\xe8\x51\xec" - "\xee\x20\x9f\x64\x20\x6a\x78\x17" - "\xae\x86\xc8\x23\x74\x59\x78\x80" - "\x10\xb4\xb4\xce\x63\x88\x56\x14" - "\xb6\xa4\x11\x0b\x0d\x8e\xd8\x6e" - "\x4b\x8c\xdb\x7c\x7f\x5e\xfc\x7c" - "\xae\x51\x7e\x69\x17\x4b\x65\x02" - "\xfc\x1f\xbc\x4a\xdd\xd8\x7d\x48" - "\xad\x65\x09\x64\x3b\xac\xeb\xd9" - "\xc2\x01\xc0\xf4\x17\x3c\x1c\x1c" - "\x7d\xb2\x52\xc4\xf5\xf4\x8f\xeb" - "\x6a\x1a\x34\x4f\x5f\x2e\x32\x45" - "\x4e", - }, -}; - -static const struct comp_testvec zlib_deflate_decomp_tv_template[] = { - { - .inlen = 128, - .outlen = 191, - .input = "\x78\x9c\x5d\x8d\x31\x0e\xc2\x30" - "\x10\x04\xbf\xb2\x2f\xc8\x1f\x10" - "\x04\x09\x89\xc2\x85\x3f\x70\xb1" - "\x2f\xf8\x24\xdb\x67\xd9\x47\xc1" - "\xef\x49\x68\x12\x51\xae\x76\x67" - "\xd6\x27\x19\x88\x1a\xde\x85\xab" - "\x21\xf2\x08\x5d\x16\x1e\x20\x04" - "\x2d\xad\xf3\x18\xa2\x15\x85\x2d" - "\x69\xc4\x42\x83\x23\xb6\x6c\x89" - "\x71\x9b\xef\xcf\x8b\x9f\xcf\x33" - "\xca\x2f\xed\x62\xa9\x4c\x80\xff" - "\x13\xaf\x52\x37\xed\x0e\x52\x6b" - "\x59\x02\xd9\x4e\xe8\x7a\x76\x1d" - "\x02\x98\xfe\x8a\x87\x83\xa3\x4f" - "\x56\x8a\xb8\x9e\x8e\x5c\x57\xd3" - "\xa0\x79\xfa\x02\x2e\x32\x45\x4e", - .output = "This document describes a compression method based on the DEFLATE" - "compression algorithm. This document defines the application of " - "the DEFLATE algorithm to the IP Payload Compression Protocol.", - }, { - .inlen = 44, - .outlen = 70, - .input = "\x78\x9c\xf3\xca\xcf\xcc\x53\x28" - "\x2d\x56\xc8\xcb\x2f\x57\x48\xcc" - "\x4b\x51\x28\xce\x48\x2c\x4a\x55" - "\x28\xc9\x48\x55\x28\xce\x4f\x2b" - "\x29\x07\x71\xbc\x08\x2b\x01\x00" - "\x7c\x65\x19\x3d", - .output = "Join us now and share the software " - "Join us now and share the software ", - }, -}; - /* * LZO test vectors (null-terminated strings). */ -- cgit v1.2.3 From 2c5b6d95d33c5cc6f5b3231c3174c764ad75465d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:31:55 +0800 Subject: crypto: skcipher - Add dependency on ecb As lskcipher requires the ecb wrapper for the transition add an explicit dependency on it so that it is always present. This can be removed once all simple ciphers have been converted to lskcipher. Reported-by: Nathan Chancellor Fixes: 614b521c6bc7 ("crypto: cbc - Convert from skcipher to lskcipher") Signed-off-by: Herbert Xu Tested-by: Nathan Chancellor Signed-off-by: Herbert Xu --- crypto/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index ed931dde..bbf51d55 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -85,6 +85,7 @@ config CRYPTO_SKCIPHER tristate select CRYPTO_SKCIPHER2 select CRYPTO_ALGAPI + select CRYPTO_ECB config CRYPTO_SKCIPHER2 tristate @@ -689,7 +690,7 @@ config CRYPTO_CTS config CRYPTO_ECB tristate "ECB (Electronic Codebook)" - select CRYPTO_SKCIPHER + select CRYPTO_SKCIPHER2 select CRYPTO_MANAGER help ECB (Electronic Codebook) mode (NIST SP800-38A) -- cgit v1.2.3 From e6ff1b23673f8aa00c53d8e7343de856dfd85927 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:18 +0800 Subject: crypto: arc4 - Convert from skcipher to lskcipher Replace skcipher implementation with lskcipher. 
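For orientation, a rough caller-side sketch of how the converted cipher might be used. The helper names and signatures here are assumptions modelled on the existing skcipher interface and are not taken from this patch; as the new crypto_arc4_crypt() in the diff below shows, the cipher now operates directly on linear kernel buffers:

/* Hypothetical usage sketch only; helper signatures are assumed. */
struct crypto_lskcipher *tfm;
u8 key[16];	/* key material elided */
u8 buf[64];
int err;

tfm = crypto_alloc_lskcipher("arc4", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

err = crypto_lskcipher_setkey(tfm, key, sizeof(key));
if (!err)
	/* in-place encryption of a linear buffer; arc4 takes no IV */
	err = crypto_lskcipher_encrypt(tfm, buf, buf, sizeof(buf), NULL);

crypto_free_lskcipher(tfm);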
Signed-off-by: Herbert Xu --- crypto/arc4.c | 60 ++++++++++++++++++++++---------------------------------- crypto/testmgr.c | 2 +- 2 files changed, 24 insertions(+), 38 deletions(-) diff --git a/crypto/arc4.c b/crypto/arc4.c index 3254dcc3..eb3590dc 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -7,7 +7,6 @@ * Jon Oberheide */ -#include #include #include #include @@ -15,33 +14,24 @@ #include #include -static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, +static int crypto_arc4_setkey(struct crypto_lskcipher *tfm, const u8 *in_key, unsigned int key_len) { - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); return arc4_setkey(ctx, in_key, key_len); } -static int crypto_arc4_crypt(struct skcipher_request *req) +static int crypto_arc4_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned nbytes, u8 *iv, bool final) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - int err; + struct arc4_ctx *ctx = crypto_lskcipher_ctx(tfm); - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes > 0) { - arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - return err; + arc4_crypt(ctx, dst, src, nbytes); + return 0; } -static int crypto_arc4_init(struct crypto_skcipher *tfm) +static int crypto_arc4_init(struct crypto_lskcipher *tfm) { pr_warn_ratelimited("\"%s\" (%ld) uses obsolete ecb(arc4) skcipher\n", current->comm, (unsigned long)current->pid); @@ -49,33 +39,29 @@ static int crypto_arc4_init(struct crypto_skcipher *tfm) return 0; } -static struct skcipher_alg arc4_alg = { - /* - * For legacy reasons, this is named "ecb(arc4)", not "arc4". - * Nevertheless it's actually a stream cipher, not a block cipher. 
- */ - .base.cra_name = "ecb(arc4)", - .base.cra_driver_name = "ecb(arc4)-generic", - .base.cra_priority = 100, - .base.cra_blocksize = ARC4_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct arc4_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - .setkey = crypto_arc4_setkey, - .encrypt = crypto_arc4_crypt, - .decrypt = crypto_arc4_crypt, - .init = crypto_arc4_init, +static struct lskcipher_alg arc4_alg = { + .co.base.cra_name = "arc4", + .co.base.cra_driver_name = "arc4-generic", + .co.base.cra_priority = 100, + .co.base.cra_blocksize = ARC4_BLOCK_SIZE, + .co.base.cra_ctxsize = sizeof(struct arc4_ctx), + .co.base.cra_module = THIS_MODULE, + .co.min_keysize = ARC4_MIN_KEY_SIZE, + .co.max_keysize = ARC4_MAX_KEY_SIZE, + .setkey = crypto_arc4_setkey, + .encrypt = crypto_arc4_crypt, + .decrypt = crypto_arc4_crypt, + .init = crypto_arc4_init, }; static int __init arc4_init(void) { - return crypto_register_skcipher(&arc4_alg); + return crypto_register_lskcipher(&arc4_alg); } static void __exit arc4_exit(void) { - crypto_unregister_skcipher(&arc4_alg); + crypto_unregister_lskcipher(&arc4_alg); } subsys_initcall(arc4_init); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index bd4952e4..54135c76 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4963,7 +4963,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "ecb(arc4)", - .generic_driver = "ecb(arc4)-generic", + .generic_driver = "arc4-generic", .test = alg_test_skcipher, .suite = { .cipher = __VECS(arc4_tv_template) -- cgit v1.2.3 From a43bb48956d0c4e7fa0ad5aa37a059bc2d719ad6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:20 +0800 Subject: crypto: essiv - Handle lskcipher spawns Add code to handle an underlying lskcihper object when grabbing an skcipher spawn. 
Fixes: 17155679d03d ("crypto: skcipher - Add lskcipher") Signed-off-by: Herbert Xu --- crypto/essiv.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/crypto/essiv.c b/crypto/essiv.c index f7d4ef48..e63fc644 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -442,6 +442,7 @@ out: static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) { + struct skcipher_alg_common *skcipher_alg = NULL; struct crypto_attr_type *algt; const char *inner_cipher_name; const char *shash_name; @@ -450,7 +451,6 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_instance *inst; struct crypto_alg *base, *block_base; struct essiv_instance_ctx *ictx; - struct skcipher_alg *skcipher_alg = NULL; struct aead_alg *aead_alg = NULL; struct crypto_alg *_hash_alg; struct shash_alg *hash_alg; @@ -475,7 +475,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) mask = crypto_algt_inherited_mask(algt); switch (type) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: skcipher_inst = kzalloc(sizeof(*skcipher_inst) + sizeof(*ictx), GFP_KERNEL); if (!skcipher_inst) @@ -489,9 +489,10 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) inner_cipher_name, 0, mask); if (err) goto out_free_inst; - skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn); + skcipher_alg = crypto_spawn_skcipher_alg_common( + &ictx->u.skcipher_spawn); block_base = &skcipher_alg->base; - ivsize = crypto_skcipher_alg_ivsize(skcipher_alg); + ivsize = skcipher_alg->ivsize; break; case CRYPTO_ALG_TYPE_AEAD: @@ -574,18 +575,17 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) base->cra_alignmask = block_base->cra_alignmask; base->cra_priority = block_base->cra_priority; - if (type == CRYPTO_ALG_TYPE_SKCIPHER) { + if (type == CRYPTO_ALG_TYPE_LSKCIPHER) { skcipher_inst->alg.setkey = essiv_skcipher_setkey; skcipher_inst->alg.encrypt = essiv_skcipher_encrypt; skcipher_inst->alg.decrypt = essiv_skcipher_decrypt; skcipher_inst->alg.init = essiv_skcipher_init_tfm; skcipher_inst->alg.exit = essiv_skcipher_exit_tfm; - skcipher_inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(skcipher_alg); - skcipher_inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(skcipher_alg); + skcipher_inst->alg.min_keysize = skcipher_alg->min_keysize; + skcipher_inst->alg.max_keysize = skcipher_alg->max_keysize; skcipher_inst->alg.ivsize = ivsize; - skcipher_inst->alg.chunksize = crypto_skcipher_alg_chunksize(skcipher_alg); - skcipher_inst->alg.walksize = crypto_skcipher_alg_walksize(skcipher_alg); + skcipher_inst->alg.chunksize = skcipher_alg->chunksize; skcipher_inst->free = essiv_skcipher_free_instance; @@ -616,7 +616,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) out_free_hash: crypto_mod_put(_hash_alg); out_drop_skcipher: - if (type == CRYPTO_ALG_TYPE_SKCIPHER) + if (type == CRYPTO_ALG_TYPE_LSKCIPHER) crypto_drop_skcipher(&ictx->u.skcipher_spawn); else crypto_drop_aead(&ictx->u.aead_spawn); -- cgit v1.2.3 From 10f3117e309de36756755ae96fd549b5631a8223 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:21 +0800 Subject: crypto: cryptd - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
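The reason this change (and the analogous adiantum, authenc, authencesn, ccm, chacha20poly1305, ctr, cts, gcm, hctr2, lrw and xts changes that follow) is safe: skcipher and lskcipher algorithms share a common leading structure, so a template that only needs the key sizes, IV size and chunk size can read them through that shared view without caring which kind of algorithm the spawn resolved to. A rough sketch of that shared prefix, inferred from the call sites in this series rather than quoted from the header:

/* Field list inferred from the call sites below; illustrative only. */
struct skcipher_alg_common {
	unsigned int min_keysize;	/* smallest accepted key size */
	unsigned int max_keysize;	/* largest accepted key size */
	unsigned int ivsize;		/* IV size in bytes */
	unsigned int chunksize;		/* stream-cipher chunk size */
	/* ... */
	struct crypto_alg base;
};

crypto_spawn_skcipher_alg_common() hands back this common view for both skcipher and lskcipher spawns, which is what the conversions below rely on.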
Signed-off-by: Herbert Xu --- crypto/cryptd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 194a92d6..31d022d4 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -377,7 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, { struct skcipherd_instance_ctx *ctx; struct skcipher_instance *inst; - struct skcipher_alg *alg; + struct skcipher_alg_common *alg; u32 type; u32 mask; int err; @@ -396,17 +396,17 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(&ctx->spawn); + alg = crypto_spawn_skcipher_alg_common(&ctx->spawn); err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); if (err) goto err_free_inst; inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + inst->alg.ivsize = alg->ivsize; + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize; + inst->alg.max_keysize = alg->max_keysize; inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); -- cgit v1.2.3 From 57a4df41d1e7cea471842e50f91265dbad255156 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:22 +0800 Subject: crypto: adiantum - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/adiantum.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/adiantum.c b/crypto/adiantum.c index c33ba22a..51703746 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -468,7 +468,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst) * Check for a supported set of inner algorithms. * See the comment at the beginning of this file. */ -static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, +static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg, struct crypto_alg *blockcipher_alg, struct shash_alg *hash_alg) { @@ -494,7 +494,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) const char *nhpoly1305_name; struct skcipher_instance *inst; struct adiantum_instance_ctx *ictx; - struct skcipher_alg *streamcipher_alg; + struct skcipher_alg_common *streamcipher_alg; struct crypto_alg *blockcipher_alg; struct shash_alg *hash_alg; int err; @@ -514,7 +514,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; - streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); + streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn); /* Block cipher, e.g. 
"aes" */ err = crypto_grab_cipher(&ictx->blockcipher_spawn, @@ -578,8 +578,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.decrypt = adiantum_decrypt; inst->alg.init = adiantum_init_tfm; inst->alg.exit = adiantum_exit_tfm; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg); + inst->alg.min_keysize = streamcipher_alg->min_keysize; + inst->alg.max_keysize = streamcipher_alg->max_keysize; inst->alg.ivsize = TWEAK_SIZE; inst->free = adiantum_free_instance; -- cgit v1.2.3 From 0f48357a995d843c25e222d5d316a561614b21c1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:23 +0800 Subject: crypto: authenc - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/authenc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index 3326c734..fa896ab1 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -373,9 +373,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; + struct skcipher_alg_common *enc; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct skcipher_alg *enc; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); @@ -398,7 +398,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; - enc = crypto_spawn_skcipher_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg_common(&ctx->enc); ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, auth_base->cra_alignmask + 1); @@ -422,8 +422,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_init_tfm; -- cgit v1.2.3 From d25635bc95e25386e70ccd071fb74c867642db23 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:24 +0800 Subject: crypto: authencesn - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/authencesn.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 91424e79..60e9568f 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,9 +390,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; + struct skcipher_alg_common *enc; struct hash_alg_common *auth; struct crypto_alg *auth_base; - struct skcipher_alg *enc; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); @@ -415,7 +415,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; - enc = crypto_spawn_skcipher_alg(&ctx->enc); + enc = crypto_spawn_skcipher_alg_common(&ctx->enc); err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -435,8 +435,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); - inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc); - inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc); + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; inst->alg.maxauthsize = auth->digestsize; inst->alg.init = crypto_authenc_esn_init_tfm; -- cgit v1.2.3 From d8a766c63a3f70cb1396ba0a1561c65d5dbf9145 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:25 +0800 Subject: crypto: ccm - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/ccm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crypto/ccm.c b/crypto/ccm.c index a9453129..7af89a5b 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,10 +447,10 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { + struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; - struct skcipher_alg *ctr; struct hash_alg_common *mac; int err; @@ -478,13 +478,12 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ctr_name, 0, mask); if (err) goto err_free_inst; - ctr = crypto_spawn_skcipher_alg(&ictx->ctr); + ctr = crypto_spawn_skcipher_alg_common(&ictx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || - crypto_skcipher_alg_ivsize(ctr) != 16 || - ctr->base.cra_blocksize != 1) + ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; /* ctr and cbcmac must use the same underlying block cipher. 
*/ @@ -507,7 +506,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_alignmask = mac->base.cra_alignmask | ctr->base.cra_alignmask; inst->alg.ivsize = 16; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); inst->alg.init = crypto_ccm_init_tfm; -- cgit v1.2.3 From 655ed8ec1581b32c17a862982b5891a5437459fd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:26 +0800 Subject: crypto: chacha20poly1305 - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 3a905c5d..0e2e208d 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -558,7 +558,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; - struct skcipher_alg *chacha; + struct skcipher_alg_common *chacha; struct hash_alg_common *poly; int err; @@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; - chacha = crypto_spawn_skcipher_alg(&ctx->chacha); + chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha); err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), crypto_attr_alg_name(tb[2]), 0, mask); @@ -591,7 +591,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (poly->digestsize != POLY1305_DIGEST_SIZE) goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ - if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE) + if (chacha->ivsize != CHACHA_IV_SIZE) goto err_free_inst; /* Not a stream cipher? */ if (chacha->base.cra_blocksize != 1) @@ -615,7 +615,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha); + inst->alg.chunksize = chacha->chunksize; inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; inst->alg.init = chachapoly_init; inst->alg.exit = chachapoly_exit; -- cgit v1.2.3 From cf442366cd00206389ba2e7ee699e8cae141c6a9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:27 +0800 Subject: crypto: ctr - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/ctr.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/crypto/ctr.c b/crypto/ctr.c index 23c698b2..14204960 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -258,8 +258,8 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; u32 mask; int err; @@ -278,11 +278,11 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); /* We only support 16-byte blocks. */ err = -EINVAL; - if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE) + if (alg->ivsize != CTR_RFC3686_BLOCK_SIZE) goto err_free_inst; /* Not a stream cipher? */ @@ -303,11 +303,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = CTR_RFC3686_IV_SIZE; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + - CTR_RFC3686_NONCE_SIZE; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + - CTR_RFC3686_NONCE_SIZE; + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize + CTR_RFC3686_NONCE_SIZE; + inst->alg.max_keysize = alg->max_keysize + CTR_RFC3686_NONCE_SIZE; inst->alg.setkey = crypto_rfc3686_setkey; inst->alg.encrypt = crypto_rfc3686_crypt; -- cgit v1.2.3 From 2e33bd81213e886288c7b6cac14d57a5db6ae519 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:28 +0800 Subject: crypto: cts - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/cts.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crypto/cts.c b/crypto/cts.c index 8f604f65..f5b42156 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -324,8 +324,8 @@ static void crypto_cts_free(struct skcipher_instance *inst) static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; struct skcipher_instance *inst; - struct skcipher_alg *alg; u32 mask; int err; @@ -344,10 +344,10 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_spawn_skcipher_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; - if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize) + if (alg->ivsize != alg->base.cra_blocksize) goto err_free_inst; if (strncmp(alg->base.cra_name, "cbc(", 4)) @@ -363,9 +363,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.ivsize = alg->base.cra_blocksize; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); + inst->alg.chunksize = alg->chunksize; + inst->alg.min_keysize = alg->min_keysize; + inst->alg.max_keysize = alg->max_keysize; inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx); -- cgit v1.2.3 From d586c61f2445d8cc63c9fb383fc92361340a32f1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:29 +0800 Subject: crypto: gcm - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/gcm.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 4ba62445..91ce6e0e 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -576,10 +576,10 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { + struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; - struct skcipher_alg *ctr; struct hash_alg_common *ghash; int err; @@ -607,13 +607,12 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr_name, 0, mask); if (err) goto err_free_inst; - ctr = crypto_spawn_skcipher_alg(&ctx->ctr); + ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. 
*/ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || - crypto_skcipher_alg_ivsize(ctr) != 16 || - ctr->base.cra_blocksize != 1) + ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; @@ -634,7 +633,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; - inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); + inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.init = crypto_gcm_init_tfm; inst->alg.exit = crypto_gcm_exit_tfm; -- cgit v1.2.3 From d15232fe37de805bd155e9b150d7aaf86feb5d3b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:30 +0800 Subject: crypto: hctr2 - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. Signed-off-by: Herbert Xu --- crypto/hctr2.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/hctr2.c b/crypto/hctr2.c index 6f4c1884..653fde72 100644 --- a/crypto/hctr2.c +++ b/crypto/hctr2.c @@ -406,10 +406,10 @@ static int hctr2_create_common(struct crypto_template *tmpl, const char *xctr_name, const char *polyval_name) { + struct skcipher_alg_common *xctr_alg; u32 mask; struct skcipher_instance *inst; struct hctr2_instance_ctx *ictx; - struct skcipher_alg *xctr_alg; struct crypto_alg *blockcipher_alg; struct shash_alg *polyval_alg; char blockcipher_name[CRYPTO_MAX_ALG_NAME]; @@ -431,7 +431,7 @@ static int hctr2_create_common(struct crypto_template *tmpl, xctr_name, 0, mask); if (err) goto err_free_inst; - xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn); + xctr_alg = crypto_spawn_skcipher_alg_common(&ictx->xctr_spawn); err = -EINVAL; if (strncmp(xctr_alg->base.cra_name, "xctr(", 5)) @@ -500,8 +500,8 @@ static int hctr2_create_common(struct crypto_template *tmpl, inst->alg.decrypt = hctr2_decrypt; inst->alg.init = hctr2_init_tfm; inst->alg.exit = hctr2_exit_tfm; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg); + inst->alg.min_keysize = xctr_alg->min_keysize; + inst->alg.max_keysize = xctr_alg->max_keysize; inst->alg.ivsize = TWEAK_SIZE; inst->free = hctr2_free_instance; -- cgit v1.2.3 From 8323119732f508dcd39cc8cdd5ebe54559cfb5bb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:31 +0800 Subject: crypto: lrw - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/lrw.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index 59260aef..e216fbf2 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -299,8 +299,8 @@ static void lrw_free_instance(struct skcipher_instance *inst) static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; + struct skcipher_alg_common *alg; struct skcipher_instance *inst; - struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; @@ -336,13 +336,13 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_skcipher_spawn_alg(spawn); + alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) goto err_free_inst; - if (crypto_skcipher_alg_ivsize(alg)) + if (alg->ivsize) goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", @@ -382,10 +382,8 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) (__alignof__(be128) - 1); inst->alg.ivsize = LRW_BLOCK_SIZE; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + - LRW_BLOCK_SIZE; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + - LRW_BLOCK_SIZE; + inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE; + inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); -- cgit v1.2.3 From c2e93d6372cadb653054710ad9be3e8ba4231551 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 3 Oct 2023 11:43:32 +0800 Subject: crypto: xts - Only access common skcipher fields on spawn As skcipher spawns may be of the type lskcipher, only the common fields may be accessed. This was already the case but use the correct helpers to make this more obvious. 
Signed-off-by: Herbert Xu --- crypto/xts.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/xts.c b/crypto/xts.c index 548b302c..9237b143 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -338,9 +338,9 @@ static void xts_free_instance(struct skcipher_instance *inst) static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { + struct skcipher_alg_common *alg; struct skcipher_instance *inst; struct xts_instance_ctx *ctx; - struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; @@ -375,13 +375,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alg = crypto_skcipher_spawn_alg(&ctx->spawn); + alg = crypto_spawn_skcipher_alg_common(&ctx->spawn); err = -EINVAL; if (alg->base.cra_blocksize != XTS_BLOCK_SIZE) goto err_free_inst; - if (crypto_skcipher_alg_ivsize(alg)) + if (alg->ivsize) goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts", @@ -421,8 +421,8 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) (__alignof__(u64) - 1); inst->alg.ivsize = XTS_BLOCK_SIZE; - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; + inst->alg.min_keysize = alg->min_keysize * 2; + inst->alg.max_keysize = alg->max_keysize * 2; inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx); -- cgit v1.2.3 From 61685126cb52dd3820d10f236f3fec30698de786 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Sat, 7 Oct 2023 09:10:43 +0200 Subject: crypto: jitter - reuse allocated entropy collector In case a health test error occurs during runtime, the power-up health tests are rerun to verify that the noise source is still good and that the reported health test error was an outlier. For performing this power-up health test, the already existing entropy collector instance is used instead of allocating a new one. This change has the following implications: * The noise that is collected as part of the newly run health tests is inserted into the entropy collector and thus stirs the existing data present in there further. Thus, the entropy collected during the health test is not wasted. This is also allowed by SP800-90B. * The power-on health test is not affected by the state of the entropy collector, because it resets the APT / RCT state. The remainder of the state is unrelated to the health test as it is only applied to newly obtained time stamps. This change also fixes a bug report about an allocation while in an atomic lock (the lock is taken in jent_kcapi_random, jent_read_entropy is called and this can call jent_entropy_init). 
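The runtime call pattern after this change then looks roughly as follows (condensed from the jent_read_entropy() hunk below; the surrounding health-failure check is paraphrased, so treat its exact form as an assumption):

	/*
	 * On a runtime health test failure, rerun the power-up tests on
	 * the existing collector instead of allocating a new one.  With
	 * a collector supplied, the osr/flags/hash_state arguments are
	 * not used for allocation.
	 */
	if (jent_entropy_init(0, 0, NULL, ec))
		return -3;	/* permanent health failure */

	return -2;		/* intermittent health failure */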
Fixes: f0be2a5192f0 ("jitter - add RCT/APT support for different OSRs") Reported-by: Dan Carpenter Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 2 +- crypto/jitterentropy.c | 36 ++++++++++++++++++++++++++---------- crypto/jitterentropy.h | 2 +- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 0c675222..76edbf8a 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -347,7 +347,7 @@ static int __init jent_mod_init(void) desc->tfm = tfm; crypto_shash_init(desc); - ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc); + ret = jent_entropy_init(CONFIG_CRYPTO_JITTERENTROPY_OSR, 0, desc, NULL); shash_desc_zero(desc); crypto_free_shash(tfm); if (ret) { diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 18bbe2b8..09c9db90 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -611,8 +611,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, * Perform startup health tests and return permanent * error if it fails. */ - if (jent_entropy_init(ec->osr, ec->flags, - ec->hash_state)) + if (jent_entropy_init(0, 0, NULL, ec)) return -3; return -2; @@ -686,14 +685,30 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector) jent_zfree(entropy_collector); } -int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state) +int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, + struct rand_data *p_ec) { - struct rand_data *ec; - int i, time_backwards = 0, ret = 0; - - ec = jent_entropy_collector_alloc(osr, flags, hash_state); - if (!ec) - return JENT_EMEM; + /* + * If caller provides an allocated ec, reuse it which implies that the + * health test entropy data is used to further still the available + * entropy pool. + */ + struct rand_data *ec = p_ec; + int i, time_backwards = 0, ret = 0, ec_free = 0; + + if (!ec) { + ec = jent_entropy_collector_alloc(osr, flags, hash_state); + if (!ec) + return JENT_EMEM; + ec_free = 1; + } else { + /* Reset the APT */ + jent_apt_reset(ec, 0); + /* Ensure that a new APT base is obtained */ + ec->apt_base_set = 0; + /* Reset the RCT */ + ec->rct_count = 0; + } /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. 
These @@ -783,7 +798,8 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state) } out: - jent_entropy_collector_free(ec); + if (ec_free) + jent_entropy_collector_free(ec); return ret; } diff --git a/crypto/jitterentropy.h b/crypto/jitterentropy.h index e31661ee..aa472867 100644 --- a/crypto/jitterentropy.h +++ b/crypto/jitterentropy.h @@ -12,7 +12,7 @@ int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len); struct rand_data; extern int jent_entropy_init(unsigned int osr, unsigned int flags, - void *hash_state); + void *hash_state, struct rand_data *p_ec); extern int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len); -- cgit v1.2.3 From 7612222ba4d506b41beb8f40abaa206942a4109a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 8 Oct 2023 19:31:16 -0700 Subject: crypto: xts - use 'spawn' for underlying single-block cipher Since commit f91796cf71a5 ("crypto: api - Fix built-in testing dependency failures"), the following warning appears when booting an x86_64 kernel that is configured with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y and CONFIG_CRYPTO_AES_NI_INTEL=y, even when CONFIG_CRYPTO_XTS=y and CONFIG_CRYPTO_AES=y: alg: skcipher: skipping comparison tests for xts-aes-aesni because xts(ecb(aes-generic)) is unavailable This is caused by an issue in the xts template where it allocates an "aes" single-block cipher without declaring a dependency on it via the crypto_spawn mechanism. This issue was exposed by the above commit because it reversed the order that the algorithms are tested in. Specifically, when "xts(ecb(aes-generic))" is instantiated and tested during the comparison tests for "xts-aes-aesni", the "xts" template allocates an "aes" crypto_cipher for encrypting tweaks. This resolves to "aes-aesni". (Getting "aes-aesni" instead of "aes-generic" here is a bit weird, but it's apparently intended.) Due to the above-mentioned commit, the testing of "aes-aesni", and the finalization of its registration, now happens at this point instead of before. At the end of that, crypto_remove_spawns() unregisters all algorithm instances that depend on a lower-priority "aes" implementation such as "aes-generic" but that do not depend on "aes-aesni". However, because "xts" does not use the crypto_spawn mechanism for its "aes", its dependency on "aes-aesni" is not recognized by crypto_remove_spawns(). Thus, crypto_remove_spawns() unexpectedly unregisters "xts(ecb(aes-generic))". Fix this issue by making the "xts" template use the crypto_spawn mechanism for its "aes" dependency, like what other templates do. Note, this fix could be applied as far back as commit 6c1314f521f2 ("crypto: xts - Convert to skcipher"). However, the issue only got exposed by the much more recent changes to how the crypto API runs the self-tests, so there should be no need to backport this to very old kernels. Also, an alternative fix would be to flip the list iteration order in crypto_start_tests() to restore the original testing order. I'm thinking we should do that too, since the original order seems more natural, but it shouldn't be relied on for correctness. 
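For context, the fix below follows the standard way a template declares a dependency on a single-block cipher; a condensed sketch (names abbreviated from the actual hunk):

	/* ->create(): record the dependency, so crypto_remove_spawns()
	 * knows this instance uses the named cipher. */
	err = crypto_grab_cipher(&ictx->tweak_spawn,
				 skcipher_crypto_instance(inst),
				 name, 0, mask);
	if (err)
		goto err_free_inst;

	/* ->init_tfm(): allocate the cipher through the spawn instead of
	 * by name via crypto_alloc_cipher(). */
	tweak = crypto_spawn_cipher(&ictx->tweak_spawn);
	if (IS_ERR(tweak))
		return PTR_ERR(tweak);

	/* ->free(): drop the spawn again. */
	crypto_drop_cipher(&ictx->tweak_spawn);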
Fixes: f91796cf71a5 ("crypto: api - Fix built-in testing dependency failures") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xts.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/crypto/xts.c b/crypto/xts.c index 9237b143..672e1a3f 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -28,7 +28,7 @@ struct xts_tfm_ctx { struct xts_instance_ctx { struct crypto_skcipher_spawn spawn; - char name[CRYPTO_MAX_ALG_NAME]; + struct crypto_cipher_spawn tweak_spawn; }; struct xts_request_ctx { @@ -306,7 +306,7 @@ static int xts_init_tfm(struct crypto_skcipher *tfm) ctx->child = child; - tweak = crypto_alloc_cipher(ictx->name, 0, 0); + tweak = crypto_spawn_cipher(&ictx->tweak_spawn); if (IS_ERR(tweak)) { crypto_free_skcipher(ctx->child); return PTR_ERR(tweak); @@ -333,12 +333,14 @@ static void xts_free_instance(struct skcipher_instance *inst) struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); crypto_drop_skcipher(&ictx->spawn); + crypto_drop_cipher(&ictx->tweak_spawn); kfree(inst); } static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_alg_common *alg; + char name[CRYPTO_MAX_ALG_NAME]; struct skcipher_instance *inst; struct xts_instance_ctx *ctx; const char *cipher_name; @@ -363,13 +365,13 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) cipher_name, 0, mask); if (err == -ENOENT) { err = -ENAMETOOLONG; - if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", + if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), - ctx->name, 0, mask); + name, 0, mask); } if (err) @@ -398,23 +400,28 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) if (!strncmp(cipher_name, "ecb(", 4)) { int len; - len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); + len = strscpy(name, cipher_name + 4, sizeof(name)); if (len < 2) goto err_free_inst; - if (ctx->name[len - 1] != ')') + if (name[len - 1] != ')') goto err_free_inst; - ctx->name[len - 1] = 0; + name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) { + "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) { err = -ENAMETOOLONG; goto err_free_inst; } } else goto err_free_inst; + err = crypto_grab_cipher(&ctx->tweak_spawn, + skcipher_crypto_instance(inst), name, 0, mask); + if (err) + goto err_free_inst; + inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | -- cgit v1.2.3 From e5dcb2c03a0cecf042e52ca1c84f7c6595723fe5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 9 Oct 2023 00:32:13 -0700 Subject: crypto: shash - optimize the default digest and finup For an shash algorithm that doesn't implement ->digest, currently crypto_shash_digest() with aligned input makes 5 indirect calls: 1 to shash_digest_unaligned(), 1 to ->init, 2 to ->update ('alignmask + 1' bytes, then the rest), then 1 to ->final. This is true even if the algorithm implements ->finup. This is caused by an unnecessary fallback to code meant to handle unaligned inputs. In fact, crypto_shash_digest() already does the needed alignment check earlier. Therefore, optimize the number of indirect calls for aligned inputs to 3 when the algorithm implements ->finup. It remains at 5 when the algorithm implements neither ->finup nor ->digest. 
Similarly, for an shash algorithm that doesn't implement ->finup, currently crypto_shash_finup() with aligned input makes 4 indirect calls: 1 to shash_finup_unaligned(), 2 to ->update, and 1 to ->final. Optimize this to 3 calls. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/crypto/shash.c b/crypto/shash.c index 1fadb6b5..d99dc2f9 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -191,6 +191,15 @@ static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, shash_final_unaligned(desc, out); } +static int shash_default_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct shash_alg *shash = crypto_shash_alg(desc->tfm); + + return shash->update(desc, data, len) ?: + shash->final(desc, out); +} + int crypto_shash_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -224,6 +233,15 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, shash_final_unaligned(desc, out); } +static int shash_default_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct shash_alg *shash = crypto_shash_alg(desc->tfm); + + return shash->init(desc) ?: + shash->finup(desc, data, len, out); +} + int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -656,9 +674,9 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; if (!alg->finup) - alg->finup = shash_finup_unaligned; + alg->finup = shash_default_finup; if (!alg->digest) - alg->digest = shash_digest_unaligned; + alg->digest = shash_default_digest; if (!alg->export) { alg->export = shash_default_export; alg->import = shash_default_import; -- cgit v1.2.3 From 8a3684881087d6030988a99bea8b3299e88aaf17 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 9 Oct 2023 00:32:14 -0700 Subject: crypto: shash - fold shash_digest_unaligned() into crypto_shash_digest() Fold shash_digest_unaligned() into its only remaining caller. Also, avoid a redundant check of CRYPTO_TFM_NEED_KEY by replacing the call to crypto_shash_init() with shash->init(desc). Finally, replace shash_update_unaligned() + shash_final_unaligned() with shash_finup_unaligned() which does exactly that. 
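Callers are unaffected by either of these changes; the usual one-shot pattern stays the same and simply runs with fewer indirect calls behind it (usage sketch only, not a new interface):

	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	/*
	 * For an algorithm that implements ->finup but not ->digest,
	 * this is now ->init + ->finup rather than going through the
	 * old unaligned fallback path.
	 */
	err = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);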
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/crypto/shash.c b/crypto/shash.c index d99dc2f9..15fee57c 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -225,14 +225,6 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_finup); -static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return crypto_shash_init(desc) ?: - shash_update_unaligned(desc, data, len) ?: - shash_final_unaligned(desc, out); -} - static int shash_default_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -260,7 +252,8 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) err = -ENOKEY; else if (((unsigned long)data | (unsigned long)out) & alignmask) - err = shash_digest_unaligned(desc, data, len, out); + err = shash->init(desc) ?: + shash_finup_unaligned(desc, data, len, out); else err = shash->digest(desc, data, len, out); -- cgit v1.2.3 From ba95588e20059d4ecc82a0a784ef37d82863d945 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 9 Oct 2023 22:59:43 -0700 Subject: crypto: adiantum - add fast path for single-page messages When the source scatterlist is a single page, optimize the first hash step of adiantum to use crypto_shash_digest() instead of init/update/final, and use the same local kmap for both hashing the bulk part and loading the narrow part of the source data. Likewise, when the destination scatterlist is a single page, optimize the second hash step of adiantum to use crypto_shash_digest() instead of init/update/final, and use the same local kmap for both hashing the bulk part and storing the narrow part of the destination data. In some cases these optimizations improve performance significantly. Note: ideally, for optimal performance each architecture should implement the full "adiantum(xchacha12,aes)" algorithm and fully optimize the contiguous buffer case to use no indirect calls. That's not something I've gotten around to doing, though. This commit just makes a relatively small change that provides some benefit with the existing template-based approach. 
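Both hash steps use the same single-page test; condensed sketch of the shape (here 'sg' stands for either req->src or req->dst, and the error handling of the two call sites is folded together):

	if (sg_nents(sg) == 1 && sg->offset + req->cryptlen <= PAGE_SIZE) {
		/* Whole message in one page: one crypto_shash_digest()
		 * call on a local kmap, no init/update/final walk. */
		void *virt = kmap_local_page(sg_page(sg)) + sg->offset;

		err = crypto_shash_digest(&rctx->u.hash_desc, virt,
					  bulk_len, (u8 *)&digest);
		kunmap_local(virt);
	} else {
		/* Any other scatterlist keeps the sg_miter-based walk. */
		err = adiantum_hash_message(req, sg, sg_nents(sg), &digest);
	}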
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 65 ++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 51703746..aa0f8b3b 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -245,10 +245,9 @@ static void adiantum_hash_header(struct skcipher_request *req) /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */ static int adiantum_hash_message(struct skcipher_request *req, - struct scatterlist *sgl, le128 *digest) + struct scatterlist *sgl, unsigned int nents, + le128 *digest) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; struct shash_desc *hash_desc = &rctx->u.hash_desc; @@ -256,14 +255,11 @@ static int adiantum_hash_message(struct skcipher_request *req, unsigned int i, n; int err; - hash_desc->tfm = tctx->hash; - err = crypto_shash_init(hash_desc); if (err) return err; - sg_miter_start(&miter, sgl, sg_nents(sgl), - SG_MITER_FROM_SG | SG_MITER_ATOMIC); + sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC); for (i = 0; i < bulk_len; i += n) { sg_miter_next(&miter); n = min_t(unsigned int, miter.length, bulk_len - i); @@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req) const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct scatterlist *dst = req->dst; + const unsigned int dst_nents = sg_nents(dst); le128 digest; int err; @@ -298,13 +296,30 @@ static int adiantum_finish(struct skcipher_request *req) * enc: C_R = C_M - H_{K_H}(T, C_L) * dec: P_R = P_M - H_{K_H}(T, P_L) */ - err = adiantum_hash_message(req, req->dst, &digest); - if (err) - return err; - le128_add(&digest, &digest, &rctx->header_hash); - le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); - scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst, - bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1); + rctx->u.hash_desc.tfm = tctx->hash; + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); + if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) { + /* Fast path for single-page destination */ + void *virt = kmap_local_page(sg_page(dst)) + dst->offset; + + err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, + (u8 *)&digest); + if (err) { + kunmap_local(virt); + return err; + } + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); + memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128)); + kunmap_local(virt); + } else { + /* Slow path that works for any destination scatterlist */ + err = adiantum_hash_message(req, dst, dst_nents, &digest); + if (err) + return err; + le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); + scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst, + bulk_len, sizeof(le128), 1); + } return 0; } @@ -324,6 +339,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc) const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct scatterlist *src = req->src; + const unsigned int src_nents = sg_nents(src); unsigned int stream_len; le128 digest; 
int err; @@ -339,12 +356,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc) * dec: C_M = C_R + H_{K_H}(T, C_L) */ adiantum_hash_header(req); - err = adiantum_hash_message(req, req->src, &digest); + rctx->u.hash_desc.tfm = tctx->hash; + if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) { + /* Fast path for single-page source */ + void *virt = kmap_local_page(sg_page(src)) + src->offset; + + err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, + (u8 *)&digest); + memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128)); + kunmap_local(virt); + } else { + /* Slow path that works for any source scatterlist */ + err = adiantum_hash_message(req, src, src_nents, &digest); + scatterwalk_map_and_copy(&rctx->rbuf.bignum, src, + bulk_len, sizeof(le128), 0); + } if (err) return err; - le128_add(&digest, &digest, &rctx->header_hash); - scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src, - bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0); + le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); /* If encrypting, encrypt P_M with the block cipher to get C_M */ -- cgit v1.2.3 From 6add861dcd431e475e24eff95cda1d08d29797ec Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Tue, 10 Oct 2023 22:22:38 +0100 Subject: crypto: pkcs7 - remove sha1 support Removes support for sha1 signed kernel modules, importing sha1 signed x.509 certificates. rsa-pkcs1pad keeps sha1 padding support, which seems to be used by virtio driver. sha1 remains available as there are many drivers and subsystems using it. Note only hmac(sha1) with secret keys remains cryptographically secure. In the kernel there are filesystems, IMA, tpm/pcr that appear to be using sha1. Maybe they can all start to be slowly upgraded to something else i.e. blake3, ParallelHash, SHAKE256 as needed. 
Signed-off-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 3 -- crypto/asymmetric_keys/pkcs7_parser.c | 4 -- crypto/asymmetric_keys/public_key.c | 3 +- crypto/asymmetric_keys/signature.c | 2 +- crypto/asymmetric_keys/x509_cert_parser.c | 8 ---- crypto/testmgr.h | 80 ------------------------------- 6 files changed, 2 insertions(+), 98 deletions(-) diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 690405eb..6416bded 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,9 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_sha1: - ctx->digest_algo = "sha1"; - break; case OID_sha256: ctx->digest_algo = "sha256"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index cf4caab9..ab647cb4 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,9 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_sha1: - ctx->sinfo->sig->hash_algo = "sha1"; - break; case OID_sha256: ctx->sinfo->sig->hash_algo = "sha256"; break; @@ -272,7 +269,6 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; - case OID_id_ecdsa_with_sha1: case OID_id_ecdsa_with_sha224: case OID_id_ecdsa_with_sha256: case OID_id_ecdsa_with_sha384: diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index abeecb83..5bf0452c 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -116,8 +116,7 @@ software_key_determine_akcipher(const struct public_key *pkey, */ if (!hash_algo) return -EINVAL; - if (strcmp(hash_algo, "sha1") != 0 && - strcmp(hash_algo, "sha224") != 0 && + if (strcmp(hash_algo, "sha224") != 0 && strcmp(hash_algo, "sha256") != 0 && strcmp(hash_algo, "sha384") != 0 && strcmp(hash_algo, "sha512") != 0) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 2deff81f..398983be 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob); * Sign the specified data blob using the private key specified by params->key. * The signature is wrapped in an encoding if params->encoding is specified * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be - * passed through params->hash_algo (eg. "sha1"). + * passed through params->hash_algo (eg. "sha512"). * * Returns the length of the data placed in the signature buffer or an error. 
*/ diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 2c309286..68ef1ffb 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -198,10 +198,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, default: return -ENOPKG; /* Unsupported combination */ - case OID_sha1WithRSAEncryption: - ctx->cert->sig->hash_algo = "sha1"; - goto rsa_pkcs1; - case OID_sha256WithRSAEncryption: ctx->cert->sig->hash_algo = "sha256"; goto rsa_pkcs1; @@ -218,10 +214,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; - case OID_id_ecdsa_with_sha1: - ctx->cert->sig->hash_algo = "sha1"; - goto ecdsa; - case OID_id_ecdsa_with_sha224: ctx->cert->sig->hash_algo = "sha224"; goto ecdsa; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 0cd6e060..d7e98397 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -653,30 +653,6 @@ static const struct akcipher_testvec rsa_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { { .key = - "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" - "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" - "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" - "\x98", - .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, - .m = - "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" - "\x63\x85\xe7\x82", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91" - "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10" - "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86" - "\x80\x6f\xa5\x79\x77\xda\xd0", - .c_size = 55, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { - .key = "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" @@ -780,32 +756,6 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { { .key = - "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" - "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" - "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" - "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" - "\xaf", - .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, - .m = - "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" - "\x0b\xde\x6a\x42", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7" - "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a" - "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d" - "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7" - "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad", - .c_size = 72, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { - .key = "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" 
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" @@ -916,36 +866,6 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { { - .key = /* secp384r1(sha1) */ - "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" - "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" - "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" - "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" - "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" - "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" - "\xf1", - .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, - .m = - "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" - "\x3a\x69\xc1\x93", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07" - "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1" - "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e" - "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0" - "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88" - "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26" - "\x79\x12\x2a\xb7\xc5\x15\x92\xc5", - .c_size = 104, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { .key = /* secp384r1(sha224) */ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" -- cgit v1.2.3 From d12a6b181866c9fbcd59338e54beb3491846e751 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Tue, 10 Oct 2023 22:25:29 +0100 Subject: crypto: mscode_parser - remove sha224 authenticode support It is possible to stand up own certificates and sign PE-COFF binaries using SHA-224. However it never became popular or needed since it has similar costs as SHA-256. Windows Authenticode infrastructure never had support for SHA-224, and all secureboot keys used fro linux vmlinuz have always been using at least SHA-256. Given the point of mscode_parser is to support interoperatiblity with typical de-facto hashes, remove support for SHA-224 to avoid posibility of creating interoperatibility issues with rhboot/shim, grub, and non-linux systems trying to sign or verify vmlinux. SHA-224 itself is not removed from the kernel, as it is truncated SHA-256. If requested I can write patches to remove SHA-224 support across all of the drivers. 
Signed-off-by: Dimitri John Ledkov Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 6416bded..855cbc46 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -84,9 +84,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, case OID_sha512: ctx->digest_algo = "sha512"; break; - case OID_sha224: - ctx->digest_algo = "sha224"; - break; case OID__NR: sprint_oid(value, vlen, buffer, sizeof(buffer)); -- cgit v1.2.3 From ab230b6c7e63b47b853c2bb6023c26c054c2d6c5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 12 Oct 2023 13:11:25 +0800 Subject: crypto: lskcipher - Return EINVAL when ecb_name fails sanity checks Set the error value to -EINVAL instead of zero when the underlying name (within "ecb()") fails basic sanity checks. Fixes: 6781a99f1919 ("crypto: lskcipher - Add compatibility wrapper around ECB") Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202310111323.ZjK7bzjw-lkp@intel.com/ Signed-off-by: Herbert Xu --- crypto/lskcipher.c | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index 9be3c04b..cb6170eb 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -583,6 +583,7 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple( if (ecb_name[0]) { int len; + err = -EINVAL; len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4], sizeof(ecb_name)); if (len < 2) -- cgit v1.2.3 From 6dac6980754c6e19dc1f82dbd8c0a3383e26cf98 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 12 Oct 2023 22:56:13 -0700 Subject: crypto: skcipher - fix weak key check for lskciphers When an algorithm of the new "lskcipher" type is exposed through the "skcipher" API, calls to crypto_skcipher_setkey() don't pass on the CRYPTO_TFM_REQ_FORBID_WEAK_KEYS flag to the lskcipher. This causes self-test failures for ecb(des), as weak keys are not rejected anymore. Fix this. 
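The fix is the usual request-flag forwarding done whenever one tfm wraps another; sketch of the relevant part of crypto_skcipher_setkey() after the change:

	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

	/*
	 * Forward request flags such as CRYPTO_TFM_REQ_FORBID_WEAK_KEYS
	 * from the skcipher wrapper to the underlying lskcipher before
	 * the key is set, so weak-key rejection works again.
	 */
	crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
	crypto_lskcipher_set_flags(*ctx, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_lskcipher_setkey(*ctx, key, keylen);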
Fixes: 17155679d03d ("crypto: skcipher - Add lskcipher") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/lskcipher.c | 8 -------- crypto/skcipher.c | 8 +++++++- crypto/skcipher.h | 2 -- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c index cb6170eb..9edc8973 100644 --- a/crypto/lskcipher.c +++ b/crypto/lskcipher.c @@ -194,14 +194,6 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, } EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); -int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); - - return crypto_lskcipher_setkey(*ctx, key, keylen); -} - static int crypto_lskcipher_crypt_sg(struct skcipher_request *req, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, diff --git a/crypto/skcipher.c b/crypto/skcipher.c index b9496dc8..ac8b8c04 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -621,7 +621,13 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, int err; if (cipher->co.base.cra_type != &crypto_skcipher_type) { - err = crypto_lskcipher_setkey_sg(tfm, key, keylen); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); + + crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(*ctx, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_lskcipher_setkey(*ctx, key, keylen); goto out; } diff --git a/crypto/skcipher.h b/crypto/skcipher.h index 6f1295f0..16c94843 100644 --- a/crypto/skcipher.h +++ b/crypto/skcipher.h @@ -20,8 +20,6 @@ static inline struct crypto_istat_cipher *skcipher_get_stat_common( #endif } -int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen); int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm); -- cgit v1.2.3 From 6a49ee67e7f14ce9a00f1514655c0b6333b91730 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Oct 2023 13:21:44 +0800 Subject: certs: Break circular dependency when selftest is modular The modular build fails because the self-test code depends on pkcs7 which in turn depends on x509 which contains the self-test. Split the self-test out into its own module to break the cycle. Fixes: 041ec6e33d48 ("certs: Add FIPS selftests") Signed-off-by: Herbert Xu Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/Kconfig | 3 ++- crypto/asymmetric_keys/Makefile | 3 ++- crypto/asymmetric_keys/selftest.c | 13 ++++++++++--- crypto/asymmetric_keys/x509_parser.h | 9 --------- crypto/asymmetric_keys/x509_public_key.c | 8 +------- 5 files changed, 15 insertions(+), 21 deletions(-) diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 1ef3b46d..59ec726b 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -76,7 +76,7 @@ config SIGNED_PE_FILE_VERIFICATION signed PE binary. config FIPS_SIGNATURE_SELFTEST - bool "Run FIPS selftests on the X.509+PKCS7 signature verification" + tristate "Run FIPS selftests on the X.509+PKCS7 signature verification" help This option causes some selftests to be run on the signature verification code, using some built in data. 
This is required @@ -84,5 +84,6 @@ config FIPS_SIGNATURE_SELFTEST depends on KEYS depends on ASYMMETRIC_KEY_TYPE depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER + depends on X509_CERTIFICATE_PARSER endif # ASYMMETRIC_KEY_TYPE diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index 0d1fa1b6..1a273d6d 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -22,7 +22,8 @@ x509_key_parser-y := \ x509_cert_parser.o \ x509_loader.o \ x509_public_key.o -x509_key_parser-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += selftest.o +obj-$(CONFIG_FIPS_SIGNATURE_SELFTEST) += x509_selftest.o +x509_selftest-y += selftest.o $(obj)/x509_cert_parser.o: \ $(obj)/x509.asn1.h \ diff --git a/crypto/asymmetric_keys/selftest.c b/crypto/asymmetric_keys/selftest.c index fa0bf7f2..c50da7ef 100644 --- a/crypto/asymmetric_keys/selftest.c +++ b/crypto/asymmetric_keys/selftest.c @@ -4,10 +4,11 @@ * Written by David Howells (dhowells@redhat.com) */ -#include +#include #include +#include #include -#include +#include #include "x509_parser.h" struct certs_test { @@ -175,7 +176,7 @@ static const struct certs_test certs_tests[] __initconst = { TEST(certs_selftest_1_data, certs_selftest_1_pkcs7), }; -int __init fips_signature_selftest(void) +static int __init fips_signature_selftest(void) { struct key *keyring; int ret, i; @@ -222,3 +223,9 @@ int __init fips_signature_selftest(void) key_put(keyring); return 0; } + +late_initcall(fips_signature_selftest); + +MODULE_DESCRIPTION("X.509 self tests"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index a299c9c5..97a886cb 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -40,15 +40,6 @@ struct x509_certificate { bool blacklisted; }; -/* - * selftest.c - */ -#ifdef CONFIG_FIPS_SIGNATURE_SELFTEST -extern int __init fips_signature_selftest(void); -#else -static inline int fips_signature_selftest(void) { return 0; } -#endif - /* * x509_cert_parser.c */ diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 7c71db3a..6a4f00be 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -262,15 +262,9 @@ static struct asymmetric_key_parser x509_key_parser = { /* * Module stuff */ -extern int __init certs_selftest(void); static int __init x509_key_init(void) { - int ret; - - ret = register_asymmetric_key_parser(&x509_key_parser); - if (ret < 0) - return ret; - return fips_signature_selftest(); + return register_asymmetric_key_parser(&x509_key_parser); } static void __exit x509_key_exit(void) -- cgit v1.2.3 From 099dc3edfb212ef399107a0b6affd7b19e154571 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 16 Oct 2023 13:57:30 +0800 Subject: crypto: rsa - Add module alias for pkcs1pad Add a module alias for pkcs1pas so that it can be auto-loaded by modprobe. 
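For illustration, the effect of the alias (the exact alias strings generated by MODULE_ALIAS_CRYPTO() are recalled from memory and should be treated as an assumption, not part of this patch):

	MODULE_ALIAS_CRYPTO("pkcs1pad");

	/*
	 * This should make the module findable as "crypto-pkcs1pad", so
	 * that a request to instantiate e.g. pkcs1pad(rsa,sha256) can
	 * trigger modprobe to load the providing module instead of
	 * failing when it is not already loaded.
	 */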
Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d2e5e104..49756c6e 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -687,3 +687,5 @@ struct crypto_template rsa_pkcs1pad_tmpl = { .create = pkcs1pad_create, .module = THIS_MODULE, }; + +MODULE_ALIAS_CRYPTO("pkcs1pad"); -- cgit v1.2.3 From 1eb7dcd582b55be7c624cb31d55f9fc9fb62940b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 15:34:55 -0700 Subject: crypto: shash - eliminate indirect call for default import and export Most shash algorithms don't have custom ->import and ->export functions, resulting in the memcpy() based default being used. Yet, crypto_shash_import() and crypto_shash_export() still make an indirect call, which is expensive. Therefore, change how the default import and export are called to make it so that crypto_shash_import() and crypto_shash_export() don't do an indirect call in this case. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/crypto/shash.c b/crypto/shash.c index 15fee57c..52420c41 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -277,17 +277,34 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); -static int shash_default_export(struct shash_desc *desc, void *out) +int crypto_shash_export(struct shash_desc *desc, void *out) { - memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + + if (shash->export) + return shash->export(desc, out); + + memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm)); return 0; } +EXPORT_SYMBOL_GPL(crypto_shash_export); -static int shash_default_import(struct shash_desc *desc, const void *in) +int crypto_shash_import(struct shash_desc *desc, const void *in) { - memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + if (shash->import) + return shash->import(desc, in); + + memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); return 0; } +EXPORT_SYMBOL_GPL(crypto_shash_import); static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) @@ -666,15 +683,23 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_type = &crypto_shash_type; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + /* + * Handle missing optional functions. For each one we can either + * install a default here, or we can leave the pointer as NULL and check + * the pointer for NULL in crypto_shash_*(), avoiding an indirect call + * when the default behavior is desired. For ->finup and ->digest we + * install defaults, since for optimal performance algorithms should + * implement these anyway. On the other hand, for ->import and + * ->export the common case and best performance comes from the simple + * memcpy of the shash_desc_ctx, so when those pointers are NULL we + * leave them NULL and provide the memcpy with no indirect call. 
+ */ if (!alg->finup) alg->finup = shash_default_finup; if (!alg->digest) alg->digest = shash_default_digest; - if (!alg->export) { - alg->export = shash_default_export; - alg->import = shash_default_import; + if (!alg->export) alg->halg.statesize = alg->descsize; - } if (!alg->setkey) alg->setkey = shash_no_setkey; -- cgit v1.2.3 From 3c00ba3cceed8f654309a10dcd8297778d972cb6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:32 -0700 Subject: crypto: cbcmac - remove unnecessary alignment logic The cbcmac template is aligning a field in its desc context to the alignmask of its underlying 'cipher', at runtime. This is almost entirely pointless, since cbcmac is already using the cipher API functions that handle alignment themselves, and few ciphers set a nonzero alignmask anyway. Also, even without runtime alignment, an alignment of at least 4 bytes can be guaranteed. Thus, at best this code is optimizing for the rare case of ciphers that set an alignmask >= 7, at the cost of hurting the common cases. Therefore, remove the manual alignment code from cbcmac. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/crypto/ccm.c b/crypto/ccm.c index 7af89a5b..dd7aed63 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -56,6 +56,7 @@ struct cbcmac_tfm_ctx { struct cbcmac_desc_ctx { unsigned int len; + u8 dg[]; }; static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( @@ -785,10 +786,9 @@ static int crypto_cbcmac_digest_init(struct shash_desc *pdesc) { struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_digestsize(pdesc->tfm); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs; ctx->len = 0; - memset(dg, 0, bs); + memset(ctx->dg, 0, bs); return 0; } @@ -801,18 +801,17 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; while (len > 0) { unsigned int l = min(len, bs - ctx->len); - crypto_xor(dg + ctx->len, p, l); + crypto_xor(&ctx->dg[ctx->len], p, l); ctx->len +=l; len -= l; p += l; if (ctx->len == bs) { - crypto_cipher_encrypt_one(tfm, dg, dg); + crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); ctx->len = 0; } } @@ -827,12 +826,11 @@ static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out) struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); - u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; if (ctx->len) - crypto_cipher_encrypt_one(tfm, dg, dg); + crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg); - memcpy(out, dg, bs); + memcpy(out, ctx->dg, bs); return 0; } @@ -889,8 +887,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = 1; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx), - alg->cra_alignmask + 1) + + inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) + alg->cra_blocksize; inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx); -- cgit v1.2.3 From d7a3619daea9014f9e5b68ec4b3e791c7c05173a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:33 -0700 Subject: crypto: cmac - remove unnecessary alignment logic The cmac template is setting its alignmask to that of its 
underlying 'cipher'. Yet, it doesn't care itself about how its inputs and outputs are aligned, which is ostensibly the point of the alignmask. Instead, cmac actually just uses its alignmask itself to runtime-align certain fields in its tfm and desc contexts appropriately for its underlying cipher. That is almost entirely pointless too, though, since cmac is already using the cipher API functions that handle alignment themselves, and few ciphers set a nonzero alignmask anyway. Also, even without runtime alignment, an alignment of at least 4 bytes can be guaranteed. Thus, at best this code is optimizing for the rare case of ciphers that set an alignmask >= 7, at the cost of hurting the common cases. Therefore, this patch removes the manual alignment code from cmac and makes it stop setting an alignmask. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/cmac.c | 39 +++++++++++---------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/crypto/cmac.c b/crypto/cmac.c index fce6b0f5..c7aa3665 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -28,7 +28,7 @@ */ struct cmac_tfm_ctx { struct crypto_cipher *child; - u8 ctx[]; + __be64 consts[]; }; /* @@ -44,17 +44,15 @@ struct cmac_tfm_ctx { */ struct cmac_desc_ctx { unsigned int len; - u8 ctx[]; + u8 odds[]; }; static int crypto_cmac_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); unsigned int bs = crypto_shash_blocksize(parent); - __be64 *consts = PTR_ALIGN((void *)ctx->ctx, - (alignmask | (__alignof__(__be64) - 1)) + 1); + __be64 *consts = ctx->consts; u64 _const[2]; int i, err = 0; u8 msb_mask, gfmask; @@ -104,10 +102,9 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent, static int crypto_cmac_digest_init(struct shash_desc *pdesc) { - unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs; + u8 *prev = &ctx->odds[bs]; ctx->len = 0; memset(prev, 0, bs); @@ -119,12 +116,11 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; /* checking the data can fill the block */ @@ -165,14 +161,11 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p, static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent); struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *consts = PTR_ALIGN((void *)tctx->ctx, - (alignmask | (__alignof__(__be64) - 1)) + 1); - u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; unsigned int offset = 0; @@ -191,7 +184,7 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out) } crypto_xor(prev, odds, 
bs); - crypto_xor(prev, consts + offset, bs); + crypto_xor(prev, (const u8 *)tctx->consts + offset, bs); crypto_cipher_encrypt_one(tfm, out, prev); @@ -241,7 +234,6 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; - unsigned long alignmask; u32 mask; int err; @@ -273,23 +265,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alignmask = alg->cra_alignmask; - inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) + + alg->cra_blocksize * 2; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = - ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment()) - + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) - + alg->cra_blocksize * 2; - - inst->alg.base.cra_ctxsize = - ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment()) - + ((alignmask | (__alignof__(__be64) - 1)) & - ~(crypto_tfm_ctx_alignment() - 1)) - + alg->cra_blocksize * 2; - + inst->alg.descsize = sizeof(struct cmac_desc_ctx) + + alg->cra_blocksize * 2; inst->alg.init = crypto_cmac_digest_init; inst->alg.update = crypto_cmac_digest_update; inst->alg.final = crypto_cmac_digest_final; -- cgit v1.2.3 From b510a3de6eef2ca0d324f320df639441fb8526cb Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:34 -0700 Subject: crypto: hmac - remove unnecessary alignment logic The hmac template is setting its alignmask to that of its underlying unkeyed hash algorithm, and it is aligning the ipad and opad fields in its tfm context to that alignment. However, hmac does not actually need any sort of alignment itself, which makes this pointless except to keep the pads aligned to what the underlying algorithm prefers. But very few shash algorithms actually set an alignmask, and it is being removed from those remaining ones; also, after setkey, the pads are only passed to crypto_shash_import and crypto_shash_export which ignore the alignmask. Therefore, make the hmac template stop setting an alignmask and simply use natural alignment for ipad and opad. Note, this change also moves the pads from the beginning of the tfm context to the end, which makes much more sense; the variable-length fields should be at the end. 
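The same replacement pattern recurs across these alignmask removals: a runtime PTR_ALIGN() into an opaque byte array becomes a naturally aligned flexible array member, sized through cra_ctxsize or descsize. Condensed sketch using the hmac context as the example:

	struct hmac_ctx {
		struct crypto_shash *hash;
		/* Contains 'u8 ipad[ss];', then 'u8 opad[ss];' */
		u8 pads[];
	};

	/* Accessors need no PTR_ALIGN() any more: */
	u8 *ipad = &tctx->pads[0];
	u8 *opad = &tctx->pads[ss];

	/* The instance reserves room for both pads: */
	inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2);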
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/hmac.c | 56 ++++++++++++++++++++++---------------------------------- 1 file changed, 22 insertions(+), 34 deletions(-) diff --git a/crypto/hmac.c b/crypto/hmac.c index ea93f4c5..7cec25ff 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -24,31 +24,20 @@ struct hmac_ctx { struct crypto_shash *hash; + /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */ + u8 pads[]; }; -static inline void *align_ptr(void *p, unsigned int align) -{ - return (void *)ALIGN((unsigned long)p, align); -} - -static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) -{ - return align_ptr(crypto_shash_ctx_aligned(tfm) + - crypto_shash_statesize(tfm) * 2, - crypto_tfm_ctx_alignment()); -} - static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *ipad = crypto_shash_ctx_aligned(parent); - char *opad = ipad + ss; - struct hmac_ctx *ctx = align_ptr(opad + ss, - crypto_tfm_ctx_alignment()); - struct crypto_shash *hash = ctx->hash; + struct hmac_ctx *tctx = crypto_shash_ctx(parent); + struct crypto_shash *hash = tctx->hash; + u8 *ipad = &tctx->pads[0]; + u8 *opad = &tctx->pads[ss]; SHASH_DESC_ON_STACK(shash, hash); unsigned int i; @@ -94,16 +83,18 @@ static int hmac_export(struct shash_desc *pdesc, void *out) static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); - struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); - desc->tfm = ctx->hash; + desc->tfm = tctx->hash; return crypto_shash_import(desc, in); } static int hmac_init(struct shash_desc *pdesc) { - return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); + const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm); + + return hmac_import(pdesc, &tctx->pads[0]); } static int hmac_update(struct shash_desc *pdesc, @@ -119,7 +110,8 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out) struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + ss; + const struct hmac_ctx *tctx = crypto_shash_ctx(parent); + const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_final(desc, out) ?: @@ -134,7 +126,8 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data, struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + ss; + const struct hmac_ctx *tctx = crypto_shash_ctx(parent); + const u8 *opad = &tctx->pads[ss]; struct shash_desc *desc = shash_desc_ctx(pdesc); return crypto_shash_finup(desc, data, nbytes, out) ?: @@ -147,7 +140,7 @@ static int hmac_init_tfm(struct crypto_shash *parent) struct crypto_shash *hash; struct shash_instance *inst = shash_alg_instance(parent); struct crypto_shash_spawn *spawn = shash_instance_ctx(inst); - struct hmac_ctx *ctx = hmac_ctx(parent); + struct hmac_ctx *tctx = crypto_shash_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) @@ -156,14 +149,14 @@ static int hmac_init_tfm(struct crypto_shash *parent) parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); - ctx->hash = hash; + tctx->hash = hash; return 0; } static int hmac_clone_tfm(struct 
crypto_shash *dst, struct crypto_shash *src) { - struct hmac_ctx *sctx = hmac_ctx(src); - struct hmac_ctx *dctx = hmac_ctx(dst); + struct hmac_ctx *sctx = crypto_shash_ctx(src); + struct hmac_ctx *dctx = crypto_shash_ctx(dst); struct crypto_shash *hash; hash = crypto_clone_shash(sctx->hash); @@ -176,9 +169,9 @@ static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src) static void hmac_exit_tfm(struct crypto_shash *parent) { - struct hmac_ctx *ctx = hmac_ctx(parent); + struct hmac_ctx *tctx = crypto_shash_ctx(parent); - crypto_free_shash(ctx->hash); + crypto_free_shash(tctx->hash); } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) @@ -225,15 +218,10 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; + inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + (ss * 2); - ss = ALIGN(ss, alg->cra_alignmask + 1); inst->alg.digestsize = ds; inst->alg.statesize = ss; - - inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + - ALIGN(ss * 2, crypto_tfm_ctx_alignment()); - inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.final = hmac_final; -- cgit v1.2.3 From 46e921315febd3931065750ebbc64e5ced3c2f99 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:35 -0700 Subject: crypto: vmac - don't set alignmask The vmac template is setting its alignmask to that of its underlying 'cipher'. This doesn't actually accomplish anything useful, though, so stop doing it. (vmac_update() does have an alignment bug, where it assumes u64 alignment when it shouldn't, but that bug exists both before and after this patch.) This is a prerequisite for removing support for nonzero alignmasks from shash. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/vmac.c | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/vmac.c b/crypto/vmac.c index 4633b2dd..0a1d8efa 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -649,7 +649,6 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); inst->alg.base.cra_init = vmac_init_tfm; -- cgit v1.2.3 From 177bcadde839a36c00efc517d17e74ccae578e36 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:36 -0700 Subject: crypto: xcbc - remove unnecessary alignment logic The xcbc template is setting its alignmask to that of its underlying 'cipher'. Yet, it doesn't care itself about how its inputs and outputs are aligned, which is ostensibly the point of the alignmask. Instead, xcbc actually just uses its alignmask itself to runtime-align certain fields in its tfm and desc contexts appropriately for its underlying cipher. That is almost entirely pointless too, though, since xcbc is already using the cipher API functions that handle alignment themselves, and few ciphers set a nonzero alignmask anyway. Also, even without runtime alignment, an alignment of at least 4 bytes can be guaranteed. Thus, at best this code is optimizing for the rare case of ciphers that set an alignmask >= 7, at the cost of hurting the common cases. Therefore, this patch removes the manual alignment code from xcbc and makes it stop setting an alignmask. 
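For reference, the runtime-alignment idiom being deleted is equivalent to the standalone sketch below (PTR_ALIGN reimplemented for illustration, struct name a stand-in). With the old minimum alignmask of 3, a normally allocated context is already at least 4-byte aligned, so the adjustment was a no-op and direct flexible-array access is safe:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's PTR_ALIGN() */
#define PTR_ALIGN_SKETCH(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

struct xcbc_tfm_ctx_sketch {
	void *child;		/* placeholder for the cipher handle */
	unsigned char consts[];	/* two cipher blocks worth of constants */
};

int main(void)
{
	unsigned long alignmask = 3;	/* the old "alg->cra_alignmask | 3" */
	struct xcbc_tfm_ctx_sketch *ctx = malloc(sizeof(*ctx) + 16 * 2);

	if (!ctx)
		return 1;

	/* Old style: align the start of the trailing buffer at runtime. */
	unsigned char *aligned = PTR_ALIGN_SKETCH(ctx->consts, alignmask + 1);

	/* malloc() already returns suitably aligned storage, so nothing moves. */
	assert(aligned == ctx->consts);
	printf("consts at %p, aligned view at %p\n",
	       (void *)ctx->consts, (void *)aligned);
	free(ctx);
	return 0;
}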
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xcbc.c | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 6074c5c1..a9e8ee9c 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -27,7 +27,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, */ struct xcbc_tfm_ctx { struct crypto_cipher *child; - u8 ctx[]; + u8 consts[]; }; /* @@ -43,7 +43,7 @@ struct xcbc_tfm_ctx { */ struct xcbc_desc_ctx { unsigned int len; - u8 ctx[]; + u8 odds[]; }; #define XCBC_BLOCKSIZE 16 @@ -51,9 +51,8 @@ struct xcbc_desc_ctx { static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); - u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *consts = ctx->consts; int err = 0; u8 key1[XCBC_BLOCKSIZE]; int bs = sizeof(key1); @@ -71,10 +70,9 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); - u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; + u8 *prev = &ctx->odds[bs]; ctx->len = 0; memset(prev, 0, bs); @@ -86,12 +84,11 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; /* checking the data can fill the block */ @@ -132,13 +129,11 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - unsigned long alignmask = crypto_shash_alignmask(parent); struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); - u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *odds = ctx->odds; u8 *prev = odds + bs; unsigned int offset = 0; @@ -157,7 +152,7 @@ static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) } crypto_xor(prev, odds, bs); - crypto_xor(prev, consts + offset, bs); + crypto_xor(prev, &tctx->consts[offset], bs); crypto_cipher_encrypt_one(tfm, out, prev); @@ -191,7 +186,6 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; - unsigned long alignmask; u32 mask; int err; @@ -218,21 +212,15 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - alignmask = alg->cra_alignmask | 3; - inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) + + alg->cra_blocksize * 2; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.descsize = 
ALIGN(sizeof(struct xcbc_desc_ctx), - crypto_tfm_ctx_alignment()) + - (alignmask & - ~(crypto_tfm_ctx_alignment() - 1)) + + inst->alg.descsize = sizeof(struct xcbc_desc_ctx) + alg->cra_blocksize * 2; - inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), - alignmask + 1) + - alg->cra_blocksize * 2; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; -- cgit v1.2.3 From c4221113bcc6709e52df1599cd846186b067f091 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:37 -0700 Subject: crypto: shash - remove support for nonzero alignmask Currently, the shash API checks the alignment of all message, key, and digest buffers against the algorithm's declared alignmask, and for any unaligned buffers it falls back to manually aligned temporary buffers. This is virtually useless, however. In the case of the message buffer, cryptographic hash functions internally operate on fixed-size blocks, so implementations end up needing to deal with byte-aligned data anyway because the length(s) passed to ->update might not be divisible by the block size. Word-alignment of the message can theoretically be helpful for CRCs, like what was being done in crc32c-sparc64. But in practice it's better for the algorithms to use unaligned accesses or align the message themselves. A similar argument applies to the key and digest. In any case, no shash algorithms actually set a nonzero alignmask anymore. Therefore, remove support for it from shash. The benefit is that all the code to handle "misaligned" buffers in the shash API goes away, reducing the overhead of the shash API. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 128 ++++----------------------------------------------------- 1 file changed, 8 insertions(+), 120 deletions(-) diff --git a/crypto/shash.c b/crypto/shash.c index 52420c41..409b33f9 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -10,15 +10,12 @@ #include #include #include -#include #include #include #include #include "hash.h" -#define MAX_SHASH_ALIGNMASK 63 - static const struct crypto_type crypto_shash_type; static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) @@ -38,27 +35,6 @@ int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(shash_no_setkey); -static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) -{ - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); - unsigned long absize; - u8 *buffer, *alignbuffer; - int err; - - absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); - buffer = kmalloc(absize, GFP_ATOMIC); - if (!buffer) - return -ENOMEM; - - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - err = shash->setkey(tfm, alignbuffer, keylen); - kfree_sensitive(buffer); - return err; -} - static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) { if (crypto_shash_alg_needs_key(alg)) @@ -69,14 +45,9 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; - if ((unsigned long)key & alignmask) - err = shash_setkey_unaligned(tfm, key, keylen); - else - err = shash->setkey(tfm, key, keylen); - + err = shash->setkey(tfm, key, keylen); if (unlikely(err)) { shash_set_needkey(tfm, shash); return err; @@ -87,110 +58,35 @@ int 
crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_shash_setkey); -static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); - unsigned int unaligned_len = alignmask + 1 - - ((unsigned long)data & alignmask); - /* - * We cannot count on __aligned() working for large values: - * https://patchwork.kernel.org/patch/9507697/ - */ - u8 ubuf[MAX_SHASH_ALIGNMASK * 2]; - u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); - int err; - - if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf))) - return -EINVAL; - - if (unaligned_len > len) - unaligned_len = len; - - memcpy(buf, data, unaligned_len); - err = shash->update(desc, buf, unaligned_len); - memset(buf, 0, unaligned_len); - - return err ?: - shash->update(desc, data + unaligned_len, len - unaligned_len); -} - int crypto_shash_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + struct shash_alg *shash = crypto_shash_alg(desc->tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) atomic64_add(len, &shash_get_stat(shash)->hash_tlen); - if ((unsigned long)data & alignmask) - err = shash_update_unaligned(desc, data, len); - else - err = shash->update(desc, data, len); + err = shash->update(desc, data, len); return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_update); -static int shash_final_unaligned(struct shash_desc *desc, u8 *out) -{ - struct crypto_shash *tfm = desc->tfm; - unsigned long alignmask = crypto_shash_alignmask(tfm); - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned int ds = crypto_shash_digestsize(tfm); - /* - * We cannot count on __aligned() working for large values: - * https://patchwork.kernel.org/patch/9507697/ - */ - u8 ubuf[MAX_SHASH_ALIGNMASK + HASH_MAX_DIGESTSIZE]; - u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); - int err; - - if (WARN_ON(buf + ds > ubuf + sizeof(ubuf))) - return -EINVAL; - - err = shash->final(desc, buf); - if (err) - goto out; - - memcpy(out, buf, ds); - -out: - memset(buf, 0, ds); - return err; -} - int crypto_shash_final(struct shash_desc *desc, u8 *out) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); + struct shash_alg *shash = crypto_shash_alg(desc->tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) atomic64_inc(&shash_get_stat(shash)->hash_cnt); - if ((unsigned long)out & alignmask) - err = shash_final_unaligned(desc, out); - else - err = shash->final(desc, out); + err = shash->final(desc, out); return crypto_shash_errstat(shash, err); } EXPORT_SYMBOL_GPL(crypto_shash_final); -static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return shash_update_unaligned(desc, data, len) ?: - shash_final_unaligned(desc, out); -} - static int shash_default_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -205,7 +101,6 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, { struct crypto_shash *tfm = desc->tfm; struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { @@ -215,11 +110,7 @@ int 
crypto_shash_finup(struct shash_desc *desc, const u8 *data, atomic64_add(len, &istat->hash_tlen); } - if (((unsigned long)data | (unsigned long)out) & alignmask) - err = shash_finup_unaligned(desc, data, len, out); - else - err = shash->finup(desc, data, len, out); - + err = shash->finup(desc, data, len, out); return crypto_shash_errstat(shash, err); } @@ -239,7 +130,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, { struct crypto_shash *tfm = desc->tfm; struct shash_alg *shash = crypto_shash_alg(tfm); - unsigned long alignmask = crypto_shash_alignmask(tfm); int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { @@ -251,9 +141,6 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) err = -ENOKEY; - else if (((unsigned long)data | (unsigned long)out) & alignmask) - err = shash->init(desc) ?: - shash_finup_unaligned(desc, data, len, out); else err = shash->digest(desc, data, len, out); @@ -670,7 +557,8 @@ static int shash_prepare_alg(struct shash_alg *alg) if (alg->descsize > HASH_MAX_DESCSIZE) return -EINVAL; - if (base->cra_alignmask > MAX_SHASH_ALIGNMASK) + /* alignmask is not useful for shash, so it is not supported. */ + if (base->cra_alignmask) return -EINVAL; if ((alg->export && !alg->import) || (alg->import && !alg->export)) -- cgit v1.2.3 From 3d76b133d77300a221fc515aafa441a20aa74e33 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:39 -0700 Subject: crypto: drbg - stop checking crypto_shash_alignmask Now that the shash algorithm type does not support nonzero alignmasks, crypto_shash_alignmask() always returns 0 and will be removed. In preparation for this, stop checking crypto_shash_alignmask() in drbg. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/drbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index ff4ebbc6..e01f8c77 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1698,7 +1698,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg) sdesc->shash.tfm = tfm; drbg->priv_data = sdesc; - return crypto_shash_alignmask(tfm); + return 0; } static int drbg_fini_hash_kernel(struct drbg_state *drbg) -- cgit v1.2.3 From 4d5e826cf1d0aa75049f86e7f344c81e7e9085f4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:40 -0700 Subject: crypto: testmgr - stop checking crypto_shash_alignmask Now that the shash algorithm type does not support nonzero alignmasks, crypto_shash_alignmask() always returns 0 and will be removed. In preparation for this, stop checking crypto_shash_alignmask() in testmgr. 
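The fallback removed from shash above amounts to the pattern in this standalone sketch (malloc and memset standing in for kmalloc and kfree_sensitive, and the size arithmetic simplified): allocate an over-sized buffer, bump the pointer up to the alignmask boundary, copy the key, and only then call setkey. With the alignmask now pinned to zero, that copy is pure overhead, which is the saving these patches are after:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy setkey that just reports the buffer it was handed. */
static int toy_setkey(const unsigned char *key, unsigned int keylen)
{
	printf("setkey called with %u bytes at %p\n", keylen, (const void *)key);
	return 0;
}

/* Sketch of the removed "copy into an aligned temporary buffer" fallback. */
static int setkey_via_aligned_copy(const unsigned char *key,
				   unsigned int keylen, unsigned long alignmask)
{
	size_t absize = keylen + alignmask;
	unsigned char *buffer = malloc(absize ? absize : 1);
	unsigned char *alignbuffer;
	int err;

	if (!buffer)
		return -1;

	alignbuffer = (unsigned char *)
		(((uintptr_t)buffer + alignmask) & ~(uintptr_t)alignmask);
	memcpy(alignbuffer, key, keylen);
	err = toy_setkey(alignbuffer, keylen);
	memset(buffer, 0, absize);	/* stand-in for kfree_sensitive() */
	free(buffer);
	return err;
}

int main(void)
{
	unsigned char key[20] = { 0 };

	/* Old world: a nonzero alignmask forces the copy. */
	setkey_via_aligned_copy(key, sizeof(key), 7);
	/* New world: alignmask is always 0, so the key is used in place. */
	toy_setkey(key, sizeof(key));
	return 0;
}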
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 54135c76..48a0929c 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1275,7 +1275,6 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, u8 *hashstate) { struct crypto_shash *tfm = desc->tfm; - const unsigned int alignmask = crypto_shash_alignmask(tfm); const unsigned int digestsize = crypto_shash_digestsize(tfm); const unsigned int statesize = crypto_shash_statesize(tfm); const char *driver = crypto_shash_driver_name(tfm); @@ -1287,7 +1286,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize, - cfg, alignmask); + cfg, 0); if (err) { if (err == vec->setkey_error) return 0; @@ -1304,7 +1303,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, } /* Build the scatterlist for the source data */ - err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); + err = build_hash_sglist(tsgl, vec, cfg, 0, divs); if (err) { pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", driver, vec_name, cfg->name); -- cgit v1.2.3 From 649b5691439de493d3fe58c9e5af39270a1c93a7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:41 -0700 Subject: crypto: adiantum - stop using alignmask of shash_alg Now that the shash algorithm type does not support nonzero alignmasks, shash_alg::base.cra_alignmask is always 0, so OR-ing it into another value is a no-op. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/adiantum.c b/crypto/adiantum.c index aa0f8b3b..9ff3376f 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -590,8 +590,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); - inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | - hash_alg->base.cra_alignmask; + inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask; /* * The block cipher is only invoked once per message, so for long * messages (e.g. sectors for disk encryption) its performance doesn't -- cgit v1.2.3 From 456364410716889832798a54d33efb459e78e20e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 18 Oct 2023 22:53:42 -0700 Subject: crypto: hctr2 - stop using alignmask of shash_alg Now that the shash algorithm type does not support nonzero alignmasks, shash_alg::base.cra_alignmask is always 0, so OR-ing it into another value is a no-op. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/hctr2.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/hctr2.c b/crypto/hctr2.c index 653fde72..87e7547a 100644 --- a/crypto/hctr2.c +++ b/crypto/hctr2.c @@ -485,8 +485,7 @@ static int hctr2_create_common(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) + polyval_alg->statesize * 2; - inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask | - polyval_alg->base.cra_alignmask; + inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask; /* * The hash function is called twice, so it is weighted higher than the * xctr and blockcipher. 
-- cgit v1.2.3 From 8ef4ed7514314378224e993fa44ced0b6f9a3e19 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Thu, 19 Oct 2023 09:40:42 +0200 Subject: crypto: jitter - use permanent health test storage The health test result in the current code is only given for the currently processed raw time stamp. This implies to react on the health test error, the result must be checked after each raw time stamp being processed. To avoid this constant checking requirement, any health test error is recorded and stored to be analyzed at a later time, if needed. This change ensures that the power-up test catches any health test error. Without that patch, the power-up health test result is not enforced. The introduced changes are already in use with the user space version of the Jitter RNG. Fixes: f0be2a5192f0 ("jitter - add RCT/APT support for different OSRs") Reported-by: Joachim Vandersmissen Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy.c | 125 +++++++++++++++++++++++++++++-------------------- 1 file changed, 74 insertions(+), 51 deletions(-) diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 09c9db90..26a9048b 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -100,6 +100,8 @@ struct rand_data { unsigned int apt_observations; /* Number of collected observations */ unsigned int apt_count; /* APT counter */ unsigned int apt_base; /* APT base reference */ + unsigned int health_failure; /* Record health failure */ + unsigned int apt_base_set:1; /* APT base reference set? */ }; @@ -121,6 +123,13 @@ struct rand_data { #define JENT_EHASH 11 /* Hash self test failed */ #define JENT_EMEM 12 /* Can't allocate memory for initialization */ +#define JENT_RCT_FAILURE 1 /* Failure in RCT health test. */ +#define JENT_APT_FAILURE 2 /* Failure in APT health test. */ +#define JENT_PERMANENT_FAILURE_SHIFT 16 +#define JENT_PERMANENT_FAILURE(x) (x << JENT_PERMANENT_FAILURE_SHIFT) +#define JENT_RCT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE) +#define JENT_APT_FAILURE_PERMANENT JENT_PERMANENT_FAILURE(JENT_APT_FAILURE) + /* * The output n bits can receive more than n bits of min entropy, of course, * but the fixed output of the conditioning function can only asymptotically @@ -215,26 +224,22 @@ static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) return; } - if (delta_masked == ec->apt_base) + if (delta_masked == ec->apt_base) { ec->apt_count++; + /* Note, ec->apt_count starts with one. */ + if (ec->apt_count >= ec->apt_cutoff_permanent) + ec->health_failure |= JENT_APT_FAILURE_PERMANENT; + else if (ec->apt_count >= ec->apt_cutoff) + ec->health_failure |= JENT_APT_FAILURE; + } + ec->apt_observations++; if (ec->apt_observations >= JENT_APT_WINDOW_SIZE) jent_apt_reset(ec, delta_masked); } -/* APT health test failure detection */ -static int jent_apt_permanent_failure(struct rand_data *ec) -{ - return (ec->apt_count >= ec->apt_cutoff_permanent) ? 1 : 0; -} - -static int jent_apt_failure(struct rand_data *ec) -{ - return (ec->apt_count >= ec->apt_cutoff) ? 1 : 0; -} - /*************************************************************************** * Stuck Test and its use as Repetition Count Test * @@ -261,6 +266,30 @@ static void jent_rct_insert(struct rand_data *ec, int stuck) { if (stuck) { ec->rct_count++; + + /* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. 
+ * In addition, we require an entropy value H of 1/osr as this + * is the minimum entropy required to provide full entropy. + * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr + * deltas for inserting them into the entropy pool which should + * then have (close to) DATA_SIZE_BITS bits of entropy in the + * conditioned output. + * + * Note, ec->rct_count (which equals to value B in the pseudo + * code of SP800-90B section 4.4.1) starts with zero. Hence + * we need to subtract one from the cutoff value as calculated + * following SP800-90B. Thus C = ceil(-log_2(alpha)/H) = 30*osr + * or 60*osr. + */ + if ((unsigned int)ec->rct_count >= (60 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure |= JENT_RCT_FAILURE_PERMANENT; + } else if ((unsigned int)ec->rct_count >= (30 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure |= JENT_RCT_FAILURE; + } } else { /* Reset RCT */ ec->rct_count = 0; @@ -316,38 +345,24 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) } /* - * The cutoff value is based on the following consideration: - * alpha = 2^-30 or 2^-60 as recommended in SP800-90B. - * In addition, we require an entropy value H of 1/osr as this is the minimum - * entropy required to provide full entropy. - * Note, we collect (DATA_SIZE_BITS + ENTROPY_SAFETY_FACTOR)*osr deltas for - * inserting them into the entropy pool which should then have (close to) - * DATA_SIZE_BITS bits of entropy in the conditioned output. - * - * Note, ec->rct_count (which equals to value B in the pseudo code of SP800-90B - * section 4.4.1) starts with zero. Hence we need to subtract one from the - * cutoff value as calculated following SP800-90B. Thus - * C = ceil(-log_2(alpha)/H) = 30*osr or 60*osr. + * Report any health test failures + * + * @ec [in] Reference to entropy collector + * + * @return a bitmask indicating which tests failed + * 0 No health test failure + * 1 RCT failure + * 2 APT failure + * 1<rct_count >= (60 * ec->osr)) ? 1 : 0; -} + /* Test is only enabled in FIPS mode */ + if (!fips_enabled) + return 0; -static int jent_rct_failure(struct rand_data *ec) -{ - return (ec->rct_count >= (30 * ec->osr)) ? 1 : 0; -} - -/* Report of health test failures */ -static int jent_health_failure(struct rand_data *ec) -{ - return jent_rct_failure(ec) | jent_apt_failure(ec); -} - -static int jent_permanent_health_failure(struct rand_data *ec) -{ - return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec); + return ec->health_failure; } /*************************************************************************** @@ -594,11 +609,12 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, return -1; while (len > 0) { - unsigned int tocopy; + unsigned int tocopy, health_test_result; jent_gen_entropy(ec); - if (jent_permanent_health_failure(ec)) { + health_test_result = jent_health_failure(ec); + if (health_test_result > JENT_PERMANENT_FAILURE_SHIFT) { /* * At this point, the Jitter RNG instance is considered * as a failed instance. There is no rerun of the @@ -606,13 +622,18 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, * is assumed to not further use this instance. */ return -3; - } else if (jent_health_failure(ec)) { + } else if (health_test_result) { /* * Perform startup health tests and return permanent * error if it fails. 
*/ - if (jent_entropy_init(0, 0, NULL, ec)) + if (jent_entropy_init(0, 0, NULL, ec)) { + /* Mark the permanent error */ + ec->health_failure &= + JENT_RCT_FAILURE_PERMANENT | + JENT_APT_FAILURE_PERMANENT; return -3; + } return -2; } @@ -695,6 +716,7 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, */ struct rand_data *ec = p_ec; int i, time_backwards = 0, ret = 0, ec_free = 0; + unsigned int health_test_result; if (!ec) { ec = jent_entropy_collector_alloc(osr, flags, hash_state); @@ -708,6 +730,9 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, ec->apt_base_set = 0; /* Reset the RCT */ ec->rct_count = 0; + /* Reset intermittent, leave permanent health test result */ + ec->health_failure &= (~JENT_RCT_FAILURE); + ec->health_failure &= (~JENT_APT_FAILURE); } /* We could perform statistical tests here, but the problem is @@ -788,12 +813,10 @@ int jent_entropy_init(unsigned int osr, unsigned int flags, void *hash_state, } /* Did we encounter a health test failure? */ - if (jent_rct_failure(ec)) { - ret = JENT_ERCT; - goto out; - } - if (jent_apt_failure(ec)) { - ret = JENT_EHEALTH; + health_test_result = jent_health_failure(ec); + if (health_test_result) { + ret = (health_test_result & JENT_RCT_FAILURE) ? JENT_ERCT : + JENT_EHEALTH; goto out; } -- cgit v1.2.3 From 2f3a2e84cc28e4017d33a376969529fb23d8abcd Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 21 Oct 2023 13:23:44 +0200 Subject: treewide: Add SPDX identifier to IETF ASN.1 modules Per section 4.c. of the IETF Trust Legal Provisions, "Code Components" in IETF Documents are licensed on the terms of the BSD-3-Clause license: https://trustee.ietf.org/documents/trust-legal-provisions/tlp-5/ The term "Code Components" specifically includes ASN.1 modules: https://trustee.ietf.org/documents/trust-legal-provisions/code-components-list-3/ Add an SPDX identifier as well as a copyright notice pursuant to section 6.d. of the Trust Legal Provisions to all ASN.1 modules in the tree which are derived from IETF Documents. Section 4.d. of the Trust Legal Provisions requests that each Code Component identify the RFC from which it is taken, so link that RFC in every ASN.1 module. 
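Returning to the Jitter RNG health-test change above, the failure encoding can be pictured with the standalone sketch below: intermittent RCT/APT failures live in the low bits, their permanent counterparts are the same bits shifted up by 16, and one comparison against the shift distinguishes the two classes. The constant names mirror the patch; the surrounding program is illustrative only:

#include <stdio.h>

#define JENT_RCT_FAILURE		1	/* intermittent RCT failure */
#define JENT_APT_FAILURE		2	/* intermittent APT failure */
#define JENT_PERMANENT_FAILURE_SHIFT	16
#define JENT_PERMANENT_FAILURE(x)	((x) << JENT_PERMANENT_FAILURE_SHIFT)
#define JENT_RCT_FAILURE_PERMANENT	JENT_PERMANENT_FAILURE(JENT_RCT_FAILURE)
#define JENT_APT_FAILURE_PERMANENT	JENT_PERMANENT_FAILURE(JENT_APT_FAILURE)

static void classify(unsigned int health_failure)
{
	if (health_failure > JENT_PERMANENT_FAILURE_SHIFT)
		printf("0x%05x: permanent failure, instance is dead\n",
		       health_failure);
	else if (health_failure)
		printf("0x%05x: intermittent failure, rerun startup tests\n",
		       health_failure);
	else
		printf("0x%05x: healthy\n", health_failure);
}

int main(void)
{
	classify(0);
	classify(JENT_APT_FAILURE);
	classify(JENT_RCT_FAILURE | JENT_APT_FAILURE);
	classify(JENT_RCT_FAILURE_PERMANENT);
	classify(JENT_APT_FAILURE | JENT_RCT_FAILURE_PERMANENT);
	return 0;
}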
Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/pkcs7.asn1 | 7 +++++++ crypto/asymmetric_keys/pkcs8.asn1 | 6 ++++++ crypto/asymmetric_keys/x509.asn1 | 7 +++++++ crypto/asymmetric_keys/x509_akid.asn1 | 5 +++++ crypto/rsaprivkey.asn1 | 7 +++++++ crypto/rsapubkey.asn1 | 7 +++++++ 6 files changed, 39 insertions(+) diff --git a/crypto/asymmetric_keys/pkcs7.asn1 b/crypto/asymmetric_keys/pkcs7.asn1 index 1eca740b..28e1f4a4 100644 --- a/crypto/asymmetric_keys/pkcs7.asn1 +++ b/crypto/asymmetric_keys/pkcs7.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2009 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5652#section-3 + PKCS7ContentInfo ::= SEQUENCE { contentType ContentType ({ pkcs7_check_content_type }), content [0] EXPLICIT SignedData OPTIONAL diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1 index 702c41a3..a2a8af26 100644 --- a/crypto/asymmetric_keys/pkcs8.asn1 +++ b/crypto/asymmetric_keys/pkcs8.asn1 @@ -1,3 +1,9 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2010 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5958#section-2 -- -- This is the unencrypted variant -- diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1 index 92d59c32..feb9573c 100644 --- a/crypto/asymmetric_keys/x509.asn1 +++ b/crypto/asymmetric_keys/x509.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2008 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc5280#section-4 + Certificate ::= SEQUENCE { tbsCertificate TBSCertificate ({ x509_note_tbs_certificate }), signatureAlgorithm AlgorithmIdentifier, diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1 index c7818ff4..0f8355cf 100644 --- a/crypto/asymmetric_keys/x509_akid.asn1 +++ b/crypto/asymmetric_keys/x509_akid.asn1 @@ -1,3 +1,8 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2008 IETF Trust and the persons identified as authors +-- of the code +-- -- X.509 AuthorityKeyIdentifier -- rfc5280 section 4.2.1.1 diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1 index 4ce06758..76865124 100644 --- a/crypto/rsaprivkey.asn1 +++ b/crypto/rsaprivkey.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2016 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.2 + RsaPrivKey ::= SEQUENCE { version INTEGER, n INTEGER ({ rsa_get_n }), diff --git a/crypto/rsapubkey.asn1 b/crypto/rsapubkey.asn1 index 725498e4..0d32b1ca 100644 --- a/crypto/rsapubkey.asn1 +++ b/crypto/rsapubkey.asn1 @@ -1,3 +1,10 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- +-- Copyright (C) 2016 IETF Trust and the persons identified as authors +-- of the code +-- +-- https://www.rfc-editor.org/rfc/rfc8017#appendix-A.1.1 + RsaPubKey ::= SEQUENCE { n INTEGER ({ rsa_get_n }), e INTEGER ({ rsa_get_e }) -- cgit v1.2.3 From b2d9bdc93c3e2f61638f1e050126cf963e6066a5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:44 -0700 Subject: crypto: ahash - remove support for nonzero alignmask Currently, the ahash API checks the alignment of all key and result buffers against the algorithm's declared alignmask, and for any unaligned buffers it falls back to manually aligned temporary 
buffers. This is virtually useless, however. First, since it does not apply to the message, its effect is much more limited than e.g. is the case for the alignmask for "skcipher". Second, the key and result buffers are given as virtual addresses and cannot (in general) be DMA'ed into, so drivers end up having to copy to/from them in software anyway. As a result it's easy to use memcpy() or the unaligned access helpers. The crypto_hash_walk_*() helper functions do use the alignmask to align the message. But with one exception those are only used for shash algorithms being exposed via the ahash API, not for native ahashes, and aligning the message is not required in this case, especially now that alignmask support has been removed from shash. The exception is the n2_core driver, which doesn't set an alignmask. In any case, no ahash algorithms actually set a nonzero alignmask anymore. Therefore, remove support for it from ahash. The benefit is that all the code to handle "misaligned" buffers in the ahash API goes away, reducing the overhead of the ahash API. This follows the same change that was made to shash. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 117 ++++----------------------------------------------------- crypto/shash.c | 8 ++-- 2 files changed, 12 insertions(+), 113 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 213bb3e9..744fd3b8 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -35,21 +35,12 @@ struct ahash_request_priv { static int hash_walk_next(struct crypto_hash_walk *walk) { - unsigned int alignmask = walk->alignmask; unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); walk->data = kmap_local_page(walk->pg); walk->data += offset; - - if (offset & alignmask) { - unsigned int unaligned = alignmask + 1 - (offset & alignmask); - - if (nbytes > unaligned) - nbytes = unaligned; - } - walk->entrylen -= nbytes; return nbytes; } @@ -73,23 +64,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { - unsigned int alignmask = walk->alignmask; - walk->data -= walk->offset; - if (walk->entrylen && (walk->offset & alignmask) && !err) { - unsigned int nbytes; - - walk->offset = ALIGN(walk->offset, alignmask + 1); - nbytes = min(walk->entrylen, - (unsigned int)(PAGE_SIZE - walk->offset)); - if (nbytes) { - walk->entrylen -= nbytes; - walk->data += walk->offset; - return nbytes; - } - } - kunmap_local(walk->data); crypto_yield(walk->flags); @@ -121,7 +97,6 @@ int crypto_hash_walk_first(struct ahash_request *req, return 0; } - walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); walk->sg = req->src; walk->flags = req->base.flags; @@ -129,26 +104,6 @@ int crypto_hash_walk_first(struct ahash_request *req, } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); -static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int ret; - u8 *buffer, *alignbuffer; - unsigned long absize; - - absize = keylen + alignmask; - buffer = kmalloc(absize, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - memcpy(alignbuffer, key, keylen); - ret = tfm->setkey(tfm, alignbuffer, keylen); - kfree_sensitive(buffer); - return ret; -} - static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -167,13 +122,7 @@ static void 
ahash_set_needkey(struct crypto_ahash *tfm) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int err; - - if ((unsigned long)key & alignmask) - err = ahash_setkey_unaligned(tfm, key, keylen); - else - err = tfm->setkey(tfm, key, keylen); + int err = tfm->setkey(tfm, key, keylen); if (unlikely(err)) { ahash_set_needkey(tfm); @@ -189,7 +138,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, bool has_state) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); unsigned int ds = crypto_ahash_digestsize(tfm); struct ahash_request *subreq; unsigned int subreq_size; @@ -203,7 +151,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); subreq_size += reqsize; subreq_size += ds; - subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); flags = ahash_request_flags(req); gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; @@ -215,7 +162,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, ahash_request_set_callback(subreq, flags, cplt, req); result = (u8 *)(subreq + 1) + reqsize; - result = PTR_ALIGN(result, alignmask + 1); ahash_request_set_crypt(subreq, req->src, result, req->nbytes); @@ -251,56 +197,6 @@ static void ahash_restore_req(struct ahash_request *req, int err) kfree_sensitive(subreq); } -static void ahash_op_unaligned_done(void *data, int err) -{ - struct ahash_request *areq = data; - - if (err == -EINPROGRESS) - goto out; - - /* First copy req->result into req->priv.result */ - ahash_restore_req(areq, err); - -out: - /* Complete the ORIGINAL request. 
*/ - ahash_request_complete(areq, err); -} - -static int ahash_op_unaligned(struct ahash_request *req, - int (*op)(struct ahash_request *), - bool has_state) -{ - int err; - - err = ahash_save_req(req, ahash_op_unaligned_done, has_state); - if (err) - return err; - - err = op(req->priv); - if (err == -EINPROGRESS || err == -EBUSY) - return err; - - ahash_restore_req(req, err); - - return err; -} - -static int crypto_ahash_op(struct ahash_request *req, - int (*op)(struct ahash_request *), - bool has_state) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned long alignmask = crypto_ahash_alignmask(tfm); - int err; - - if ((unsigned long)req->result & alignmask) - err = ahash_op_unaligned(req, op, has_state); - else - err = op(req); - - return crypto_hash_errstat(crypto_hash_alg_common(tfm), err); -} - int crypto_ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -309,7 +205,7 @@ int crypto_ahash_final(struct ahash_request *req) if (IS_ENABLED(CONFIG_CRYPTO_STATS)) atomic64_inc(&hash_get_stat(alg)->hash_cnt); - return crypto_ahash_op(req, tfm->final, true); + return crypto_hash_errstat(alg, tfm->final(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_final); @@ -325,7 +221,7 @@ int crypto_ahash_finup(struct ahash_request *req) atomic64_add(req->nbytes, &istat->hash_tlen); } - return crypto_ahash_op(req, tfm->finup, true); + return crypto_hash_errstat(alg, tfm->finup(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); @@ -333,6 +229,7 @@ int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + int err; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { struct crypto_istat_hash *istat = hash_get_stat(alg); @@ -342,9 +239,11 @@ int crypto_ahash_digest(struct ahash_request *req) } if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return crypto_hash_errstat(alg, -ENOKEY); + err = -ENOKEY; + else + err = tfm->digest(req); - return crypto_ahash_op(req, tfm->digest, false); + return crypto_hash_errstat(alg, err); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); diff --git a/crypto/shash.c b/crypto/shash.c index 409b33f9..359702c2 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -541,6 +541,10 @@ int hash_prepare_alg(struct hash_alg_common *alg) if (alg->digestsize > HASH_MAX_DIGESTSIZE) return -EINVAL; + /* alignmask is not useful for hashes, so it is not supported. */ + if (base->cra_alignmask) + return -EINVAL; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; if (IS_ENABLED(CONFIG_CRYPTO_STATS)) @@ -557,10 +561,6 @@ static int shash_prepare_alg(struct shash_alg *alg) if (alg->descsize > HASH_MAX_DESCSIZE) return -EINVAL; - /* alignmask is not useful for shash, so it is not supported. */ - if (base->cra_alignmask) - return -EINVAL; - if ((alg->export && !alg->import) || (alg->import && !alg->export)) return -EINVAL; -- cgit v1.2.3 From c9c77fc2434ae062ff770e99fef0ed11d237b840 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:45 -0700 Subject: crypto: authenc - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify the code in authenc accordingly. 
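A quick standalone sketch of the arithmetic behind this simplification (digest sizes below are made-up examples): once the hash alignmask is guaranteed to be zero, the old ALIGN-based offset for the scratch area degenerates to exactly twice the digest size, so the rounding can be dropped:

#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN() */
#define ALIGN_SKETCH(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int digestsizes[] = { 20, 32, 64 };	/* e.g. sha1/sha256/sha512 */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int ds = digestsizes[i];
		unsigned long alignmask = 0;	/* now guaranteed for ahash/shash */
		unsigned long old_off = ALIGN_SKETCH(2 * ds + alignmask,
						     alignmask + 1);
		unsigned int new_off = 2 * ds;

		printf("ds=%2u: old reqoff=%lu, new reqoff=%u\n",
		       ds, old_off, new_off);
	}
	return 0;
}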
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/authenc.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/crypto/authenc.c b/crypto/authenc.c index fa896ab1..3aaf3ab4 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -141,9 +141,6 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - ahash_request_set_tfm(ahreq, auth); ahash_request_set_crypt(ahreq, req->dst, hash, req->assoclen + req->cryptlen); @@ -286,9 +283,6 @@ static int crypto_authenc_decrypt(struct aead_request *req) u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), - crypto_ahash_alignmask(auth) + 1); - ahash_request_set_tfm(ahreq, auth); ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen - authsize); @@ -400,8 +394,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, goto err_free_inst; enc = crypto_spawn_skcipher_alg_common(&ctx->enc); - ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, - auth_base->cra_alignmask + 1); + ctx->reqoff = 2 * auth->digestsize; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, @@ -418,8 +411,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl, inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; - inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->base.cra_alignmask; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); inst->alg.ivsize = enc->ivsize; -- cgit v1.2.3 From 7b062611299a976b3e44a3767f0d8d772c1fa228 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:46 -0700 Subject: crypto: authencesn - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify the code in authenc accordingly. 
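With the alignment adjustments gone, the tail scratch area can be carved up with plain pointer arithmetic: the computed digest (ohash) sits at the start, the digest carried in the packet (ihash) follows one digest size later, and the nested request state begins at reqoff = two digest sizes. A hedged standalone sketch, with placeholder sizes and names:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int ds = 32;			/* example digest size */
	unsigned int reqoff = 2 * ds;		/* was ALIGN(2 * ds, alignmask + 1) */
	unsigned int nested_req_size = 128;	/* placeholder for the nested request */
	unsigned char *tail = calloc(1, reqoff + nested_req_size);

	if (!tail)
		return 1;

	unsigned char *ohash = tail;		/* digest we compute */
	unsigned char *ihash = ohash + ds;	/* digest received for comparison */
	void *nested_req = tail + reqoff;	/* request state lives after both */

	printf("ohash @ +%td, ihash @ +%td, nested request @ +%td\n",
	       ohash - tail, ihash - tail, (unsigned char *)nested_req - tail);
	free(tail);
	return 0;
}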
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/authencesn.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 60e9568f..2cc933e2 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -87,11 +87,8 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req, unsigned int flags) { struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); - struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); - struct crypto_ahash *auth = ctx->auth; - u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *hash = areq_ctx->tail; unsigned int authsize = crypto_aead_authsize(authenc_esn); unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; @@ -122,8 +119,7 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; - u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *hash = areq_ctx->tail; struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc_esn); unsigned int assoclen = req->assoclen; @@ -224,8 +220,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, struct skcipher_request *skreq = (void *)(areq_ctx->tail + ctx->reqoff); struct crypto_ahash *auth = ctx->auth; - u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *ohash = areq_ctx->tail; unsigned int cryptlen = req->cryptlen - authsize; unsigned int assoclen = req->assoclen; struct scatterlist *dst = req->dst; @@ -272,8 +267,7 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req) struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); unsigned int authsize = crypto_aead_authsize(authenc_esn); struct crypto_ahash *auth = ctx->auth; - u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, - crypto_ahash_alignmask(auth) + 1); + u8 *ohash = areq_ctx->tail; unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; u8 *ihash = ohash + crypto_ahash_digestsize(auth); @@ -344,8 +338,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm) ctx->enc = enc; ctx->null = null; - ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth), - crypto_ahash_alignmask(auth) + 1); + ctx->reqoff = 2 * crypto_ahash_digestsize(auth); crypto_aead_set_reqsize( tfm, @@ -431,8 +424,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; - inst->alg.base.cra_alignmask = auth_base->cra_alignmask | - enc->base.cra_alignmask; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); inst->alg.ivsize = enc->ivsize; -- cgit v1.2.3 From a0c6df49e7fe8861643a75379c7a349acfcf142e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:47 -0700 Subject: crypto: testmgr - stop checking crypto_ahash_alignmask Now that the alignmask for ahash and shash algorithms is always 0, crypto_ahash_alignmask() always returns 0 and will be removed. In preparation for this, stop checking crypto_ahash_alignmask() in testmgr. 
As a result of this change, test_sg_division::offset_relative_to_alignmask and testvec_config::key_offset_relative_to_alignmask no longer have any effect on ahash (or shash) algorithms. Therefore, also stop setting these flags in default_hash_testvec_configs[]. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/testmgr.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 48a0929c..335449a2 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -408,17 +408,15 @@ static const struct testvec_config default_hash_testvec_configs[] = { .finalization_type = FINALIZATION_TYPE_FINAL, .key_offset = 1, }, { - .name = "digest buffer aligned only to alignmask", + .name = "digest misaligned buffer", .src_divs = { { .proportion_of_total = 10000, .offset = 1, - .offset_relative_to_alignmask = true, }, }, .finalization_type = FINALIZATION_TYPE_DIGEST, .key_offset = 1, - .key_offset_relative_to_alignmask = true, }, { .name = "init+update+update+final two even splits", .src_divs = { @@ -1458,7 +1456,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, u8 *hashstate) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - const unsigned int alignmask = crypto_ahash_alignmask(tfm); const unsigned int digestsize = crypto_ahash_digestsize(tfm); const unsigned int statesize = crypto_ahash_statesize(tfm); const char *driver = crypto_ahash_driver_name(tfm); @@ -1474,7 +1471,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize, - cfg, alignmask); + cfg, 0); if (err) { if (err == vec->setkey_error) return 0; @@ -1491,7 +1488,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, } /* Build the scatterlist for the source data */ - err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); + err = build_hash_sglist(tsgl, vec, cfg, 0, divs); if (err) { pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", driver, vec_name, cfg->name); -- cgit v1.2.3 From ec6970b74f360ffd94cba9b6cb8d0fcf39c896b0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:50 -0700 Subject: crypto: ccm - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify crypto_ccm_create_common() accordingly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ccm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/ccm.c b/crypto/ccm.c index dd7aed63..36f0acec 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -504,8 +504,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = mac->base.cra_alignmask | - ctr->base.cra_alignmask; + inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.ivsize = 16; inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; -- cgit v1.2.3 From 20078fc78b54451a69c347fd5eeb3234b133b6df Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:51 -0700 Subject: crypto: chacha20poly1305 - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify chachapoly_create() accordingly. 
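The shape of this run of template fix-ups can be summarised in one standalone sketch (all names and values below are made up): an AEAD instance built from a hash component plus a CTR/stream component averages the two priorities and, now that hash alignmasks are always zero, inherits its alignmask from the cipher side alone, making the old OR redundant:

#include <stdio.h>

struct alg_sketch {
	const char *name;
	int priority;
	unsigned long alignmask;
};

int main(void)
{
	struct alg_sketch hash = { "hash-component", 300, 0 };	/* always 0 now */
	struct alg_sketch ctr  = { "ctr-component",  400, 3 };	/* may still set one */

	int priority = (hash.priority + ctr.priority) / 2;
	unsigned long old_mask = hash.alignmask | ctr.alignmask;
	unsigned long new_mask = ctr.alignmask;

	printf("instance priority = %d\n", priority);
	printf("alignmask: old OR form = %lu, simplified form = %lu\n",
	       old_mask, new_mask);
	return 0;
}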
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 0e2e208d..9e465133 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -610,8 +610,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = chacha->base.cra_alignmask | - poly->base.cra_alignmask; + inst->alg.base.cra_alignmask = chacha->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; -- cgit v1.2.3 From 403af307cbab85306c643b0397586f9f01afdc53 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:52 -0700 Subject: crypto: gcm - stop using alignmask of ahash Now that the alignmask for ahash and shash algorithms is always 0, simplify crypto_gcm_create_common() accordingly. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/gcm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/gcm.c b/crypto/gcm.c index 91ce6e0e..84f7c23d 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -629,8 +629,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | - ctr->base.cra_alignmask; + inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; inst->alg.chunksize = ctr->chunksize; -- cgit v1.2.3 From 81b8024553eabe00c78fbf6184cca9f879396404 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:54 -0700 Subject: crypto: ahash - remove struct ahash_request_priv struct ahash_request_priv is unused, so remove it. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 744fd3b8..556c9501 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -25,14 +25,6 @@ static const struct crypto_type crypto_ahash_type; -struct ahash_request_priv { - crypto_completion_t complete; - void *data; - u8 *result; - u32 flags; - void *ubuf[] CRYPTO_MINALIGN_ATTR; -}; - static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; -- cgit v1.2.3 From 348850ab07e42bd2da1a354c38aba1527d757fe7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:55 -0700 Subject: crypto: ahash - improve file comment Improve the file comment for crypto/ahash.c. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 556c9501..1ad402f4 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -2,8 +2,12 @@ /* * Asynchronous Cryptographic Hash operations. * - * This is the asynchronous version of hash.c with notification of - * completion via a callback. + * This is the implementation of the ahash (asynchronous hash) API. It differs + * from shash (synchronous hash) in that ahash supports asynchronous operations, + * and it hashes data from scatterlists instead of virtually addressed buffers. 
+ * + * The ahash API provides access to both ahash and shash algorithms. The shash + * API only provides access to shash algorithms. * * Copyright (c) 2008 Loc Ho */ -- cgit v1.2.3 From 66b3c618ce304706ee3dc571c0b27f902dae8e8c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:58 -0700 Subject: crypto: hash - move "ahash wrapping shash" functions to ahash.c The functions that are involved in implementing the ahash API on top of an shash algorithm belong better in ahash.c, not in shash.c where they currently are. Move them. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 186 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/hash.h | 4 +- crypto/shash.c | 189 +-------------------------------------------------------- 3 files changed, 188 insertions(+), 191 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 1ad402f4..74be1eb2 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -29,6 +29,192 @@ static const struct crypto_type crypto_ahash_type; +static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(tfm); + + return crypto_shash_setkey(*ctx, key, keylen); +} + +static int shash_async_init(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return crypto_shash_init(desc); +} + +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) +{ + struct crypto_hash_walk walk; + int nbytes; + + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; + nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_shash_update(desc, walk.data, nbytes); + + return nbytes; +} +EXPORT_SYMBOL_GPL(shash_ahash_update); + +static int shash_async_update(struct ahash_request *req) +{ + return shash_ahash_update(req, ahash_request_ctx(req)); +} + +static int shash_async_final(struct ahash_request *req) +{ + return crypto_shash_final(ahash_request_ctx(req), req->result); +} + +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) +{ + struct crypto_hash_walk walk; + int nbytes; + + nbytes = crypto_hash_walk_first(req, &walk); + if (!nbytes) + return crypto_shash_final(desc, req->result); + + do { + nbytes = crypto_hash_walk_last(&walk) ? 
+ crypto_shash_finup(desc, walk.data, nbytes, + req->result) : + crypto_shash_update(desc, walk.data, nbytes); + nbytes = crypto_hash_walk_done(&walk, nbytes); + } while (nbytes > 0); + + return nbytes; +} +EXPORT_SYMBOL_GPL(shash_ahash_finup); + +static int shash_async_finup(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return shash_ahash_finup(req, desc); +} + +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) +{ + unsigned int nbytes = req->nbytes; + struct scatterlist *sg; + unsigned int offset; + int err; + + if (nbytes && + (sg = req->src, offset = sg->offset, + nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { + void *data; + + data = kmap_local_page(sg_page(sg)); + err = crypto_shash_digest(desc, data + offset, nbytes, + req->result); + kunmap_local(data); + } else + err = crypto_shash_init(desc) ?: + shash_ahash_finup(req, desc); + + return err; +} +EXPORT_SYMBOL_GPL(shash_ahash_digest); + +static int shash_async_digest(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return shash_ahash_digest(req, desc); +} + +static int shash_async_export(struct ahash_request *req, void *out) +{ + return crypto_shash_export(ahash_request_ctx(req), out); +} + +static int shash_async_import(struct ahash_request *req, const void *in) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + + return crypto_shash_import(desc, in); +} + +static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(*ctx); +} + +static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct shash_alg *alg = __crypto_shash_alg(calg); + struct crypto_ahash *crt = __crypto_ahash_cast(tfm); + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *shash; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + shash = crypto_create_tfm(calg, &crypto_shash_type); + if (IS_ERR(shash)) { + crypto_mod_put(calg); + return PTR_ERR(shash); + } + + *ctx = shash; + tfm->exit = crypto_exit_shash_ops_async; + + crt->init = shash_async_init; + crt->update = shash_async_update; + crt->final = shash_async_final; + crt->finup = shash_async_finup; + crt->digest = shash_async_digest; + if (crypto_shash_alg_has_setkey(alg)) + crt->setkey = shash_async_setkey; + + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); + + crt->export = shash_async_export; + crt->import = shash_async_import; + + crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); + + return 0; +} + +static struct crypto_ahash * +crypto_clone_shash_ops_async(struct crypto_ahash *nhash, + struct crypto_ahash *hash) +{ + struct crypto_shash **nctx = crypto_ahash_ctx(nhash); + struct crypto_shash **ctx = crypto_ahash_ctx(hash); + struct crypto_shash *shash; + + shash = crypto_clone_shash(*ctx); + if (IS_ERR(shash)) { + crypto_free_ahash(nhash); + return ERR_CAST(shash); + } + + *nctx = shash; + + return nhash; +} + static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; diff --git a/crypto/hash.h b/crypto/hash.h index 7e6c1a94..de2ee2f4 100644 --- 
a/crypto/hash.h +++ b/crypto/hash.h @@ -31,9 +31,7 @@ static inline int crypto_hash_report_stat(struct sk_buff *skb, return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } -int crypto_init_shash_ops_async(struct crypto_tfm *tfm); -struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash, - struct crypto_ahash *hash); +extern const struct crypto_type crypto_shash_type; int hash_prepare_alg(struct hash_alg_common *alg); diff --git a/crypto/shash.c b/crypto/shash.c index 359702c2..28092ed8 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -16,8 +16,6 @@ #include "hash.h" -static const struct crypto_type crypto_shash_type; - static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) { return hash_get_stat(&alg->halg); @@ -193,191 +191,6 @@ int crypto_shash_import(struct shash_desc *desc, const void *in) } EXPORT_SYMBOL_GPL(crypto_shash_import); -static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(tfm); - - return crypto_shash_setkey(*ctx, key, keylen); -} - -static int shash_async_init(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_init(desc); -} - -int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) -{ - struct crypto_hash_walk walk; - int nbytes; - - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; - nbytes = crypto_hash_walk_done(&walk, nbytes)) - nbytes = crypto_shash_update(desc, walk.data, nbytes); - - return nbytes; -} -EXPORT_SYMBOL_GPL(shash_ahash_update); - -static int shash_async_update(struct ahash_request *req) -{ - return shash_ahash_update(req, ahash_request_ctx(req)); -} - -static int shash_async_final(struct ahash_request *req) -{ - return crypto_shash_final(ahash_request_ctx(req), req->result); -} - -int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) -{ - struct crypto_hash_walk walk; - int nbytes; - - nbytes = crypto_hash_walk_first(req, &walk); - if (!nbytes) - return crypto_shash_final(desc, req->result); - - do { - nbytes = crypto_hash_walk_last(&walk) ? 
- crypto_shash_finup(desc, walk.data, nbytes, - req->result) : - crypto_shash_update(desc, walk.data, nbytes); - nbytes = crypto_hash_walk_done(&walk, nbytes); - } while (nbytes > 0); - - return nbytes; -} -EXPORT_SYMBOL_GPL(shash_ahash_finup); - -static int shash_async_finup(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_finup(req, desc); -} - -int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) -{ - unsigned int nbytes = req->nbytes; - struct scatterlist *sg; - unsigned int offset; - int err; - - if (nbytes && - (sg = req->src, offset = sg->offset, - nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { - void *data; - - data = kmap_local_page(sg_page(sg)); - err = crypto_shash_digest(desc, data + offset, nbytes, - req->result); - kunmap_local(data); - } else - err = crypto_shash_init(desc) ?: - shash_ahash_finup(req, desc); - - return err; -} -EXPORT_SYMBOL_GPL(shash_ahash_digest); - -static int shash_async_digest(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_digest(req, desc); -} - -static int shash_async_export(struct ahash_request *req, void *out) -{ - return crypto_shash_export(ahash_request_ctx(req), out); -} - -static int shash_async_import(struct ahash_request *req, const void *in) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return crypto_shash_import(desc, in); -} - -static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) -{ - struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - - crypto_free_shash(*ctx); -} - -int crypto_init_shash_ops_async(struct crypto_tfm *tfm) -{ - struct crypto_alg *calg = tfm->__crt_alg; - struct shash_alg *alg = __crypto_shash_alg(calg); - struct crypto_ahash *crt = __crypto_ahash_cast(tfm); - struct crypto_shash **ctx = crypto_tfm_ctx(tfm); - struct crypto_shash *shash; - - if (!crypto_mod_get(calg)) - return -EAGAIN; - - shash = crypto_create_tfm(calg, &crypto_shash_type); - if (IS_ERR(shash)) { - crypto_mod_put(calg); - return PTR_ERR(shash); - } - - *ctx = shash; - tfm->exit = crypto_exit_shash_ops_async; - - crt->init = shash_async_init; - crt->update = shash_async_update; - crt->final = shash_async_final; - crt->finup = shash_async_finup; - crt->digest = shash_async_digest; - if (crypto_shash_alg_has_setkey(alg)) - crt->setkey = shash_async_setkey; - - crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & - CRYPTO_TFM_NEED_KEY); - - crt->export = shash_async_export; - crt->import = shash_async_import; - - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); - - return 0; -} - -struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash, - struct crypto_ahash *hash) -{ - struct crypto_shash **nctx = crypto_ahash_ctx(nhash); - struct crypto_shash **ctx = crypto_ahash_ctx(hash); - struct crypto_shash *shash; - - shash = crypto_clone_shash(*ctx); - if (IS_ERR(shash)) { - crypto_free_ahash(nhash); - return ERR_CAST(shash); - } - - *nctx = shash; - - return nhash; -} - static void crypto_shash_exit_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); @@ -456,7 +269,7 @@ static int __maybe_unused crypto_shash_report_stat( 
return crypto_hash_report_stat(skb, alg, "shash"); } -static const struct crypto_type crypto_shash_type = { +const struct crypto_type crypto_shash_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_shash_init_tfm, .free = crypto_shash_free_instance, -- cgit v1.2.3 From 88b873d24bc4dfc870def262bc2b867988e2f2d6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:59 -0700 Subject: crypto: ahash - check for shash type instead of not ahash type Since the previous patch made crypto_shash_type visible to ahash.c, change checks for '->cra_type != &crypto_ahash_type' to '->cra_type == &crypto_shash_type'. This makes more sense and avoids having to forward-declare crypto_ahash_type. The result is still the same, since the type is either shash or ahash here. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 74be1eb2..96fec0ca 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -27,8 +27,6 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e -static const struct crypto_type crypto_ahash_type; - static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -511,7 +509,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) crypto_ahash_set_statesize(hash, alg->halg.statesize); - if (tfm->__crt_alg->cra_type != &crypto_ahash_type) + if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_shash_ops_async(tfm); hash->init = alg->init; @@ -535,7 +533,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) { - if (alg->cra_type != &crypto_ahash_type) + if (alg->cra_type == &crypto_shash_type) return sizeof(struct crypto_shash *); return crypto_alg_extsize(alg); @@ -760,7 +758,7 @@ bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) { struct crypto_alg *alg = &halg->base; - if (alg->cra_type != &crypto_ahash_type) + if (alg->cra_type == &crypto_shash_type) return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); return __crypto_ahash_alg(alg)->setkey != NULL; -- cgit v1.2.3 From 890caf03421ecc833fc4dbb77823ffb8c03069a8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:11:00 -0700 Subject: crypto: ahash - optimize performance when wrapping shash The "ahash" API provides access to both CPU-based and hardware offload- based implementations of hash algorithms. Typically the former are implemented as "shash" algorithms under the hood, while the latter are implemented as "ahash" algorithms. The "ahash" API provides access to both. Various kernel subsystems use the ahash API because they want to support hashing hardware offload without using a separate API for it. Yet, the common case is that a crypto accelerator is not actually being used, and ahash is just wrapping a CPU-based shash algorithm. This patch optimizes the ahash API for that common case by eliminating the extra indirect call for each ahash operation on top of shash. It also fixes the double-counting of crypto stats in this scenario (though CONFIG_CRYPTO_STATS should *not* be enabled by anyone interested in performance anyway...), and it eliminates redundant checking of CRYPTO_TFM_NEED_KEY. As a bonus, it also shrinks struct crypto_ahash. 
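For context, a minimal caller-side sketch of the common case this optimizes, assuming "sha256" resolves to a CPU-based shash implementation (the buffer, length and error handling are illustrative only; uses <crypto/hash.h>, <crypto/sha2.h> and <linux/scatterlist.h>):

	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	u8 digest[SHA256_DIGEST_SIZE];
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* With this patch the request is handed straight to the underlying
	 * shash (shash_ahash_digest()) instead of going through per-operation
	 * indirect calls stored in struct crypto_ahash. */
	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);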
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ahash.c | 285 +++++++++++++++++++++++++++++---------------------------- crypto/hash.h | 10 ++ crypto/shash.c | 8 +- 3 files changed, 162 insertions(+), 141 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 96fec0ca..deee55f9 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -27,22 +27,38 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e -static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) +static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg) { - struct crypto_shash **ctx = crypto_ahash_ctx(tfm); + return hash_get_stat(&alg->halg); +} + +static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err) +{ + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; - return crypto_shash_setkey(*ctx, key, keylen); + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&ahash_get_stat(alg)->err_cnt); + + return err; } -static int shash_async_init(struct ahash_request *req) +/* + * For an ahash tfm that is using an shash algorithm (instead of an ahash + * algorithm), this returns the underlying shash tfm. + */ +static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm) { - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); + return *(struct crypto_shash **)crypto_ahash_ctx(tfm); +} - desc->tfm = *ctx; +static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + struct shash_desc *desc = ahash_request_ctx(req); - return crypto_shash_init(desc); + desc->tfm = ahash_to_shash(tfm); + return desc; } int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) @@ -58,16 +74,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) } EXPORT_SYMBOL_GPL(shash_ahash_update); -static int shash_async_update(struct ahash_request *req) -{ - return shash_ahash_update(req, ahash_request_ctx(req)); -} - -static int shash_async_final(struct ahash_request *req) -{ - return crypto_shash_final(ahash_request_ctx(req), req->result); -} - int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) { struct crypto_hash_walk walk; @@ -89,16 +95,6 @@ int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) } EXPORT_SYMBOL_GPL(shash_ahash_finup); -static int shash_async_finup(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_finup(req, desc); -} - int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) { unsigned int nbytes = req->nbytes; @@ -123,42 +119,16 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) } EXPORT_SYMBOL_GPL(shash_ahash_digest); -static int shash_async_digest(struct ahash_request *req) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return shash_ahash_digest(req, desc); -} - -static int shash_async_export(struct ahash_request *req, void *out) -{ - return crypto_shash_export(ahash_request_ctx(req), out); -} - -static int shash_async_import(struct ahash_request *req, const void *in) -{ - struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); - - desc->tfm = *ctx; - - return 
crypto_shash_import(desc, in); -} - -static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) +static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm) { struct crypto_shash **ctx = crypto_tfm_ctx(tfm); crypto_free_shash(*ctx); } -static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) +static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; - struct shash_alg *alg = __crypto_shash_alg(calg); struct crypto_ahash *crt = __crypto_ahash_cast(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash *shash; @@ -172,47 +142,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) return PTR_ERR(shash); } + crt->using_shash = true; *ctx = shash; - tfm->exit = crypto_exit_shash_ops_async; - - crt->init = shash_async_init; - crt->update = shash_async_update; - crt->final = shash_async_final; - crt->finup = shash_async_finup; - crt->digest = shash_async_digest; - if (crypto_shash_alg_has_setkey(alg)) - crt->setkey = shash_async_setkey; + tfm->exit = crypto_exit_ahash_using_shash; crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); - - crt->export = shash_async_export; - crt->import = shash_async_import; - crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); return 0; } -static struct crypto_ahash * -crypto_clone_shash_ops_async(struct crypto_ahash *nhash, - struct crypto_ahash *hash) -{ - struct crypto_shash **nctx = crypto_ahash_ctx(nhash); - struct crypto_shash **ctx = crypto_ahash_ctx(hash); - struct crypto_shash *shash; - - shash = crypto_clone_shash(*ctx); - if (IS_ERR(shash)) { - crypto_free_ahash(nhash); - return ERR_CAST(shash); - } - - *nctx = shash; - - return nhash; -} - static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; @@ -290,30 +230,54 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, return -ENOSYS; } -static void ahash_set_needkey(struct crypto_ahash *tfm) +static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg) { - const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); - - if (tfm->setkey != ahash_nosetkey && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + if (alg->setkey != ahash_nosetkey && + !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - int err = tfm->setkey(tfm, key, keylen); + if (likely(tfm->using_shash)) { + struct crypto_shash *shash = ahash_to_shash(tfm); + int err; - if (unlikely(err)) { - ahash_set_needkey(tfm); - return err; + err = crypto_shash_setkey(shash, key, keylen); + if (unlikely(err)) { + crypto_ahash_set_flags(tfm, + crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); + return err; + } + } else { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + int err; + + err = alg->setkey(tfm, key, keylen); + if (unlikely(err)) { + ahash_set_needkey(tfm, alg); + return err; + } } - crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); +int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_init(prepare_shash_desc(req, tfm)); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return crypto_ahash_alg(tfm)->init(req); +} +EXPORT_SYMBOL_GPL(crypto_ahash_init); + static int 
ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, bool has_state) { @@ -377,42 +341,67 @@ static void ahash_restore_req(struct ahash_request *req, int err) kfree_sensitive(subreq); } -int crypto_ahash_final(struct ahash_request *req) +int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + struct ahash_alg *alg; + if (likely(tfm->using_shash)) + return shash_ahash_update(req, ahash_request_ctx(req)); + + alg = crypto_ahash_alg(tfm); if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&hash_get_stat(alg)->hash_cnt); + atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen); + return crypto_ahash_errstat(alg, alg->update(req)); +} +EXPORT_SYMBOL_GPL(crypto_ahash_update); + +int crypto_ahash_final(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg; + + if (likely(tfm->using_shash)) + return crypto_shash_final(ahash_request_ctx(req), req->result); - return crypto_hash_errstat(alg, tfm->final(req)); + alg = crypto_ahash_alg(tfm); + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) + atomic64_inc(&ahash_get_stat(alg)->hash_cnt); + return crypto_ahash_errstat(alg, alg->final(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + struct ahash_alg *alg; + + if (likely(tfm->using_shash)) + return shash_ahash_finup(req, ahash_request_ctx(req)); + alg = crypto_ahash_alg(tfm); if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_hash *istat = hash_get_stat(alg); + struct crypto_istat_hash *istat = ahash_get_stat(alg); atomic64_inc(&istat->hash_cnt); atomic64_add(req->nbytes, &istat->hash_tlen); } - - return crypto_hash_errstat(alg, tfm->finup(req)); + return crypto_ahash_errstat(alg, alg->finup(req)); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + struct ahash_alg *alg; int err; + if (likely(tfm->using_shash)) + return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + + alg = crypto_ahash_alg(tfm); if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_hash *istat = hash_get_stat(alg); + struct crypto_istat_hash *istat = ahash_get_stat(alg); atomic64_inc(&istat->hash_cnt); atomic64_add(req->nbytes, &istat->hash_tlen); @@ -421,9 +410,9 @@ int crypto_ahash_digest(struct ahash_request *req) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) err = -ENOKEY; else - err = tfm->digest(req); + err = alg->digest(req); - return crypto_hash_errstat(alg, err); + return crypto_ahash_errstat(alg, err); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -448,7 +437,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) subreq->base.complete = ahash_def_finup_done2; - err = crypto_ahash_reqtfm(req)->final(subreq); + err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq); if (err == -EINPROGRESS || err == -EBUSY) return err; @@ -485,13 +474,35 @@ static int ahash_def_finup(struct ahash_request *req) if (err) return err; - err = tfm->update(req->priv); + err = crypto_ahash_alg(tfm)->update(req->priv); if (err == -EINPROGRESS || err == -EBUSY) return err; return ahash_def_finup_finish1(req, err); } +int crypto_ahash_export(struct ahash_request *req, void *out) +{ + struct 
crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_export(ahash_request_ctx(req), out); + return crypto_ahash_alg(tfm)->export(req, out); +} +EXPORT_SYMBOL_GPL(crypto_ahash_export); + +int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (likely(tfm->using_shash)) + return crypto_shash_import(prepare_shash_desc(req, tfm), in); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return crypto_ahash_alg(tfm)->import(req, in); +} +EXPORT_SYMBOL_GPL(crypto_ahash_import); + static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); @@ -505,25 +516,12 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); - hash->setkey = ahash_nosetkey; - crypto_ahash_set_statesize(hash, alg->halg.statesize); if (tfm->__crt_alg->cra_type == &crypto_shash_type) - return crypto_init_shash_ops_async(tfm); - - hash->init = alg->init; - hash->update = alg->update; - hash->final = alg->final; - hash->finup = alg->finup ?: ahash_def_finup; - hash->digest = alg->digest; - hash->export = alg->export; - hash->import = alg->import; - - if (alg->setkey) { - hash->setkey = alg->setkey; - ahash_set_needkey(hash); - } + return crypto_init_ahash_using_shash(tfm); + + ahash_set_needkey(hash, alg); if (alg->exit_tfm) tfm->exit = crypto_ahash_exit_tfm; @@ -641,19 +639,21 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash) if (IS_ERR(nhash)) return nhash; - nhash->init = hash->init; - nhash->update = hash->update; - nhash->final = hash->final; - nhash->finup = hash->finup; - nhash->digest = hash->digest; - nhash->export = hash->export; - nhash->import = hash->import; - nhash->setkey = hash->setkey; nhash->reqsize = hash->reqsize; nhash->statesize = hash->statesize; - if (tfm->__crt_alg->cra_type != &crypto_ahash_type) - return crypto_clone_shash_ops_async(nhash, hash); + if (likely(hash->using_shash)) { + struct crypto_shash **nctx = crypto_ahash_ctx(nhash); + struct crypto_shash *shash; + + shash = crypto_clone_shash(ahash_to_shash(hash)); + if (IS_ERR(shash)) { + err = PTR_ERR(shash); + goto out_free_nhash; + } + *nctx = shash; + return nhash; + } err = -ENOSYS; alg = crypto_ahash_alg(hash); @@ -687,6 +687,11 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + if (!alg->finup) + alg->finup = ahash_def_finup; + if (!alg->setkey) + alg->setkey = ahash_nosetkey; + return 0; } @@ -761,7 +766,7 @@ bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) if (alg->cra_type == &crypto_shash_type) return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); - return __crypto_ahash_alg(alg)->setkey != NULL; + return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey; } EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); diff --git a/crypto/hash.h b/crypto/hash.h index de2ee2f4..93f6ba0d 100644 --- a/crypto/hash.h +++ b/crypto/hash.h @@ -12,6 +12,16 @@ #include "internal.h" +static inline struct crypto_istat_hash *hash_get_stat( + struct hash_alg_common *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + return &alg->stat; +#else + return NULL; +#endif +} + static inline int crypto_hash_report_stat(struct sk_buff *skb, struct crypto_alg *alg, const char *type) diff --git a/crypto/shash.c b/crypto/shash.c index 28092ed8..d5194221 100644 --- 
a/crypto/shash.c +++ b/crypto/shash.c @@ -23,7 +23,13 @@ static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) static inline int crypto_shash_errstat(struct shash_alg *alg, int err) { - return crypto_hash_errstat(&alg->halg, err); + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) + return err; + + if (err && err != -EINPROGRESS && err != -EBUSY) + atomic64_inc(&shash_get_stat(alg)->err_cnt); + + return err; } int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, -- cgit v1.2.3 From a3ac34c7373fb36e5d23796844987f256d714009 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Sun, 22 Oct 2023 19:22:04 +0100 Subject: crypto: FIPS 202 SHA-3 register in hash info for IMA Register FIPS 202 SHA-3 hashes in hash info for IMA and other users. Sizes 256 and up, as 224 is too weak for any practical purposes. Signed-off-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/hash_info.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crypto/hash_info.c b/crypto/hash_info.c index a49ff96b..9a467638 100644 --- a/crypto/hash_info.c +++ b/crypto/hash_info.c @@ -29,6 +29,9 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = { [HASH_ALGO_SM3_256] = "sm3", [HASH_ALGO_STREEBOG_256] = "streebog256", [HASH_ALGO_STREEBOG_512] = "streebog512", + [HASH_ALGO_SHA3_256] = "sha3-256", + [HASH_ALGO_SHA3_384] = "sha3-384", + [HASH_ALGO_SHA3_512] = "sha3-512", }; EXPORT_SYMBOL_GPL(hash_algo_name); @@ -53,5 +56,8 @@ const int hash_digest_size[HASH_ALGO__LAST] = { [HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE, [HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE, [HASH_ALGO_STREEBOG_512] = STREEBOG512_DIGEST_SIZE, + [HASH_ALGO_SHA3_256] = SHA3_256_DIGEST_SIZE, + [HASH_ALGO_SHA3_384] = SHA3_384_DIGEST_SIZE, + [HASH_ALGO_SHA3_512] = SHA3_512_DIGEST_SIZE, }; EXPORT_SYMBOL_GPL(hash_digest_size); -- cgit v1.2.3 From 6347eda6b6d6f6d5ad3170858dc3364e0c71913e Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Sun, 22 Oct 2023 19:22:05 +0100 Subject: crypto: rsa-pkcs1pad - Add FIPS 202 SHA-3 support Add support in rsa-pkcs1pad for FIPS 202 SHA-3 hashes, sizes 256 and up. As 224 is too weak for any practical purposes. 
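As a rough illustration (not part of this patch), a user wanting PKCS#1 v1.5 operations over a SHA-3 digest can now instantiate the padded transform by name; "pkcs1pad(rsa,sha3-384)" and "pkcs1pad(rsa,sha3-512)" work the same way:

	struct crypto_akcipher *tfm;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha3-256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... set the key and perform the operation as usual ... */
	crypto_free_akcipher(tfm);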
Signed-off-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 25 ++++++++++++++++++++++++- crypto/testmgr.c | 12 ++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 49756c6e..cd501195 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -61,6 +61,24 @@ static const u8 rsa_digest_info_sha512[] = { 0x05, 0x00, 0x04, 0x40 }; +static const u8 rsa_digest_info_sha3_256[] = { + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08, + 0x05, 0x00, 0x04, 0x20 +}; + +static const u8 rsa_digest_info_sha3_384[] = { + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09, + 0x05, 0x00, 0x04, 0x30 +}; + +static const u8 rsa_digest_info_sha3_512[] = { + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0A, + 0x05, 0x00, 0x04, 0x40 +}; + static const struct rsa_asn1_template { const char *name; const u8 *data; @@ -74,8 +92,13 @@ static const struct rsa_asn1_template { _(sha384), _(sha512), _(sha224), - { NULL } #undef _ +#define _(X) { "sha3-" #X, rsa_digest_info_sha3_##X, sizeof(rsa_digest_info_sha3_##X) } + _(256), + _(384), + _(512), +#undef _ + { NULL } }; static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 335449a2..1dc93bf6 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5464,6 +5464,18 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "pkcs1pad(rsa,sha512)", .test = alg_test_null, .fips_allowed = 1, + }, { + .alg = "pkcs1pad(rsa,sha3-256)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1pad(rsa,sha3-384)", + .test = alg_test_null, + .fips_allowed = 1, + }, { + .alg = "pkcs1pad(rsa,sha3-512)", + .test = alg_test_null, + .fips_allowed = 1, }, { .alg = "poly1305", .test = alg_test_hash, -- cgit v1.2.3 From a7bf3f847576ce6443d29802749f37f9c118dc2e Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Sun, 22 Oct 2023 19:22:06 +0100 Subject: crypto: asymmetric_keys - allow FIPS 202 SHA-3 signatures Add FIPS 202 SHA-3 hash signature support in x509 certificates, pkcs7 signatures, and authenticode signatures. Supports hashes of size 256 and up, as 224 is too weak for any practical purposes. 
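A rough caller-side sketch of what this enables (pkey, digest, sig_data and sig_len are placeholders for data the caller already has; not part of this patch):

	struct public_key_signature sig = {
		.pkey_algo   = "rsa",
		.encoding    = "pkcs1",
		.hash_algo   = "sha3-256",
		.digest      = digest,   /* SHA3-256 digest of the signed data */
		.digest_size = 32,
		.s           = sig_data, /* raw signature blob */
		.s_size      = sig_len,
	};

	err = public_key_verify_signature(pkey, &sig);

The same applies to "sha3-384" and "sha3-512"; for ECDSA signatures the parsers set .pkey_algo = "ecdsa" and .encoding = "x962" instead.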
Signed-off-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 9 +++++++++ crypto/asymmetric_keys/pkcs7_parser.c | 12 ++++++++++++ crypto/asymmetric_keys/public_key.c | 5 ++++- crypto/asymmetric_keys/x509_cert_parser.c | 24 ++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 1 deletion(-) diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 855cbc46..05402ef8 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -84,6 +84,15 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, case OID_sha512: ctx->digest_algo = "sha512"; break; + case OID_sha3_256: + ctx->digest_algo = "sha3-256"; + break; + case OID_sha3_384: + ctx->digest_algo = "sha3-384"; + break; + case OID_sha3_512: + ctx->digest_algo = "sha3-512"; + break; case OID__NR: sprint_oid(value, vlen, buffer, sizeof(buffer)); diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index ab647cb4..5b08c507 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -248,6 +248,15 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, case OID_gost2012Digest512: ctx->sinfo->sig->hash_algo = "streebog512"; break; + case OID_sha3_256: + ctx->sinfo->sig->hash_algo = "sha3-256"; + break; + case OID_sha3_384: + ctx->sinfo->sig->hash_algo = "sha3-384"; + break; + case OID_sha3_512: + ctx->sinfo->sig->hash_algo = "sha3-512"; + break; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; @@ -273,6 +282,9 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, case OID_id_ecdsa_with_sha256: case OID_id_ecdsa_with_sha384: case OID_id_ecdsa_with_sha512: + case OID_id_ecdsa_with_sha3_256: + case OID_id_ecdsa_with_sha3_384: + case OID_id_ecdsa_with_sha3_512: ctx->sinfo->sig->pkey_algo = "ecdsa"; ctx->sinfo->sig->encoding = "x962"; break; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 5bf0452c..8eeab38a 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -119,7 +119,10 @@ software_key_determine_akcipher(const struct public_key *pkey, if (strcmp(hash_algo, "sha224") != 0 && strcmp(hash_algo, "sha256") != 0 && strcmp(hash_algo, "sha384") != 0 && - strcmp(hash_algo, "sha512") != 0) + strcmp(hash_algo, "sha512") != 0 && + strcmp(hash_algo, "sha3-256") != 0 && + strcmp(hash_algo, "sha3-384") != 0 && + strcmp(hash_algo, "sha3-512") != 0) return -EINVAL; } else if (strcmp(pkey->pkey_algo, "sm2") == 0) { if (strcmp(encoding, "raw") != 0) diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 68ef1ffb..487204d3 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -214,6 +214,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; + case OID_id_rsassa_pkcs1_v1_5_with_sha3_256: + ctx->cert->sig->hash_algo = "sha3-256"; + goto rsa_pkcs1; + + case OID_id_rsassa_pkcs1_v1_5_with_sha3_384: + ctx->cert->sig->hash_algo = "sha3-384"; + goto rsa_pkcs1; + + case OID_id_rsassa_pkcs1_v1_5_with_sha3_512: + ctx->cert->sig->hash_algo = "sha3-512"; + goto rsa_pkcs1; + case OID_id_ecdsa_with_sha224: ctx->cert->sig->hash_algo = "sha224"; goto ecdsa; @@ -230,6 +242,18 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, 
ctx->cert->sig->hash_algo = "sha512"; goto ecdsa; + case OID_id_ecdsa_with_sha3_256: + ctx->cert->sig->hash_algo = "sha3-256"; + goto ecdsa; + + case OID_id_ecdsa_with_sha3_384: + ctx->cert->sig->hash_algo = "sha3-384"; + goto ecdsa; + + case OID_id_ecdsa_with_sha3_512: + ctx->cert->sig->hash_algo = "sha3-512"; + goto ecdsa; + case OID_gost2012Signature256: ctx->cert->sig->hash_algo = "streebog256"; goto ecrdsa; -- cgit v1.2.3 From 557f9cae49de49b6fd6cafc5005aaf737bd0c53e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 27 Oct 2023 12:52:06 -0700 Subject: crypto: testmgr - move pkcs1pad(rsa,sha3-*) to correct place alg_test_descs[] needs to be in sorted order, since it is used for binary search. This fixes the following boot-time warning: testmgr: alg_test_descs entries in wrong order: 'pkcs1pad(rsa,sha512)' before 'pkcs1pad(rsa,sha3-256)' Fixes: 6347eda6b6d6 ("crypto: rsa-pkcs1pad - Add FIPS 202 SHA-3 support") Signed-off-by: Eric Biggers Reviewed-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/testmgr.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 1dc93bf6..15c7a301 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5457,23 +5457,23 @@ static const struct alg_test_desc alg_test_descs[] = { .akcipher = __VECS(pkcs1pad_rsa_tv_template) } }, { - .alg = "pkcs1pad(rsa,sha384)", + .alg = "pkcs1pad(rsa,sha3-256)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha512)", + .alg = "pkcs1pad(rsa,sha3-384)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha3-256)", + .alg = "pkcs1pad(rsa,sha3-512)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha3-384)", + .alg = "pkcs1pad(rsa,sha384)", .test = alg_test_null, .fips_allowed = 1, }, { - .alg = "pkcs1pad(rsa,sha3-512)", + .alg = "pkcs1pad(rsa,sha512)", .test = alg_test_null, .fips_allowed = 1, }, { -- cgit v1.2.3 From 8b621201bc68f6bd103c7757ac80d47e7d3a0756 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 27 Oct 2023 13:30:17 -0700 Subject: crypto: adiantum - flush destination page before unmapping Upon additional review, the new fast path in adiantum_finish() is missing the call to flush_dcache_page() that scatterwalk_map_and_copy() was doing. It's apparently debatable whether flush_dcache_page() is actually needed, as per the discussion at https://lore.kernel.org/lkml/YYP1lAq46NWzhOf0@casper.infradead.org/T/#u. However, it appears that currently all the helper functions that write to a page, such as scatterwalk_map_and_copy(), memcpy_to_page(), and memzero_page(), do the dcache flush. So do it to be consistent. 
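For reference, the write-to-page pattern that the helpers mentioned above implement, and that adiantum_finish() now follows on its fast path, is roughly this (illustrative only):

	void *virt = kmap_local_page(page);

	memcpy(virt + offset, src, len);
	flush_dcache_page(page);	/* matches what memcpy_to_page() and friends do */
	kunmap_local(virt);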
Fixes: ba95588e2005 ("crypto: adiantum - add fast path for single-page messages") Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crypto/adiantum.c b/crypto/adiantum.c index 9ff3376f..60f3883b 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -300,7 +300,8 @@ static int adiantum_finish(struct skcipher_request *req) le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash); if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) { /* Fast path for single-page destination */ - void *virt = kmap_local_page(sg_page(dst)) + dst->offset; + struct page *page = sg_page(dst); + void *virt = kmap_local_page(page) + dst->offset; err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len, (u8 *)&digest); @@ -310,6 +311,7 @@ static int adiantum_finish(struct skcipher_request *req) } le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest); memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128)); + flush_dcache_page(page); kunmap_local(virt); } else { /* Slow path that works for any destination scatterlist */ -- cgit v1.2.3