From a52eb0489f96f7318fce6b1afe1cabd46c750746 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 30 May 2020 00:23:49 +1000 Subject: crypto: algif_aead - Only wake up when ctx->more is zero AEAD does not support partial requests so we must not wake up while ctx->more is set. In order to distinguish between the case of no data sent yet and a zero-length request, a new init flag has been added to ctx. SKCIPHER has also been modified to ensure that at least a block of data is available if there is more data to come. Fixes: c1b1fa586429 ("crypto: af_alg - consolidation of...") Signed-off-by: Herbert Xu --- crypto/af_alg.c | 11 ++++++++--- crypto/algif_aead.c | 4 ++-- crypto/algif_skcipher.c | 4 ++-- 3 files changed, 12 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e..9fcb91ea 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. * @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -843,7 +847,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { + if (ctx->init && (init || !ctx->more)) { err = -EINVAL; goto unlock; } @@ -854,6 +858,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; + ctx->init = true; } while (size) { diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a6..d48d2156 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c8..a51ba22f 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } -- cgit v1.2.3 From 673136029221f5b7c0eea9efc9aabc1a081aa148 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 2 Jul 2020 13:32:21 +1000 Subject: crypto: af_alg - Fix regression on empty requests Some user-space programs 
rely on crypto requests that have no control metadata. This broke when a check was added to require the presence of control metadata with the ctx->init flag. This patch fixes the regression by setting ctx->init as long as one sendmsg(2) has been made, with or without a control message. Reported-by: Sachin Sant Reported-by: Naresh Kamboju Fixes: a52eb0489f96 ("crypto: algif_aead - Only wake up when...") Signed-off-by: Herbert Xu --- crypto/af_alg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 9fcb91ea..5882ed46 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -851,6 +851,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, err = -EINVAL; goto unlock; } + ctx->init = true; if (init) { ctx->enc = enc; @@ -858,7 +859,6 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; - ctx->init = true; } while (size) { -- cgit v1.2.3 From 4077186d50f7d3eb818add77638f52ea18cc124a Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sun, 5 Jul 2020 21:18:58 +1200 Subject: crypto: api - permit users to specify numa node of acomp hardware For a Linux server with NUMA, there are possibly multiple (de)compressors which are either local or remote to some NUMA node. Some drivers will automatically use the (de)compressor near the CPU calling acomp_alloc(). However, it is not necessarily correct because users who send acomp_req could be from different NUMA node with the CPU which allocates acomp. Just like kernel has kmalloc() and kmalloc_node(), here crypto can have same support. Cc: Seth Jennings Cc: Dan Streetman Cc: Vitaly Wool Cc: Andrew Morton Cc: Jonathan Cameron Signed-off-by: Barry Song Signed-off-by: Herbert Xu --- crypto/acompress.c | 8 ++++++++ crypto/api.c | 24 +++++++++++++++--------- crypto/internal.h | 23 +++++++++++++++++++---- 3 files changed, 42 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/acompress.c b/crypto/acompress.c index 84a76723..c32c7204 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp); +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node) +{ + return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask, + node); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); + struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) { struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); diff --git a/crypto/api.c b/crypto/api.c index edcf6908..5d8fe60b 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -433,8 +433,9 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend) +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) { char *mem; struct crypto_tfm *tfm = NULL; @@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc(total, GFP_KERNEL); + mem = kzalloc_node(total, GFP_KERNEL, node); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; + tfm->node = node; err = frontend->init_tfm(tfm); if (err) @@ -472,7 +474,7 @@ out_err: out: return mem; } -EXPORT_SYMBOL_GPL(crypto_create_tfm); 
+EXPORT_SYMBOL_GPL(crypto_create_tfm_node); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, @@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name, EXPORT_SYMBOL_GPL(crypto_find_alg); /* - * crypto_alloc_tfm - Locate algorithm and allocate transform + * crypto_alloc_tfm_node - Locate algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison + * @node: NUMA node in which users desire to put requests, if node is + * NUMA_NO_NODE, it means users have no special requirement. * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable @@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * In case of error the return value is an error pointer. */ -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask) + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node) { void *tfm; int err; @@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name, goto err; } - tfm = crypto_create_tfm(alg, frontend); + tfm = crypto_create_tfm_node(alg, frontend, node); if (!IS_ERR(tfm)) return tfm; @@ -542,7 +548,7 @@ err: return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node); /* * crypto_destroy_tfm - Free crypto transform diff --git a/crypto/internal.h b/crypto/internal.h index ff06a3bd..1b92a5a6 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list); void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend); +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, int node); + +static inline void *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); +} + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask); + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node); + +static inline void *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask) +{ + return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); +} int crypto_probing_notify(unsigned long val, void *v); -- cgit v1.2.3 From 1f0bdfe7a7d449f186980762f5192007277b30be Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:36 -0700 Subject: crypto: geniv - remove unneeded arguments from aead_geniv_alloc() The type and mask arguments to aead_geniv_alloc() are always 0, so remove them. 
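For readers not familiar with the geniv helper, here is a minimal sketch of the call pattern after this change. The function name example_geniv_create is invented for illustration; the two real call sites are the echainiv and seqiv hunks below, and the per-template instance setup is elided.

	#include <linux/err.h>
	#include <crypto/internal/geniv.h>
	#include <crypto/internal/aead.h>

	static int example_geniv_create(struct crypto_template *tmpl,
					struct rtattr **tb)
	{
		struct aead_instance *inst;
		int err;

		/* The type/mask arguments are gone; callers always passed 0. */
		inst = aead_geniv_alloc(tmpl, tb);
		if (IS_ERR(inst))
			return PTR_ERR(inst);

		/* ... per-template setkey/encrypt/decrypt/ivsize setup ... */

		err = aead_register_instance(tmpl, inst);
		if (err)
			inst->free(inst);
		return err;
	}
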
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/echainiv.c | 2 +- crypto/geniv.c | 7 ++++--- crypto/seqiv.c | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 4a2f02ba..69686668 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/crypto/geniv.c b/crypto/geniv.c index 6a90c52d..07496c8a 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -39,7 +39,7 @@ static void aead_geniv_free(struct aead_instance *inst) } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask) + struct rtattr **tb) { struct crypto_aead_spawn *spawn; struct crypto_attr_type *algt; @@ -47,6 +47,7 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; + u32 mask; int err; algt = crypto_get_attr_type(tb); @@ -63,10 +64,10 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); /* Ignore async algorithms if necessary. */ - mask |= crypto_requires_sync(algt->type, algt->mask); + mask = crypto_requires_sync(algt->type, algt->mask); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), type, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; diff --git a/crypto/seqiv.c b/crypto/seqiv.c index f124b9b5..e48f875a 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); -- cgit v1.2.3 From d9c52b87ae9554c5938f7bd855a498cd052a6454 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:37 -0700 Subject: crypto: seqiv - remove seqiv_create() seqiv_create() is pointless because it just checks that the template is being instantiated as an AEAD, then calls seqiv_aead_create(). But seqiv_aead_create() does the exact same check, via aead_geniv_alloc(). Just remove seqiv_create() and use seqiv_aead_create() directly. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/seqiv.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'crypto') diff --git a/crypto/seqiv.c b/crypto/seqiv.c index e48f875a..23e22d8b 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -164,23 +164,9 @@ free_inst: return err; } -static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - return -EINVAL; - - return seqiv_aead_create(tmpl, tb); -} - static struct crypto_template seqiv_tmpl = { .name = "seqiv", - .create = seqiv_create, + .create = seqiv_aead_create, .module = THIS_MODULE, }; -- cgit v1.2.3 From 558cc15c0d3341a2aa80a2e0be1e2e8307efebee Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:38 -0700 Subject: crypto: algapi - use common mechanism for inheriting flags The flag CRYPTO_ALG_ASYNC is "inherited" in the sense that when a template is instantiated, the template will have CRYPTO_ALG_ASYNC set if any of the algorithms it uses has CRYPTO_ALG_ASYNC set. We'd like to add a second flag (CRYPTO_ALG_ALLOCATES_MEMORY) that gets "inherited" in the same way. This is difficult because the handling of CRYPTO_ALG_ASYNC is hardcoded everywhere. Address this by: - Add CRYPTO_ALG_INHERITED_FLAGS, which contains the set of flags that have these inheritance semantics. - Add crypto_algt_inherited_mask(), for use by template ->create() methods. It returns any of these flags that the user asked to be unset and thus must be passed in the 'mask' to crypto_grab_*(). - Also modify crypto_check_attr_type() to handle computing the 'mask' so that most templates can just use this. - Make crypto_grab_*() propagate these flags to the template instance being created so that templates don't have to do this themselves. Make crypto/simd.c propagate these flags too, since it "wraps" another algorithm, similar to a template. Based on a patch by Mikulas Patocka (https://lore.kernel.org/r/alpine.LRH.2.02.2006301414580.30526@file01.intranet.prod.int.rdu2.redhat.com). 
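To make the many conversions below easier to follow, here is an illustrative sketch of the resulting template pattern. example_skcipher_create and its abbreviated error handling are invented for illustration, not code from this patch: ->create() obtains the inherited-flags mask from crypto_check_attr_type() and passes it to crypto_grab_*(), which now also copies CRYPTO_ALG_INHERITED_FLAGS from the inner algorithm onto the instance, so the per-template "cra_flags = ... & CRYPTO_ALG_ASYNC" assignments can be dropped.

	#include <linux/slab.h>
	#include <crypto/algapi.h>
	#include <crypto/internal/skcipher.h>

	static int example_skcipher_create(struct crypto_template *tmpl,
					   struct rtattr **tb)
	{
		struct skcipher_instance *inst;
		struct crypto_skcipher_spawn *spawn;
		u32 mask;
		int err;

		/* Check the requested type and compute the mask for inner algorithms. */
		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
		if (err)
			return err;

		inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;
		spawn = skcipher_instance_ctx(inst);

		/*
		 * Grabbing the inner algorithm with 'mask' both honours the
		 * user's request (e.g. !CRYPTO_ALG_ASYNC) and propagates the
		 * inner algorithm's inherited flags to the instance's cra_flags.
		 */
		err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
					   crypto_attr_alg_name(tb[1]), 0, mask);
		if (err) {
			kfree(inst);
			return err;
		}

		/* ... fill in the rest of inst->alg, then register ... */
		return skcipher_register_instance(tmpl, inst);
	}
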
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/adiantum.c | 14 +++-------- crypto/algapi.c | 21 ++++++++++++++++- crypto/authenc.c | 14 +++-------- crypto/authencesn.c | 14 +++-------- crypto/ccm.c | 33 ++++++++------------------ crypto/chacha20poly1305.c | 14 +++-------- crypto/cmac.c | 5 ++-- crypto/cryptd.c | 59 ++++++++++++++++++++++++----------------------- crypto/ctr.c | 19 ++++----------- crypto/cts.c | 13 +++-------- crypto/essiv.c | 11 ++++++--- crypto/gcm.c | 40 ++++++++------------------------ crypto/geniv.c | 14 +++-------- crypto/hmac.c | 5 ++-- crypto/lrw.c | 13 +++-------- crypto/pcrypt.c | 14 ++++------- crypto/rsa-pkcs1pad.c | 13 +++-------- crypto/simd.c | 6 +++-- crypto/skcipher.c | 15 ++++-------- crypto/vmac.c | 5 ++-- crypto/xcbc.c | 5 ++-- crypto/xts.c | 17 ++++---------- 22 files changed, 137 insertions(+), 227 deletions(-) (limited to 'crypto') diff --git a/crypto/adiantum.c b/crypto/adiantum.c index cf2b9f41..7fbdc327 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; const char *nhpoly1305_name; struct skcipher_instance *inst; @@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & - CRYPTO_ALG_ASYNC; inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | diff --git a/crypto/algapi.c b/crypto/algapi.c index 92abdf67..fdabf267 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, spawn->mask = mask; spawn->next = inst->spawns; inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); @@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated as. E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. 
+ * + * Also compute the mask to use to restrict the flags of any inner algorithms. + * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); diff --git a/crypto/authenc.c b/crypto/authenc.c index 775e7138..670bf1a0 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst) static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; @@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 149b70df..b60e61b1 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst) static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; @@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/ccm.c b/crypto/ccm.c index d1fb01bb..494d7090 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; @@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct hash_alg_common *mac; int err; - algt = 
crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ictx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), - mac_name, 0, CRYPTO_ALG_ASYNC); + mac_name, 0, mask | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; mac = crypto_spawn_ahash_alg(&ictx->mac); @@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst) static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index ccaea5cb..97bbb135 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst) static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; @@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (ivsize > CHACHAPOLY_IV_SIZE) return -EINVAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, 
algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | - poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; diff --git a/crypto/cmac.c b/crypto/cmac.c index 143a6544..df36be1e 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 28321226..a1bea0f4 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) return ictx->queue; } -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) +static void cryptd_type_and_mask(struct crypto_attr_type *algt, + u32 *type, u32 *mask) { - struct crypto_attr_type *algt; + /* + * cryptd is allowed to wrap internal algorithms, but in that case the + * resulting cryptd instance will be marked as internal as well. + */ + *type = algt->type & CRYPTO_ALG_INTERNAL; + *mask = algt->mask & CRYPTO_ALG_INTERNAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return; + /* No point in cryptd wrapping an algorithm that's already async. 
*/ + *mask |= CRYPTO_ALG_ASYNC; - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; + *mask |= crypto_algt_inherited_mask(algt); } static int cryptd_init_instance(struct crypto_instance *inst, @@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) static int cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct skcipherd_instance_ctx *ctx; @@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, u32 mask; int err; - type = 0; - mask = CRYPTO_ALG_ASYNC; - - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); @@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst) } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; struct shash_alg *alg; - u32 type = 0; - u32 mask = 0; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL| CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = alg->digestsize; inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); @@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst) static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct aead_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; - u32 type = 0; - u32 mask = CRYPTO_ALG_ASYNC; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); inst->alg.ivsize = crypto_aead_alg_ivsize(alg); @@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - return cryptd_create_skcipher(tmpl, tb, &queue); + return cryptd_create_skcipher(tmpl, tb, 
algt, &queue); case CRYPTO_ALG_TYPE_HASH: - return cryptd_create_hash(tmpl, tb, &queue); + return cryptd_create_hash(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_AEAD: - return cryptd_create_aead(tmpl, tb, &queue); + return cryptd_create_aead(tmpl, tb, algt, &queue); } return -EINVAL; diff --git a/crypto/ctr.c b/crypto/ctr.c index 31ac4ae5..ae8d88c7 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -256,29 +256,22 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst) static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; struct skcipher_instance *inst; struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; u32 mask; - int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; - mask = crypto_requires_sync(algt->type, algt->mask) | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), @@ -310,8 +303,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + diff --git a/crypto/cts.c b/crypto/cts.c index 5e005c4f..3766d47e 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/essiv.c b/crypto/essiv.c index a7f45dbc..d012be23 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; - mask = crypto_requires_sync(algt->type, algt->mask); + mask = crypto_algt_inherited_mask(algt); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* Synchronous hash, e.g., "sha256" */ _hash_alg = crypto_alg_mod_lookup(shash_name, CRYPTO_ALG_TYPE_SHASH, - CRYPTO_ALG_TYPE_MASK); + 
CRYPTO_ALG_TYPE_MASK | mask); if (IS_ERR(_hash_alg)) { err = PTR_ERR(_hash_alg); goto out_drop_skcipher; @@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_hash; - base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC; + /* + * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its + * flags manually. + */ + base->cra_flags |= (hash_alg->base.cra_flags & + CRYPTO_ALG_INHERITED_FLAGS); base->cra_blocksize = block_base->cra_blocksize; base->cra_ctxsize = sizeof(struct essiv_tfm_ctx); base->cra_alignmask = block_base->cra_alignmask; diff --git a/crypto/gcm.c b/crypto/gcm.c index 0103d28c..3a36a953 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; @@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct hash_alg_common *ghash; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (ghash->base.cra_flags | - ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst) static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst) static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) 
@@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/geniv.c b/crypto/geniv.c index 07496c8a..bee4621b 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -42,7 +42,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_aead_spawn *spawn; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; @@ -50,12 +49,9 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return ERR_PTR(-EINVAL); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -63,9 +59,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); - /* Ignore async algorithms if necessary. */ - mask = crypto_requires_sync(algt->type, algt->mask); - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) @@ -90,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/hmac.c b/crypto/hmac.c index e38bfb94..25856aa7 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; + u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); diff --git a/crypto/lrw.c b/crypto/lrw.c index 5b07a7c0..a709c801 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -297,21 +297,15 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -379,7 +373,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - 
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8bddc65c..cbc383a1 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst, } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, - u32 type, u32 mask) + struct crypto_attr_type *algt) { struct pcrypt_instance_ctx *ctx; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; + u32 mask = crypto_algt_inherited_mask(algt); int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); @@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); + return pcrypt_create_aead(tmpl, tb, algt); } return -EINVAL; diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d31031de..4983b2b4 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst) static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; @@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) const char *hash_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; } - inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); diff --git a/crypto/simd.c b/crypto/simd.c index 56885af4..edaa479a 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = 
ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; @@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7221def7..3b93a74a 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -934,22 +934,17 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst) struct skcipher_instance *skcipher_alloc_instance_simple( struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct skcipher_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *cipher_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return ERR_PTR(-EINVAL); - - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return ERR_PTR(err); + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/vmac.c b/crypto/vmac.c index 2d906830..9b565d10 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 598ec88a..af3b7eb5 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xts.c b/crypto/xts.c index 3565f3b8..35a30610 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -331,19 +331,17 @@ static void crypto_xts_free(struct skcipher_instance *inst) static int create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct xts_instance_ctx *ctx; struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; - algt = 
crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + mask |= crypto_requires_off(crypto_get_attr_type(tb), + CRYPTO_ALG_NEED_FALLBACK); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -355,10 +353,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_ASYNC); - err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); if (err == -ENOENT) { @@ -415,7 +409,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | -- cgit v1.2.3 From b1ab682ba843fb0f99cb66969432c55bf8aa35b4 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 9 Jul 2020 23:20:39 -0700 Subject: crypto: algapi - add NEED_FALLBACK to INHERITED_FLAGS CRYPTO_ALG_NEED_FALLBACK is handled inconsistently. When it's requested to be clear, some templates propagate that request to child algorithms, while others don't. It's apparently desired for NEED_FALLBACK to be propagated, to avoid deadlocks where a module tries to load itself while it's being initialized, and to avoid unnecessarily complex fallback chains where we have e.g. cbc-aes-$driver falling back to cbc(aes-$driver) where aes-$driver itself falls back to aes-generic, instead of cbc-aes-$driver simply falling back to cbc(aes-generic). There have been a number of fixes to this effect: commit 4e20b8b65210 ("crypto: xts - Propagate NEED_FALLBACK bit") commit 25fe465280e3 ("crypto: ctr - Propagate NEED_FALLBACK bit") commit cf197f2b42c6 ("crypto: cbc - Propagate NEED_FALLBACK bit") But it seems that other templates can have the same problems too. To avoid this whack-a-mole, just add NEED_FALLBACK to INHERITED_FLAGS so that it's always inherited. 
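A brief sketch of what this means for a template ->create(). example_template_create is an invented name; compare the rfc3686, skcipher-simple and xts hunks below, which delete exactly the line shown as redundant here.

	static int example_template_create(struct crypto_template *tmpl,
					   struct rtattr **tb)
	{
		u32 mask;
		int err;

		/*
		 * With NEED_FALLBACK now part of CRYPTO_ALG_INHERITED_FLAGS,
		 * the mask computed here already reflects a user request to
		 * avoid fallback-needing implementations, so it can be passed
		 * straight to crypto_grab_*().
		 */
		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
		if (err)
			return err;

		/*
		 * No longer needed (removed by the hunks below):
		 *   mask |= crypto_requires_off(crypto_get_attr_type(tb),
		 *                               CRYPTO_ALG_NEED_FALLBACK);
		 */

		/* ... grab the inner algorithm with 'mask' as before ... */
		return 0;
	}
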
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/ctr.c | 2 -- crypto/skcipher.c | 2 -- crypto/xts.c | 2 -- 3 files changed, 6 deletions(-) (limited to 'crypto') diff --git a/crypto/ctr.c b/crypto/ctr.c index ae8d88c7..c39fcffb 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -265,8 +265,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 3b93a74a..467af525 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -943,8 +943,6 @@ struct skcipher_instance *skcipher_alloc_instance_simple( err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return ERR_PTR(err); - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff --git a/crypto/xts.c b/crypto/xts.c index 35a30610..9a7adab6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -340,8 +340,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; - mask |= crypto_requires_off(crypto_get_attr_type(tb), - CRYPTO_ALG_NEED_FALLBACK); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) -- cgit v1.2.3 From bfbb4c6881f990fa2383694273f54774f76d4641 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 10 Jul 2020 20:34:28 -0700 Subject: crypto: xts - prefix function and struct names with "xts" Overly-generic names can cause problems like naming collisions, confusing crash reports, and reduced grep-ability. E.g. see commit ccf5a2ec44cb ("crypto - Avoid free() namespace collision"). Clean this up for the xts template by prefixing the names with "xts_". (I didn't use "crypto_xts_" instead because that seems overkill.) Also constify the tfm context in a couple places, and make xts_free_instance() use the instance context structure so that it doesn't just assume the crypto_skcipher_spawn is at the beginning. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/xts.c | 137 +++++++++++++++++++++++++++++++---------------------------- 1 file changed, 72 insertions(+), 65 deletions(-) (limited to 'crypto') diff --git a/crypto/xts.c b/crypto/xts.c index 9a7adab6..3c3ed02c 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -20,7 +20,7 @@ #include #include -struct priv { +struct xts_tfm_ctx { struct crypto_skcipher *child; struct crypto_cipher *tweak; }; @@ -30,17 +30,17 @@ struct xts_instance_ctx { char name[CRYPTO_MAX_ALG_NAME]; }; -struct rctx { +struct xts_request_ctx { le128 t; struct scatterlist *tail; struct scatterlist sg[2]; struct skcipher_request subreq; }; -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int xts_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child; struct crypto_cipher *tweak; int err; @@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * mutliple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the gf128mul_x_ble() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) +static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, + bool enc) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); const int bs = XTS_BLOCK_SIZE; @@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) return err; } -static int xor_tweak_pre(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc) { - return xor_tweak(req, false, enc); + return xts_xor_tweak(req, false, enc); } -static int xor_tweak_post(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_post(struct skcipher_request *req, bool enc) { - return xor_tweak(req, true, enc); + return xts_xor_tweak(req, true, enc); } -static void cts_done(struct crypto_async_request *areq, int err) +static void xts_cts_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; le128 b; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); le128_xor(&b, &rctx->t, &b); @@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int cts_final(struct skcipher_request *req, - int (*crypt)(struct skcipher_request *req)) +static int xts_cts_final(struct skcipher_request *req, + int (*crypt)(struct skcipher_request *req)) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1); - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int tail = req->cryptlen % XTS_BLOCK_SIZE; le128 b[2]; @@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req, scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1); skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, cts_done, req); + skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done, + req); skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail, XTS_BLOCK_SIZE, NULL); @@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req, return 0; } -static void encrypt_done(struct crypto_async_request *areq, int err) +static void xts_encrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, true); + err = xts_xor_tweak_post(req, true); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_encrypt); + err = xts_cts_final(req, crypto_skcipher_encrypt); if (err == -EINPROGRESS) return; } @@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static void decrypt_done(struct crypto_async_request *areq, int err) +static void xts_decrypt_done(struct crypto_async_request *areq, int err) { struct 
skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, false); + err = xts_xor_tweak_post(req, false); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_decrypt); + err = xts_cts_final(req, crypto_skcipher_decrypt); if (err == -EINPROGRESS) return; } @@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) +static int xts_init_crypt(struct skcipher_request *req, + crypto_completion_t compl) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; if (req->cryptlen < XTS_BLOCK_SIZE) @@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) return 0; } -static int encrypt(struct skcipher_request *req) +static int xts_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, encrypt_done) ?: - xor_tweak_pre(req, true) ?: + err = xts_init_crypt(req, xts_encrypt_done) ?: + xts_xor_tweak_pre(req, true) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req, true); + xts_xor_tweak_post(req, true); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_encrypt); + return xts_cts_final(req, crypto_skcipher_encrypt); } -static int decrypt(struct skcipher_request *req) +static int xts_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, decrypt_done) ?: - xor_tweak_pre(req, false) ?: + err = xts_init_crypt(req, xts_decrypt_done) ?: + xts_xor_tweak_pre(req, false) ?: crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req, false); + xts_xor_tweak_post(req, false); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_decrypt); + return xts_cts_final(req, crypto_skcipher_decrypt); } -static int init_tfm(struct crypto_skcipher *tfm) +static int xts_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *child; struct crypto_cipher *tweak; @@ -309,26 +314,28 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->tweak = tweak; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + - sizeof(struct rctx)); + sizeof(struct xts_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void xts_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->child); crypto_free_cipher(ctx->tweak); } -static void 
crypto_xts_free(struct skcipher_instance *inst) +static void xts_free_instance(struct skcipher_instance *inst) { - crypto_drop_skcipher(skcipher_instance_ctx(inst)); + struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(&ictx->spawn); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; struct xts_instance_ctx *ctx; @@ -416,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = xts_init_tfm; + inst->alg.exit = xts_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = xts_setkey; + inst->alg.encrypt = xts_encrypt; + inst->alg.decrypt = xts_decrypt; - inst->free = crypto_xts_free; + inst->free = xts_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_xts_free(inst); + xts_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template xts_tmpl = { .name = "xts", - .create = create, + .create = xts_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init xts_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&xts_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit xts_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&xts_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(xts_module_init); +module_exit(xts_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); -- cgit v1.2.3 From e86dff2f74f8211a81e40b0b27bd33dbc26e9205 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 10 Jul 2020 20:36:49 -0700 Subject: crypto: lrw - prefix function and struct names with "lrw" Overly-generic names can cause problems like naming collisions, confusing crash reports, and reduced grep-ability. E.g. see commit ccf5a2ec44cb ("crypto - Avoid free() namespace collision"). Clean this up for the lrw template by prefixing the names with "lrw_". (I didn't use "crypto_lrw_" instead because that seems overkill.) Also constify the tfm context in a couple places. 
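A minimal userspace sketch of the grep-ability point (illustrative only, not code from this tree; the function bodies are made up):

#include <stdio.h>

/* before: any template might plausibly own a static symbol called "setkey",
 * so a crash report or a grep hit for "setkey" is ambiguous */
static int setkey(int key)
{
	return key ? 0 : -1;
}

/* after: the owning template is obvious from the symbol name alone */
static int lrw_setkey(int key)
{
	return key ? 0 : -1;
}

int main(void)
{
	printf("%d %d\n", setkey(1), lrw_setkey(1));
	return 0;
}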
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/lrw.c | 119 ++++++++++++++++++++++++++++++----------------------------- 1 file changed, 61 insertions(+), 58 deletions(-) (limited to 'crypto') diff --git a/crypto/lrw.c b/crypto/lrw.c index a709c801..3f90a5ec 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -27,7 +27,7 @@ #define LRW_BLOCK_SIZE 16 -struct priv { +struct lrw_tfm_ctx { struct crypto_skcipher *child; /* @@ -49,12 +49,12 @@ struct priv { be128 mulinc[128]; }; -struct rctx { +struct lrw_request_ctx { be128 t; struct skcipher_request subreq; }; -static inline void setbit128_bbe(void *b, int bit) +static inline void lrw_setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN @@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit) ), b); } -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; @@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, /* initialize optimization table */ for (i = 0; i < 128; i++) { - setbit128_bbe(&tmp, i); + lrw_setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } @@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * For example: * * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; - * int i = next_index(&counter); + * int i = lrw_next_index(&counter); * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */ -static int next_index(u32 *counter) +static int lrw_next_index(u32 *counter) { int i, res = 0; @@ -135,14 +135,14 @@ static int next_index(u32 *counter) * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make * mutliple calls to the 'ecb(..)' instance, which usually would be slower than - * just doing the next_index() calls again. + * just doing the lrw_next_index() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass) +static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) { const int bs = LRW_BLOCK_SIZE; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct priv *ctx = crypto_skcipher_ctx(tfm); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); be128 t = rctx->t; struct skcipher_walk w; __be32 *iv; @@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) /* T <- I*Key2, using the optimization * discussed in the specification */ - be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]); + be128_xor(&t, &t, + &ctx->mulinc[lrw_next_index(counter)]); } while ((avail -= bs) >= bs); if (second_pass && w.nbytes == w.total) { @@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) return err; } -static int xor_tweak_pre(struct skcipher_request *req) +static int lrw_xor_tweak_pre(struct skcipher_request *req) { - return xor_tweak(req, false); + return lrw_xor_tweak(req, false); } -static int xor_tweak_post(struct skcipher_request *req) +static int lrw_xor_tweak_post(struct skcipher_request *req) { - return xor_tweak(req, true); + return lrw_xor_tweak(req, true); } -static void crypt_done(struct crypto_async_request *areq, int err) +static void lrw_crypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req); + err = lrw_xor_tweak_post(req); } skcipher_request_complete(req, err); } -static void init_crypt(struct skcipher_request *req) +static void lrw_init_crypt(struct skcipher_request *req) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); + skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, + req); /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ skcipher_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, req->iv); @@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req) gf128mul_64k_bbe(&rctx->t, ctx->table); } -static int encrypt(struct skcipher_request *req) +static int lrw_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int decrypt(struct skcipher_request *req) +static int lrw_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: 
crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int init_tfm(struct crypto_skcipher *tfm) +static int lrw_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; cipher = crypto_spawn_skcipher(spawn); @@ -273,27 +276,27 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->child = cipher; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + - sizeof(struct rctx)); + sizeof(struct lrw_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void lrw_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_skcipher(ctx->child); } -static void crypto_lrw_free(struct skcipher_instance *inst) +static void lrw_free_instance(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; @@ -384,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + LRW_BLOCK_SIZE; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = lrw_init_tfm; + inst->alg.exit = lrw_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = lrw_setkey; + inst->alg.encrypt = lrw_encrypt; + inst->alg.decrypt = lrw_decrypt; - inst->free = crypto_lrw_free; + inst->free = lrw_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - crypto_lrw_free(inst); + lrw_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template lrw_tmpl = { .name = "lrw", - .create = create, + .create = lrw_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init lrw_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&lrw_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit lrw_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(lrw_module_init); +module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); -- cgit v1.2.3 From ea481c5089ac181c94985da2657b9d63d767187e Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:51 -0400 Subject: padata: remove start function padata_start() is only used right after pcrypt allocates an instance with all possible CPUs, when PADATA_INVALID can't happen, so there's no need for a separate "start" step. It can be done during allocation to save text, make using padata easier, and avoid unneeded calls in the future. 
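A rough caller-side sketch of the end state (a sketch modelled only on the pcrypt hunks in this series, not a copy of pcrypt.c; example_init() is a made-up name): allocation alone now yields a usable instance, so the separate start call simply disappears from callers.

#include <linux/init.h>
#include <linux/padata.h>

static struct padata_instance *pencrypt;

static int __init example_init(void)
{
	pencrypt = padata_alloc_possible("pencrypt");
	if (!pencrypt)
		return -ENOMEM;

	/* the old padata_start(pencrypt) call is gone; the instance is
	 * ready to use as soon as the allocation succeeds */
	return 0;
}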
Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index cbc383a1..fb9127ec 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -355,9 +355,6 @@ static int __init pcrypt_init(void) if (err) goto err_deinit_pencrypt; - padata_start(pencrypt); - padata_start(pdecrypt); - return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: -- cgit v1.2.3 From 3e75ecda404d247538db23b3114df7ecaeb287b4 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:52 -0400 Subject: padata: remove stop function padata_stop() has two callers and is unnecessary in both cases. When pcrypt calls it before padata_free(), it's being unloaded so there are no outstanding padata jobs[0]. When __padata_free() calls it, it's either along the same path or else pcrypt initialization failed, which of course means there are also no outstanding jobs. Removing it simplifies padata and saves text. [0] https://lore.kernel.org/linux-crypto/20191119225017.mjrak2fwa5vccazl@gondor.apana.org.au/ Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index fb9127ec..2d4ac9d4 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -327,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) return ret; } -static void pcrypt_fini_padata(struct padata_instance *pinst) -{ - padata_stop(pinst); - padata_free(pinst); -} - static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, @@ -358,7 +352,7 @@ static int __init pcrypt_init(void) return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: - pcrypt_fini_padata(pencrypt); + padata_free(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: @@ -369,8 +363,8 @@ static void __exit pcrypt_exit(void) { crypto_unregister_template(&pcrypt_tmpl); - pcrypt_fini_padata(pencrypt); - pcrypt_fini_padata(pdecrypt); + padata_free(pencrypt); + padata_free(pdecrypt); kset_unregister(pcrypt_kset); } -- cgit v1.2.3 From 2c7384f9dbcc7523c93f02893e463b1c0505d0d7 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 14 Jul 2020 16:13:55 -0400 Subject: padata: fold padata_alloc_possible() into padata_alloc() There's no reason to have two interfaces when there's only one caller. Removing _possible saves text and simplifies future changes. Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 2d4ac9d4..d569c7ed 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -316,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - *pinst = padata_alloc_possible(name); + *pinst = padata_alloc(name); if (!*pinst) return ret; -- cgit v1.2.3 From 6a2672e9f67e63383da06b2cb3168d5832e5c7c0 Mon Sep 17 00:00:00 2001 From: "Alexander A. 
Klimov" Date: Sun, 19 Jul 2020 18:49:59 +0200 Subject: crypto: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Herbert Xu --- crypto/Kconfig | 46 +++++++++++++++++++++++----------------------- crypto/blake2b_generic.c | 2 +- crypto/camellia_generic.c | 2 +- crypto/ecc.c | 2 +- crypto/jitterentropy.c | 4 ++-- crypto/lrw.c | 2 +- crypto/salsa20_generic.c | 4 ++-- crypto/sha3_generic.c | 2 +- 8 files changed, 32 insertions(+), 32 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 091c0a0b..1b57419f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -548,7 +548,7 @@ config CRYPTO_XCBC select CRYPTO_MANAGER help XCBC: Keyed-Hashing with encryption algorithm - http://www.ietf.org/rfc/rfc3566.txt + https://www.ietf.org/rfc/rfc3566.txt http://csrc.nist.gov/encryption/modes/proposedmodes/ xcbc-mac/xcbc-mac-spec.pdf @@ -561,7 +561,7 @@ config CRYPTO_VMAC very high speed on 64-bit architectures. See also: - + comment "Digest" @@ -816,7 +816,7 @@ config CRYPTO_RMD128 RIPEMD-160 should be used. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_RMD160 tristate "RIPEMD-160 digest algorithm" @@ -833,7 +833,7 @@ config CRYPTO_RMD160 against RIPEMD-160. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_RMD256 tristate "RIPEMD-256 digest algorithm" @@ -845,7 +845,7 @@ config CRYPTO_RMD256 (than RIPEMD-128). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_RMD320 tristate "RIPEMD-320 digest algorithm" @@ -857,7 +857,7 @@ config CRYPTO_RMD320 (than RIPEMD-160). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See + See config CRYPTO_SHA1 tristate "SHA1 digest algorithm" @@ -1045,7 +1045,7 @@ config CRYPTO_TGR192 Tiger was developed by Ross Anderson and Eli Biham. See also: - . + . config CRYPTO_WP512 tristate "Whirlpool digest algorithms" @@ -1221,7 +1221,7 @@ config CRYPTO_BLOWFISH designed for use on "large microprocessors". See also: - + config CRYPTO_BLOWFISH_COMMON tristate @@ -1230,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON generic c and the assembler implementations. See also: - + config CRYPTO_BLOWFISH_X86_64 tristate "Blowfish cipher algorithm (x86_64)" @@ -1245,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64 designed for use on "large microprocessors". See also: - + config CRYPTO_CAMELLIA tristate "Camellia cipher algorithms" @@ -1441,10 +1441,10 @@ config CRYPTO_SALSA20 Salsa20 stream cipher algorithm. Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See + Stream Cipher Project. See The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein . See + Bernstein . See config CRYPTO_CHACHA20 tristate "ChaCha stream cipher algorithms" @@ -1456,7 +1456,7 @@ config CRYPTO_CHACHA20 ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. Bernstein and further specified in RFC7539 for use in IETF protocols. This is the portable C implementation of ChaCha20. 
See also: - + XChaCha20 is the application of the XSalsa20 construction to ChaCha20 rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length @@ -1509,7 +1509,7 @@ config CRYPTO_SERPENT variant of Serpent for compatibility with old kerneli.org code. See also: - + config CRYPTO_SERPENT_SSE2_X86_64 tristate "Serpent cipher algorithm (x86_64/SSE2)" @@ -1528,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 blocks parallel using SSE2 instruction set. See also: - + config CRYPTO_SERPENT_SSE2_586 tristate "Serpent cipher algorithm (i586/SSE2)" @@ -1547,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586 blocks parallel using SSE2 instruction set. See also: - + config CRYPTO_SERPENT_AVX_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX)" @@ -1567,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64 eight blocks parallel using the AVX instruction set. See also: - + config CRYPTO_SERPENT_AVX2_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX2)" @@ -1583,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 blocks parallel using AVX2 instruction set. See also: - + config CRYPTO_SM4 tristate "SM4 cipher algorithm" @@ -1640,7 +1640,7 @@ config CRYPTO_TWOFISH bits. See also: - + config CRYPTO_TWOFISH_COMMON tristate @@ -1662,7 +1662,7 @@ config CRYPTO_TWOFISH_586 bits. See also: - + config CRYPTO_TWOFISH_X86_64 tristate "Twofish cipher algorithm (x86_64)" @@ -1678,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64 bits. See also: - + config CRYPTO_TWOFISH_X86_64_3WAY tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" @@ -1699,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY blocks parallel, utilizing resources of out-of-order CPUs better. See also: - + config CRYPTO_TWOFISH_AVX_X86_64 tristate "Twofish cipher algorithm (x86_64/AVX)" @@ -1722,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 eight blocks parallel using the AVX Instruction Set. See also: - + comment "Compression" diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 0ffd8d92..a2ffe60e 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -8,7 +8,7 @@ * * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 * - OpenSSL license : https://www.openssl.org/source/license.html - * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * * More information about the BLAKE2 hash function can be found at * https://blake2.net. diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 9a5783e5..0b9f409f 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -6,7 +6,7 @@ /* * Algorithm Specification - * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html + * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ /* diff --git a/crypto/ecc.c b/crypto/ecc.c index 02d35be7..86c32493 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point) } /* Point multiplication algorithm using Montgomery's ladder with co-Z - * coordinates. From http://eprint.iacr.org/2011/338.pdf + * coordinates. 
From https://eprint.iacr.org/2011/338.pdf */ /* Double in place */ diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 57f4a1ac..6e147c43 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -7,7 +7,7 @@ * Design * ====== * - * See http://www.chronox.de/jent.html + * See https://www.chronox.de/jent.html * * License * ======= @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.2.0 provided at http://www.chronox.de/jent.html + * version 2.2.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ diff --git a/crypto/lrw.c b/crypto/lrw.c index 3f90a5ec..bcf09fbc 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -9,7 +9,7 @@ */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at - * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html + * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index c81a4440..3418869d 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -9,8 +9,8 @@ * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream * Cipher Project. It is designed by Daniel J. Bernstein . * More information about eSTREAM and Salsa20 can be found here: - * http://www.ecrypt.eu.org/stream/ - * http://cr.yp.to/snuffle.html + * https://www.ecrypt.eu.org/stream/ + * https://cr.yp.to/snuffle.html * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 44e263e2..3e406993 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -3,7 +3,7 @@ * Cryptographic API. * * SHA-3, as specified in - * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf * * SHA-3 code by Jeff Garzik * Ard Biesheuvel -- cgit v1.2.3 From 750b0c45c1f26f78f03ed03463bf8149d3dfc8ff Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 19 Jul 2020 11:07:50 -0700 Subject: crypto: testmgr - delete duplicated words Delete the doubled word "from" in multiple places. Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/testmgr.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d2998390..b9a2d73d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = { }; /* - * SHA1 test vectors from from FIPS PUB 180-1 + * SHA1 test vectors from FIPS PUB 180-1 * Long vector from CAVS 5.0 */ static const struct hash_testvec sha1_tv_template[] = { @@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = { /* - * SHA224 test vectors from from FIPS PUB 180-2 + * SHA224 test vectors from FIPS PUB 180-2 */ static const struct hash_testvec sha224_tv_template[] = { { @@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = { }; /* - * SHA256 test vectors from from NIST + * SHA256 test vectors from NIST */ static const struct hash_testvec sha256_tv_template[] = { { @@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = { }; /* - * SHA384 test vectors from from NIST and kerneli + * SHA384 test vectors from NIST and kerneli */ static const struct hash_testvec sha384_tv_template[] = { { @@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = { }; /* - * SHA512 test vectors from from NIST and kerneli + * SHA512 test vectors from NIST and kerneli */ static const struct hash_testvec sha512_tv_template[] = { { -- cgit v1.2.3 From be0f7db4659756372b238ad029a58bea7c4f99b6 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Mon, 20 Jul 2020 19:07:48 +0200 Subject: crypto: ecdh - check validity of Z before export SP800-56A rev3 section 5.7.1.2 step 2 mandates that the validity of the calculated shared secret is verified before the data is returned to the caller. Thus, the export function and the validity check functions are reversed. In addition, the sensitive variables of priv and rand_z are zeroized. Signed-off-by: Stephan Mueller Reviewed-by: Vitaly Chikunov Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ecc.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/ecc.c b/crypto/ecc.c index 86c32493..c8b259e5 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -1495,11 +1495,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); - ecc_swap_digits(product->x, secret, ndigits); - - if (ecc_point_is_zero(product)) + if (ecc_point_is_zero(product)) { ret = -EFAULT; + goto err_validity; + } + + ecc_swap_digits(product->x, secret, ndigits); +err_validity: + memzero_explicit(priv, sizeof(priv)); + memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: ecc_free_point(pk); -- cgit v1.2.3 From e0e6565e8ef837bbdb086e018d18307c1d22f1bc Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Mon, 20 Jul 2020 19:08:32 +0200 Subject: crypto: dh - check validity of Z before export SP800-56A rev3 section 5.7.1.1 step 2 mandates that the validity of the calculated shared secret is verified before the data is returned to the caller. This patch adds the validation check. 
Signed-off-by: Stephan Mueller Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/dh.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'crypto') diff --git a/crypto/dh.c b/crypto/dh.c index 566f624a..f84fd50e 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -9,6 +9,7 @@ #include #include #include +#include #include struct dh_ctx { @@ -179,6 +180,34 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; + /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ + if (fips_enabled && req->src) { + MPI pone; + + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + } + ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; -- cgit v1.2.3 From 29e35bd9bd7432e79e48c0c4dfdf11ec7ce9b748 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Mon, 20 Jul 2020 19:08:52 +0200 Subject: crypto: dh - SP800-56A rev 3 local public key validation After the generation of a local public key, SP800-56A rev 3 section 5.6.2.1.3 mandates a validation of that key with a full validation compliant to section 5.6.2.3.1. Only if the full validation passes, the key is allowed to be used. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/dh.c | 59 ++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 25 deletions(-) (limited to 'crypto') diff --git a/crypto/dh.c b/crypto/dh.c index f84fd50e..cd4f3209 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -180,32 +180,41 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; - /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ - if (fips_enabled && req->src) { - MPI pone; - - /* z <= 1 */ - if (mpi_cmp_ui(val, 1) < 1) { - ret = -EBADMSG; - goto err_free_base; - } - - /* z == p - 1 */ - pone = mpi_alloc(0); - - if (!pone) { - ret = -ENOMEM; - goto err_free_base; + if (fips_enabled) { + /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ + if (req->src) { + MPI pone; + + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + } else { + if (dh_is_pubkey_valid(ctx, val)) { + ret = -EAGAIN; + goto err_free_val; + } } - - ret = mpi_sub_ui(pone, ctx->p, 1); - if (!ret && !mpi_cmp(pone, val)) - ret = -EBADMSG; - - mpi_free(pone); - - if (ret) - goto err_free_base; } ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); -- cgit v1.2.3 From 2fa0b79ccce0650cb17cb6ceeaa4137b47468ec1 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Mon, 20 Jul 2020 19:09:23 +0200 Subject: crypto: ecc - SP800-56A rev 3 local public key validation After the generation of a local public key, SP800-56A rev 3 section 5.6.2.1.3 mandates a validation of that key with a full validation compliant to section 5.6.2.3.3. Only if the full validation passes, the key is allowed to be used. 
The patch adds the full key validation compliant to 5.6.2.3.3 and performs the required check on the generated public key. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/ecc.c | 31 ++++++++++++++++++++++++++++++- crypto/ecc.h | 14 ++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ecc.c b/crypto/ecc.c index c8b259e5..8acf8433 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, } ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); - if (ecc_point_is_zero(pk)) { + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + if (ecc_is_pubkey_valid_full(curve, pk)) { ret = -EAGAIN; goto err_free_point; } @@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, } EXPORT_SYMBOL(ecc_is_pubkey_valid_partial); +/* SP800-56A section 5.6.2.3.3 full verification */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk) +{ + struct ecc_point *nQ; + + /* Checks 1 through 3 */ + int ret = ecc_is_pubkey_valid_partial(curve, pk); + + if (ret) + return ret; + + /* Check 4: Verify that nQ is the zero point. */ + nQ = ecc_alloc_point(pk->ndigits); + if (!nQ) + return -ENOMEM; + + ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits); + if (!ecc_point_is_zero(nQ)) + ret = -EINVAL; + + ecc_free_point(nQ); + + return ret; +} +EXPORT_SYMBOL(ecc_is_pubkey_valid_full); + int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u64 *private_key, const u64 *public_key, u64 *secret) diff --git a/crypto/ecc.h b/crypto/ecc.h index ab0eb70b..d4e546b9 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -147,6 +147,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, struct ecc_point *pk); +/** + * ecc_is_pubkey_valid_full() - Full public key validation + * + * @curve: elliptic curve domain parameters + * @pk: public key as a point + * + * Valdiate public key according to SP800-56A section 5.6.2.3.3 ECC Full + * Public-Key Validation Routine. + * + * Return: 0 if validation is successful, -EINVAL if validation is failed. + */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk); + /** * vli_is_zero() - Determine is vli is zero * -- cgit v1.2.3 From 18614fdea411d506ee302d4f16adcc095f0246cd Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 21 Jul 2020 09:05:54 +0300 Subject: crypto: xts - Replace memcpy() invocation with simple assignment Colin reports that the memcpy() call in xts_cts_final() triggers an "Overlapping buffer in memory copy" warning in Coverity, which is a false positive, given that tail is guaranteed to be smaller than or equal to the distance between source and destination. However, given that any additional bytes that we copy will be ignored anyway, we can simply copy XTS_BLOCK_SIZE unconditionally, which means we can use struct assignment of the array members instead, which is likely to be more efficient as well.
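A small userspace illustration of why the assignment is a safe replacement (not the kernel code; a plain 16-byte struct stands in for le128): assigning one array element to another copies the whole object, exactly like a memcpy() of XTS_BLOCK_SIZE, and any bytes beyond the tail are never read back.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct block16 {		/* stand-in for le128 */
	uint8_t bytes[16];
};

int main(void)
{
	struct block16 b[2];

	memset(&b[0], 0xab, sizeof(b[0]));
	memset(&b[1], 0x00, sizeof(b[1]));

	b[1] = b[0];		/* struct assignment copies all 16 bytes */

	/* equivalent to memcpy(&b[1], &b[0], sizeof(b[0])) */
	printf("%d\n", memcmp(&b[0], &b[1], sizeof(b[0])) == 0);	/* prints 1 */
	return 0;
}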
Addresses-Coverity: ("Overlapping buffer in memory copy") Fixes: 8118cd4cf772 ("crypto: xts - add support for ciphertext stealing") Reported-by: Colin Ian King Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/xts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/xts.c b/crypto/xts.c index 3c3ed02c..ad45b009 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -171,7 +171,7 @@ static int xts_cts_final(struct skcipher_request *req, offset - XTS_BLOCK_SIZE); scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); - memcpy(b + 1, b, tail); + b[1] = b[0]; scatterwalk_map_and_copy(b, req->src, offset, tail, 0); le128_xor(b, &rctx->t, b); -- cgit v1.2.3