From a495eea64280ffa1f0ab0e1c98f7e51be5db0bdc Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Fri, 23 Oct 2015 14:10:36 +0200 Subject: crypto: algif - Change some variable to size_t Some variable are set as int but store only positive values. Furthermore there are used in operation/function that wait for unsigned value. This patch set them as size_t. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- crypto/algif_aead.c | 6 +++--- crypto/algif_skcipher.c | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdfb..f70bcf84 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } while (size) { - unsigned long len = size; + size_t len = size; struct scatterlist *sg = NULL; /* use the existing memory in an allocated page */ @@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* allocate a new page */ len = min_t(unsigned long, size, aead_sndbuf(sk)); while (len) { - int plen = 0; + size_t plen = 0; if (sgl->cur >= ALG_MAX_PAGES) { aead_put_sgl(sk); @@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } sg = sgl->sg + sgl->cur; - plen = min_t(int, len, PAGE_SIZE); + plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg, alloc_page(GFP_KERNEL)); err = -ENOMEM; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee..bbb1b66e 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -40,7 +40,7 @@ struct skcipher_ctx { struct af_alg_completion completion; atomic_t inflight; - unsigned used; + size_t used; unsigned int len; bool more; @@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk) return 0; } -static void skcipher_pull_sgl(struct sock *sk, int used, int put) +static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; @@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put) sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { - int plen = min_t(int, used, sg[i].length); + size_t plen = min_t(size_t, used, sg[i].length); if (!sg_page(sg + i)) continue; @@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, while (size) { struct scatterlist *sg; unsigned long len = size; - int plen; + size_t plen; if (ctx->merge) { sgl = list_entry(ctx->tsgl.prev, @@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, sg_unmark_end(sg + sgl->cur); do { i = sgl->cur; - plen = min_t(int, len, PAGE_SIZE); + plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); err = -ENOMEM; -- cgit v1.2.3 From 8ce05a29afadd321b224e22a132a3b7bc863aaa4 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Fri, 13 Nov 2015 12:01:33 +0100 Subject: crypto: rsa - only require output buffers as big as needed. rhe RSA operations explicitly left-align the integers being written skipping any leading zero bytes, but still require the output buffers to include just enough space for the integer + the leading zero bytes. Since the size of integer + the leading zero bytes (i.e. the key modulus size) can now be obtained more easily through crypto_akcipher_maxsize change the operations to only require as big a buffer as actually needed if the caller has that information. 
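For illustration, a caller that holds the key could size the destination up front from the modulus length; a hypothetical, abbreviated sketch (variable names are not from this patch):

    int len = crypto_akcipher_maxsize(tfm);   /* modulus size in bytes */

    if (len <= 0)
            return len ? len : -EINVAL;
    dst_buf = kzalloc(len, GFP_KERNEL);
    if (!dst_buf)
            return -ENOMEM;
    sg_init_one(&dst_sg, dst_buf, len);
    akcipher_request_set_crypt(req, src_sg, &dst_sg, src_len, len);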
The semantics for request->dst_len don't change. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/rsa.c | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/crypto/rsa.c b/crypto/rsa.c index 1093e041..58aad69a 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -91,12 +91,6 @@ static int rsa_enc(struct akcipher_request *req) goto err_free_c; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_c; - } - ret = -ENOMEM; m = mpi_read_raw_from_sgl(req->src, req->src_len); if (!m) @@ -136,12 +130,6 @@ static int rsa_dec(struct akcipher_request *req) goto err_free_m; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_m; - } - ret = -ENOMEM; c = mpi_read_raw_from_sgl(req->src, req->src_len); if (!c) @@ -180,12 +168,6 @@ static int rsa_sign(struct akcipher_request *req) goto err_free_s; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_s; - } - ret = -ENOMEM; m = mpi_read_raw_from_sgl(req->src, req->src_len); if (!m) @@ -225,12 +207,6 @@ static int rsa_verify(struct akcipher_request *req) goto err_free_m; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_m; - } - ret = -ENOMEM; s = mpi_read_raw_from_sgl(req->src, req->src_len); if (!s) { -- cgit v1.2.3 From e556483473dcb68832f10f980265225233df219b Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 16 Nov 2015 22:37:14 +0800 Subject: crypto: api - use list_first_entry_or_null and list_next_entry Simplify crypto_more_spawns() with list_first_entry_or_null() and list_next_entry(). Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu --- crypto/algapi.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 59bf491f..7be76aa3 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, { struct crypto_spawn *spawn, *n; - if (list_empty(stack)) + spawn = list_first_entry_or_null(stack, struct crypto_spawn, list); + if (!spawn) return NULL; - spawn = list_first_entry(stack, struct crypto_spawn, list); - n = list_entry(spawn->list.next, struct crypto_spawn, list); + n = list_next_entry(spawn, list); if (spawn->alg && &n->list != stack && !n->alg) n->alg = (n->list.next == stack) ? alg : - &list_entry(n->list.next, struct crypto_spawn, - list)->inst->alg; + &list_next_entry(n, list)->inst->alg; list_move(&spawn->list, secondary_spawns); -- cgit v1.2.3 From 510efb482ea38ea6df91d2e426c82ee451e390b9 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 16 Nov 2015 22:37:15 +0800 Subject: crypto: mcryptd - use list_first_entry_or_null() Simplify mcryptd_opportunistic_flush() with list_first_entry_or_null(). 
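The pattern is generic; as a hypothetical illustration (struct foo is not from this patch), the open-coded form

    if (list_empty(head))
            return NULL;
    item = list_entry(head->next, struct foo, list);

collapses into

    item = list_first_entry_or_null(head, struct foo, list);
    if (!item)
            return NULL;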
Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu --- crypto/mcryptd.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index fe5b495a..f78d4fc4 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void) flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); while (single_task_running()) { mutex_lock(&flist->lock); - if (list_empty(&flist->list)) { - mutex_unlock(&flist->lock); - return; - } - cstate = list_entry(flist->list.next, + cstate = list_first_entry_or_null(&flist->list, struct mcryptd_alg_cstate, flush_list); - if (!cstate->flusher_engaged) { + if (!cstate || !cstate->flusher_engaged) { mutex_unlock(&flist->lock); return; } -- cgit v1.2.3 From 964b99a9f86f32a18603d1967b5dbd9ab1116573 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Tue, 17 Nov 2015 13:37:10 +0100 Subject: crypto: tcrypt - fix keysize argument of test_aead_speed for gcm(aes) The key sizes used by AES in GCM mode should be 128, 192 or 256 bits (16, 24 or 32 bytes). There is no additional 4byte nonce as for RFC 4106. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 46a4a757..270bc4b8 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, 0, 16, 16, aead_speed_template_20); test_aead_speed("gcm(aes)", ENCRYPT, sec, - NULL, 0, 16, 8, aead_speed_template_20); + NULL, 0, 16, 8, speed_template_16_24_32); break; case 212: -- cgit v1.2.3 From 051425a3a855cbce7838dd01f8e22b15d66f9b35 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Wed, 25 Nov 2015 23:48:28 +0600 Subject: crypto: cryptod - use crypto_skcipher_type() for getting skcipher type The provides inline function - crypto_skcipher_type(). Let's use it in the cryptd_alloc_ablkcipher() instead of direct calculation. Signed-off-by: Alexander Kuleshov Signed-off-by: Herbert Xu --- crypto/cryptd.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c81861b1..c4af8aa1 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -887,8 +887,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-EINVAL); - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - type |= CRYPTO_ALG_TYPE_BLKCIPHER; + type = crypto_skcipher_type(type); mask &= ~CRYPTO_ALG_TYPE_MASK; mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); tfm = crypto_alloc_base(cryptd_alg_name, type, mask); -- cgit v1.2.3 From ee8815558138ae82da14dd55f281d20fb15fb1f9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 26 Nov 2015 13:55:39 +0800 Subject: net: Generalise wq_has_sleeper helper The memory barrier in the helper wq_has_sleeper is needed by just about every user of waitqueue_active. This patch generalises it by making it take a wait_queue_head_t directly. The existing helper is renamed to skwq_has_sleeper. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- crypto/algif_aead.c | 4 ++-- crypto/algif_skcipher.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdfb..fb99f308 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,7 +106,7 @@ static void aead_wmem_wakeup(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); - if (wq_has_sleeper(wq)) + if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); @@ -157,7 +157,7 @@ static void aead_data_wakeup(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); - if (wq_has_sleeper(wq)) + if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLRDNORM | POLLRDBAND); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee..0e6702e4 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -238,7 +238,7 @@ static void skcipher_wmem_wakeup(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); - if (wq_has_sleeper(wq)) + if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); @@ -288,7 +288,7 @@ static void skcipher_data_wakeup(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); - if (wq_has_sleeper(wq)) + if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | POLLRDNORM | POLLRDBAND); -- cgit v1.2.3 From b1d13cb9d87a3d25f79388d8db914a482ce5b3b7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 29 Nov 2015 20:03:10 -0800 Subject: net: rename SOCK_ASYNC_NOSPACE and SOCK_ASYNC_WAITDATA This patch is a cleanup to make following patch easier to review. Goal is to move SOCK_ASYNC_NOSPACE and SOCK_ASYNC_WAITDATA from (struct socket)->flags to a (struct socket_wq)->flags to benefit from RCU protection in sock_wake_async() To ease backports, we rename both constants. Two new helpers, sk_set_bit(int nr, struct sock *sk) and sk_clear_bit(int net, struct sock *sk) are added so that following patch can change their implementation. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- crypto/algif_aead.c | 4 ++-- crypto/algif_skcipher.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdfb..6d4d4569 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags) if (flags & MSG_DONTWAIT) return -EAGAIN; - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); for (;;) { if (signal_pending(current)) @@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags) } finish_wait(sk_sleep(sk), &wait); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee..ca9efe17 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) if (flags & MSG_DONTWAIT) return -EAGAIN; - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); for (;;) { if (signal_pending(current)) @@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) return -EAGAIN; } - set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); for (;;) { if (signal_pending(current)) @@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) } finish_wait(sk_sleep(sk), &wait); - clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } -- cgit v1.2.3 From bb271afcdfa6930e8570e2a306eb86f26ee8b50f Mon Sep 17 00:00:00 2001 From: "Wang, Rui Y" Date: Sun, 29 Nov 2015 22:45:34 +0800 Subject: crypto: cryptd - Assign statesize properly cryptd_create_hash() fails by returning -EINVAL. It is because after 68418efba ("crypto: ahash - ensure statesize is non-zero") all ahash drivers must have a non-zero statesize. This patch fixes the problem by properly assigning the statesize. Signed-off-by: Rui Wang Signed-off-by: Herbert Xu --- crypto/cryptd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c4af8aa1..7921251c 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.halg.base.cra_flags = type; inst->alg.halg.digestsize = salg->digestsize; + inst->alg.halg.statesize = salg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; -- cgit v1.2.3 From b6df858ce0a79e0128fa42d280361b3419521710 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 5 Dec 2015 17:09:33 +0100 Subject: crypto: akcipher - add akcipher declarations needed by templates. Add a struct akcipher_instance and struct akcipher_spawn similar to how AEAD declares them and the macros for converting to/from crypto_instance/crypto_spawn. Also add register functions to avoid exposing crypto_akcipher_type. 
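A template built on these declarations gets a create function shaped roughly like the following condensed skeleton (hypothetical foo_create(); the pkcs1pad template added in the next patch is the real in-tree user):

    static int foo_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
            struct akcipher_instance *inst;
            struct crypto_akcipher_spawn *spawn;
            const char *name;
            int err;

            name = crypto_attr_alg_name(tb[1]);
            if (IS_ERR(name))
                    return PTR_ERR(name);

            inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
            if (!inst)
                    return -ENOMEM;

            spawn = akcipher_instance_ctx(inst);
            crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
            err = crypto_grab_akcipher(spawn, name, 0, 0);
            if (err)
                    goto err_free_inst;

            /* ... fill in inst->alg callbacks and set inst->free here ... */

            err = akcipher_register_instance(tmpl, inst);
            if (err)
                    goto err_drop_alg;

            return 0;

    err_drop_alg:
            crypto_drop_akcipher(spawn);
    err_free_inst:
            kfree(inst);
            return err;
    }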
Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/akcipher.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 120ec042..def301ed 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "internal.h" #ifdef CONFIG_NET @@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static void crypto_akcipher_free_instance(struct crypto_instance *inst) +{ + struct akcipher_instance *akcipher = akcipher_instance(inst); + + akcipher->free(akcipher); +} + static const struct crypto_type crypto_akcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_akcipher_init_tfm, + .free = crypto_akcipher_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_akcipher_show, #endif @@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = { .tfmsize = offsetof(struct crypto_akcipher, base), }; +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_akcipher_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_akcipher); + struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, u32 mask) { @@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); -int crypto_register_akcipher(struct akcipher_alg *alg) +static void akcipher_prepare_alg(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; base->cra_type = &crypto_akcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; +} + +int crypto_register_akcipher(struct akcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + akcipher_prepare_alg(alg); return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_akcipher); @@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_akcipher); +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst) +{ + akcipher_prepare_alg(&inst->alg); + return crypto_register_instance(tmpl, akcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(akcipher_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic public key cipher type"); -- cgit v1.2.3 From 7185f32fb45d8c5f03780a40b0c74d9802f3cb2c Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 5 Dec 2015 17:09:34 +0100 Subject: crypto: rsa - RSA padding algorithm This patch adds PKCS#1 v1.5 standard RSA padding as a separate template. This way an RSA cipher with padding can be obtained by instantiating "pkcs1pad(rsa)". The reason for adding this is that RSA is almost never used without this padding (or OAEP) so it will be needed for either certificate work in the kernel or the userspace, and I also hear that it is likely implemented by hardware RSA in which case hardware implementations of the whole of pkcs1pad(rsa) can be provided. 
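From a caller's point of view the padded cipher is just another akcipher requested by name; a minimal, hypothetical sketch (not part of this patch):

    struct crypto_akcipher *tfm;

    tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    /* crypto_akcipher_set_pub_key()/_set_priv_key(), then the usual
     * encrypt/decrypt/sign/verify requests go through the padding layer */

    crypto_free_akcipher(tfm);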
Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/rsa-pkcs1pad.c | 617 ++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/rsa.c | 16 +- 3 files changed, 633 insertions(+), 1 deletion(-) create mode 100644 crypto/rsa-pkcs1pad.c diff --git a/crypto/Makefile b/crypto/Makefile index f7aba923..2acdbbd3 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o rsa_generic-y += rsaprivkey-asn1.o rsa_generic-y += rsa.o rsa_generic-y += rsa_helper.o +rsa_generic-y += rsa-pkcs1pad.o obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o cryptomgr-y := algboss.o testmgr.o diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c new file mode 100644 index 00000000..accc67d1 --- /dev/null +++ b/crypto/rsa-pkcs1pad.c @@ -0,0 +1,617 @@ +/* + * RSA padding templates. + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + + unsigned int key_size; +}; + +struct pkcs1pad_request { + struct akcipher_request child_req; + + struct scatterlist in_sg[3], out_sg[2]; + uint8_t *in_buf, *out_buf; +}; + +static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + int err, size; + + err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); + + if (!err) { + /* Find out new modulus size from rsa implementation */ + size = crypto_akcipher_maxsize(ctx->child); + + ctx->key_size = size > 0 ? size : 0; + if (size <= 0) + err = size; + } + + return err; +} + +static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + int err, size; + + err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); + + if (!err) { + /* Find out new modulus size from rsa implementation */ + size = crypto_akcipher_maxsize(ctx->child); + + ctx->key_size = size > 0 ? size : 0; + if (size <= 0) + err = size; + } + + return err; +} + +static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + + /* + * The maximum destination buffer size for the encrypt/sign operations + * will be the same as for RSA, even though it's smaller for + * decrypt/verify. + */ + + return ctx->key_size ?: -EINVAL; +} + +static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, + struct scatterlist *next) +{ + int nsegs = next ? 
1 : 0; + + if (offset_in_page(buf) + len <= PAGE_SIZE) { + nsegs += 1; + sg_init_table(sg, nsegs); + sg_set_buf(sg, buf, len); + } else { + nsegs += 2; + sg_init_table(sg, nsegs); + sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf)); + sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf), + offset_in_page(buf) + len - PAGE_SIZE); + } + + if (next) + sg_chain(sg, nsegs, next); +} + +static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len]; + + if (!err) { + if (req_ctx->child_req.dst_len < ctx->key_size) { + memset(zeros, 0, sizeof(zeros)); + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, + sizeof(zeros)), + zeros, sizeof(zeros)); + } + + sg_pcopy_from_buffer(req->dst, + sg_nents_for_len(req->dst, ctx->key_size), + req_ctx->out_buf, req_ctx->child_req.dst_len, + sizeof(zeros)); + } + req->dst_len = ctx->key_size; + + kfree(req_ctx->in_buf); + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_encrypt_sign_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, + pkcs1pad_encrypt_sign_complete(req, err)); +} + +static int pkcs1pad_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + unsigned int i, ps_end; + + if (!ctx->key_size) + return -EINVAL; + + if (req->src_len > ctx->key_size - 11) + return -EOVERFLOW; + + if (req->dst_len < ctx->key_size) { + req->dst_len = ctx->key_size; + return -EOVERFLOW; + } + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* + * Replace both input and output to add the padding in the input and + * the potential missing leading zeros in the output. + */ + req_ctx->child_req.src = req_ctx->in_sg; + req_ctx->child_req.src_len = ctx->key_size - 1; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size; + + req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->in_buf) + return -ENOMEM; + + ps_end = ctx->key_size - req->src_len - 2; + req_ctx->in_buf[0] = 0x02; + for (i = 1; i < ps_end; i++) + req_ctx->in_buf[i] = 1 + prandom_u32_max(255); + req_ctx->in_buf[ps_end] = 0x00; + + pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, + ctx->key_size - 1 - req->src_len, req->src); + + req_ctx->out_buf = kmalloc(ctx->key_size, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) { + kfree(req_ctx->in_buf); + return -ENOMEM; + } + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_encrypt_sign_complete_cb, req); + + err = crypto_akcipher_encrypt(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_encrypt_sign_complete(req, err); + + return err; +} + +static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + unsigned int pos; + + if (err == -EOVERFLOW) + /* Decrypted value had no leading 0 byte */ + err = -EINVAL; + + if (err) + goto done; + + if (req_ctx->child_req.dst_len != ctx->key_size - 1) { + err = -EINVAL; + goto done; + } + + if (req_ctx->out_buf[0] != 0x02) { + err = -EINVAL; + goto done; + } + for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) + if (req_ctx->out_buf[pos] == 0x00) + break; + if (pos < 9 || pos == req_ctx->child_req.dst_len) { + err = -EINVAL; + goto done; + } + pos++; + + if (req->dst_len < req_ctx->child_req.dst_len - pos) + err = -EOVERFLOW; + req->dst_len = req_ctx->child_req.dst_len - pos; + + if (!err) + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, req->dst_len), + req_ctx->out_buf + pos, req->dst_len); + +done: + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_decrypt_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); +} + +static int pkcs1pad_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + + if (!ctx->key_size || req->src_len != ctx->key_size) + return -EINVAL; + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* Reuse input buffer, output to a new buffer */ + req_ctx->child_req.src = req->src; + req_ctx->child_req.src_len = req->src_len; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size - 1; + + req_ctx->out_buf = kmalloc(ctx->key_size - 1, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) + return -ENOMEM; + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size - 1, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_decrypt_complete_cb, req); + + err = crypto_akcipher_decrypt(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_decrypt_complete(req, err); + + return err; +} + +static int pkcs1pad_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + unsigned int ps_end; + + if (!ctx->key_size) + return -EINVAL; + + if (req->src_len > ctx->key_size - 11) + return -EOVERFLOW; + + if (req->dst_len < ctx->key_size) { + req->dst_len = ctx->key_size; + return -EOVERFLOW; + } + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* + * Replace both input and output to add the padding in the input and + * the potential missing leading zeros in the output. + */ + req_ctx->child_req.src = req_ctx->in_sg; + req_ctx->child_req.src_len = ctx->key_size - 1; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size; + + req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->in_buf) + return -ENOMEM; + + ps_end = ctx->key_size - req->src_len - 2; + req_ctx->in_buf[0] = 0x01; + memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); + req_ctx->in_buf[ps_end] = 0x00; + + pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, + ctx->key_size - 1 - req->src_len, req->src); + + req_ctx->out_buf = kmalloc(ctx->key_size, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) { + kfree(req_ctx->in_buf); + return -ENOMEM; + } + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_encrypt_sign_complete_cb, req); + + err = crypto_akcipher_sign(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_encrypt_sign_complete(req, err); + + return err; +} + +static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + unsigned int pos; + + if (err == -EOVERFLOW) + /* Decrypted value had no leading 0 byte */ + err = -EINVAL; + + if (err) + goto done; + + if (req_ctx->child_req.dst_len != ctx->key_size - 1) { + err = -EINVAL; + goto done; + } + + if (req_ctx->out_buf[0] != 0x01) { + err = -EINVAL; + goto done; + } + for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) + if (req_ctx->out_buf[pos] != 0xff) + break; + if (pos < 9 || pos == req_ctx->child_req.dst_len || + req_ctx->out_buf[pos] != 0x00) { + err = -EINVAL; + goto done; + } + pos++; + + if (req->dst_len < req_ctx->child_req.dst_len - pos) + err = -EOVERFLOW; + req->dst_len = req_ctx->child_req.dst_len - pos; + + if (!err) + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, req->dst_len), + req_ctx->out_buf + pos, req->dst_len); + +done: + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_verify_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); +} + +/* + * The verify operation is here for completeness similar to the verification + * defined in RFC2313 section 10.2 except that block type 0 is not accepted, + * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to + * retrieve the DigestInfo from a signature, instead the user is expected + * to call the sign operation to generate the expected signature and compare + * signatures instead of the message-digests. + */ +static int pkcs1pad_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + + if (!ctx->key_size || req->src_len != ctx->key_size) + return -EINVAL; + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* Reuse input buffer, output to a new buffer */ + req_ctx->child_req.src = req->src; + req_ctx->child_req.src_len = req->src_len; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size - 1; + + req_ctx->out_buf = kmalloc(ctx->key_size - 1, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) + return -ENOMEM; + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size - 1, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_verify_complete_cb, req); + + err = crypto_akcipher_verify(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_verify_complete(req, err); + + return err; +} + +static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) +{ + struct akcipher_instance *inst = akcipher_alg_instance(tfm); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct crypto_akcipher *child_tfm; + + child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst)); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + + crypto_free_akcipher(ctx->child); +} + +static void pkcs1pad_free(struct akcipher_instance *inst) +{ + struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst); + + crypto_drop_akcipher(spawn); + + kfree(inst); +} + +static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct akcipher_instance *inst; + struct crypto_akcipher_spawn *spawn; + struct akcipher_alg *rsa_alg; + const char *rsa_alg_name; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) + return -EINVAL; + + rsa_alg_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(rsa_alg_name)) + return PTR_ERR(rsa_alg_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = akcipher_instance_ctx(inst); + crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); + err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + rsa_alg = crypto_spawn_akcipher_alg(spawn); + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_name) >= + CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); + + inst->alg.init = pkcs1pad_init_tfm; + inst->alg.exit = pkcs1pad_exit_tfm; + + inst->alg.encrypt = pkcs1pad_encrypt; + inst->alg.decrypt = pkcs1pad_decrypt; + inst->alg.sign = pkcs1pad_sign; + inst->alg.verify = pkcs1pad_verify; + inst->alg.set_pub_key = pkcs1pad_set_pub_key; + inst->alg.set_priv_key = pkcs1pad_set_priv_key; + inst->alg.max_size = pkcs1pad_get_max_size; + inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize; + + inst->free = pkcs1pad_free; + + err = akcipher_register_instance(tmpl, inst); + if (err) + goto out_drop_alg; + + return 0; + +out_drop_alg: + crypto_drop_akcipher(spawn); +out_free_inst: + kfree(inst); + return err; +} + +struct crypto_template rsa_pkcs1pad_tmpl = { + .name = "pkcs1pad", + .create = pkcs1pad_create, + .module = THIS_MODULE, +}; diff --git a/crypto/rsa.c b/crypto/rsa.c index 
58aad69a..77d737f5 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -13,6 +13,7 @@ #include #include #include +#include /* * RSAEP function [RFC3447 sec 5.1.1] @@ -315,11 +316,24 @@ static struct akcipher_alg rsa = { static int rsa_init(void) { - return crypto_register_akcipher(&rsa); + int err; + + err = crypto_register_akcipher(&rsa); + if (err) + return err; + + err = crypto_register_template(&rsa_pkcs1pad_tmpl); + if (err) { + crypto_unregister_akcipher(&rsa); + return err; + } + + return 0; } static void rsa_exit(void) { + crypto_unregister_template(&rsa_pkcs1pad_tmpl); crypto_unregister_akcipher(&rsa); } -- cgit v1.2.3 From a6b4ab981699dcddd849cbca065f60d7914cb559 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 6 Dec 2015 02:51:38 +0100 Subject: crypto: chacha20poly1305 - Skip encryption/decryption for 0-len If the length of the plaintext is zero, there's no need to waste cycles on encryption and decryption. Using the chacha20poly1305 construction for zero-length plaintexts is a common way of using a shared encryption key for AAD authentication. Signed-off-by: Jason A. Donenfeld Signed-off-by: Herbert Xu --- crypto/chacha20poly1305.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 99c3cce0..7b6b935c 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; + if (rctx->cryptlen == 0) + goto skip; + chacha_iv(creq->iv, req, 1); sg_init_table(rctx->src, 2); @@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req) if (err) return err; +skip: return poly_verify_tag(req); } @@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; + if (req->cryptlen == 0) + goto skip; + chacha_iv(creq->iv, req, 1); sg_init_table(rctx->src, 2); @@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req) if (err) return err; +skip: return poly_genkey(req); } -- cgit v1.2.3 From e27bb8291df73746266cd9a706e7d4908ab29297 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 6 Dec 2015 02:51:37 +0100 Subject: crypto: skcipher - Copy iv from desc even for 0-len walks Some ciphers actually support encrypting zero length plaintexts. For example, many AEAD modes support this. The resulting ciphertext for those winds up being only the authentication tag, which is a result of the key, the iv, the additional data, and the fact that the plaintext had zero length. The blkcipher constructors won't copy the IV to the right place, however, when using a zero length input, resulting in some significant problems when ciphers call their initialization routines, only to find that the ->iv parameter is uninitialized. One such example of this would be using chacha20poly1305 with a zero length input, which then calls chacha20, which calls the key setup routine, which eventually OOPSes due to the uninitialized ->iv member. Signed-off-by: Jason A. 
Donenfeld Cc: Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 2 +- crypto/blkcipher.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index b4ffc5be..e5b57218 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req, if (WARN_ON_ONCE(in_irq())) return -EDEADLK; + walk->iv = req->info; walk->nbytes = walk->total; if (unlikely(!walk->total)) return 0; walk->iv_buffer = NULL; - walk->iv = req->info; if (unlikely(((unsigned long)walk->iv & alignmask))) { int err = ablkcipher_copy_iv(walk, tfm, alignmask); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 11b98149..8cc1622b 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc, if (WARN_ON_ONCE(in_irq())) return -EDEADLK; + walk->iv = desc->info; walk->nbytes = walk->total; if (unlikely(!walk->total)) return 0; walk->buffer = NULL; - walk->iv = desc->info; if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { int err = blkcipher_copy_iv(walk); if (err) -- cgit v1.2.3 From 01998b932a15f6d4190233a63a60d22f39bb6866 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 7 Dec 2015 21:36:57 +0100 Subject: crypto: drbg - constify drbg_state_ops structures The drbg_state_ops structures are never modified, so declare them as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- crypto/drbg.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index a7c23146..ab6ef1d0 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -626,7 +626,7 @@ out: return len; } -static struct drbg_state_ops drbg_ctr_ops = { +static const struct drbg_state_ops drbg_ctr_ops = { .update = drbg_ctr_update, .generate = drbg_ctr_generate, .crypto_init = drbg_init_sym_kernel, @@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg, return len; } -static struct drbg_state_ops drbg_hmac_ops = { +static const struct drbg_state_ops drbg_hmac_ops = { .update = drbg_hmac_update, .generate = drbg_hmac_generate, .crypto_init = drbg_init_hash_kernel, @@ -1032,7 +1032,7 @@ out: * scratchpad usage: as update and generate are used isolated, both * can use the scratchpad */ -static struct drbg_state_ops drbg_hash_ops = { +static const struct drbg_state_ops drbg_hash_ops = { .update = drbg_hash_update, .generate = drbg_hash_generate, .crypto_init = drbg_init_hash_kernel, -- cgit v1.2.3 From 86a5d9c4e2eab0aaf7f3e7e382b631a79f11c8ab Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 9 Dec 2015 15:05:28 -0500 Subject: crypto: asymmetric_keys - signature.c does not need This file does not contain any modular related function calls. So get rid of module.h since it drags in a lot of other headers and adds to the preprocessing load. It does export some symbols though, so we'll need to ensure it has export.h present instead. Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Paul Gortmaker Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/signature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 9441240f..004d5fc8 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -13,7 +13,7 @@ #define pr_fmt(fmt) "SIG: "fmt #include -#include +#include #include #include #include "asymmetric_keys.h" -- cgit v1.2.3 From af103e020b7d8bd0bd4c9fe94cf4067cc2453ca9 Mon Sep 17 00:00:00 2001 From: Petko Manolov Date: Wed, 2 Dec 2015 17:47:55 +0200 Subject: IMA: create machine owner and blacklist keyrings This option creates IMA MOK and blacklist keyrings. IMA MOK is an intermediate keyring that sits between .system and .ima keyrings, effectively forming a simple CA hierarchy. To successfully import a key into .ima_mok it must be signed by a key which CA is in .system keyring. On turn any key that needs to go in .ima keyring must be signed by CA in either .system or .ima_mok keyrings. IMA MOK is empty at kernel boot. IMA blacklist keyring contains all revoked IMA keys. It is consulted before any other keyring. If the search is successful the requested operation is rejected and error is returned to the caller. Signed-off-by: Petko Manolov Signed-off-by: Mimi Zohar --- crypto/asymmetric_keys/x509_public_key.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 2a44b375..9e9e5a6a 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -321,6 +321,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) goto error_free_cert; } else if (!prep->trusted) { ret = x509_validate_trust(cert, get_system_trusted_keyring()); + if (ret) + ret = x509_validate_trust(cert, get_ima_mok_keyring()); if (!ret) prep->trusted = 1; } -- cgit v1.2.3 From a80ce6d6d76522a720ff98f7a1877ff945d364bf Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Thu, 5 Nov 2015 21:43:06 +0200 Subject: keys, trusted: select hash algorithm for TPM2 chips Added 'hash=' option for selecting the hash algorithm for add_key() syscall and documentation for it. 
Added entry for sm3-256 to the following tables in order to support TPM_ALG_SM3_256: * hash_algo_name * hash_digest_size Includes support for the following hash algorithms: * sha1 * sha256 * sha384 * sha512 * sm3-256 Signed-off-by: Jarkko Sakkinen Tested-by: Colin Ian King Reviewed-by: James Morris Reviewed-by: Mimi Zohar Acked-by: Peter Huewe --- crypto/hash_info.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/hash_info.c b/crypto/hash_info.c index 3e7ff46f..7b1e0b18 100644 --- a/crypto/hash_info.c +++ b/crypto/hash_info.c @@ -31,6 +31,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = { [HASH_ALGO_TGR_128] = "tgr128", [HASH_ALGO_TGR_160] = "tgr160", [HASH_ALGO_TGR_192] = "tgr192", + [HASH_ALGO_SM3_256] = "sm3-256", }; EXPORT_SYMBOL_GPL(hash_algo_name); @@ -52,5 +53,6 @@ const int hash_digest_size[HASH_ALGO__LAST] = { [HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE, [HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE, [HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE, + [HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE, }; EXPORT_SYMBOL_GPL(hash_digest_size); -- cgit v1.2.3 From 10dd01cec59cd01503a2848383d591b345400fc4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 18 Dec 2015 19:16:57 +0800 Subject: crypto: algif_skcipher - Use new skcipher interface This patch replaces uses of ablkcipher with the new skcipher interface. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu Tested-by: --- crypto/algif_skcipher.c | 61 ++++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee..973fe45e 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -47,7 +47,7 @@ struct skcipher_ctx { bool merge; bool enc; - struct ablkcipher_request req; + struct skcipher_request req; }; struct skcipher_async_rsgl { @@ -64,13 +64,13 @@ struct skcipher_async_req { }; #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ - crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))) + crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))) #define GET_REQ_SIZE(ctx) \ - crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)) + crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)) #define GET_IV_SIZE(ctx) \ - crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req)) + crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req)) #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ sizeof(struct scatterlist) - 1) @@ -302,8 +302,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); - unsigned ivsize = crypto_ablkcipher_ivsize(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); + unsigned ivsize = crypto_skcipher_ivsize(tfm); struct skcipher_sg_list *sgl; struct af_alg_control con = {}; long copied = 0; @@ -507,7 +507,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, struct skcipher_sg_list *sgl; struct scatterlist *sg; struct skcipher_async_req *sreq; - struct ablkcipher_request *req; + struct skcipher_request *req; struct skcipher_async_rsgl *last_rsgl = NULL; unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); unsigned int reqlen = sizeof(struct skcipher_async_req) + @@ -531,9 +531,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, } 
sg_init_table(sreq->tsg, tx_nents); memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); - ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req)); - ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - skcipher_async_cb, sk); + skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_async_cb, sk); while (iov_iter_count(&msg->msg_iter)) { struct skcipher_async_rsgl *rsgl; @@ -608,10 +608,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, if (mark) sg_mark_end(sreq->tsg + txbufs - 1); - ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, - len, sreq->iv); - err = ctx->enc ? crypto_ablkcipher_encrypt(req) : - crypto_ablkcipher_decrypt(req); + skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, + len, sreq->iv); + err = ctx->enc ? crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req); if (err == -EINPROGRESS) { atomic_inc(&ctx->inflight); err = -EIOCBQUEUED; @@ -632,7 +632,7 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; - unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm( + unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( &ctx->req)); struct skcipher_sg_list *sgl; struct scatterlist *sg; @@ -669,14 +669,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, if (!used) goto free; - ablkcipher_request_set_crypt(&ctx->req, sg, - ctx->rsgl.sg, used, - ctx->iv); + skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, + ctx->iv); err = af_alg_wait_for_completion( ctx->enc ? - crypto_ablkcipher_encrypt(&ctx->req) : - crypto_ablkcipher_decrypt(&ctx->req), + crypto_skcipher_encrypt(&ctx->req) : + crypto_skcipher_decrypt(&ctx->req), &ctx->completion); free: @@ -751,17 +750,17 @@ static struct proto_ops algif_skcipher_ops = { static void *skcipher_bind(const char *name, u32 type, u32 mask) { - return crypto_alloc_ablkcipher(name, type, mask); + return crypto_alloc_skcipher(name, type, mask); } static void skcipher_release(void *private) { - crypto_free_ablkcipher(private); + crypto_free_skcipher(private); } static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) { - return crypto_ablkcipher_setkey(private, key, keylen); + return crypto_skcipher_setkey(private, key, keylen); } static void skcipher_wait(struct sock *sk) @@ -778,13 +777,13 @@ static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); if (atomic_read(&ctx->inflight)) skcipher_wait(sk); skcipher_free_sgl(sk); - sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); + sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } @@ -793,20 +792,20 @@ static int skcipher_accept_parent(void *private, struct sock *sk) { struct skcipher_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private); + unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; - ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private), + ctx->iv = sock_kmalloc(sk, 
crypto_skcipher_ivsize(private), GFP_KERNEL); if (!ctx->iv) { sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private)); + memset(ctx->iv, 0, crypto_skcipher_ivsize(private)); INIT_LIST_HEAD(&ctx->tsgl); ctx->len = len; @@ -819,9 +818,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk) ask->private = ctx; - ablkcipher_request_set_tfm(&ctx->req, private); - ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); + skcipher_request_set_tfm(&ctx->req, private); + skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); sk->sk_destruct = skcipher_sock_destruct; -- cgit v1.2.3 From 82a00f55e7a23f92c870a44f477c7aa3c35c16d5 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 12 Dec 2015 00:03:51 -0500 Subject: crypto: rsa-pkcs1pad - don't allocate buffer on stack Avoid the s390 compile "warning: 'pkcs1pad_encrypt_sign_complete' uses dynamic stack allocation" reported by kbuild test robot. Don't use a flat zero-filled buffer, instead zero the contents of the SGL. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu --- crypto/rsa-pkcs1pad.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index accc67d1..50f5c97e 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -110,21 +110,32 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len]; + size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len; + size_t chunk_len, pad_left; + struct sg_mapping_iter miter; if (!err) { - if (req_ctx->child_req.dst_len < ctx->key_size) { - memset(zeros, 0, sizeof(zeros)); - sg_copy_from_buffer(req->dst, - sg_nents_for_len(req->dst, - sizeof(zeros)), - zeros, sizeof(zeros)); + if (pad_len) { + sg_miter_start(&miter, req->dst, + sg_nents_for_len(req->dst, pad_len), + SG_MITER_ATOMIC | SG_MITER_TO_SG); + + pad_left = pad_len; + while (pad_left) { + sg_miter_next(&miter); + + chunk_len = min(miter.length, pad_left); + memset(miter.addr, 0, chunk_len); + pad_left -= chunk_len; + } + + sg_miter_stop(&miter); } sg_pcopy_from_buffer(req->dst, sg_nents_for_len(req->dst, ctx->key_size), req_ctx->out_buf, req_ctx->child_req.dst_len, - sizeof(zeros)); + pad_len); } req->dst_len = ctx->key_size; -- cgit v1.2.3 From afd2d69869c095d5790bf25b259782f6f5377b94 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 17 Dec 2015 13:45:39 +0100 Subject: crypto: hash - add zero length message hash for shax and md5 Some crypto drivers cannot process empty data message and return a precalculated hash for md5/sha1/sha224/sha256. This patch add thoses precalculated hash in include/crypto. 
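A driver with this limitation can then fall back to the constant; an illustrative sketch (the foo_* names are hypothetical, not code from this patch):

    static int foo_sha256_digest(struct ahash_request *req)
    {
            if (!req->nbytes) {
                    /* the engine cannot hash a zero-length message */
                    memcpy(req->result, sha256_zero_message_hash,
                           SHA256_DIGEST_SIZE);
                    return 0;
            }

            /* non-empty messages take the normal hardware path */
            return foo_hw_digest(req);
    }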
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- crypto/md5.c | 6 ++++++ crypto/sha1_generic.c | 7 +++++++ crypto/sha256_generic.c | 16 ++++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/crypto/md5.c b/crypto/md5.c index 33d17e9a..2355a7c2 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -24,6 +24,12 @@ #include #include +const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { + 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, + 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, +}; +EXPORT_SYMBOL_GPL(md5_zero_message_hash); + /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(u32 *buf, unsigned int words) { diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 39e3acc4..6877cbb9 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -26,6 +26,13 @@ #include #include +const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 +}; +EXPORT_SYMBOL_GPL(sha1_zero_message_hash); + static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, int blocks) { diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 78431163..8f9c47e1 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -27,6 +27,22 @@ #include #include +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +EXPORT_SYMBOL_GPL(sha224_zero_message_hash); + +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +EXPORT_SYMBOL_GPL(sha256_zero_message_hash); + static inline u32 Ch(u32 x, u32 y, u32 z) { return z ^ (x & (y ^ z)); -- cgit v1.2.3 From 874fbf3b2b51ca47c6a69a8e8ec5e8480c492478 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 7 Jan 2016 11:02:34 +1100 Subject: async_tx: use GFP_NOWAIT rather than GFP_IO These async_XX functions are called from md/raid5 in an atomic section, between get_cpu() and put_cpu(), so they must not sleep. So use GFP_NOWAIT rather than GFP_IO. Dan Williams writes: Longer term async_tx needs to be merged into md directly as we can allocate this unmap data statically per-stripe rather than per request. 
Fixed: 72d3260bd533 ("async_pq: convert to dmaengine_unmap_data") Cc: stable@vger.kernel.org (v3.13+) Reported-and-tested-by: Stanislav Samsonov Acked-by: Dan Williams Signed-off-by: NeilBrown Signed-off-by: Vinod Koul --- crypto/async_tx/async_memcpy.c | 2 +- crypto/async_tx/async_pq.c | 4 ++-- crypto/async_tx/async_raid6_recov.c | 4 ++-- crypto/async_tx/async_xor.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index f8c0b8db..88bc8e6b 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, struct dmaengine_unmap_data *unmap = NULL; if (device) - unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { unsigned long dma_prep_flags = 0; diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 5d355e0c..c0748bbd 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); if (device) - unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); /* XORing P/Q is only implemented in software */ if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) && @@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, BUG_ON(disks < 4); if (device) - unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); if (unmap && disks <= dma_maxpq(device, 0) && is_dma_pq_aligned(device, offset, 0, len)) { diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index 934a8498..8fab6275 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, u8 *a, *b, *c; if (dma) - unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT); if (unmap) { struct device *dev = dma->dev; @@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, u8 *d, *s; if (dma) - unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT); if (unmap) { dma_addr_t dma_dest[2]; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index e1bce26c..da75777f 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, BUG_ON(src_cnt <= 1); if (device) - unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); + unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT); if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { struct dma_async_tx_descriptor *tx; @@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, BUG_ON(src_cnt <= 1); if (device) - unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); + unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT); if (unmap && src_cnt <= device->max_xor && is_dma_xor_aligned(device, offset, 
0, len)) { -- cgit v1.2.3 From 232771d84c93fa781e244dc778e1bb2e81ea642a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 25 Dec 2015 15:40:05 +0800 Subject: crypto: algif_skcipher - Require setkey before accept(2) Some cipher implementations will crash if you try to use them without calling setkey first. This patch adds a check so that the accept(2) call will fail with -ENOKEY if setkey hasn't been done on the socket yet. Cc: stable@vger.kernel.org Reported-by: Dmitry Vyukov Signed-off-by: Herbert Xu Tested-by: Dmitry Vyukov --- crypto/algif_skcipher.c | 48 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 5c756b30..f4431bc1 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -31,6 +31,11 @@ struct skcipher_sg_list { struct scatterlist sg[0]; }; +struct skcipher_tfm { + struct crypto_skcipher *skcipher; + bool has_key; +}; + struct skcipher_ctx { struct list_head tsgl; struct af_alg_sgl rsgl; @@ -750,17 +755,41 @@ static struct proto_ops algif_skcipher_ops = { static void *skcipher_bind(const char *name, u32 type, u32 mask) { - return crypto_alloc_skcipher(name, type, mask); + struct skcipher_tfm *tfm; + struct crypto_skcipher *skcipher; + + tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); + if (!tfm) + return ERR_PTR(-ENOMEM); + + skcipher = crypto_alloc_skcipher(name, type, mask); + if (IS_ERR(skcipher)) { + kfree(tfm); + return ERR_CAST(skcipher); + } + + tfm->skcipher = skcipher; + + return tfm; } static void skcipher_release(void *private) { - crypto_free_skcipher(private); + struct skcipher_tfm *tfm = private; + + crypto_free_skcipher(tfm->skcipher); + kfree(tfm); } static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) { - return crypto_skcipher_setkey(private, key, keylen); + struct skcipher_tfm *tfm = private; + int err; + + err = crypto_skcipher_setkey(tfm->skcipher, key, keylen); + tfm->has_key = !err; + + return err; } static void skcipher_wait(struct sock *sk) @@ -792,20 +821,25 @@ static int skcipher_accept_parent(void *private, struct sock *sk) { struct skcipher_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private); + struct skcipher_tfm *tfm = private; + struct crypto_skcipher *skcipher = tfm->skcipher; + unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher); + + if (!tfm->has_key) + return -ENOKEY; ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; - ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private), + ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher), GFP_KERNEL); if (!ctx->iv) { sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(private)); + memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); INIT_LIST_HEAD(&ctx->tsgl); ctx->len = len; @@ -818,7 +852,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk) ask->private = ctx; - skcipher_request_set_tfm(&ctx->req, private); + skcipher_request_set_tfm(&ctx->req, skcipher); skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); -- cgit v1.2.3 From 796caacfda1cea522bd0b442b37a6a9286500487 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Dec 2015 11:47:53 +0800 Subject: crypto: af_alg - Disallow bind/setkey/... after accept(2) Each af_alg parent socket obtained by socket(2) corresponds to a tfm object once bind(2) has succeeded. 
An accept(2) call on that parent socket creates a context which then uses the tfm object. Therefore as long as any child sockets created by accept(2) exist the parent socket must not be modified or freed. This patch guarantees this by using locks and a reference count on the parent socket. Any attempt to modify the parent socket will fail with EBUSY. Cc: stable@vger.kernel.org Reported-by: Dmitry Vyukov Signed-off-by: Herbert Xu --- crypto/af_alg.c | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index a8e7aa3e..7b5b5926 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -125,6 +125,23 @@ int af_alg_release(struct socket *sock) } EXPORT_SYMBOL_GPL(af_alg_release); +void af_alg_release_parent(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + bool last; + + sk = ask->parent; + ask = alg_sk(sk); + + lock_sock(sk); + last = !--ask->refcnt; + release_sock(sk); + + if (last) + sock_put(sk); +} +EXPORT_SYMBOL_GPL(af_alg_release_parent); + static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { const u32 forbidden = CRYPTO_ALG_INTERNAL; @@ -133,6 +150,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct sockaddr_alg *sa = (void *)uaddr; const struct af_alg_type *type; void *private; + int err; if (sock->state == SS_CONNECTED) return -EINVAL; @@ -160,16 +178,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return PTR_ERR(private); } + err = -EBUSY; lock_sock(sk); + if (ask->refcnt) + goto unlock; swap(ask->type, type); swap(ask->private, private); + err = 0; + +unlock: release_sock(sk); alg_do_release(type, private); - return 0; + return err; } static int alg_setkey(struct sock *sk, char __user *ukey, @@ -202,11 +226,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type; - int err = -ENOPROTOOPT; + int err = -EBUSY; lock_sock(sk); + if (ask->refcnt) + goto unlock; + type = ask->type; + err = -ENOPROTOOPT; if (level != SOL_ALG || !type) goto unlock; @@ -264,7 +292,8 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) sk2->sk_family = PF_ALG; - sock_hold(sk); + if (!ask->refcnt++) + sock_hold(sk); alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; -- cgit v1.2.3 From 21720b3ea4d374faaa77a1bf94a700515125301d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Dec 2015 20:24:17 +0800 Subject: crypto: af_alg - Fix socket double-free when accept fails When we fail an accept(2) call we will end up freeing the socket twice, once due to the direct sk_free call and once again through newsock. This patch fixes this by removing the sk_free call. 
Cc: stable@vger.kernel.org Reported-by: Dmitry Vyukov Signed-off-by: Herbert Xu --- crypto/af_alg.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 7b5b5926..eaf98e28 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -285,10 +285,8 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) security_sk_clone(sk, sk2); err = type->accept(ask->private, sk2); - if (err) { - sk_free(sk2); + if (err) goto unlock; - } sk2->sk_family = PF_ALG; -- cgit v1.2.3 From 51a3b513bb33b3dc43bb0f91d59b89bff758ced8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 4 Jan 2016 13:35:18 +0900 Subject: crypto: af_alg - Add nokey compatibility path This patch adds a compatibility path to support old applications that do accept(2) before setkey. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/af_alg.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index eaf98e28..6566d2eb 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type) goto unlock; type->ops->owner = THIS_MODULE; + if (type->ops_nokey) + type->ops_nokey->owner = THIS_MODULE; node->type = type; list_add(&node->list, &alg_types); err = 0; @@ -267,6 +269,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) const struct af_alg_type *type; struct sock *sk2; int err; + bool nokey; lock_sock(sk); type = ask->type; @@ -285,12 +288,17 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) security_sk_clone(sk, sk2); err = type->accept(ask->private, sk2); + + nokey = err == -ENOKEY; + if (nokey && type->accept_nokey) + err = type->accept_nokey(ask->private, sk2); + if (err) goto unlock; sk2->sk_family = PF_ALG; - if (!ask->refcnt++) + if (nokey || !ask->refcnt++) sock_hold(sk); alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; @@ -298,6 +306,9 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) newsock->ops = type->ops; newsock->state = SS_CONNECTED; + if (nokey) + newsock->ops = type->ops_nokey; + err = 0; unlock: -- cgit v1.2.3 From 12cc849695e5bb32645d2e145f4f35656eaebcb4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 4 Jan 2016 13:36:12 +0900 Subject: crypto: algif_skcipher - Add nokey compatibility path This patch adds a compatibility path to support old applications that do accept(2) before setkey.
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_skcipher.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 144 insertions(+), 5 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index f4431bc1..110bab49 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -753,6 +753,99 @@ static struct proto_ops algif_skcipher_ops = { .poll = skcipher_poll, }; +static int skcipher_check_key(struct socket *sock) +{ + int err; + struct sock *psk; + struct alg_sock *pask; + struct skcipher_tfm *tfm; + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + + if (ask->refcnt) + return 0; + + psk = ask->parent; + pask = alg_sk(ask->parent); + tfm = pask->private; + + err = -ENOKEY; + lock_sock(psk); + if (!tfm->has_key) + goto unlock; + + if (!pask->refcnt++) + sock_hold(psk); + + ask->refcnt = 1; + sock_put(psk); + + err = 0; + +unlock: + release_sock(psk); + + return err; +} + +static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg, + size_t size) +{ + int err; + + err = skcipher_check_key(sock); + if (err) + return err; + + return skcipher_sendmsg(sock, msg, size); +} + +static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + int err; + + err = skcipher_check_key(sock); + if (err) + return err; + + return skcipher_sendpage(sock, page, offset, size, flags); +} + +static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) +{ + int err; + + err = skcipher_check_key(sock); + if (err) + return err; + + return skcipher_recvmsg(sock, msg, ignored, flags); +} + +static struct proto_ops algif_skcipher_ops_nokey = { + .family = PF_ALG, + + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .getname = sock_no_getname, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .getsockopt = sock_no_getsockopt, + .mmap = sock_no_mmap, + .bind = sock_no_bind, + .accept = sock_no_accept, + .setsockopt = sock_no_setsockopt, + + .release = af_alg_release, + .sendmsg = skcipher_sendmsg_nokey, + .sendpage = skcipher_sendpage_nokey, + .recvmsg = skcipher_recvmsg_nokey, + .poll = skcipher_poll, +}; + static void *skcipher_bind(const char *name, u32 type, u32 mask) { struct skcipher_tfm *tfm; @@ -802,7 +895,7 @@ static void skcipher_wait(struct sock *sk) msleep(100); } -static void skcipher_sock_destruct(struct sock *sk) +static void skcipher_sock_destruct_common(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; @@ -814,10 +907,33 @@ static void skcipher_sock_destruct(struct sock *sk) skcipher_free_sgl(sk); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); +} + +static void skcipher_sock_destruct(struct sock *sk) +{ + skcipher_sock_destruct_common(sk); af_alg_release_parent(sk); } -static int skcipher_accept_parent(void *private, struct sock *sk) +static void skcipher_release_parent_nokey(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + + if (!ask->refcnt) { + sock_put(ask->parent); + return; + } + + af_alg_release_parent(sk); +} + +static void skcipher_sock_destruct_nokey(struct sock *sk) +{ + skcipher_sock_destruct_common(sk); + skcipher_release_parent_nokey(sk); +} + +static int skcipher_accept_parent_common(void *private, struct sock *sk) { struct skcipher_ctx *ctx; struct alg_sock *ask = alg_sk(sk); @@ -825,9 +941,6 @@ static int skcipher_accept_parent(void 
*private, struct sock *sk) struct crypto_skcipher *skcipher = tfm->skcipher; unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher); - if (!tfm->has_key) - return -ENOKEY; - ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -861,12 +974,38 @@ static int skcipher_accept_parent(void *private, struct sock *sk) return 0; } +static int skcipher_accept_parent(void *private, struct sock *sk) +{ + struct skcipher_tfm *tfm = private; + + if (!tfm->has_key) + return -ENOKEY; + + return skcipher_accept_parent_common(private, sk); +} + +static int skcipher_accept_parent_nokey(void *private, struct sock *sk) +{ + int err; + + err = skcipher_accept_parent_common(private, sk); + if (err) + goto out; + + sk->sk_destruct = skcipher_sock_destruct_nokey; + +out: + return err; +} + static const struct af_alg_type algif_type_skcipher = { .bind = skcipher_bind, .release = skcipher_release, .setkey = skcipher_setkey, .accept = skcipher_accept_parent, + .accept_nokey = skcipher_accept_parent_nokey, .ops = &algif_skcipher_ops, + .ops_nokey = &algif_skcipher_ops_nokey, .name = "skcipher", .owner = THIS_MODULE }; -- cgit v1.2.3 From bd57898bac03e310b9eea53b23c26d5a743cd94b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 8 Jan 2016 21:28:26 +0800 Subject: crypto: hash - Add crypto_ahash_has_setkey This patch adds a way for ahash users to determine whether a key is required by a crypto_ahash transform. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/ahash.c | 5 ++++- crypto/shash.c | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crypto/ahash.c b/crypto/ahash.c index 9c1dc8d6..d19b5232 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); hash->setkey = ahash_nosetkey; + hash->has_setkey = false; hash->export = ahash_no_export; hash->import = ahash_no_import; @@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) hash->finup = alg->finup ?: ahash_def_finup; hash->digest = alg->digest; - if (alg->setkey) + if (alg->setkey) { hash->setkey = alg->setkey; + hash->has_setkey = true; + } if (alg->export) hash->export = alg->export; if (alg->import) diff --git a/crypto/shash.c b/crypto/shash.c index ecb1e3d3..88a27de7 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -355,8 +355,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->finup = shash_async_finup; crt->digest = shash_async_digest; - if (alg->setkey) + if (alg->setkey) { crt->setkey = shash_async_setkey; + crt->has_setkey = true; + } if (alg->export) crt->export = shash_async_export; if (alg->import) -- cgit v1.2.3 From af44f48c34c04fd549fa71de3e430f298f1484f5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 8 Jan 2016 21:31:04 +0800 Subject: crypto: algif_hash - Require setkey before accept(2) Hash implementations that require a key may crash if you use them without setting a key. This patch adds the necessary checks so that if you attempt to use them without a key we return -ENOKEY instead of proceeding. This patch also adds a compatibility path to support old applications that do accept(2) before setkey.
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_hash.c | 201 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 193 insertions(+), 8 deletions(-) diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index b4c24fe3..46637bed 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -34,6 +34,11 @@ struct hash_ctx { struct ahash_request req; }; +struct algif_hash_tfm { + struct crypto_ahash *hash; + bool has_key; +}; + static int hash_sendmsg(struct socket *sock, struct msghdr *msg, size_t ignored) { @@ -235,22 +240,151 @@ static struct proto_ops algif_hash_ops = { .accept = hash_accept, }; +static int hash_check_key(struct socket *sock) +{ + int err; + struct sock *psk; + struct alg_sock *pask; + struct algif_hash_tfm *tfm; + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + + if (ask->refcnt) + return 0; + + psk = ask->parent; + pask = alg_sk(ask->parent); + tfm = pask->private; + + err = -ENOKEY; + lock_sock(psk); + if (!tfm->has_key) + goto unlock; + + if (!pask->refcnt++) + sock_hold(psk); + + ask->refcnt = 1; + sock_put(psk); + + err = 0; + +unlock: + release_sock(psk); + + return err; +} + +static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg, + size_t size) +{ + int err; + + err = hash_check_key(sock); + if (err) + return err; + + return hash_sendmsg(sock, msg, size); +} + +static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + int err; + + err = hash_check_key(sock); + if (err) + return err; + + return hash_sendpage(sock, page, offset, size, flags); +} + +static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) +{ + int err; + + err = hash_check_key(sock); + if (err) + return err; + + return hash_recvmsg(sock, msg, ignored, flags); +} + +static int hash_accept_nokey(struct socket *sock, struct socket *newsock, + int flags) +{ + int err; + + err = hash_check_key(sock); + if (err) + return err; + + return hash_accept(sock, newsock, flags); +} + +static struct proto_ops algif_hash_ops_nokey = { + .family = PF_ALG, + + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .getname = sock_no_getname, + .ioctl = sock_no_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .getsockopt = sock_no_getsockopt, + .mmap = sock_no_mmap, + .bind = sock_no_bind, + .setsockopt = sock_no_setsockopt, + .poll = sock_no_poll, + + .release = af_alg_release, + .sendmsg = hash_sendmsg_nokey, + .sendpage = hash_sendpage_nokey, + .recvmsg = hash_recvmsg_nokey, + .accept = hash_accept_nokey, +}; + static void *hash_bind(const char *name, u32 type, u32 mask) { - return crypto_alloc_ahash(name, type, mask); + struct algif_hash_tfm *tfm; + struct crypto_ahash *hash; + + tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); + if (!tfm) + return ERR_PTR(-ENOMEM); + + hash = crypto_alloc_ahash(name, type, mask); + if (IS_ERR(hash)) { + kfree(tfm); + return ERR_CAST(hash); + } + + tfm->hash = hash; + + return tfm; } static void hash_release(void *private) { - crypto_free_ahash(private); + struct algif_hash_tfm *tfm = private; + + crypto_free_ahash(tfm->hash); + kfree(tfm); } static int hash_setkey(void *private, const u8 *key, unsigned int keylen) { - return crypto_ahash_setkey(private, key, keylen); + struct algif_hash_tfm *tfm = private; + int err; + + err = crypto_ahash_setkey(tfm->hash, key, keylen); + tfm->has_key = !err; + + return err; } -static void hash_sock_destruct(struct sock *sk) +static void 
hash_sock_destruct_common(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; @@ -258,15 +392,40 @@ static void hash_sock_destruct(struct sock *sk) sock_kzfree_s(sk, ctx->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req))); sock_kfree_s(sk, ctx, ctx->len); +} + +static void hash_sock_destruct(struct sock *sk) +{ + hash_sock_destruct_common(sk); af_alg_release_parent(sk); } -static int hash_accept_parent(void *private, struct sock *sk) +static void hash_release_parent_nokey(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + + if (!ask->refcnt) { + sock_put(ask->parent); + return; + } + + af_alg_release_parent(sk); +} + +static void hash_sock_destruct_nokey(struct sock *sk) +{ + hash_sock_destruct_common(sk); + hash_release_parent_nokey(sk); +} + +static int hash_accept_parent_common(void *private, struct sock *sk) { struct hash_ctx *ctx; struct alg_sock *ask = alg_sk(sk); - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private); - unsigned ds = crypto_ahash_digestsize(private); + struct algif_hash_tfm *tfm = private; + struct crypto_ahash *hash = tfm->hash; + unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); + unsigned ds = crypto_ahash_digestsize(hash); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -286,7 +445,7 @@ static int hash_accept_parent(void *private, struct sock *sk) ask->private = ctx; - ahash_request_set_tfm(&ctx->req, private); + ahash_request_set_tfm(&ctx->req, hash); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); @@ -295,12 +454,38 @@ static int hash_accept_parent(void *private, struct sock *sk) return 0; } +static int hash_accept_parent(void *private, struct sock *sk) +{ + struct algif_hash_tfm *tfm = private; + + if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) + return -ENOKEY; + + return hash_accept_parent_common(private, sk); +} + +static int hash_accept_parent_nokey(void *private, struct sock *sk) +{ + int err; + + err = hash_accept_parent_common(private, sk); + if (err) + goto out; + + sk->sk_destruct = hash_sock_destruct_nokey; + +out: + return err; +} + static const struct af_alg_type algif_type_hash = { .bind = hash_bind, .release = hash_release, .setkey = hash_setkey, .accept = hash_accept_parent, + .accept_nokey = hash_accept_parent_nokey, .ops = &algif_hash_ops, + .ops_nokey = &algif_hash_ops_nokey, .name = "hash", .owner = THIS_MODULE }; -- cgit v1.2.3 From b8adebe5e58c2f6532fe82b382feeeeb3c01da2e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 Jan 2016 21:26:50 +0800 Subject: crypto: skcipher - Add crypto_skcipher_has_setkey This patch adds a way for skcipher users to determine whether a key is required by a transform. 
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/skcipher.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7591928b..d199c0b1 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -118,6 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->decrypt = skcipher_decrypt_blkcipher; skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); + skcipher->has_setkey = calg->cra_blkcipher.max_keysize; return 0; } @@ -210,6 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + sizeof(struct ablkcipher_request); + skcipher->has_setkey = calg->cra_ablkcipher.max_keysize; return 0; } -- cgit v1.2.3 From da014d710016fa4d23b1fda4eb7be878e9c8b26b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 11 Jan 2016 21:29:41 +0800 Subject: crypto: algif_skcipher - Add key check exception for cipher_null This patch adds an exception to the key check so that cipher_null users may continue to use algif_skcipher without setting a key. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_skcipher.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 110bab49..4a5bdb69 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -978,7 +978,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk) { struct skcipher_tfm *tfm = private; - if (!tfm->has_key) + if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher)) return -ENOKEY; return skcipher_accept_parent_common(private, sk); -- cgit v1.2.3 From e8ae341ff042e81d49f021aeb53a51a7dbb68e72 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 Jan 2016 14:59:03 +0800 Subject: crypto: af_alg - Allow af_alg_release_parent to be called on nokey path This patch allows af_alg_release_parent to be called even for nokey sockets. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/af_alg.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 6566d2eb..e7cb8367 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -133,6 +133,12 @@ void af_alg_release_parent(struct sock *sk) bool last; sk = ask->parent; + + if (ask->nokey_refcnt && !ask->refcnt) { + sock_put(sk); + return; + } + ask = alg_sk(sk); lock_sock(sk); @@ -268,8 +274,8 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type; struct sock *sk2; + unsigned int nokey; int err; - bool nokey; lock_sock(sk); type = ask->type; @@ -302,6 +308,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) sock_hold(sk); alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; + alg_sk(sk2)->nokey_refcnt = nokey; newsock->ops = type->ops; newsock->state = SS_CONNECTED; -- cgit v1.2.3 From 1a1a436bd9fe300018c7af28d087012141937bc1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 Jan 2016 15:00:36 +0800 Subject: crypto: algif_hash - Remove custom release parent function This patch removes the custom release parent function as the generic af_alg_release_parent now works for nokey sockets too.
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_hash.c | 43 +++---------------------------------------- 1 file changed, 3 insertions(+), 40 deletions(-) diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 46637bed..3653ab60 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -384,7 +384,7 @@ static int hash_setkey(void *private, const u8 *key, unsigned int keylen) return err; } -static void hash_sock_destruct_common(struct sock *sk) +static void hash_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; @@ -392,33 +392,10 @@ static void hash_sock_destruct_common(struct sock *sk) sock_kzfree_s(sk, ctx->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req))); sock_kfree_s(sk, ctx, ctx->len); -} - -static void hash_sock_destruct(struct sock *sk) -{ - hash_sock_destruct_common(sk); - af_alg_release_parent(sk); -} - -static void hash_release_parent_nokey(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - - if (!ask->refcnt) { - sock_put(ask->parent); - return; - } - af_alg_release_parent(sk); } -static void hash_sock_destruct_nokey(struct sock *sk) -{ - hash_sock_destruct_common(sk); - hash_release_parent_nokey(sk); -} - -static int hash_accept_parent_common(void *private, struct sock *sk) +static int hash_accept_parent_nokey(void *private, struct sock *sk) { struct hash_ctx *ctx; struct alg_sock *ask = alg_sk(sk); @@ -461,21 +438,7 @@ static int hash_accept_parent(void *private, struct sock *sk) if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) return -ENOKEY; - return hash_accept_parent_common(private, sk); -} - -static int hash_accept_parent_nokey(void *private, struct sock *sk) -{ - int err; - - err = hash_accept_parent_common(private, sk); - if (err) - goto out; - - sk->sk_destruct = hash_sock_destruct_nokey; - -out: - return err; + return hash_accept_parent_nokey(private, sk); } static const struct af_alg_type algif_type_hash = { -- cgit v1.2.3 From 65e66c1db2fffccd4d8586f724d4b74678db645d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 Jan 2016 15:01:06 +0800 Subject: crypto: algif_skcipher - Remove custom release parent function This patch removes the custom release parent function as the generic af_alg_release_parent now works for nokey sockets too. 
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_skcipher.c | 43 +++---------------------------------------- 1 file changed, 3 insertions(+), 40 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 4a5bdb69..1f99d2d4 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -895,7 +895,7 @@ static void skcipher_wait(struct sock *sk) msleep(100); } -static void skcipher_sock_destruct_common(struct sock *sk) +static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; @@ -907,33 +907,10 @@ static void skcipher_sock_destruct_common(struct sock *sk) skcipher_free_sgl(sk); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); -} - -static void skcipher_sock_destruct(struct sock *sk) -{ - skcipher_sock_destruct_common(sk); - af_alg_release_parent(sk); -} - -static void skcipher_release_parent_nokey(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - - if (!ask->refcnt) { - sock_put(ask->parent); - return; - } - af_alg_release_parent(sk); } -static void skcipher_sock_destruct_nokey(struct sock *sk) -{ - skcipher_sock_destruct_common(sk); - skcipher_release_parent_nokey(sk); -} - -static int skcipher_accept_parent_common(void *private, struct sock *sk) +static int skcipher_accept_parent_nokey(void *private, struct sock *sk) { struct skcipher_ctx *ctx; struct alg_sock *ask = alg_sk(sk); @@ -981,21 +958,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk) if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher)) return -ENOKEY; - return skcipher_accept_parent_common(private, sk); -} - -static int skcipher_accept_parent_nokey(void *private, struct sock *sk) -{ - int err; - - err = skcipher_accept_parent_common(private, sk); - if (err) - goto out; - - sk->sk_destruct = skcipher_sock_destruct_nokey; - -out: - return err; + return skcipher_accept_parent_nokey(private, sk); } static const struct af_alg_type algif_type_skcipher = { -- cgit v1.2.3 From 366f8f5f4b55065dd49a82a5ad1239d3ca570cbc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 Jan 2016 15:03:32 +0800 Subject: crypto: af_alg - Forbid bind(2) when nokey child sockets are present This patch forbids the calling of bind(2) when there are child sockets created by accept(2) in existence, even if they are created on the nokey path. This is needed as those child sockets have references to the tfm object which bind(2) will destroy. 
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/af_alg.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index e7cb8367..f5e18c2a 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -130,19 +130,16 @@ EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - bool last; + unsigned int nokey = ask->nokey_refcnt; + bool last = nokey && !ask->refcnt; sk = ask->parent; - - if (ask->nokey_refcnt && !ask->refcnt) { - sock_put(sk); - return; - } - ask = alg_sk(sk); lock_sock(sk); - last = !--ask->refcnt; + ask->nokey_refcnt -= nokey; + if (!last) + last = !--ask->refcnt; release_sock(sk); if (last) @@ -188,7 +185,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EBUSY; lock_sock(sk); - if (ask->refcnt) + if (ask->refcnt | ask->nokey_refcnt) goto unlock; swap(ask->type, type); @@ -306,6 +303,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) if (nokey || !ask->refcnt++) sock_hold(sk); + ask->nokey_refcnt += nokey; alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; alg_sk(sk2)->nokey_refcnt = nokey; -- cgit v1.2.3 From a02047b802233d3a6e6c484b9860e5dc7448c816 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 15 Jan 2016 22:01:08 +0800 Subject: crypto: algif_hash - Fix race condition in hash_check_key We need to lock the child socket in hash_check_key as otherwise two simultaneous calls can cause the parent socket to be freed. Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_hash.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 3653ab60..608a7562 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -242,22 +242,23 @@ static struct proto_ops algif_hash_ops = { static int hash_check_key(struct socket *sock) { - int err; + int err = 0; struct sock *psk; struct alg_sock *pask; struct algif_hash_tfm *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + lock_sock(sk); if (ask->refcnt) - return 0; + goto unlock_child; psk = ask->parent; pask = alg_sk(ask->parent); tfm = pask->private; err = -ENOKEY; - lock_sock(psk); + lock_sock_nested(psk, SINGLE_DEPTH_NESTING); if (!tfm->has_key) goto unlock; @@ -271,6 +272,8 @@ static int hash_check_key(struct socket *sock) unlock: release_sock(psk); +unlock_child: + release_sock(sk); return err; } -- cgit v1.2.3 From 78b8d2df6f42bb9af072b7b438049a5189d71d31 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 15 Jan 2016 22:02:20 +0800 Subject: crypto: algif_skcipher - Fix race condition in skcipher_check_key We need to lock the child socket in skcipher_check_key as otherwise two simultaneous calls can cause the parent socket to be freed. 
Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_skcipher.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 1f99d2d4..dfff8b0b 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -755,22 +755,23 @@ static struct proto_ops algif_skcipher_ops = { static int skcipher_check_key(struct socket *sock) { - int err; + int err = 0; struct sock *psk; struct alg_sock *pask; struct skcipher_tfm *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + lock_sock(sk); if (ask->refcnt) - return 0; + goto unlock_child; psk = ask->parent; pask = alg_sk(ask->parent); tfm = pask->private; err = -ENOKEY; - lock_sock(psk); + lock_sock_nested(psk, SINGLE_DEPTH_NESTING); if (!tfm->has_key) goto unlock; @@ -784,6 +785,8 @@ static int skcipher_check_key(struct socket *sock) unlock: release_sock(psk); +unlock_child: + release_sock(sk); return err; } -- cgit v1.2.3 From 266a9ee25fc2d3b966f047c3d8588d89ac41043c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 18 Jan 2016 18:46:10 +0800 Subject: crypto: algif_skcipher - Load TX SG list after waiting We need to load the TX SG list in recvmsg(2) after waiting for incoming data, not before. Cc: stable@vger.kernel.org Reported-by: Dmitry Vyukov Signed-off-by: Herbert Xu Tested-by: Dmitry Vyukov --- crypto/algif_skcipher.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index dfff8b0b..df86fb47 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -647,13 +647,6 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, lock_sock(sk); while (msg_data_left(msg)) { - sgl = list_first_entry(&ctx->tsgl, - struct skcipher_sg_list, list); - sg = sgl->sg; - - while (!sg->length) - sg++; - if (!ctx->used) { err = skcipher_wait_for_data(sk, flags); if (err) @@ -674,6 +667,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, if (!used) goto free; + sgl = list_first_entry(&ctx->tsgl, + struct skcipher_sg_list, list); + sg = sgl->sg; + + while (!sg->length) + sg++; + skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, ctx->iv); -- cgit v1.2.3 From 0a9e44e9ab8d7640b9792ac6bebe640cc607318b Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 18 Jan 2016 17:06:05 +0100 Subject: crypto: crc32c - Fix crc32c soft dependency I don't think it makes sense for a module to have a soft dependency on itself. This seems quite cyclic by nature and I can't see what purpose it could serve. OTOH libcrc32c calls crypto_alloc_shash("crc32c", 0, 0) so it pretty much assumes that some incarnation of the "crc32c" hash algorithm has been loaded. Therefore it makes sense to have the soft dependency there (as crc-t10dif does.) Cc: stable@vger.kernel.org Cc: Tim Chen Cc: "David S.
Miller" Signed-off-by: Jean Delvare Signed-off-by: Herbert Xu --- crypto/crc32c_generic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 06f1b60f..4c0a0e27 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crc32c"); MODULE_ALIAS_CRYPTO("crc32c-generic"); -MODULE_SOFTDEP("pre: crc32c"); -- cgit v1.2.3 From 823be10916d86ed4ef1995bbddb4b0ee6571143a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 19 Jan 2016 21:23:57 +0800 Subject: crypto: algif_skcipher - sendmsg SG marking is off by one We mark the end of the SG list in sendmsg and sendpage and unmark it on the next send call. Unfortunately the unmarking in sendmsg is off-by-one, leading to an SG list that is too short. Fixes: a57d5b404077 ("crypto: algif - Mark sgl end at the end of data") Cc: stable@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/algif_skcipher.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index df86fb47..a81c10fa 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -392,7 +392,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); sg = sgl->sg; - sg_unmark_end(sg + sgl->cur); + if (sgl->cur) + sg_unmark_end(sg + sgl->cur - 1); do { i = sgl->cur; plen = min_t(size_t, len, PAGE_SIZE); -- cgit v1.2.3