From 3014649037efac090a2c0d806d7f8d0a28a95cb3 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 21 Jul 2017 16:42:36 +0100
Subject: crypto: scompress - don't sleep with preemption disabled

Due to the use of per-CPU buffers, scomp_acomp_comp_decomp() executes
with preemption disabled, and so whether the CRYPTO_TFM_REQ_MAY_SLEEP
flag is set is irrelevant, since we cannot sleep anyway. So disregard
the flag, and use GFP_ATOMIC unconditionally.

Cc: # v4.10+
Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 crypto/scompress.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'crypto/scompress.c')

diff --git a/crypto/scompress.c b/crypto/scompress.c
index ae1d3cf2..0b40d991 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -211,9 +211,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
                               scratch_dst, &req->dlen, *ctx);
         if (!ret) {
                 if (!req->dst) {
-                        req->dst = crypto_scomp_sg_alloc(req->dlen,
-                                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-                                        GFP_KERNEL : GFP_ATOMIC);
+                        req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
                         if (!req->dst)
                                 goto out;
                 }
--
cgit v1.2.3


From 4757f1715ee63e749594ef98c0631125f3607671 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 21 Jul 2017 16:42:37 +0100
Subject: crypto: scompress - free partially allocated scratch buffers on failure

When allocating the per-CPU scratch buffers, we allocate the source
and destination buffers separately, but bail immediately if the second
allocation fails, without freeing the first one. Fix that.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 crypto/scompress.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'crypto/scompress.c')

diff --git a/crypto/scompress.c b/crypto/scompress.c
index 0b40d991..2c076483 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -125,8 +125,11 @@ static int crypto_scomp_alloc_all_scratches(void)
                 if (!scomp_src_scratches)
                         return -ENOMEM;
                 scomp_dst_scratches = crypto_scomp_alloc_scratches();
-                if (!scomp_dst_scratches)
+                if (!scomp_dst_scratches) {
+                        crypto_scomp_free_scratches(scomp_src_scratches);
+                        scomp_src_scratches = NULL;
                         return -ENOMEM;
+                }
         }
         return 0;
 }
--
cgit v1.2.3


From 58b3573d7326a2e7ff31d82e9946602eb8fb6549 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 21 Jul 2017 16:42:38 +0100
Subject: crypto: scompress - defer allocation of scratch buffer to first use

The scompress code allocates 2 x 128 KB of scratch buffers for each CPU,
so that clients of the async API can use synchronous implementations
even from atomic context. However, on systems such as Cavium ThunderX
(which has 96 cores), this adds up to a non-negligible 24 MB. Also,
32-bit systems may prefer to use their precious vmalloc space for other
things, especially since there don't appear to be any clients for the
async compression API yet.

So let's defer allocation of the scratch buffers until the first time
we allocate an acompress cipher based on an scompress implementation.
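As an illustration of what those per-CPU scratch buffers look like: below is
a minimal sketch of the allocation pattern, assuming a 128 KB
SCOMP_SCRATCH_SIZE and helper names modelled on crypto/scompress.c. It is an
editorial sketch of the pattern, not the file's exact code.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/vmalloc.h>

#define SCOMP_SCRATCH_SIZE	131072	/* 128 KB per buffer, per CPU */

/* Sketch: allocate one scratch area per possible CPU out of vmalloc space. */
static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	void *scratch;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	/* vfree(NULL) is a no-op, so unwinding all CPUs is safe here */
	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));
	free_percpu(scratches);
	return NULL;
}

Two such sets (source and destination) at 128 KB each, multiplied by 96
possible CPUs, account for the 24 MB figure mentioned above.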
Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 crypto/scompress.c | 46 +++++++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 29 deletions(-)

(limited to 'crypto/scompress.c')

diff --git a/crypto/scompress.c b/crypto/scompress.c
index 2c076483..2075e2c4 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
         seq_puts(m, "type         : scomp\n");
 }
 
-static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
-{
-        return 0;
-}
-
 static void crypto_scomp_free_scratches(void * __percpu *scratches)
 {
         int i;
@@ -134,6 +129,17 @@ static int crypto_scomp_alloc_all_scratches(void)
         return 0;
 }
 
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+        int ret;
+
+        mutex_lock(&scomp_lock);
+        ret = crypto_scomp_alloc_all_scratches();
+        mutex_unlock(&scomp_lock);
+
+        return ret;
+}
+
 static void crypto_scomp_sg_free(struct scatterlist *sgl)
 {
         int i, n;
@@ -241,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
         struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
 
         crypto_free_scomp(*ctx);
+
+        mutex_lock(&scomp_lock);
+        crypto_scomp_free_all_scratches();
+        mutex_unlock(&scomp_lock);
 }
 
 int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
@@ -317,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = {
 int crypto_register_scomp(struct scomp_alg *alg)
 {
         struct crypto_alg *base = &alg->base;
-        int ret = -ENOMEM;
-
-        mutex_lock(&scomp_lock);
-        if (crypto_scomp_alloc_all_scratches())
-                goto error;
 
         base->cra_type = &crypto_scomp_type;
         base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
         base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
 
-        ret = crypto_register_alg(base);
-        if (ret)
-                goto error;
-
-        mutex_unlock(&scomp_lock);
-        return ret;
-
-error:
-        crypto_scomp_free_all_scratches();
-        mutex_unlock(&scomp_lock);
-        return ret;
+        return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_scomp);
 
 int crypto_unregister_scomp(struct scomp_alg *alg)
 {
-        int ret;
-
-        mutex_lock(&scomp_lock);
-        ret = crypto_unregister_alg(&alg->base);
-        crypto_scomp_free_all_scratches();
-        mutex_unlock(&scomp_lock);
-
-        return ret;
+        return crypto_unregister_alg(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
--
cgit v1.2.3
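Relatedly, the first patch in this series rests on the fact that picking up a
per-CPU scratch buffer pins the task to its CPU and disables preemption, so
no allocation made in that window may sleep. The following is a minimal
sketch of that constraint; the helper name and parameters are made up for
illustration and this is not the actual scomp_acomp_comp_decomp() body.

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>

/*
 * Sketch: get_cpu() disables preemption while the per-CPU scratch
 * buffer is in use, so the destination must be allocated with
 * GFP_ATOMIC; GFP_KERNEL could sleep and is never safe in this
 * window, no matter what CRYPTO_TFM_REQ_MAY_SLEEP says.
 */
static int copy_out_of_scratch(void * __percpu *scratches,
			       void **dst, size_t dlen)
{
	void *scratch;
	int cpu, ret = 0;

	cpu = get_cpu();			/* disables preemption */
	scratch = *per_cpu_ptr(scratches, cpu);

	/* ... (de)compression into the scratch buffer happens here ... */

	*dst = kmalloc(dlen, GFP_ATOMIC);	/* must not sleep */
	if (*dst)
		memcpy(*dst, scratch, dlen);
	else
		ret = -ENOMEM;

	put_cpu();				/* re-enables preemption */
	return ret;
}

In the real code the output length comes from the scomp implementation via
req->dlen, and the destination is a scatterlist built by
crypto_scomp_sg_alloc() rather than a plain kmalloc() buffer, but the
atomic-context constraint is the same.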