From 8289b99b6683641386e6c6f1b7db1c1c36a621bf Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Sat, 3 Nov 2018 14:56:03 -0700
Subject: crypto: user - clean up report structure copying

There have been a pretty ridiculous number of issues with initializing
the report structures that are copied to userspace by NETLINK_CRYPTO.

Commit 813e2f3cf818 ("crypto: user - Prepare for CRYPTO_MAX_ALG_NAME
expansion") replaced some strncpy()s with strlcpy()s, thereby
introducing information leaks. Later two other people tried to replace
other strncpy()s with strlcpy() too, which would have introduced even
more information leaks:

    - https://lore.kernel.org/patchwork/patch/954991/
    - https://patchwork.kernel.org/patch/10434351/

Commit 0115ab23b941 ("crypto: user - Implement a generic crypto
statistics") also uses the buggy strlcpy() approach and therefore leaks
uninitialized memory to userspace. A fix was proposed, but it was
originally incomplete.

Seeing as how apparently no one can get this right with the current
approach, change all the reporting functions to:

- Start by memsetting the report structure to 0. This guarantees it's
  always initialized, regardless of what happens later.

- Initialize all strings using strscpy(). This is safe after the
  memset, ensures null termination of long strings, avoids unnecessary
  work, and avoids the -Wstringop-truncation warnings from gcc.

- Use sizeof(var) instead of sizeof(type). This is more robust against
  copy+paste errors.

For simplicity, also reuse the -EMSGSIZE return value from nla_put().

Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 crypto/rng.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'crypto/rng.c')

diff --git a/crypto/rng.c b/crypto/rng.c
index 547f16ec..2406501b 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -74,17 +74,13 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_rng rrng;
 
-	strncpy(rrng.type, "rng", sizeof(rrng.type));
+	memset(&rrng, 0, sizeof(rrng));
 
-	rrng.seedsize = seedsize(alg);
+	strscpy(rrng.type, "rng", sizeof(rrng.type));
 
-	if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
-		    sizeof(struct crypto_report_rng), &rrng))
-		goto nla_put_failure;
-	return 0;
+	rrng.seedsize = seedsize(alg);
 
-nla_put_failure:
-	return -EMSGSIZE;
+	return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
 }
 #else
 static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
--
cgit v1.2.3
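The report-initialization pattern that the patch above converges on (zero the
whole struct first, then do a bounded string copy, then pass sizeof(var) to
nla_put()) can be demonstrated outside the kernel. The snippet below is a
userspace sketch only: struct report, fill_old() and fill_new() are
hypothetical stand-ins, the 0xAA fill simulates stale stack contents, and
snprintf() merely mimics the truncating, non-zero-padding behaviour of
strlcpy()/strscpy(). It shows why copying such a struct to userspace leaks
memory unless it was memset() first.

/*
 * Userspace sketch of the bug class fixed above: a report struct whose
 * string field was filled strlcpy()-style still carries whatever bytes
 * were on the stack after the NUL terminator.
 */
#include <stdio.h>
#include <string.h>

struct report {
	char type[16];          /* stand-in for crypto_report_rng::type */
	unsigned int seedsize;
};

/* strlcpy()-style: copies "rng\0" and leaves type[4..15] untouched. */
static void fill_old(struct report *r)
{
	snprintf(r->type, sizeof(r->type), "%s", "rng");
	r->seedsize = 0;
}

/* Patched pattern: memset first, then a bounded copy into the zeroed buffer. */
static void fill_new(struct report *r)
{
	memset(r, 0, sizeof(*r));
	snprintf(r->type, sizeof(r->type), "%s", "rng");
	r->seedsize = 0;
}

static void dump(const char *tag, const struct report *r)
{
	printf("%s:", tag);
	for (size_t i = 0; i < sizeof(*r); i++)
		printf(" %02x", ((const unsigned char *)r)[i]);
	printf("\n");
}

int main(void)
{
	struct report r;

	memset(&r, 0xAA, sizeof(r));    /* pretend the stack held secrets */
	fill_old(&r);
	dump("leaky ", &r);             /* 0xAA bytes survive past the NUL */

	memset(&r, 0xAA, sizeof(r));
	fill_new(&r);
	dump("zeroed", &r);             /* everything after "rng" is 0x00 */
	return 0;
}

Built with any C99 compiler, the first dump shows 0xAA bytes surviving after
the "rng\0" prefix while the second is fully zeroed; that difference is
exactly what nla_put() would otherwise expose to userspace.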
From 69bd3f459030421c05f6cf915e67dde167360e8c Mon Sep 17 00:00:00 2001
From: Corentin Labbe
Date: Thu, 29 Nov 2018 14:42:21 +0000
Subject: crypto: user - fix use_after_free of struct xxx_request

All the crypto_stats functions use the struct xxx_request for feeding
stats, but in some cases that structure could already have been freed by
the time the statistics are updated. To fix this, the needed parameters
(len and alg) are now stored before the request is executed.

Fixes: 0115ab23b941 ("crypto: user - Implement a generic crypto statistics")
Reported-by: syzbot
Signed-off-by: Corentin Labbe
Signed-off-by: Herbert Xu
---
 crypto/ahash.c  |  17 ++++-
 crypto/algapi.c | 233 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 crypto/rng.c    |   4 +-
 3 files changed, 250 insertions(+), 4 deletions(-)

(limited to 'crypto/rng.c')

diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3a348fbc..5d320a81 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -364,20 +364,28 @@ static int crypto_ahash_op(struct ahash_request *req,
 
 int crypto_ahash_final(struct ahash_request *req)
 {
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct crypto_alg *alg = tfm->base.__crt_alg;
+	unsigned int nbytes = req->nbytes;
 	int ret;
 
+	crypto_stats_get(alg);
 	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
-	crypto_stat_ahash_final(req, ret);
+	crypto_stats_ahash_final(nbytes, ret, alg);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct crypto_alg *alg = tfm->base.__crt_alg;
+	unsigned int nbytes = req->nbytes;
 	int ret;
 
+	crypto_stats_get(alg);
 	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
-	crypto_stat_ahash_final(req, ret);
+	crypto_stats_ahash_final(nbytes, ret, alg);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -385,13 +393,16 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct crypto_alg *alg = tfm->base.__crt_alg;
+	unsigned int nbytes = req->nbytes;
 	int ret;
 
+	crypto_stats_get(alg);
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		ret = -ENOKEY;
 	else
 		ret = crypto_ahash_op(req, tfm->digest);
-	crypto_stat_ahash_final(req, ret);
+	crypto_stats_ahash_final(nbytes, ret, alg);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 42fe316f..4c1e6079 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -1078,6 +1078,239 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
 }
 EXPORT_SYMBOL_GPL(crypto_type_has_alg);
 
+#ifdef CONFIG_CRYPTO_STATS
+void crypto_stats_get(struct crypto_alg *alg)
+{
+	crypto_alg_get(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_get);
+
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret,
+				     struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->cipher_err_cnt);
+	} else {
+		atomic64_inc(&alg->encrypt_cnt);
+		atomic64_add(nbytes, &alg->encrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt);
+
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret,
+				     struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->cipher_err_cnt);
+	} else {
+		atomic64_inc(&alg->decrypt_cnt);
+		atomic64_add(nbytes, &alg->decrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt);
+
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
+			       int ret)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->aead_err_cnt);
+	} else {
+		atomic64_inc(&alg->encrypt_cnt);
+		atomic64_add(cryptlen, &alg->encrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
+
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
+			       int ret)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->aead_err_cnt);
+	} else {
+		atomic64_inc(&alg->decrypt_cnt);
+		atomic64_add(cryptlen, &alg->decrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
+
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
+				   struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->akcipher_err_cnt);
+	} else {
+		atomic64_inc(&alg->encrypt_cnt);
+		atomic64_add(src_len, &alg->encrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
+
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
+				   struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->akcipher_err_cnt);
+	} else {
+		atomic64_inc(&alg->decrypt_cnt);
+		atomic64_add(src_len, &alg->decrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
+
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		atomic64_inc(&alg->akcipher_err_cnt);
+	else
+		atomic64_inc(&alg->sign_cnt);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
+
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		atomic64_inc(&alg->akcipher_err_cnt);
+	else
+		atomic64_inc(&alg->verify_cnt);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
+
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->compress_err_cnt);
+	} else {
+		atomic64_inc(&alg->compress_cnt);
+		atomic64_add(slen, &alg->compress_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_compress);
+
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->compress_err_cnt);
+	} else {
+		atomic64_inc(&alg->decompress_cnt);
+		atomic64_add(slen, &alg->decompress_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_decompress);
+
+void crypto_stats_ahash_update(unsigned int nbytes, int ret,
+			       struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		atomic64_inc(&alg->hash_err_cnt);
+	else
+		atomic64_add(nbytes, &alg->hash_tlen);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
+
+void crypto_stats_ahash_final(unsigned int nbytes, int ret,
+			      struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->hash_err_cnt);
+	} else {
+		atomic64_inc(&alg->hash_cnt);
+		atomic64_add(nbytes, &alg->hash_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
+
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{
+	if (ret)
+		atomic64_inc(&alg->kpp_err_cnt);
+	else
+		atomic64_inc(&alg->setsecret_cnt);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
+
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{
+	if (ret)
+		atomic64_inc(&alg->kpp_err_cnt);
+	else
+		atomic64_inc(&alg->generate_public_key_cnt);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
+
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{
+	if (ret)
+		atomic64_inc(&alg->kpp_err_cnt);
+	else
+		atomic64_inc(&alg->compute_shared_secret_cnt);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
+
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		atomic64_inc(&alg->rng_err_cnt);
+	else
+		atomic64_inc(&alg->seed_cnt);
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
+
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
+			       int ret)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->rng_err_cnt);
+	} else {
+		atomic64_inc(&alg->generate_cnt);
+		atomic64_add(dlen, &alg->generate_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
+
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
+				   struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->cipher_err_cnt);
+	} else {
+		atomic64_inc(&alg->encrypt_cnt);
+		atomic64_add(cryptlen, &alg->encrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
+
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
+				   struct crypto_alg *alg)
+{
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+		atomic64_inc(&alg->cipher_err_cnt);
+	} else {
+		atomic64_inc(&alg->decrypt_cnt);
+		atomic64_add(cryptlen, &alg->decrypt_tlen);
+	}
+	crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
+#endif
+
 static int __init crypto_algapi_init(void)
 {
 	crypto_init_proc();
diff --git a/crypto/rng.c b/crypto/rng.c
index 2406501b..33c38a72 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -35,9 +35,11 @@ static int crypto_default_rng_refcnt;
 
 int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
 {
+	struct crypto_alg *alg = tfm->base.__crt_alg;
 	u8 *buf = NULL;
 	int err;
 
+	crypto_stats_get(alg);
 	if (!seed && slen) {
 		buf = kmalloc(slen, GFP_KERNEL);
 		if (!buf)
@@ -50,7 +52,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
 	}
 
 	err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-	crypto_stat_rng_seed(tfm, err);
+	crypto_stats_rng_seed(alg, err);
 out:
 	kzfree(buf);
 	return err;
--
cgit v1.2.3
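The ordering fix in the patch above follows a simple rule: snapshot everything
the statistics code will need before the request is handed to an operation
that may free it, and pin the algorithm itself with
crypto_stats_get()/crypto_alg_put() so it survives an asynchronous completion.
A hedged userspace sketch of that ordering is shown below; struct fake_alg,
struct fake_request, run_op() and do_op_with_stats() are invented stand-ins,
and plain counters replace the kernel's atomic64_t statistics.

/*
 * Sketch of the use-after-free class fixed above, outside the kernel.
 * Only the ordering mirrors the patch: save the fields needed for
 * accounting before the operation runs, because the operation (or its
 * completion path) may free the request.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_alg {
	unsigned long op_cnt;
	unsigned long op_tlen;
};

struct fake_request {
	struct fake_alg *alg;
	unsigned int nbytes;
};

/* Consumes and frees the request, like an async backend completing it. */
static int run_op(struct fake_request *req)
{
	free(req);
	return 0;
}

static int do_op_with_stats(struct fake_request *req)
{
	/* Snapshot before run_op(): afterwards req is gone. */
	struct fake_alg *alg = req->alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	ret = run_op(req);
	if (ret == 0) {
		alg->op_cnt++;          /* cf. atomic64_inc(&alg->hash_cnt) */
		alg->op_tlen += nbytes; /* cf. atomic64_add(nbytes, ...)    */
	}
	return ret;
}

int main(void)
{
	struct fake_alg alg = { 0, 0 };
	struct fake_request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->alg = &alg;
	req->nbytes = 64;
	do_op_with_stats(req);
	printf("ops=%lu bytes=%lu\n", alg.op_cnt, alg.op_tlen);
	return 0;
}

Reading req->nbytes after run_op() had returned would be exactly the
use-after-free that syzbot reported against the original crypto_stat_*()
helpers.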