From dfc8ec4cd4d6953f68c09949ffc3a0274f779736 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:15 -0400 Subject: crypto: 842 - change 842 alg to use software Change the crypto 842 compression alg to use the software 842 compression and decompression library. Add the crypto driver_name as "842-generic". Remove the fallback to LZO compression. Previously, this crypto compression alg attempted 842 compression using PowerPC hardware, and fell back to LZO compression and decompression if the 842 PowerPC hardware was unavailable or failed. This should not fall back to any other compression method, however; users of this crypto compression alg can fall back if desired, and transparent fallback tricks callers into thinking they are getting 842 compression when they actually get LZO compression - the failure of the 842 hardware should not be transparent to the caller. The crypto compression alg for a hardware device also should not be located in crypto/, so this is now a software-only implementation that uses the 842 software compression/decompression library. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu --- crypto/Kconfig | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 8aaf298a..eba55b42 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1412,10 +1412,9 @@ config CRYPTO_LZO config CRYPTO_842 tristate "842 compression algorithm" - depends on CRYPTO_DEV_NX_COMPRESS - # 842 uses lzo if the hardware becomes unavailable - select LZO_COMPRESS - select LZO_DECOMPRESS + select CRYPTO_ALGAPI + select 842_COMPRESS + select 842_DECOMPRESS help This is the 842 algorithm. -- cgit v1.2.3 From 66d678ee742a91d8a6eace277eba39afaa354b56 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 15:11:13 +0800 Subject: crypto: seqiv - Add support for new AEAD interface This patch converts the seqiv IV generator to work with the new AEAD interface where IV generators are just normal AEAD algorithms. Full backwards compatibility is paramount at this point since no users have yet switched over to the new interface. Nor can they switch to the new interface until IV generation is fully supported by it. So this means we are adding two versions of seqiv alongside the existing one. The first one is the one that will be used when the underlying AEAD algorithm has switched over to the new AEAD interface. The second one handles the current case where the underlying AEAD algorithm still uses the old interface. Both versions export themselves through the new AEAD interface.
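
Illustrative aside, not part of the patch: the IV construction that both the old and new seqiv paths implement is a salted sequence number -- the 64-bit counter is stored big-endian in the last bytes of the IV buffer and the whole buffer is then XORed with a per-transform random salt (cf. seqiv_geniv() and the crypto_xor() calls in the diff below). A minimal standalone C sketch of that derivation, using hypothetical names and plain userspace types rather than the kernel API:

#include <stdint.h>
#include <string.h>

/* toy_seqiv: derive iv = salt XOR (0..0 || be64(seq)), as described above */
static void toy_seqiv(uint8_t *iv, const uint8_t *salt,
                      unsigned int ivsize, uint64_t seq)
{
        unsigned int i;

        memset(iv, 0, ivsize);
        /* big-endian sequence number in the last 8 bytes of the IV */
        for (i = 0; i < 8 && i < ivsize; i++)
                iv[ivsize - 1 - i] = (uint8_t)(seq >> (8 * i));
        /* XOR the per-transform salt over the whole IV (crypto_xor() here) */
        for (i = 0; i < ivsize; i++)
                iv[i] ^= salt[i];
}

A caller tracking its own counter would call toy_seqiv(iv, salt, ivsize, seq) once per message; in the kernel this derivation happens inside the generator's givencrypt/encrypt paths shown in the diff below.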
Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/aead.c | 100 +++++++++------ crypto/seqiv.c | 386 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 440 insertions(+), 47 deletions(-) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index eba55b42..657bb82a 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -221,6 +221,7 @@ config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD select CRYPTO_BLKCIPHER + select CRYPTO_NULL select CRYPTO_RNG help This IV generator generates an IV based on a sequence number by diff --git a/crypto/aead.c b/crypto/aead.c index d231e283..5fa992ac 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -378,15 +378,16 @@ static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn, return crypto_grab_spawn(&spawn->base, name, type, mask); } -struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, - u32 mask) +struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, u32 mask) { const char *name; struct crypto_aead_spawn *spawn; struct crypto_attr_type *algt; - struct crypto_instance *inst; - struct crypto_alg *alg; + struct aead_instance *inst; + struct aead_alg *alg; + unsigned int ivsize; + unsigned int maxauthsize; int err; algt = crypto_get_attr_type(tb); @@ -405,20 +406,28 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, if (!inst) return ERR_PTR(-ENOMEM); - spawn = crypto_instance_ctx(inst); + spawn = aead_instance_ctx(inst); /* Ignore async algorithms if necessary. */ mask |= crypto_requires_sync(algt->type, algt->mask); - crypto_set_aead_spawn(spawn, inst); + crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); err = crypto_grab_nivaead(spawn, name, type, mask); if (err) goto err_free_inst; - alg = crypto_aead_spawn_alg(spawn); + alg = crypto_spawn_aead_alg(spawn); + + if (alg->base.cra_aead.encrypt) { + ivsize = alg->base.cra_aead.ivsize; + maxauthsize = alg->base.cra_aead.maxauthsize; + } else { + ivsize = alg->ivsize; + maxauthsize = alg->maxauthsize; + } err = -EINVAL; - if (!alg->cra_aead.ivsize) + if (!ivsize) goto err_drop_alg; /* @@ -427,39 +436,56 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, * template name and double-check the IV generator. 
*/ if (algt->mask & CRYPTO_ALG_GENIV) { - if (strcmp(tmpl->name, alg->cra_aead.geniv)) + if (!alg->base.cra_aead.encrypt) + goto err_drop_alg; + if (strcmp(tmpl->name, alg->base.cra_aead.geniv)) goto err_drop_alg; - memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, + memcpy(inst->alg.base.cra_name, alg->base.cra_name, CRYPTO_MAX_ALG_NAME); - } else { - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", tmpl->name, alg->cra_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", tmpl->name, alg->cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; + memcpy(inst->alg.base.cra_driver_name, + alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_GENIV; + inst->alg.base.cra_flags |= alg->base.cra_flags & + CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = alg->base.cra_blocksize; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; + inst->alg.base.cra_type = &crypto_aead_type; + + inst->alg.base.cra_aead.ivsize = ivsize; + inst->alg.base.cra_aead.maxauthsize = maxauthsize; + + inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey; + inst->alg.base.cra_aead.setauthsize = + alg->base.cra_aead.setauthsize; + inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt; + inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt; + + goto out; } - inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV; - inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_aead_type; + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->base.cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; - inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; - inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; - inst->alg.cra_aead.geniv = alg->cra_aead.geniv; + inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = alg->base.cra_blocksize; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; + inst->alg.base.cra_type = &crypto_new_aead_type; - inst->alg.cra_aead.setkey = alg->cra_aead.setkey; - inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; - inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt; - inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt; + inst->alg.ivsize = ivsize; + inst->alg.maxauthsize = maxauthsize; out: return inst; @@ -473,9 +499,9 @@ err_free_inst: } EXPORT_SYMBOL_GPL(aead_geniv_alloc); -void aead_geniv_free(struct crypto_instance *inst) +void aead_geniv_free(struct aead_instance *inst) { - crypto_drop_aead(crypto_instance_ctx(inst)); + crypto_drop_aead(aead_instance_ctx(inst)); kfree(inst); } EXPORT_SYMBOL_GPL(aead_geniv_free); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 5bbf2e9e..27dbab8a 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -15,7 +15,9 @@ #include #include +#include #include +#include 
#include #include #include @@ -29,6 +31,29 @@ struct seqiv_ctx { u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; +struct seqiv_aead_ctx { + struct crypto_aead *child; + spinlock_t lock; + struct crypto_blkcipher *null; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +static int seqiv_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen) +{ + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setkey(ctx->child, key, keylen); +} + +static int seqiv_aead_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setauthsize(ctx->child, authsize); +} + static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) { struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); @@ -81,6 +106,33 @@ static void seqiv_aead_complete(struct crypto_async_request *base, int err) aead_givcrypt_complete(req, err); } +static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) +{ + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *geniv; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = crypto_aead_reqtfm(req); + memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv)); + +out: + kzfree(subreq->iv); +} + +static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, + int err) +{ + struct aead_request *req = base->data; + + seqiv_aead_encrypt_complete2(req, err); + aead_request_complete(req, err); +} + static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, unsigned int ivsize) { @@ -186,6 +238,171 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) return err; } +static int seqiv_aead_encrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + u8 *info; + unsigned int ivsize; + int err; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + info = req->iv; + + ivsize = crypto_aead_ivsize(geniv); + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + memcpy(info, req->iv, ivsize); + compl = seqiv_aead_encrypt_complete; + data = req; + } + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen - ivsize, info); + aead_request_set_ad(subreq, req->assoclen, ivsize); + + crypto_xor(info, ctx->salt, ivsize); + scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); + + err = crypto_aead_encrypt(subreq); + if (unlikely(info != req->iv)) + seqiv_aead_encrypt_complete2(req, err); + return err; +} + +static int seqiv_aead_encrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + u8 *info; + unsigned int ivsize; + int err; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + info = req->iv; + + ivsize = crypto_aead_ivsize(geniv); + + if (req->src != req->dst) { + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct blkcipher_desc desc = { + .tfm = ctx->null, + }; + + err = crypto_blkcipher_encrypt( + &desc, + scatterwalk_ffwd(dst, req->dst, + req->assoclen + ivsize), + scatterwalk_ffwd(src, req->src, + req->assoclen + ivsize), + req->cryptlen - ivsize); + if (err) + return err; + } + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + memcpy(info, req->iv, ivsize); + compl = seqiv_aead_encrypt_complete; + data = req; + } + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->dst, req->dst, + req->cryptlen - ivsize, info); + aead_request_set_ad(subreq, req->assoclen + ivsize, 0); + + crypto_xor(info, ctx->salt, ivsize); + scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); + + err = crypto_aead_encrypt(subreq); + if (unlikely(info != req->iv)) + seqiv_aead_encrypt_complete2(req, err); + return err; +} + +static int seqiv_aead_decrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + unsigned int ivsize; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + ivsize = crypto_aead_ivsize(geniv); + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen, ivsize); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + + return crypto_aead_decrypt(subreq); +} + +static int seqiv_aead_decrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + unsigned int ivsize; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + ivsize = crypto_aead_ivsize(geniv); + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + 
req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen + ivsize, 0); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + if (req->src != req->dst) + scatterwalk_map_and_copy(req->iv, req->dst, + req->assoclen, ivsize, 1); + + return crypto_aead_decrypt(subreq); +} + static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); @@ -232,6 +449,52 @@ unlock: return seqiv_aead_givencrypt(req); } +static int seqiv_aead_encrypt_compat_first(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + int err = 0; + + spin_lock_bh(&ctx->lock); + if (geniv->encrypt != seqiv_aead_encrypt_compat_first) + goto unlock; + + geniv->encrypt = seqiv_aead_encrypt_compat; + err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, + crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + if (err) + return err; + + return seqiv_aead_encrypt_compat(req); +} + +static int seqiv_aead_encrypt_first(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + int err = 0; + + spin_lock_bh(&ctx->lock); + if (geniv->encrypt != seqiv_aead_encrypt_first) + goto unlock; + + geniv->encrypt = seqiv_aead_encrypt; + err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, + crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + if (err) + return err; + + return seqiv_aead_encrypt(req); +} + static int seqiv_init(struct crypto_tfm *tfm) { struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm); @@ -244,7 +507,7 @@ static int seqiv_init(struct crypto_tfm *tfm) return skcipher_geniv_init(tfm); } -static int seqiv_aead_init(struct crypto_tfm *tfm) +static int seqiv_old_aead_init(struct crypto_tfm *tfm) { struct crypto_aead *geniv = __crypto_aead_cast(tfm); struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); @@ -257,6 +520,69 @@ static int seqiv_aead_init(struct crypto_tfm *tfm) return aead_geniv_init(tfm); } +static int seqiv_aead_compat_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + int err; + + spin_lock_init(&ctx->lock); + + crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); + + err = aead_geniv_init(tfm); + + ctx->child = geniv->child; + geniv->child = geniv; + + return err; +} + +static int seqiv_aead_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + int err; + + spin_lock_init(&ctx->lock); + + crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); + + ctx->null = crypto_get_default_null_skcipher(); + err = PTR_ERR(ctx->null); + if (IS_ERR(ctx->null)) + goto out; + + err = aead_geniv_init(tfm); + if (err) + goto drop_null; + + ctx->child = geniv->child; + geniv->child = geniv; + +out: + return err; + +drop_null: + crypto_put_default_null_skcipher(); + goto out; +} + +static void seqiv_aead_compat_exit(struct crypto_tfm *tfm) +{ + struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static void seqiv_aead_exit(struct crypto_tfm *tfm) +{ + struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); + crypto_put_default_null_skcipher(); +} + static struct crypto_template seqiv_tmpl; static struct crypto_instance 
*seqiv_ablkcipher_alloc(struct rtattr **tb) @@ -280,35 +606,76 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) inst->alg.cra_exit = skcipher_geniv_exit; inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize; + inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); out: return inst; } +static struct crypto_instance *seqiv_old_aead_alloc(struct aead_instance *aead) +{ + struct crypto_instance *inst = aead_crypto_instance(aead); + + if (inst->alg.cra_aead.ivsize < sizeof(u64)) { + aead_geniv_free(aead); + return ERR_PTR(-EINVAL); + } + + inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; + + inst->alg.cra_init = seqiv_old_aead_init; + inst->alg.cra_exit = aead_geniv_exit; + + inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; + inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); + + return inst; +} + static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) { - struct crypto_instance *inst; + struct aead_instance *inst; + struct crypto_aead_spawn *spawn; + struct aead_alg *alg; inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); if (IS_ERR(inst)) goto out; - if (inst->alg.cra_aead.ivsize < sizeof(u64)) { + if (inst->alg.base.cra_aead.encrypt) + return seqiv_old_aead_alloc(inst); + + if (inst->alg.ivsize < sizeof(u64)) { aead_geniv_free(inst); inst = ERR_PTR(-EINVAL); goto out; } - inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; + spawn = aead_instance_ctx(inst); + alg = crypto_spawn_aead_alg(spawn); - inst->alg.cra_init = seqiv_aead_init; - inst->alg.cra_exit = aead_geniv_exit; + inst->alg.setkey = seqiv_aead_setkey; + inst->alg.setauthsize = seqiv_aead_setauthsize; + inst->alg.encrypt = seqiv_aead_encrypt_first; + inst->alg.decrypt = seqiv_aead_decrypt; - inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; + inst->alg.base.cra_init = seqiv_aead_init; + inst->alg.base.cra_exit = seqiv_aead_exit; + + inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); + inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; + + if (alg->base.cra_aead.encrypt) { + inst->alg.encrypt = seqiv_aead_encrypt_compat_first; + inst->alg.decrypt = seqiv_aead_decrypt_compat; + + inst->alg.base.cra_init = seqiv_aead_compat_init; + inst->alg.base.cra_exit = seqiv_aead_compat_exit; + } out: - return inst; + return aead_crypto_instance(inst); } static struct crypto_instance *seqiv_alloc(struct rtattr **tb) @@ -334,7 +701,6 @@ static struct crypto_instance *seqiv_alloc(struct rtattr **tb) goto put_rng; inst->alg.cra_alignmask |= __alignof__(u32) - 1; - inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); out: return inst; @@ -349,7 +715,7 @@ static void seqiv_free(struct crypto_instance *inst) if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) skcipher_geniv_free(inst); else - aead_geniv_free(inst); + aead_geniv_free(aead_instance(inst)); crypto_put_default_rng(); } -- cgit v1.2.3 From 14a21b9e58a568596eab3fdc28e8028db17012b7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 15:11:15 +0800 Subject: crypto: echainiv - Add encrypted chain IV generator This patch adds a new AEAD IV generator echainiv. It is intended to replace the existing skcipher IV generator eseqiv. If the underlying AEAD algorithm is using the old AEAD interface, then echainiv will simply use its IV generator. Otherwise, echainiv will encrypt a counter just like eseqiv but it'll first xor it against a previously stored IV similar to chainiv. 
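
Illustrative aside, not part of the patch: following the informal description above (and not the exact code path of echainiv_encrypt() in the diff below), the value handed to the cipher is the sequence counter XORed with the salt and with the previously stored IV; the underlying AEAD then encrypts it along with the message, and the result becomes the stored chaining value for the next request. A hedged sketch of just that derivation step, with hypothetical names and userspace types:

#include <stddef.h>
#include <stdint.h>

/*
 * toy_echainiv_input: build the value fed to the cipher -- the salted
 * counter chained (XORed) with the previously stored IV.  Encrypting this
 * value and storing the result back as the new chaining value is left to
 * the underlying AEAD, as the commit message describes.
 */
static void toy_echainiv_input(uint8_t *iv, const uint8_t *salt,
                               const uint8_t *prev_iv, size_t ivsize,
                               uint64_t seq)
{
        size_t i;

        for (i = 0; i < ivsize; i++) {
                uint8_t ctr = i < sizeof(seq) ? (uint8_t)(seq >> (8 * i)) : 0;

                iv[i] = ctr ^ salt[i] ^ prev_iv[i];
        }
}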
Signed-off-by: Herbert Xu --- crypto/Kconfig | 10 + crypto/Makefile | 1 + crypto/echainiv.c | 531 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 542 insertions(+) create mode 100644 crypto/echainiv.c (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 657bb82a..b7088d13 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -227,6 +227,16 @@ config CRYPTO_SEQIV This IV generator generates an IV based on a sequence number by xoring it with a salt. This algorithm is mainly useful for CTR +config CRYPTO_ECHAINIV + tristate "Encrypted Chain IV Generator" + select CRYPTO_AEAD + select CRYPTO_NULL + select CRYPTO_RNG + help + This IV generator generates an IV based on the encryption of + a sequence number xored with a salt. This is the default + algorithm for CBC. + comment "Block modes" config CRYPTO_CBC diff --git a/crypto/Makefile b/crypto/Makefile index 97b7d3ac..df553630 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o +obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o crypto_hash-y += ahash.o crypto_hash-y += shash.o diff --git a/crypto/echainiv.c b/crypto/echainiv.c new file mode 100644 index 00000000..e5a9878e --- /dev/null +++ b/crypto/echainiv.c @@ -0,0 +1,531 @@ +/* + * echainiv: Encrypted Chain IV Generator + * + * This generator generates an IV based on a sequence number by xoring it + * with a salt and then encrypting it with the same key as used to encrypt + * the plain text. This algorithm requires that the block size be equal + * to the IV size. It is mainly useful for CBC. + * + * This generator can only be used by algorithms where authentication + * is performed after encryption (i.e., authenc). + * + * Copyright (c) 2015 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_IV_SIZE 16 + +struct echainiv_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct scatterlist ivbuf[2]; + struct scatterlist *ivsg; + struct aead_givcrypt_request subreq; +}; + +struct echainiv_ctx { + struct crypto_aead *child; + spinlock_t lock; + struct crypto_blkcipher *null; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); + +static int echainiv_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen) +{ + struct echainiv_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setkey(ctx->child, key, keylen); +} + +static int echainiv_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct echainiv_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +/* We don't care if we get preempted and read/write IVs from the next CPU. 
*/ +void echainiv_read_iv(u8 *dst, unsigned size) +{ + u32 *a = (u32 *)dst; + u32 __percpu *b = echainiv_iv; + + for (; size >= 4; size -= 4) { + *a++ = this_cpu_read(*b); + b++; + } +} + +void echainiv_write_iv(const u8 *src, unsigned size) +{ + const u32 *a = (const u32 *)src; + u32 __percpu *b = echainiv_iv; + + for (; size >= 4; size -= 4) { + this_cpu_write(*b, *a); + a++; + b++; + } +} + +static void echainiv_encrypt_compat_complete2(struct aead_request *req, + int err) +{ + struct echainiv_request_ctx *rctx = aead_request_ctx(req); + struct aead_givcrypt_request *subreq = &rctx->subreq; + struct crypto_aead *geniv; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = crypto_aead_reqtfm(req); + scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0, + crypto_aead_ivsize(geniv), 1); + +out: + kzfree(subreq->giv); +} + +static void echainiv_encrypt_compat_complete( + struct crypto_async_request *base, int err) +{ + struct aead_request *req = base->data; + + echainiv_encrypt_compat_complete2(req, err); + aead_request_complete(req, err); +} + +static void echainiv_encrypt_complete2(struct aead_request *req, int err) +{ + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *geniv; + unsigned int ivsize; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = crypto_aead_reqtfm(req); + ivsize = crypto_aead_ivsize(geniv); + + echainiv_write_iv(subreq->iv, ivsize); + + if (req->iv != subreq->iv) + memcpy(req->iv, subreq->iv, ivsize); + +out: + if (req->iv != subreq->iv) + kzfree(subreq->iv); +} + +static void echainiv_encrypt_complete(struct crypto_async_request *base, + int err) +{ + struct aead_request *req = base->data; + + echainiv_encrypt_complete2(req, err); + aead_request_complete(req, err); +} + +static int echainiv_encrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct echainiv_request_ctx *rctx = aead_request_ctx(req); + struct aead_givcrypt_request *subreq = &rctx->subreq; + unsigned int ivsize = crypto_aead_ivsize(geniv); + crypto_completion_t compl; + void *data; + u8 *info; + __be64 seq; + int err; + + compl = req->base.complete; + data = req->base.data; + + rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen); + info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg); + + if (!info) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + compl = echainiv_encrypt_compat_complete; + data = req; + } + + memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq)); + + aead_givcrypt_set_tfm(subreq, ctx->child); + aead_givcrypt_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_givcrypt_set_crypt(subreq, + scatterwalk_ffwd(rctx->src, req->src, + req->assoclen + ivsize), + scatterwalk_ffwd(rctx->dst, rctx->ivsg, + ivsize), + req->cryptlen - ivsize, req->iv); + aead_givcrypt_set_assoc(subreq, req->src, req->assoclen); + aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq)); + + err = crypto_aead_givencrypt(subreq); + if (unlikely(PageHighMem(sg_page(rctx->ivsg)))) + echainiv_encrypt_compat_complete2(req, err); + return err; +} + +static int echainiv_encrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + u8 *info; + unsigned int ivsize; + int err; + + aead_request_set_tfm(subreq, ctx->child); + + compl = echainiv_encrypt_complete; + data = req; + info = req->iv; + + ivsize = crypto_aead_ivsize(geniv); + + if (req->src != req->dst) { + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct blkcipher_desc desc = { + .tfm = ctx->null, + }; + + err = crypto_blkcipher_encrypt( + &desc, + scatterwalk_ffwd(dst, req->dst, + req->assoclen + ivsize), + scatterwalk_ffwd(src, req->src, + req->assoclen + ivsize), + req->cryptlen - ivsize); + if (err) + return err; + } + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + memcpy(info, req->iv, ivsize); + } + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->dst, req->dst, + req->cryptlen - ivsize, info); + aead_request_set_ad(subreq, req->assoclen + ivsize, 0); + + crypto_xor(info, ctx->salt, ivsize); + scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); + echainiv_read_iv(info, ivsize); + + err = crypto_aead_encrypt(subreq); + echainiv_encrypt_complete2(req, err); + return err; +} + +static int echainiv_decrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + unsigned int ivsize; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + ivsize = crypto_aead_ivsize(geniv); + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen, ivsize); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + + return crypto_aead_decrypt(subreq); +} + +static int echainiv_decrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + unsigned int ivsize; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + ivsize = crypto_aead_ivsize(geniv); + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen + ivsize, 0); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + if (req->src != req->dst) + scatterwalk_map_and_copy(req->iv, req->dst, + req->assoclen, ivsize, 1); + + return crypto_aead_decrypt(subreq); +} + +static int echainiv_encrypt_compat_first(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err = 0; + + spin_lock_bh(&ctx->lock); + if (geniv->encrypt != echainiv_encrypt_compat_first) + goto unlock; + + geniv->encrypt = echainiv_encrypt_compat; + err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, + crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + if (err) + return err; + + return echainiv_encrypt_compat(req); +} + +static int echainiv_encrypt_first(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err = 0; + + spin_lock_bh(&ctx->lock); + if (geniv->encrypt != echainiv_encrypt_first) + goto unlock; + + geniv->encrypt = echainiv_encrypt; + err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, + crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + if (err) + return err; + + return echainiv_encrypt(req); +} + +static int echainiv_compat_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err; + + spin_lock_init(&ctx->lock); + + crypto_aead_set_reqsize(geniv, sizeof(struct echainiv_request_ctx)); + + err = 
aead_geniv_init(tfm); + + ctx->child = geniv->child; + geniv->child = geniv; + + return err; +} + +static int echainiv_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err; + + spin_lock_init(&ctx->lock); + + crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); + + ctx->null = crypto_get_default_null_skcipher(); + err = PTR_ERR(ctx->null); + if (IS_ERR(ctx->null)) + goto out; + + err = aead_geniv_init(tfm); + if (err) + goto drop_null; + + ctx->child = geniv->child; + geniv->child = geniv; + +out: + return err; + +drop_null: + crypto_put_default_null_skcipher(); + goto out; +} + +static void echainiv_compat_exit(struct crypto_tfm *tfm) +{ + struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static void echainiv_exit(struct crypto_tfm *tfm) +{ + struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); + crypto_put_default_null_skcipher(); +} + +static struct crypto_template echainiv_tmpl; + +static struct crypto_instance *echainiv_aead_alloc(struct rtattr **tb) +{ + struct aead_instance *inst; + struct crypto_aead_spawn *spawn; + struct aead_alg *alg; + + inst = aead_geniv_alloc(&echainiv_tmpl, tb, 0, 0); + + if (IS_ERR(inst)) + goto out; + + if (inst->alg.ivsize < sizeof(u64) || + inst->alg.ivsize & (sizeof(u32) - 1) || + inst->alg.ivsize > MAX_IV_SIZE) { + aead_geniv_free(inst); + inst = ERR_PTR(-EINVAL); + goto out; + } + + spawn = aead_instance_ctx(inst); + alg = crypto_spawn_aead_alg(spawn); + + inst->alg.setkey = echainiv_setkey; + inst->alg.setauthsize = echainiv_setauthsize; + inst->alg.encrypt = echainiv_encrypt_first; + inst->alg.decrypt = echainiv_decrypt; + + inst->alg.base.cra_init = echainiv_init; + inst->alg.base.cra_exit = echainiv_exit; + + inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx); + inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; + + if (alg->base.cra_aead.encrypt) { + inst->alg.encrypt = echainiv_encrypt_compat_first; + inst->alg.decrypt = echainiv_decrypt_compat; + + inst->alg.base.cra_init = echainiv_compat_init; + inst->alg.base.cra_exit = echainiv_compat_exit; + } + +out: + return aead_crypto_instance(inst); +} + +static struct crypto_instance *echainiv_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + int err; + + err = crypto_get_default_rng(); + if (err) + return ERR_PTR(err); + + inst = echainiv_aead_alloc(tb); + + if (IS_ERR(inst)) + goto put_rng; + +out: + return inst; + +put_rng: + crypto_put_default_rng(); + goto out; +} + +static void echainiv_free(struct crypto_instance *inst) +{ + aead_geniv_free(aead_instance(inst)); + crypto_put_default_rng(); +} + +static struct crypto_template echainiv_tmpl = { + .name = "echainiv", + .alloc = echainiv_alloc, + .free = echainiv_free, + .module = THIS_MODULE, +}; + +static int __init echainiv_module_init(void) +{ + return crypto_register_template(&echainiv_tmpl); +} + +static void __exit echainiv_module_exit(void) +{ + crypto_unregister_template(&echainiv_tmpl); +} + +module_init(echainiv_module_init); +module_exit(echainiv_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Encrypted Chain IV Generator"); +MODULE_ALIAS_CRYPTO("echainiv"); -- cgit v1.2.3 From 9a79dc3dbfe617e6e02964eba0b009bf5ebd6610 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Mon, 25 May 2015 15:10:20 +0200 Subject: crypto: jitterentropy - add jitterentropy RNG The CPU Jitter 
RNG provides a source of good entropy by collecting CPU executing time jitter. The entropy in the CPU execution time jitter is magnified by the CPU Jitter Random Number Generator. The CPU Jitter Random Number Generator uses the CPU execution timing jitter to generate a bit stream which complies with different statistical measurements that determine the bit stream is random. The CPU Jitter Random Number Generator delivers entropy which follows information theoretical requirements. Based on these studies and the implementation, the caller can assume that one bit of data extracted from the CPU Jitter Random Number Generator holds one bit of entropy. The CPU Jitter Random Number Generator provides a decentralized source of entropy, i.e. every caller can operate on a private state of the entropy pool. The RNG does not have any dependencies on any other service in the kernel. The RNG only needs a high-resolution time stamp. Further design details, the cryptographic assessment and large array of test results are documented at http://www.chronox.de/jent.html. CC: Andreas Steffen CC: Theodore Ts'o CC: Sandy Harris Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/Kconfig | 10 + crypto/Makefile | 2 + crypto/jitterentropy.c | 909 +++++++++++++++++++++++++++++++++++++++++++++++++ crypto/testmgr.c | 4 + 4 files changed, 925 insertions(+) create mode 100644 crypto/jitterentropy.c (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index b7088d13..af011a96 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1489,9 +1489,19 @@ config CRYPTO_DRBG tristate default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR) select CRYPTO_RNG + select CRYPTO_JITTERENTROPY endif # if CRYPTO_DRBG_MENU +config CRYPTO_JITTERENTROPY + tristate "Jitterentropy Non-Deterministic Random Number Generator" + help + The Jitterentropy RNG is a noise that is intended + to provide seed to another RNG. The RNG does not + perform any cryptographic whitening of the generated + random numbers. This Jitterentropy RNG registers with + the kernel crypto API and can be used by any caller. + config CRYPTO_USER_API tristate diff --git a/crypto/Makefile b/crypto/Makefile index df553630..5db5b95d 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -95,6 +95,8 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o obj-$(CONFIG_CRYPTO_RNG2) += krng.o obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o obj-$(CONFIG_CRYPTO_DRBG) += drbg.o +CFLAGS_jitterentropy.o = -O0 +obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c new file mode 100644 index 00000000..1ebe58a2 --- /dev/null +++ b/crypto/jitterentropy.c @@ -0,0 +1,909 @@ +/* + * Non-physical true random number generator based on timing jitter. + * + * Copyright Stephan Mueller , 2014 + * + * Design + * ====== + * + * See http://www.chronox.de/jent.html + * + * License + * ======= + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, in which case the provisions of the GPL2 are + * required INSTEAD OF the above restrictions. (This clause is + * necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +/* + * This Jitterentropy RNG is based on the jitterentropy library + * version 1.1.0 provided at http://www.chronox.de/jent.html + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __OPTIMIZE__ + #error "The CPU Jitter random number generator must not be compiled with optimizations. See documentation. Use the compiler switch -O0 for compiling jitterentropy.c." +#endif + +/* The entropy pool */ +struct rand_data { + /* all data values that are vital to maintain the security + * of the RNG are marked as SENSITIVE. A user must not + * access that information while the RNG executes its loops to + * calculate the next random value. 
*/ + __u64 data; /* SENSITIVE Actual random number */ + __u64 old_data; /* SENSITIVE Previous random number */ + __u64 prev_time; /* SENSITIVE Previous time stamp */ +#define DATA_SIZE_BITS ((sizeof(__u64)) * 8) + __u64 last_delta; /* SENSITIVE stuck test */ + __s64 last_delta2; /* SENSITIVE stuck test */ + unsigned int stuck:1; /* Time measurement stuck */ + unsigned int osr; /* Oversample rate */ + unsigned int stir:1; /* Post-processing stirring */ + unsigned int disable_unbias:1; /* Deactivate Von-Neuman unbias */ +#define JENT_MEMORY_BLOCKS 64 +#define JENT_MEMORY_BLOCKSIZE 32 +#define JENT_MEMORY_ACCESSLOOPS 128 +#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE) + unsigned char *mem; /* Memory access location with size of + * memblocks * memblocksize */ + unsigned int memlocation; /* Pointer to byte in *mem */ + unsigned int memblocks; /* Number of memory blocks in *mem */ + unsigned int memblocksize; /* Size of one memory block in bytes */ + unsigned int memaccessloops; /* Number of memory accesses per random + * bit generation */ +}; + +/* Flags that can be used to initialize the RNG */ +#define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */ +#define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von-Neuman Unbiaser */ +#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more + * entropy, saves MEMORY_SIZE RAM for + * entropy collector */ + +#define DRIVER_NAME "jitterentropy" + +/* -- error codes for init function -- */ +#define JENT_ENOTIME 1 /* Timer service not available */ +#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */ +#define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */ +#define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */ +#define JENT_EVARVAR 5 /* Timer does not produce variations of + * variations (2nd derivation of time is + * zero). */ +#define JENT_EMINVARVAR 6 /* Timer variations of variations is tooi + * small. */ + +/*************************************************************************** + * Helper functions + ***************************************************************************/ + +static inline void jent_get_nstime(__u64 *out) +{ + struct timespec ts; + __u64 tmp = 0; + + tmp = random_get_entropy(); + + /* + * If random_get_entropy does not return a value (which is possible on, + * for example, MIPS), invoke __getnstimeofday + * hoping that there are timers we can work with. + * + * The list of available timers can be obtained from + * /sys/devices/system/clocksource/clocksource0/available_clocksource + * and are registered with clocksource_register() + */ + if ((0 == tmp) && +#ifndef MODULE + (0 == timekeeping_valid_for_hres()) && +#endif + (0 == __getnstimeofday(&ts))) { + tmp = ts.tv_sec; + tmp = tmp << 32; + tmp = tmp | ts.tv_nsec; + } + + *out = tmp; +} + + +/** + * Update of the loop count used for the next round of + * an entropy collection. 
+ * + * Input: + * @ec entropy collector struct -- may be NULL + * @bits is the number of low bits of the timer to consider + * @min is the number of bits we shift the timer value to the right at + * the end to make sure we have a guaranteed minimum value + * + * @return Newly calculated loop counter + */ +static __u64 jent_loop_shuffle(struct rand_data *ec, + unsigned int bits, unsigned int min) +{ + __u64 time = 0; + __u64 shuffle = 0; + unsigned int i = 0; + unsigned int mask = (1<data; + /* + * we fold the time value as much as possible to ensure that as many + * bits of the time stamp are included as possible + */ + for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) { + shuffle ^= time & mask; + time = time >> bits; + } + + /* + * We add a lower boundary value to ensure we have a minimum + * RNG loop count. + */ + return (shuffle + (1<= i; i++) { + __u64 tmp = time << (DATA_SIZE_BITS - i); + + tmp = tmp >> (DATA_SIZE_BITS - 1); + new ^= tmp; + } + } + *folded = new; + return fold_loop_cnt; +} + +/** + * Memory Access noise source -- this is a noise source based on variations in + * memory access times + * + * This function performs memory accesses which will add to the timing + * variations due to an unknown amount of CPU wait states that need to be + * added when accessing memory. The memory size should be larger than the L1 + * caches as outlined in the documentation and the associated testing. + * + * The L1 cache has a very high bandwidth, albeit its access rate is usually + * slower than accessing CPU registers. Therefore, L1 accesses only add minimal + * variations as the CPU has hardly to wait. Starting with L2, significant + * variations are added because L2 typically does not belong to the CPU any more + * and therefore a wider range of CPU wait states is necessary for accesses. + * L3 and real memory accesses have even a wider range of wait states. However, + * to reliably access either L3 or memory, the ec->mem memory must be quite + * large which is usually not desirable. 
+ * + * Input: + * @ec Reference to the entropy collector with the memory access data -- if + * the reference to the memory block to be accessed is NULL, this noise + * source is disabled + * @loop_cnt if a value not equal to 0 is set, use the given value as number of + * loops to perform the folding + * + * @return Number of memory access operations + */ +static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) +{ + unsigned char *tmpval = NULL; + unsigned int wrap = 0; + __u64 i = 0; +#define MAX_ACC_LOOP_BIT 7 +#define MIN_ACC_LOOP_BIT 0 + __u64 acc_loop_cnt = + jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); + + if (NULL == ec || NULL == ec->mem) + return 0; + wrap = ec->memblocksize * ec->memblocks; + + /* + * testing purposes -- allow test app to set the counter, not + * needed during runtime + */ + if (loop_cnt) + acc_loop_cnt = loop_cnt; + + for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) { + tmpval = ec->mem + ec->memlocation; + /* + * memory access: just add 1 to one byte, + * wrap at 255 -- memory access implies read + * from and write to memory location + */ + *tmpval = (*tmpval + 1) & 0xff; + /* + * Addition of memblocksize - 1 to pointer + * with wrap around logic to ensure that every + * memory location is hit evenly + */ + ec->memlocation = ec->memlocation + ec->memblocksize - 1; + ec->memlocation = ec->memlocation % wrap; + } + return i; +} + +/*************************************************************************** + * Start of entropy processing logic + ***************************************************************************/ + +/** + * Stuck test by checking the: + * 1st derivation of the jitter measurement (time delta) + * 2nd derivation of the jitter measurement (delta of time deltas) + * 3rd derivation of the jitter measurement (delta of delta of time deltas) + * + * All values must always be non-zero. + * + * Input: + * @ec Reference to entropy collector + * @current_delta Jitter time delta + * + * @return + * 0 jitter measurement not stuck (good bit) + * 1 jitter measurement stuck (reject bit) + */ +static void jent_stuck(struct rand_data *ec, __u64 current_delta) +{ + __s64 delta2 = ec->last_delta - current_delta; + __s64 delta3 = delta2 - ec->last_delta2; + + ec->last_delta = current_delta; + ec->last_delta2 = delta2; + + if (!current_delta || !delta2 || !delta3) + ec->stuck = 1; +} + +/** + * This is the heart of the entropy generation: calculate time deltas and + * use the CPU jitter in the time deltas. The jitter is folded into one + * bit. You can call this function the "random bit generator" as it + * produces one random bit per invocation. + * + * WARNING: ensure that ->prev_time is primed before using the output + * of this function! This can be done by calling this function + * and not using its result. + * + * Input: + * @entropy_collector Reference to entropy collector + * + * @return One random bit + */ +static __u64 jent_measure_jitter(struct rand_data *ec) +{ + __u64 time = 0; + __u64 data = 0; + __u64 current_delta = 0; + + /* Invoke one noise source before time measurement to add variations */ + jent_memaccess(ec, 0); + + /* + * Get time stamp and calculate time delta to previous + * invocation to measure the timing variations + */ + jent_get_nstime(&time); + current_delta = time - ec->prev_time; + ec->prev_time = time; + + /* Now call the next noise sources which also folds the data */ + jent_fold_time(ec, current_delta, &data, 0); + + /* + * Check whether we have a stuck measurement. 
The enforcement + * is performed after the stuck value has been mixed into the + * entropy pool. + */ + jent_stuck(ec, current_delta); + + return data; +} + +/** + * Von Neuman unbias as explained in RFC 4086 section 4.2. As shown in the + * documentation of that RNG, the bits from jent_measure_jitter are considered + * independent which implies that the Von Neuman unbias operation is applicable. + * A proof of the Von-Neumann unbias operation to remove skews is given in the + * document "A proposal for: Functionality classes for random number + * generators", version 2.0 by Werner Schindler, section 5.4.1. + * + * Input: + * @entropy_collector Reference to entropy collector + * + * @return One random bit + */ +static __u64 jent_unbiased_bit(struct rand_data *entropy_collector) +{ + do { + __u64 a = jent_measure_jitter(entropy_collector); + __u64 b = jent_measure_jitter(entropy_collector); + + if (a == b) + continue; + if (1 == a) + return 1; + else + return 0; + } while (1); +} + +/** + * Shuffle the pool a bit by mixing some value with a bijective function (XOR) + * into the pool. + * + * The function generates a mixer value that depends on the bits set and the + * location of the set bits in the random number generated by the entropy + * source. Therefore, based on the generated random number, this mixer value + * can have 2**64 different values. That mixer value is initialized with the + * first two SHA-1 constants. After obtaining the mixer value, it is XORed into + * the random number. + * + * The mixer value is not assumed to contain any entropy. But due to the XOR + * operation, it can also not destroy any entropy present in the entropy pool. + * + * Input: + * @entropy_collector Reference to entropy collector + */ +static void jent_stir_pool(struct rand_data *entropy_collector) +{ + /* + * to shut up GCC on 32 bit, we have to initialize the 64 variable + * with two 32 bit variables + */ + union c { + __u64 u64; + __u32 u32[2]; + }; + /* + * This constant is derived from the first two 32 bit initialization + * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 + */ + union c constant; + /* + * The start value of the mixer variable is derived from the third + * and fourth 32 bit initialization vector of SHA-1 as defined in + * FIPS 180-4 section 5.3.1 + */ + union c mixer; + unsigned int i = 0; + + /* + * Store the SHA-1 constants in reverse order to make up the 64 bit + * value -- this applies to a little endian system, on a big endian + * system, it reverses as expected. But this really does not matter + * as we do not rely on the specific numbers. We just pick the SHA-1 + * constants as they have a good mix of bit set and unset. 
+ */ + constant.u32[1] = 0x67452301; + constant.u32[0] = 0xefcdab89; + mixer.u32[1] = 0x98badcfe; + mixer.u32[0] = 0x10325476; + + for (i = 0; i < DATA_SIZE_BITS; i++) { + /* + * get the i-th bit of the input random number and only XOR + * the constant into the mixer value when that bit is set + */ + if ((entropy_collector->data >> i) & 1) + mixer.u64 ^= constant.u64; + mixer.u64 = rol64(mixer.u64, 1); + } + entropy_collector->data ^= mixer.u64; +} + +/** + * Generator of one 64 bit random number + * Function fills rand_data->data + * + * Input: + * @ec Reference to entropy collector + */ +static void jent_gen_entropy(struct rand_data *ec) +{ + unsigned int k = 0; + + /* priming of the ->prev_time value */ + jent_measure_jitter(ec); + + while (1) { + __u64 data = 0; + + if (ec->disable_unbias == 1) + data = jent_measure_jitter(ec); + else + data = jent_unbiased_bit(ec); + + /* enforcement of the jent_stuck test */ + if (ec->stuck) { + /* + * We only mix in the bit considered not appropriate + * without the LSFR. The reason is that if we apply + * the LSFR and we do not rotate, the 2nd bit with LSFR + * will cancel out the first LSFR application on the + * bad bit. + * + * And we do not rotate as we apply the next bit to the + * current bit location again. + */ + ec->data ^= data; + ec->stuck = 0; + continue; + } + + /* + * Fibonacci LSFR with polynom of + * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is + * primitive according to + * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf + * (the shift values are the polynom values minus one + * due to counting bits from 0 to 63). As the current + * position is always the LSB, the polynom only needs + * to shift data in from the left without wrap. + */ + ec->data ^= data; + ec->data ^= ((ec->data >> 63) & 1); + ec->data ^= ((ec->data >> 60) & 1); + ec->data ^= ((ec->data >> 55) & 1); + ec->data ^= ((ec->data >> 30) & 1); + ec->data ^= ((ec->data >> 27) & 1); + ec->data ^= ((ec->data >> 22) & 1); + ec->data = rol64(ec->data, 1); + + /* + * We multiply the loop value with ->osr to obtain the + * oversampling rate requested by the caller + */ + if (++k >= (DATA_SIZE_BITS * ec->osr)) + break; + } + if (ec->stir) + jent_stir_pool(ec); +} + +/** + * The continuous test required by FIPS 140-2 -- the function automatically + * primes the test if needed. + * + * Return: + * 0 if FIPS test passed + * < 0 if FIPS test failed + */ +static void jent_fips_test(struct rand_data *ec) +{ + if (!fips_enabled) + return; + + /* prime the FIPS test */ + if (!ec->old_data) { + ec->old_data = ec->data; + jent_gen_entropy(ec); + } + + if (ec->data == ec->old_data) + panic(DRIVER_NAME ": Duplicate output detected\n"); + + ec->old_data = ec->data; +} + + +/** + * Entry function: Obtain entropy for the caller. + * + * This function invokes the entropy gathering logic as often to generate + * as many bytes as requested by the caller. The entropy gathering logic + * creates 64 bit per invocation. + * + * This function truncates the last 64 bit entropy value output to the exact + * size specified by the caller. 
+ * + * Input: + * @ec Reference to entropy collector + * @data pointer to buffer for storing random data -- buffer must already + * exist + * @len size of the buffer, specifying also the requested number of random + * in bytes + * + * @return 0 when request is fulfilled or an error + * + * The following error codes can occur: + * -1 entropy_collector is NULL + */ +static ssize_t jent_read_entropy(struct rand_data *ec, u8 *data, size_t len) +{ + u8 *p = data; + + if (!ec) + return -EINVAL; + + while (0 < len) { + size_t tocopy; + + jent_gen_entropy(ec); + jent_fips_test(ec); + if ((DATA_SIZE_BITS / 8) < len) + tocopy = (DATA_SIZE_BITS / 8); + else + tocopy = len; + memcpy(p, &ec->data, tocopy); + + len -= tocopy; + p += tocopy; + } + + return 0; +} + +/*************************************************************************** + * Initialization logic + ***************************************************************************/ + +static struct rand_data *jent_entropy_collector_alloc(unsigned int osr, + unsigned int flags) +{ + struct rand_data *entropy_collector; + + entropy_collector = kzalloc(sizeof(struct rand_data), GFP_KERNEL); + if (!entropy_collector) + return NULL; + + if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) { + /* Allocate memory for adding variations based on memory + * access + */ + entropy_collector->mem = kzalloc(JENT_MEMORY_SIZE, GFP_KERNEL); + if (!entropy_collector->mem) { + kfree(entropy_collector); + return NULL; + } + entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE; + entropy_collector->memblocks = JENT_MEMORY_BLOCKS; + entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS; + } + + /* verify and set the oversampling rate */ + if (0 == osr) + osr = 1; /* minimum sampling rate is 1 */ + entropy_collector->osr = osr; + + entropy_collector->stir = 1; + if (flags & JENT_DISABLE_STIR) + entropy_collector->stir = 0; + if (flags & JENT_DISABLE_UNBIAS) + entropy_collector->disable_unbias = 1; + + /* fill the data pad with non-zero values */ + jent_gen_entropy(entropy_collector); + + return entropy_collector; +} + +static void jent_entropy_collector_free(struct rand_data *entropy_collector) +{ + if (entropy_collector->mem) + kzfree(entropy_collector->mem); + entropy_collector->mem = NULL; + if (entropy_collector) + kzfree(entropy_collector); + entropy_collector = NULL; +} + +static int jent_entropy_init(void) +{ + int i; + __u64 delta_sum = 0; + __u64 old_delta = 0; + int time_backwards = 0; + int count_var = 0; + int count_mod = 0; + + /* We could perform statistical tests here, but the problem is + * that we only have a few loop counts to do testing. These + * loop counts may show some slight skew and we produce + * false positives. + * + * Moreover, only old systems show potentially problematic + * jitter entropy that could potentially be caught here. But + * the RNG is intended for hardware that is available or widely + * used, but not old systems that are long out of favor. Thus, + * no statistical tests. + */ + + /* + * We could add a check for system capabilities such as clock_getres or + * check for CONFIG_X86_TSC, but it does not make much sense as the + * following sanity checks verify that we have a high-resolution + * timer. + */ + /* + * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is + * definitely too little. 
+ */ +#define TESTLOOPCOUNT 300 +#define CLEARCACHE 100 + for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { + __u64 time = 0; + __u64 time2 = 0; + __u64 folded = 0; + __u64 delta = 0; + unsigned int lowdelta = 0; + + jent_get_nstime(&time); + jent_fold_time(NULL, time, &folded, 1< i) + continue; + + /* test whether we have an increasing timer */ + if (!(time2 > time)) + time_backwards++; + + /* + * Avoid modulo of 64 bit integer to allow code to compile + * on 32 bit architectures. + */ + lowdelta = time2 - time; + if (!(lowdelta % 100)) + count_mod++; + + /* + * ensure that we have a varying delta timer which is necessary + * for the calculation of entropy -- perform this check + * only after the first loop is executed as we need to prime + * the old_data value + */ + if (i) { + if (delta != old_delta) + count_var++; + if (delta > old_delta) + delta_sum += (delta - old_delta); + else + delta_sum += (old_delta - delta); + } + old_delta = delta; + } + + /* + * we allow up to three times the time running backwards. + * CLOCK_REALTIME is affected by adjtime and NTP operations. Thus, + * if such an operation just happens to interfere with our test, it + * should not fail. The value of 3 should cover the NTP case being + * performed during our test run. + */ + if (3 < time_backwards) + return JENT_ENOMONOTONIC; + /* Error if the time variances are always identical */ + if (!delta_sum) + return JENT_EVARVAR; + + /* + * Variations of deltas of time must on average be larger + * than 1 to ensure the entropy estimation + * implied with 1 is preserved + */ + if (delta_sum <= 1) + return JENT_EMINVARVAR; + + /* + * Ensure that we have variations in the time stamp below 10 for at + * least 10% of all checks -- on some platforms, the counter + * increments in multiples of 100, but not always + */ + if ((TESTLOOPCOUNT/10 * 9) < count_mod) + return JENT_ECOARSETIME; + + return 0; +} + +/*************************************************************************** + * Kernel crypto API interface + ***************************************************************************/ + +struct jitterentropy { + spinlock_t jent_lock; + struct rand_data *entropy_collector; +}; + +static int jent_kcapi_init(struct crypto_tfm *tfm) +{ + struct jitterentropy *rng = crypto_tfm_ctx(tfm); + int ret = 0; + + rng->entropy_collector = jent_entropy_collector_alloc(1, 0); + if (!rng->entropy_collector) + ret = -ENOMEM; + + spin_lock_init(&rng->jent_lock); + return ret; +} + +static void jent_kcapi_cleanup(struct crypto_tfm *tfm) +{ + struct jitterentropy *rng = crypto_tfm_ctx(tfm); + + spin_lock(&rng->jent_lock); + if (rng->entropy_collector) + jent_entropy_collector_free(rng->entropy_collector); + rng->entropy_collector = NULL; + spin_unlock(&rng->jent_lock); +} + +static int jent_kcapi_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *rdata, unsigned int dlen) +{ + struct jitterentropy *rng = crypto_rng_ctx(tfm); + int ret = 0; + + spin_lock(&rng->jent_lock); + ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); + spin_unlock(&rng->jent_lock); + + return ret; +} + +static int jent_kcapi_reset(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) +{ + return 0; +} + +static struct rng_alg jent_alg = { + .generate = jent_kcapi_random, + .seed = jent_kcapi_reset, + .seedsize = 0, + .base = { + .cra_name = "jitterentropy_rng", + .cra_driver_name = "jitterentropy_rng", + .cra_priority = 100, + .cra_ctxsize = sizeof(struct jitterentropy), + .cra_module = THIS_MODULE, + .cra_init = 
jent_kcapi_init, + .cra_exit = jent_kcapi_cleanup, + + } +}; + +static int __init jent_mod_init(void) +{ + int ret = 0; + + ret = jent_entropy_init(); + if (ret) { + pr_info(DRIVER_NAME ": Initialization failed with host not compliant with requirements: %d\n", ret); + return -EFAULT; + } + return crypto_register_rng(&jent_alg); +} + +static void __exit jent_mod_exit(void) +{ + crypto_unregister_rng(&jent_alg); +} + +module_init(jent_mod_init); +module_exit(jent_mod_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Stephan Mueller "); +MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter"); +MODULE_ALIAS_CRYPTO("jitterentropy_rng"); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 18172526..277b3ac0 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3105,6 +3105,10 @@ static const struct alg_test_desc alg_test_descs[] = { .count = HMAC_SHA512_TEST_VECTORS } } + }, { + .alg = "jitterentropy_rng", + .fips_allowed = 1, + .test = alg_test_null, }, { .alg = "lrw(aes)", .test = alg_test_skcipher, -- cgit v1.2.3 From 7de6f3b2327e4bd3cec82a80eb6d014f908facc1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 28 May 2015 11:30:35 +0800 Subject: Revert "crypto: algif_aead - Disable AEAD user-space for now" This reverts commit 0eec8189c0b23c5d2b7ad9c11d955b4fc8bd74e0 as the algif_aead interface has been switched over to the new AEAD interface. Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 0ff4cd44..af011a96 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1532,6 +1532,15 @@ config CRYPTO_USER_API_RNG This option enables the user-spaces interface for random number generator algorithms. +config CRYPTO_USER_API_AEAD + tristate "User-space interface for AEAD cipher algorithms" + depends on NET + select CRYPTO_AEAD + select CRYPTO_USER_API + help + This option enables the user-spaces interface for AEAD + cipher algorithms. + config CRYPTO_HASH_INFO bool -- cgit v1.2.3 From e4a6db70d12f8a57c8d0cbf99894463458882f9e Mon Sep 17 00:00:00 2001 From: Martin Willi Date: Mon, 1 Jun 2015 13:43:56 +0200 Subject: crypto: chacha20 - Add a generic ChaCha20 stream cipher implementation ChaCha20 is a high speed 256-bit key size stream cipher algorithm designed by Daniel J. Bernstein. It is further specified in RFC7539 for use in IETF protocols as a building block for the ChaCha20-Poly1305 AEAD. This is a portable C implementation without any architecture specific optimizations. It uses a 16-byte IV, which includes the 12-byte ChaCha20 nonce prepended by the initial block counter. Some algorithms require an explicit counter value, for example the mentioned AEAD construction. Signed-off-by: Martin Willi Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/Kconfig | 13 +++ crypto/Makefile | 1 + crypto/chacha20_generic.c | 216 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 230 insertions(+) create mode 100644 crypto/chacha20_generic.c (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index af011a96..bf657bec 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1156,6 +1156,19 @@ config CRYPTO_SALSA20_X86_64 The Salsa20 stream cipher algorithm is designed by Daniel J. Bernstein . See +config CRYPTO_CHACHA20 + tristate "ChaCha20 cipher algorithm" + select CRYPTO_BLKCIPHER + help + ChaCha20 cipher algorithm, RFC7539. + + ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. 
+ Bernstein and further specified in RFC7539 for use in IETF protocols. + This is the portable C implementation of ChaCha20. + + See also: + + config CRYPTO_SEED tristate "SEED cipher algorithm" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 5db5b95d..be87ec1a 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -80,6 +80,7 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o +obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c new file mode 100644 index 00000000..fa42e708 --- /dev/null +++ b/crypto/chacha20_generic.c @@ -0,0 +1,216 @@ +/* + * ChaCha20 256-bit cipher algorithm, RFC7539 + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include + +#define CHACHA20_NONCE_SIZE 16 +#define CHACHA20_KEY_SIZE 32 +#define CHACHA20_BLOCK_SIZE 64 + +struct chacha20_ctx { + u32 key[8]; +}; + +static inline u32 rotl32(u32 v, u8 n) +{ + return (v << n) | (v >> (sizeof(v) * 8 - n)); +} + +static inline u32 le32_to_cpuvp(const void *p) +{ + return le32_to_cpup(p); +} + +static void chacha20_block(u32 *state, void *stream) +{ + u32 x[16], *out = stream; + int i; + + for (i = 0; i < ARRAY_SIZE(x); i++) + x[i] = state[i]; + + for (i = 0; i < 20; i += 2) { + x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16); + x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16); + x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16); + x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16); + + x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12); + x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12); + x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12); + x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12); + + x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8); + x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8); + x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8); + x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8); + + x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7); + x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7); + x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7); + x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7); + + x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16); + x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16); + x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16); + x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16); + + x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12); + x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12); + x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12); + x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12); + + x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8); + x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8); + x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8); + x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8); + + x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7); + x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7); + x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7); + x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7); + } + + for (i = 0; i < ARRAY_SIZE(x); i++) + out[i] = cpu_to_le32(x[i] + state[i]); + + state[12]++; +} + +static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, 
+ unsigned int bytes) +{ + u8 stream[CHACHA20_BLOCK_SIZE]; + + if (dst != src) + memcpy(dst, src, bytes); + + while (bytes >= CHACHA20_BLOCK_SIZE) { + chacha20_block(state, stream); + crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE); + bytes -= CHACHA20_BLOCK_SIZE; + dst += CHACHA20_BLOCK_SIZE; + } + if (bytes) { + chacha20_block(state, stream); + crypto_xor(dst, stream, bytes); + } +} + +static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) +{ + static const char constant[16] = "expand 32-byte k"; + + state[0] = le32_to_cpuvp(constant + 0); + state[1] = le32_to_cpuvp(constant + 4); + state[2] = le32_to_cpuvp(constant + 8); + state[3] = le32_to_cpuvp(constant + 12); + state[4] = ctx->key[0]; + state[5] = ctx->key[1]; + state[6] = ctx->key[2]; + state[7] = ctx->key[3]; + state[8] = ctx->key[4]; + state[9] = ctx->key[5]; + state[10] = ctx->key[6]; + state[11] = ctx->key[7]; + state[12] = le32_to_cpuvp(iv + 0); + state[13] = le32_to_cpuvp(iv + 4); + state[14] = le32_to_cpuvp(iv + 8); + state[15] = le32_to_cpuvp(iv + 12); +} + +static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keysize) +{ + struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm); + int i; + + if (keysize != CHACHA20_KEY_SIZE) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx->key); i++) + ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32)); + + return 0; +} + +static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct blkcipher_walk walk; + u32 state[16]; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE); + + chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv); + + while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { + chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, + rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); + err = blkcipher_walk_done(desc, &walk, + walk.nbytes % CHACHA20_BLOCK_SIZE); + } + + if (walk.nbytes) { + chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + + return err; +} + +static struct crypto_alg alg = { + .cra_name = "chacha20", + .cra_driver_name = "chacha20-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_type = &crypto_blkcipher_type, + .cra_ctxsize = sizeof(struct chacha20_ctx), + .cra_alignmask = sizeof(u32) - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = CHACHA20_KEY_SIZE, + .max_keysize = CHACHA20_KEY_SIZE, + .ivsize = CHACHA20_NONCE_SIZE, + .geniv = "seqiv", + .setkey = chacha20_setkey, + .encrypt = chacha20_crypt, + .decrypt = chacha20_crypt, + }, + }, +}; + +static int __init chacha20_generic_mod_init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit chacha20_generic_mod_fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(chacha20_generic_mod_init); +module_exit(chacha20_generic_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi "); +MODULE_DESCRIPTION("chacha20 cipher algorithm"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-generic"); -- cgit v1.2.3 From f929328828b8cfc9c563e554cf7099e73583690d Mon Sep 17 00:00:00 2001 From: Martin Willi Date: Mon, 1 Jun 2015 13:43:58 +0200 Subject: crypto: poly1305 - Add a generic Poly1305 authenticator implementation Poly1305 is a fast message authenticator designed by Daniel J. Bernstein. 
It is further defined in RFC7539 as a building block for the ChaCha20-Poly1305 AEAD for use in IETF protocols. This is a portable C implementation of the algorithm without architecture specific optimizations, based on public domain code by Daniel J. Bernstein and Andrew Moon. Signed-off-by: Martin Willi Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 ++ crypto/Makefile | 1 + crypto/poly1305_generic.c | 300 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 310 insertions(+) create mode 100644 crypto/poly1305_generic.c (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index bf657bec..9c00454e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -426,6 +426,15 @@ config CRYPTO_GHASH help GHASH is message digest algorithm for GCM (Galois/Counter Mode). +config CRYPTO_POLY1305 + tristate "Poly1305 authenticator algorithm" + help + Poly1305 authenticator algorithm, RFC7539. + + Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein. + It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use + in IETF protocols. This is the portable C implementation of Poly1305. + config CRYPTO_MD4 tristate "MD4 digest algorithm" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index be87ec1a..2424c81d 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -81,6 +81,7 @@ obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o +obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c new file mode 100644 index 00000000..9c1159b9 --- /dev/null +++ b/crypto/poly1305_generic.c @@ -0,0 +1,300 @@ +/* + * Poly1305 authenticator algorithm, RFC7539 + * + * Copyright (C) 2015 Martin Willi + * + * Based on public domain code by Andrew Moon and Daniel J. Bernstein. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_KEY_SIZE 32 +#define POLY1305_DIGEST_SIZE 16 + +struct poly1305_ctx { + /* key */ + u32 r[5]; + /* finalize key */ + u32 s[4]; +}; + +struct poly1305_desc_ctx { + /* accumulator */ + u32 h[5]; + /* partial buffer */ + u8 buf[POLY1305_BLOCK_SIZE]; + /* bytes used in partial buffer */ + unsigned int buflen; +}; + +static inline u64 mlt(u64 a, u64 b) +{ + return a * b; +} + +static inline u32 sr(u64 v, u_char n) +{ + return v >> n; +} + +static inline u32 and(u32 v, u32 mask) +{ + return v & mask; +} + +static inline u32 le32_to_cpuvp(const void *p) +{ + return le32_to_cpup(p); +} + +static int poly1305_init(struct shash_desc *desc) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + + memset(dctx->h, 0, sizeof(dctx->h)); + dctx->buflen = 0; + + return 0; +} + +static int poly1305_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct poly1305_ctx *ctx = crypto_shash_ctx(tfm); + + if (keylen != POLY1305_KEY_SIZE) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + ctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff; + ctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03; + ctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff; + ctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff; + ctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff; + + ctx->s[0] = le32_to_cpuvp(key + 16); + ctx->s[1] = le32_to_cpuvp(key + 20); + ctx->s[2] = le32_to_cpuvp(key + 24); + ctx->s[3] = le32_to_cpuvp(key + 28); + + return 0; +} + +static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx, + struct poly1305_ctx *ctx, const u8 *src, + unsigned int srclen, u32 hibit) +{ + u32 r0, r1, r2, r3, r4; + u32 s1, s2, s3, s4; + u32 h0, h1, h2, h3, h4; + u64 d0, d1, d2, d3, d4; + + r0 = ctx->r[0]; + r1 = ctx->r[1]; + r2 = ctx->r[2]; + r3 = ctx->r[3]; + r4 = ctx->r[4]; + + s1 = r1 * 5; + s2 = r2 * 5; + s3 = r3 * 5; + s4 = r4 * 5; + + h0 = dctx->h[0]; + h1 = dctx->h[1]; + h2 = dctx->h[2]; + h3 = dctx->h[3]; + h4 = dctx->h[4]; + + while (likely(srclen >= POLY1305_BLOCK_SIZE)) { + + /* h += m[i] */ + h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff; + h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff; + h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff; + h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff; + h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit; + + /* h *= r */ + d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + + mlt(h3, s2) + mlt(h4, s1); + d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + + mlt(h3, s3) + mlt(h4, s2); + d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + + mlt(h3, s4) + mlt(h4, s3); + d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + + mlt(h3, r0) + mlt(h4, s4); + d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + + mlt(h3, r1) + mlt(h4, r0); + + /* (partial) h %= p */ + d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); + d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); + d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); + d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); + h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); + h1 += h0 >> 26; h0 = h0 & 0x3ffffff; + + src += POLY1305_BLOCK_SIZE; + srclen -= POLY1305_BLOCK_SIZE; + } + + dctx->h[0] = h0; + dctx->h[1] = h1; + dctx->h[2] = h2; + dctx->h[3] = h3; + dctx->h[4] = h4; + + return srclen; +} + +static int poly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + struct poly1305_ctx 
*ctx = crypto_shash_ctx(desc->tfm); + unsigned int bytes; + + if (unlikely(dctx->buflen)) { + bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); + memcpy(dctx->buf + dctx->buflen, src, bytes); + src += bytes; + srclen -= bytes; + dctx->buflen += bytes; + + if (dctx->buflen == POLY1305_BLOCK_SIZE) { + poly1305_blocks(dctx, ctx, dctx->buf, + POLY1305_BLOCK_SIZE, 1 << 24); + dctx->buflen = 0; + } + } + + if (likely(srclen >= POLY1305_BLOCK_SIZE)) { + bytes = poly1305_blocks(dctx, ctx, src, srclen, 1 << 24); + src += srclen - bytes; + srclen = bytes; + } + + if (unlikely(srclen)) { + dctx->buflen = srclen; + memcpy(dctx->buf, src, srclen); + } + + return 0; +} + +static int poly1305_final(struct shash_desc *desc, u8 *dst) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + struct poly1305_ctx *ctx = crypto_shash_ctx(desc->tfm); + __le32 *mac = (__le32 *)dst; + u32 h0, h1, h2, h3, h4; + u32 g0, g1, g2, g3, g4; + u32 mask; + u64 f = 0; + + if (unlikely(dctx->buflen)) { + dctx->buf[dctx->buflen++] = 1; + memset(dctx->buf + dctx->buflen, 0, + POLY1305_BLOCK_SIZE - dctx->buflen); + poly1305_blocks(dctx, ctx, dctx->buf, POLY1305_BLOCK_SIZE, 0); + } + + /* fully carry h */ + h0 = dctx->h[0]; + h1 = dctx->h[1]; + h2 = dctx->h[2]; + h3 = dctx->h[3]; + h4 = dctx->h[4]; + + h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; + h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; + h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; + h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; + h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; + + /* compute h + -p */ + g0 = h0 + 5; + g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; + g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; + g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; + g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; + + /* select h if h < p, or h + -p if h >= p */ + mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; + g0 &= mask; + g1 &= mask; + g2 &= mask; + g3 &= mask; + g4 &= mask; + mask = ~mask; + h0 = (h0 & mask) | g0; + h1 = (h1 & mask) | g1; + h2 = (h2 & mask) | g2; + h3 = (h3 & mask) | g3; + h4 = (h4 & mask) | g4; + + /* h = h % (2^128) */ + h0 = (h0 >> 0) | (h1 << 26); + h1 = (h1 >> 6) | (h2 << 20); + h2 = (h2 >> 12) | (h3 << 14); + h3 = (h3 >> 18) | (h4 << 8); + + /* mac = (h + s) % (2^128) */ + f = (f >> 32) + h0 + ctx->s[0]; mac[0] = cpu_to_le32(f); + f = (f >> 32) + h1 + ctx->s[1]; mac[1] = cpu_to_le32(f); + f = (f >> 32) + h2 + ctx->s[2]; mac[2] = cpu_to_le32(f); + f = (f >> 32) + h3 + ctx->s[3]; mac[3] = cpu_to_le32(f); + + return 0; +} + +static struct shash_alg poly1305_alg = { + .digestsize = POLY1305_DIGEST_SIZE, + .init = poly1305_init, + .update = poly1305_update, + .final = poly1305_final, + .setkey = poly1305_setkey, + .descsize = sizeof(struct poly1305_desc_ctx), + .base = { + .cra_name = "poly1305", + .cra_driver_name = "poly1305-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_alignmask = sizeof(u32) - 1, + .cra_blocksize = POLY1305_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct poly1305_ctx), + .cra_module = THIS_MODULE, + }, +}; + +static int __init poly1305_mod_init(void) +{ + return crypto_register_shash(&poly1305_alg); +} + +static void __exit poly1305_mod_exit(void) +{ + crypto_unregister_shash(&poly1305_alg); +} + +module_init(poly1305_mod_init); +module_exit(poly1305_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi "); +MODULE_DESCRIPTION("Poly1305 authenticator"); +MODULE_ALIAS_CRYPTO("poly1305"); +MODULE_ALIAS_CRYPTO("poly1305-generic"); -- cgit v1.2.3 From 584f430dc4b1d5986db27b5cffbca9f3e60310fd Mon Sep 17 00:00:00 2001 From: Martin Willi 
Date: Mon, 1 Jun 2015 13:44:00 +0200 Subject: crypto: chacha20poly1305 - Add a ChaCha20-Poly1305 AEAD construction, RFC7539 This AEAD uses a chacha20 ablkcipher and a poly1305 ahash to construct the ChaCha20-Poly1305 AEAD as defined in RFC7539. It supports both synchronous and asynchronous operations, even if we currently have no async chacha20 or poly1305 drivers. Signed-off-by: Martin Willi Acked-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/Kconfig | 12 + crypto/Makefile | 1 + crypto/chacha20poly1305.c | 663 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 676 insertions(+) create mode 100644 crypto/chacha20poly1305.c (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 9c00454e..1bc7e0b8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -217,6 +217,18 @@ config CRYPTO_GCM Support for Galois/Counter Mode (GCM) and Galois Message Authentication Code (GMAC). Required for IPSec. +config CRYPTO_CHACHA20POLY1305 + tristate "ChaCha20-Poly1305 AEAD support" + select CRYPTO_CHACHA20 + select CRYPTO_POLY1305 + select CRYPTO_AEAD + help + ChaCha20-Poly1305 AEAD support, RFC7539. + + Support for the AEAD wrapper using the ChaCha20 stream cipher combined + with the Poly1305 authenticator. It is defined in RFC7539 for use in + IETF protocols. + config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD diff --git a/crypto/Makefile b/crypto/Makefile index 2424c81d..e6cf6a54 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o +obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c new file mode 100644 index 00000000..6171cf14 --- /dev/null +++ b/crypto/chacha20poly1305.c @@ -0,0 +1,663 @@ +/* + * ChaCha20-Poly1305 AEAD, RFC7539 + * + * Copyright (C) 2015 Martin Willi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +#define POLY1305_BLOCK_SIZE 16 +#define POLY1305_DIGEST_SIZE 16 +#define POLY1305_KEY_SIZE 32 +#define CHACHA20_KEY_SIZE 32 +#define CHACHA20_IV_SIZE 16 +#define CHACHAPOLY_IV_SIZE 12 + +struct chachapoly_instance_ctx { + struct crypto_skcipher_spawn chacha; + struct crypto_ahash_spawn poly; + unsigned int saltlen; +}; + +struct chachapoly_ctx { + struct crypto_ablkcipher *chacha; + struct crypto_ahash *poly; + /* key bytes we use for the ChaCha20 IV */ + unsigned int saltlen; + u8 salt[]; +}; + +struct poly_req { + /* zero byte padding for AD/ciphertext, as needed */ + u8 pad[POLY1305_BLOCK_SIZE]; + /* tail data with AD/ciphertext lengths */ + struct { + __le64 assoclen; + __le64 cryptlen; + } tail; + struct scatterlist src[1]; + struct ahash_request req; /* must be last member */ +}; + +struct chacha_req { + /* the key we generate for Poly1305 using Chacha20 */ + u8 key[POLY1305_KEY_SIZE]; + u8 iv[CHACHA20_IV_SIZE]; + struct scatterlist src[1]; + struct ablkcipher_request req; /* must be last member */ +}; + +struct chachapoly_req_ctx { + /* calculated Poly1305 tag */ + u8 tag[POLY1305_DIGEST_SIZE]; + /* length of data to en/decrypt, without ICV */ + unsigned int cryptlen; + union { + struct poly_req poly; + struct chacha_req chacha; + } u; +}; + +static inline void async_done_continue(struct aead_request *req, int err, + int (*cont)(struct aead_request *)) +{ + if (!err) + err = cont(req); + + if (err != -EINPROGRESS && err != -EBUSY) + aead_request_complete(req, err); +} + +static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + __le32 leicb = cpu_to_le32(icb); + + memcpy(iv, &leicb, sizeof(leicb)); + memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen); + memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv, + CHACHA20_IV_SIZE - sizeof(leicb) - ctx->saltlen); +} + +static int poly_verify_tag(struct aead_request *req) +{ + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + u8 tag[sizeof(rctx->tag)]; + + scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0); + if (crypto_memneq(tag, rctx->tag, sizeof(tag))) + return -EBADMSG; + return 0; +} + +static int poly_copy_tag(struct aead_request *req) +{ + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + + scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen, + sizeof(rctx->tag), 1); + return 0; +} + +static void chacha_decrypt_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_verify_tag); +} + +static int chacha_decrypt(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct chacha_req *creq = &rctx->u.chacha; + int err; + + chacha_iv(creq->iv, req, 1); + + ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), + chacha_decrypt_done, req); + ablkcipher_request_set_tfm(&creq->req, ctx->chacha); + ablkcipher_request_set_crypt(&creq->req, req->src, req->dst, + rctx->cryptlen, creq->iv); + err = crypto_ablkcipher_decrypt(&creq->req); + if (err) + return err; + + return poly_verify_tag(req); +} + +static int poly_tail_continue(struct aead_request *req) +{ + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + + if (rctx->cryptlen == req->cryptlen) /* encrypting */ + return poly_copy_tag(req); + + return chacha_decrypt(req); +} + 
+static void poly_tail_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_tail_continue); +} + +static int poly_tail(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct poly_req *preq = &rctx->u.poly; + __le64 len; + int err; + + sg_init_table(preq->src, 1); + len = cpu_to_le64(req->assoclen); + memcpy(&preq->tail.assoclen, &len, sizeof(len)); + len = cpu_to_le64(rctx->cryptlen); + memcpy(&preq->tail.cryptlen, &len, sizeof(len)); + sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail)); + + ahash_request_set_callback(&preq->req, aead_request_flags(req), + poly_tail_done, req); + ahash_request_set_tfm(&preq->req, ctx->poly); + ahash_request_set_crypt(&preq->req, preq->src, + rctx->tag, sizeof(preq->tail)); + + err = crypto_ahash_finup(&preq->req); + if (err) + return err; + + return poly_tail_continue(req); +} + +static void poly_cipherpad_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_tail); +} + +static int poly_cipherpad(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct poly_req *preq = &rctx->u.poly; + unsigned int padlen, bs = POLY1305_BLOCK_SIZE; + int err; + + padlen = (bs - (rctx->cryptlen % bs)) % bs; + memset(preq->pad, 0, sizeof(preq->pad)); + sg_init_table(preq->src, 1); + sg_set_buf(preq->src, &preq->pad, padlen); + + ahash_request_set_callback(&preq->req, aead_request_flags(req), + poly_cipherpad_done, req); + ahash_request_set_tfm(&preq->req, ctx->poly); + ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); + + err = crypto_ahash_update(&preq->req); + if (err) + return err; + + return poly_tail(req); +} + +static void poly_cipher_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_cipherpad); +} + +static int poly_cipher(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct poly_req *preq = &rctx->u.poly; + struct scatterlist *crypt = req->src; + int err; + + if (rctx->cryptlen == req->cryptlen) /* encrypting */ + crypt = req->dst; + + ahash_request_set_callback(&preq->req, aead_request_flags(req), + poly_cipher_done, req); + ahash_request_set_tfm(&preq->req, ctx->poly); + ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); + + err = crypto_ahash_update(&preq->req); + if (err) + return err; + + return poly_cipherpad(req); +} + +static void poly_adpad_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_cipher); +} + +static int poly_adpad(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct poly_req *preq = &rctx->u.poly; + unsigned int padlen, bs = POLY1305_BLOCK_SIZE; + int err; + + padlen = (bs - (req->assoclen % bs)) % bs; + memset(preq->pad, 0, sizeof(preq->pad)); + sg_init_table(preq->src, 1); + sg_set_buf(preq->src, preq->pad, padlen); + + ahash_request_set_callback(&preq->req, aead_request_flags(req), + poly_adpad_done, req); + ahash_request_set_tfm(&preq->req, ctx->poly); + ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); + + err = crypto_ahash_update(&preq->req); + if (err) + return err; + 
+ return poly_cipher(req); +} + +static void poly_ad_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_adpad); +} + +static int poly_ad(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct poly_req *preq = &rctx->u.poly; + int err; + + ahash_request_set_callback(&preq->req, aead_request_flags(req), + poly_ad_done, req); + ahash_request_set_tfm(&preq->req, ctx->poly); + ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen); + + err = crypto_ahash_update(&preq->req); + if (err) + return err; + + return poly_adpad(req); +} + +static void poly_init_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_ad); +} + +static int poly_init(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct poly_req *preq = &rctx->u.poly; + int err; + + ahash_request_set_callback(&preq->req, aead_request_flags(req), + poly_init_done, req); + ahash_request_set_tfm(&preq->req, ctx->poly); + + err = crypto_ahash_init(&preq->req); + if (err) + return err; + + return poly_ad(req); +} + +static int poly_genkey_continue(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct chachapoly_ctx *ctx = crypto_aead_ctx(aead); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct chacha_req *creq = &rctx->u.chacha; + int err; + + crypto_ahash_clear_flags(ctx->poly, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(ctx->poly, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + + err = crypto_ahash_setkey(ctx->poly, creq->key, sizeof(creq->key)); + crypto_aead_set_flags(aead, crypto_ahash_get_flags(ctx->poly) & + CRYPTO_TFM_RES_MASK); + if (err) + return err; + + return poly_init(req); +} + +static void poly_genkey_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_genkey_continue); +} + +static int poly_genkey(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct chacha_req *creq = &rctx->u.chacha; + int err; + + sg_init_table(creq->src, 1); + memset(creq->key, 0, sizeof(creq->key)); + sg_set_buf(creq->src, creq->key, sizeof(creq->key)); + + chacha_iv(creq->iv, req, 0); + + ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), + poly_genkey_done, req); + ablkcipher_request_set_tfm(&creq->req, ctx->chacha); + ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src, + POLY1305_KEY_SIZE, creq->iv); + + err = crypto_ablkcipher_decrypt(&creq->req); + if (err) + return err; + + return poly_genkey_continue(req); +} + +static void chacha_encrypt_done(struct crypto_async_request *areq, int err) +{ + async_done_continue(areq->data, err, poly_genkey); +} + +static int chacha_encrypt(struct aead_request *req) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + struct chacha_req *creq = &rctx->u.chacha; + int err; + + chacha_iv(creq->iv, req, 1); + + ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), + chacha_encrypt_done, req); + ablkcipher_request_set_tfm(&creq->req, ctx->chacha); + ablkcipher_request_set_crypt(&creq->req, req->src, req->dst, + req->cryptlen, creq->iv); + err = 
crypto_ablkcipher_encrypt(&creq->req); + if (err) + return err; + + return poly_genkey(req); +} + +static int chachapoly_encrypt(struct aead_request *req) +{ + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + + rctx->cryptlen = req->cryptlen; + + /* encrypt call chain: + * - chacha_encrypt/done() + * - poly_genkey/done/continue() + * - poly_init/done() + * - poly_ad/done() + * - poly_adpad/done() + * - poly_cipher/done() + * - poly_cipherpad/done() + * - poly_tail/done/continue() + * - poly_copy_tag() + */ + return chacha_encrypt(req); +} + +static int chachapoly_decrypt(struct aead_request *req) +{ + struct chachapoly_req_ctx *rctx = aead_request_ctx(req); + + if (req->cryptlen < POLY1305_DIGEST_SIZE) + return -EINVAL; + rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; + + /* decrypt call chain: + * - poly_genkey/done/continue() + * - poly_init/done() + * - poly_ad/done() + * - poly_adpad/done() + * - poly_cipher/done() + * - poly_cipherpad/done() + * - poly_tail/done/continue() + * - chacha_decrypt/done() + * - poly_verify_tag() + */ + return poly_genkey(req); +} + +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct chachapoly_ctx *ctx = crypto_aead_ctx(aead); + int err; + + if (keylen != ctx->saltlen + CHACHA20_KEY_SIZE) + return -EINVAL; + + keylen -= ctx->saltlen; + memcpy(ctx->salt, key + keylen, ctx->saltlen); + + crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + + err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen); + crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int chachapoly_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize != POLY1305_DIGEST_SIZE) + return -EINVAL; + + return 0; +} + +static int chachapoly_init(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst); + struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ablkcipher *chacha; + struct crypto_ahash *poly; + unsigned long align; + + poly = crypto_spawn_ahash(&ictx->poly); + if (IS_ERR(poly)) + return PTR_ERR(poly); + + chacha = crypto_spawn_skcipher(&ictx->chacha); + if (IS_ERR(chacha)) { + crypto_free_ahash(poly); + return PTR_ERR(chacha); + } + + ctx->chacha = chacha; + ctx->poly = poly; + ctx->saltlen = ictx->saltlen; + + align = crypto_tfm_alg_alignmask(tfm); + align &= ~(crypto_tfm_ctx_alignment() - 1); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + align + offsetof(struct chachapoly_req_ctx, u) + + max(offsetof(struct chacha_req, req) + + sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(chacha), + offsetof(struct poly_req, req) + + sizeof(struct ahash_request) + + crypto_ahash_reqsize(poly))); + + return 0; +} + +static void chachapoly_exit(struct crypto_tfm *tfm) +{ + struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_ahash(ctx->poly); + crypto_free_ablkcipher(ctx->chacha); +} + +static struct crypto_instance *chachapoly_alloc(struct rtattr **tb, + const char *name, + unsigned int ivsize) +{ + struct crypto_attr_type *algt; + struct crypto_instance *inst; + struct crypto_alg *chacha; + struct crypto_alg *poly; + struct ahash_alg *poly_ahash; + struct chachapoly_instance_ctx *ctx; + const char *chacha_name, *poly_name; + int err; + + if (ivsize > CHACHAPOLY_IV_SIZE) + return 
ERR_PTR(-EINVAL); + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return ERR_CAST(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) + return ERR_PTR(-EINVAL); + + chacha_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(chacha_name)) + return ERR_CAST(chacha_name); + poly_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(poly_name)) + return ERR_CAST(poly_name); + + poly = crypto_find_alg(poly_name, &crypto_ahash_type, + CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + if (IS_ERR(poly)) + return ERR_CAST(poly); + + err = -ENOMEM; + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + goto out_put_poly; + + ctx = crypto_instance_ctx(inst); + ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; + poly_ahash = container_of(poly, struct ahash_alg, halg.base); + err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst); + if (err) + goto err_free_inst; + + crypto_set_skcipher_spawn(&ctx->chacha, inst); + err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, + crypto_requires_sync(algt->type, + algt->mask)); + if (err) + goto err_drop_poly; + + chacha = crypto_skcipher_spawn_alg(&ctx->chacha); + + err = -EINVAL; + /* Need 16-byte IV size, including Initial Block Counter value */ + if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE) + goto out_drop_chacha; + /* Not a stream cipher? */ + if (chacha->cra_blocksize != 1) + goto out_drop_chacha; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s,%s)", name, chacha_name, + poly_name) >= CRYPTO_MAX_ALG_NAME) + goto out_drop_chacha; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "%s(%s,%s)", name, chacha->cra_driver_name, + poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto out_drop_chacha; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; + inst->alg.cra_flags |= (chacha->cra_flags | + poly->cra_flags) & CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = (chacha->cra_priority + + poly->cra_priority) / 2; + inst->alg.cra_blocksize = 1; + inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask; + inst->alg.cra_type = &crypto_nivaead_type; + inst->alg.cra_aead.ivsize = ivsize; + inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE; + inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; + inst->alg.cra_init = chachapoly_init; + inst->alg.cra_exit = chachapoly_exit; + inst->alg.cra_aead.encrypt = chachapoly_encrypt; + inst->alg.cra_aead.decrypt = chachapoly_decrypt; + inst->alg.cra_aead.setkey = chachapoly_setkey; + inst->alg.cra_aead.setauthsize = chachapoly_setauthsize; + inst->alg.cra_aead.geniv = "seqiv"; + +out: + crypto_mod_put(poly); + return inst; + +out_drop_chacha: + crypto_drop_skcipher(&ctx->chacha); +err_drop_poly: + crypto_drop_ahash(&ctx->poly); +err_free_inst: + kfree(inst); +out_put_poly: + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_instance *rfc7539_alloc(struct rtattr **tb) +{ + return chachapoly_alloc(tb, "rfc7539", 12); +} + +static void chachapoly_free(struct crypto_instance *inst) +{ + struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_skcipher(&ctx->chacha); + crypto_drop_ahash(&ctx->poly); + kfree(inst); +} + +static struct crypto_template rfc7539_tmpl = { + .name = "rfc7539", + .alloc = rfc7539_alloc, + .free = chachapoly_free, + .module = THIS_MODULE, +}; + +static int __init chacha20poly1305_module_init(void) +{ + return crypto_register_template(&rfc7539_tmpl); +} + +static void __exit chacha20poly1305_module_exit(void) +{ + 
crypto_unregister_template(&rfc7539_tmpl); +} + +module_init(chacha20poly1305_module_init); +module_exit(chacha20poly1305_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi "); +MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD"); +MODULE_ALIAS_CRYPTO("chacha20poly1305"); +MODULE_ALIAS_CRYPTO("rfc7539"); -- cgit v1.2.3 From e3cf1c151f07de085311b372e0b290970f0faf6b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 3 Jun 2015 14:49:29 +0800 Subject: crypto: echainiv - Set Kconfig default to m As this is required by many IPsec algorithms, let's set the default to m. Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 1bc7e0b8..91018d9e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -244,6 +244,7 @@ config CRYPTO_ECHAINIV select CRYPTO_AEAD select CRYPTO_NULL select CRYPTO_RNG + default m help This IV generator generates an IV based on the encryption of a sequence number xored with a salt. This is the default -- cgit v1.2.3 From dda8f1184d3ff6f1993ea60989fc2209248d9db7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 3 Jun 2015 14:49:31 +0800 Subject: crypto: rng - Make DRBG the default RNG This patch creates a new invisible Kconfig option CRYPTO_RNG_DEFAULT that simply selects the DRBG. This new option is then selected by the IV generators. Signed-off-by: Herbert Xu --- crypto/Kconfig | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 91018d9e..cb7806f3 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -78,6 +78,10 @@ config CRYPTO_RNG2 tristate select CRYPTO_ALGAPI2 +config CRYPTO_RNG_DEFAULT + tristate + select CRYPTO_DRBG_MENU + config CRYPTO_PCOMP tristate select CRYPTO_PCOMP2 @@ -234,7 +238,7 @@ config CRYPTO_SEQIV select CRYPTO_AEAD select CRYPTO_BLKCIPHER select CRYPTO_NULL - select CRYPTO_RNG + select CRYPTO_RNG_DEFAULT help This IV generator generates an IV based on a sequence number by xoring it with a salt. This algorithm is mainly useful for CTR @@ -243,7 +247,7 @@ config CRYPTO_ECHAINIV tristate "Encrypted Chain IV Generator" select CRYPTO_AEAD select CRYPTO_NULL - select CRYPTO_RNG + select CRYPTO_RNG_DEFAULT default m help This IV generator generates an IV based on the encryption of @@ -1484,7 +1488,6 @@ comment "Random Number Generation" config CRYPTO_ANSI_CPRNG tristate "Pseudo Random Number Generation for Cryptographic modules" - default m select CRYPTO_AES select CRYPTO_RNG help @@ -1502,11 +1505,9 @@ menuconfig CRYPTO_DRBG_MENU if CRYPTO_DRBG_MENU config CRYPTO_DRBG_HMAC - bool "Enable HMAC DRBG" + bool default y select CRYPTO_HMAC - help - Enable the HMAC DRBG variant as defined in NIST SP800-90A. config CRYPTO_DRBG_HASH bool "Enable Hash DRBG" @@ -1522,7 +1523,7 @@ config CRYPTO_DRBG_CTR config CRYPTO_DRBG tristate - default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR) + default CRYPTO_DRBG_MENU select CRYPTO_RNG select CRYPTO_JITTERENTROPY -- cgit v1.2.3 From b6a0750d6e2d827643ca7484db52bbf1ec6fcbc4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 11 Jun 2015 08:55:10 +0800 Subject: crypto: drbg - Add select on sha256 The hash-based DRBG variants all use sha256 so we need to add a select on it. 
Signed-off-by: Herbert Xu --- crypto/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index cb7806f3..f6fc054e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1508,10 +1508,11 @@ config CRYPTO_DRBG_HMAC bool default y select CRYPTO_HMAC + select CRYPTO_SHA256 config CRYPTO_DRBG_HASH bool "Enable Hash DRBG" - select CRYPTO_HASH + select CRYPTO_SHA256 help Enable the Hash DRBG variant as defined in NIST SP800-90A. -- cgit v1.2.3 From e795bc3714e3b4f17d1a336e37d251bc05153f5f Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 16 Jun 2015 10:30:55 -0700 Subject: crypto: akcipher - add PKE API Add Public Key Encryption API. Signed-off-by: Tadeusz Struk Made CRYPTO_AKCIPHER invisible like other type config options. Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 ++++ crypto/Makefile | 1 + crypto/akcipher.c | 117 +++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/crypto_user.c | 22 ++++++++++ 4 files changed, 149 insertions(+) create mode 100644 crypto/akcipher.c (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index f6fc054e..eb0aca45 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -91,6 +91,15 @@ config CRYPTO_PCOMP2 tristate select CRYPTO_ALGAPI2 +config CRYPTO_AKCIPHER2 + tristate + select CRYPTO_ALGAPI2 + +config CRYPTO_AKCIPHER + tristate + select CRYPTO_AKCIPHER2 + select CRYPTO_ALGAPI + config CRYPTO_MANAGER tristate "Cryptographic algorithm manager" select CRYPTO_MANAGER2 diff --git a/crypto/Makefile b/crypto/Makefile index c8420357..1ed382df 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -28,6 +28,7 @@ crypto_hash-y += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o +obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o cryptomgr-y := algboss.o testmgr.o diff --git a/crypto/akcipher.c b/crypto/akcipher.c new file mode 100644 index 00000000..d7986414 --- /dev/null +++ b/crypto/akcipher.c @@ -0,0 +1,117 @@ +/* + * Public Key Encryption + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +#ifdef CONFIG_NET +static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_akcipher rakcipher; + + strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, + sizeof(struct crypto_report_akcipher), &rakcipher)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else +static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + +static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); + +static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_puts(m, "type : akcipher\n"); +} + +static void crypto_akcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm); + struct akcipher_alg *alg = crypto_akcipher_alg(akcipher); + + alg->exit(akcipher); +} + +static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm); + struct akcipher_alg *alg = crypto_akcipher_alg(akcipher); + + if (alg->exit) + akcipher->base.exit = crypto_akcipher_exit_tfm; + + if (alg->init) + return alg->init(akcipher); + + return 0; +} + +static const struct crypto_type crypto_akcipher_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_akcipher_init_tfm, +#ifdef CONFIG_PROC_FS + .show = crypto_akcipher_show, +#endif + .report = crypto_akcipher_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_AKCIPHER, + .tfmsize = offsetof(struct crypto_akcipher, base), +}; + +struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_akcipher_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); + +int crypto_register_akcipher(struct akcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + base->cra_type = &crypto_akcipher_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_akcipher); + +void crypto_unregister_akcipher(struct akcipher_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_akcipher); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Generic public key cihper type"); diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 41dfe762..11dbd5a8 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "internal.h" @@ -110,6 +111,21 @@ nla_put_failure: return -EMSGSIZE; } +static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_akcipher rakcipher; + + strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); + + if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, + sizeof(struct crypto_report_akcipher), &rakcipher)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { @@ -154,6 +170,12 @@ static int crypto_report_one(struct crypto_alg *alg, goto nla_put_failure; break; + + case CRYPTO_ALG_TYPE_AKCIPHER: + if (crypto_report_akcipher(skb, alg)) + goto 
nla_put_failure; + + break; } out: -- cgit v1.2.3 From 8b0b3e1274dc6cdca25304e46c17303ba2fcbdc8 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 16 Jun 2015 10:31:01 -0700 Subject: crypto: rsa - add a new rsa generic implementation Add a new rsa generic SW implementation. This implements only cryptographic primitives. Signed-off-by: Tadeusz Struk Added select on ASN1. Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 ++ crypto/Makefile | 8 ++ crypto/rsa.c | 315 ++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/rsa_helper.c | 121 ++++++++++++++++++++ crypto/rsakey.asn1 | 5 + 5 files changed, 457 insertions(+) create mode 100644 crypto/rsa.c create mode 100644 crypto/rsa_helper.c create mode 100644 crypto/rsakey.asn1 (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index eb0aca45..d6b2a8b6 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -100,6 +100,14 @@ config CRYPTO_AKCIPHER select CRYPTO_AKCIPHER2 select CRYPTO_ALGAPI +config CRYPTO_RSA + tristate "RSA algorithm" + select AKCIPHER + select MPILIB + select ASN1 + help + Generic implementation of the RSA public key algorithm. + config CRYPTO_MANAGER tristate "Cryptographic algorithm manager" select CRYPTO_MANAGER2 diff --git a/crypto/Makefile b/crypto/Makefile index 1ed382df..0077476f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -30,6 +30,14 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o +$(obj)/rsakey-asn1.o: $(obj)/rsakey-asn1.c $(obj)/rsakey-asn1.h +clean-files += rsakey-asn1.c rsakey-asn1.h + +rsa_generic-y := rsakey-asn1.o +rsa_generic-y += rsa.o +rsa_generic-y += rsa_helper.o +obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o + cryptomgr-y := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o diff --git a/crypto/rsa.c b/crypto/rsa.c new file mode 100644 index 00000000..752af065 --- /dev/null +++ b/crypto/rsa.c @@ -0,0 +1,315 @@ +/* RSA asymmetric public-key algorithm [RFC3447] + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include + +/* + * RSAEP function [RFC3447 sec 5.1.1] + * c = m^e mod n; + */ +static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m) +{ + /* (1) Validate 0 <= m < n */ + if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) + return -EINVAL; + + /* (2) c = m^e mod n */ + return mpi_powm(c, m, key->e, key->n); +} + +/* + * RSADP function [RFC3447 sec 5.1.2] + * m = c^d mod n; + */ +static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c) +{ + /* (1) Validate 0 <= c < n */ + if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) + return -EINVAL; + + /* (2) m = c^d mod n */ + return mpi_powm(m, c, key->d, key->n); +} + +/* + * RSASP1 function [RFC3447 sec 5.2.1] + * s = m^d mod n + */ +static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m) +{ + /* (1) Validate 0 <= m < n */ + if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) + return -EINVAL; + + /* (2) s = m^d mod n */ + return mpi_powm(s, m, key->d, key->n); +} + +/* + * RSAVP1 function [RFC3447 sec 5.2.2] + * m = s^e mod n; + */ +static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s) +{ + /* (1) Validate 0 <= s < n */ + if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0) + return -EINVAL; + + /* (2) m = s^e mod n */ + return mpi_powm(m, s, key->e, key->n); +} + +static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm) +{ + return akcipher_tfm_ctx(tfm); +} + +static int rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + const struct rsa_key *pkey = rsa_get_key(tfm); + MPI m, c = mpi_alloc(0); + int ret = 0; + int sign; + + if (!c) + return -ENOMEM; + + if (unlikely(!pkey->n || !pkey->e)) { + ret = -EINVAL; + goto err_free_c; + } + + if (req->dst_len < mpi_get_size(pkey->n)) { + req->dst_len = mpi_get_size(pkey->n); + ret = -EOVERFLOW; + goto err_free_c; + } + + m = mpi_read_raw_data(req->src, req->src_len); + if (!m) { + ret = -ENOMEM; + goto err_free_c; + } + + ret = _rsa_enc(pkey, c, m); + if (ret) + goto err_free_m; + + ret = mpi_read_buffer(c, req->dst, req->dst_len, &req->dst_len, &sign); + if (ret) + goto err_free_m; + + if (sign < 0) { + ret = -EBADMSG; + goto err_free_m; + } + +err_free_m: + mpi_free(m); +err_free_c: + mpi_free(c); + return ret; +} + +static int rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + const struct rsa_key *pkey = rsa_get_key(tfm); + MPI c, m = mpi_alloc(0); + int ret = 0; + int sign; + + if (!m) + return -ENOMEM; + + if (unlikely(!pkey->n || !pkey->d)) { + ret = -EINVAL; + goto err_free_m; + } + + if (req->dst_len < mpi_get_size(pkey->n)) { + req->dst_len = mpi_get_size(pkey->n); + ret = -EOVERFLOW; + goto err_free_m; + } + + c = mpi_read_raw_data(req->src, req->src_len); + if (!c) { + ret = -ENOMEM; + goto err_free_m; + } + + ret = _rsa_dec(pkey, m, c); + if (ret) + goto err_free_c; + + ret = mpi_read_buffer(m, req->dst, req->dst_len, &req->dst_len, &sign); + if (ret) + goto err_free_c; + + if (sign < 0) { + ret = -EBADMSG; + goto err_free_c; + } + +err_free_c: + mpi_free(c); +err_free_m: + mpi_free(m); + return ret; +} + +static int rsa_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + const struct rsa_key *pkey = rsa_get_key(tfm); + MPI m, s = mpi_alloc(0); + int ret = 0; + int sign; + + if (!s) + return -ENOMEM; + + if (unlikely(!pkey->n || !pkey->d)) { + ret = -EINVAL; + goto err_free_s; + } + + if (req->dst_len < mpi_get_size(pkey->n)) { + req->dst_len = 
mpi_get_size(pkey->n); + ret = -EOVERFLOW; + goto err_free_s; + } + + m = mpi_read_raw_data(req->src, req->src_len); + if (!m) { + ret = -ENOMEM; + goto err_free_s; + } + + ret = _rsa_sign(pkey, s, m); + if (ret) + goto err_free_m; + + ret = mpi_read_buffer(s, req->dst, req->dst_len, &req->dst_len, &sign); + if (ret) + goto err_free_m; + + if (sign < 0) { + ret = -EBADMSG; + goto err_free_m; + } + +err_free_m: + mpi_free(m); +err_free_s: + mpi_free(s); + return ret; +} + +static int rsa_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + const struct rsa_key *pkey = rsa_get_key(tfm); + MPI s, m = mpi_alloc(0); + int ret = 0; + int sign; + + if (!m) + return -ENOMEM; + + if (unlikely(!pkey->n || !pkey->e)) { + ret = -EINVAL; + goto err_free_m; + } + + if (req->dst_len < mpi_get_size(pkey->n)) { + req->dst_len = mpi_get_size(pkey->n); + ret = -EOVERFLOW; + goto err_free_m; + } + + s = mpi_read_raw_data(req->src, req->src_len); + if (!s) { + ret = -ENOMEM; + goto err_free_m; + } + + ret = _rsa_verify(pkey, m, s); + if (ret) + goto err_free_s; + + ret = mpi_read_buffer(m, req->dst, req->dst_len, &req->dst_len, &sign); + if (ret) + goto err_free_s; + + if (sign < 0) { + ret = -EBADMSG; + goto err_free_s; + } + +err_free_s: + mpi_free(s); +err_free_m: + mpi_free(m); + return ret; +} + +static int rsa_setkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct rsa_key *pkey = akcipher_tfm_ctx(tfm); + + return rsa_parse_key(pkey, key, keylen); +} + +static void rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct rsa_key *pkey = akcipher_tfm_ctx(tfm); + + rsa_free_key(pkey); +} + +static struct akcipher_alg rsa = { + .encrypt = rsa_enc, + .decrypt = rsa_dec, + .sign = rsa_sign, + .verify = rsa_verify, + .setkey = rsa_setkey, + .exit = rsa_exit_tfm, + .base = { + .cra_name = "rsa", + .cra_driver_name = "rsa-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct rsa_key), + }, +}; + +static int rsa_init(void) +{ + return crypto_register_akcipher(&rsa); +} + +static void rsa_exit(void) +{ + crypto_unregister_akcipher(&rsa); +} + +module_init(rsa_init); +module_exit(rsa_exit); +MODULE_ALIAS_CRYPTO("rsa"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RSA generic algorithm"); diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c new file mode 100644 index 00000000..3e8e0a9e --- /dev/null +++ b/crypto/rsa_helper.c @@ -0,0 +1,121 @@ +/* + * RSA key extract helper + * + * Copyright (c) 2015, Intel Corporation + * Authors: Tadeusz Struk + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ *
+ */
+#include
+#include
+#include
+#include
+#include
+#include "rsakey-asn1.h"
+
+int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+	      const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	key->n = mpi_read_raw_data(value, vlen);
+
+	if (!key->n)
+		return -ENOMEM;
+
+	/* In FIPS mode only allow 2048- and 3072-bit keys (256 and 384 bytes) */
+	if (fips_enabled && (mpi_get_size(key->n) != 256 &&
+			     mpi_get_size(key->n) != 384)) {
+		pr_err("RSA: key size not allowed in FIPS mode\n");
+		mpi_free(key->n);
+		key->n = NULL;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+	      const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	key->e = mpi_read_raw_data(value, vlen);
+
+	if (!key->e)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+	      const void *value, size_t vlen)
+{
+	struct rsa_key *key = context;
+
+	key->d = mpi_read_raw_data(value, vlen);
+
+	if (!key->d)
+		return -ENOMEM;
+
+	/* In FIPS mode only allow 2048- and 3072-bit keys (256 and 384 bytes) */
+	if (fips_enabled && (mpi_get_size(key->d) != 256 &&
+			     mpi_get_size(key->d) != 384)) {
+		pr_err("RSA: key size not allowed in FIPS mode\n");
+		mpi_free(key->d);
+		key->d = NULL;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void free_mpis(struct rsa_key *key)
+{
+	mpi_free(key->n);
+	mpi_free(key->e);
+	mpi_free(key->d);
+	key->n = NULL;
+	key->e = NULL;
+	key->d = NULL;
+}
+
+/**
+ * rsa_free_key() - frees rsa key allocated by rsa_parse_key()
+ *
+ * @key: struct rsa_key key representation
+ */
+void rsa_free_key(struct rsa_key *key)
+{
+	free_mpis(key);
+}
+EXPORT_SYMBOL_GPL(rsa_free_key);
+
+/**
+ * rsa_parse_key() - extracts an rsa key from BER encoded buffer
+ *		     and stores it in the provided struct rsa_key
+ *
+ * @rsa_key: struct rsa_key key representation
+ * @key: key in BER format
+ * @key_len: length of key
+ *
+ * Return: 0 on success or error code in case of error
+ */
+int rsa_parse_key(struct rsa_key *rsa_key, const void *key,
+		  unsigned int key_len)
+{
+	int ret;
+
+	free_mpis(rsa_key);
+	ret = asn1_ber_decoder(&rsakey_decoder, rsa_key, key, key_len);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+error:
+	free_mpis(rsa_key);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rsa_parse_key);
diff --git a/crypto/rsakey.asn1 b/crypto/rsakey.asn1
new file mode 100644
index 00000000..3c7b5df7
--- /dev/null
+++ b/crypto/rsakey.asn1
@@ -0,0 +1,5 @@
+RsaKey ::= SEQUENCE {
+	n INTEGER ({ rsa_get_n }),
+	e INTEGER ({ rsa_get_e }),
+	d INTEGER ({ rsa_get_d })
+}
-- cgit v1.2.3

From 47bcc533d3dbd1d588e5b766afa84df40c791693 Mon Sep 17 00:00:00 2001
From: Tadeusz Struk
Date: Tue, 16 Jun 2015 10:31:06 -0700
Subject: crypto: testmgr - add test vectors for RSA

New test vectors for the RSA algorithm.
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/testmgr.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++ crypto/testmgr.h | 187 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 346 insertions(+) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index d6b2a8b6..6c79ef0e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -121,6 +121,7 @@ config CRYPTO_MANAGER2 select CRYPTO_HASH2 select CRYPTO_BLKCIPHER2 select CRYPTO_PCOMP2 + select CRYPTO_AKCIPHER2 config CRYPTO_USER tristate "Userspace cryptographic algorithm configuration" diff --git a/crypto/testmgr.c b/crypto/testmgr.c index ccd19cfe..975e1eac 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "internal.h" @@ -116,6 +117,11 @@ struct drbg_test_suite { unsigned int count; }; +struct akcipher_test_suite { + struct akcipher_testvec *vecs; + unsigned int count; +}; + struct alg_test_desc { const char *alg; int (*test)(const struct alg_test_desc *desc, const char *driver, @@ -130,6 +136,7 @@ struct alg_test_desc { struct hash_test_suite hash; struct cprng_test_suite cprng; struct drbg_test_suite drbg; + struct akcipher_test_suite akcipher; } suite; }; @@ -1825,6 +1832,147 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver, } +static int do_test_rsa(struct crypto_akcipher *tfm, + struct akcipher_testvec *vecs) +{ + struct akcipher_request *req; + void *outbuf_enc = NULL; + void *outbuf_dec = NULL; + struct tcrypt_result result; + unsigned int out_len_max, out_len = 0; + int err = -ENOMEM; + + req = akcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) + return err; + + init_completion(&result.completion); + err = crypto_akcipher_setkey(tfm, vecs->key, vecs->key_len); + if (err) + goto free_req; + + akcipher_request_set_crypt(req, vecs->m, outbuf_enc, vecs->m_size, + out_len); + /* expect this to fail, and update the required buf len */ + crypto_akcipher_encrypt(req); + out_len = req->dst_len; + if (!out_len) { + err = -EINVAL; + goto free_req; + } + + out_len_max = out_len; + err = -ENOMEM; + outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); + if (!outbuf_enc) + goto free_req; + + akcipher_request_set_crypt(req, vecs->m, outbuf_enc, vecs->m_size, + out_len); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); + + /* Run RSA encrypt - c = m^e mod n;*/ + err = wait_async_op(&result, crypto_akcipher_encrypt(req)); + if (err) { + pr_err("alg: rsa: encrypt test failed. err %d\n", err); + goto free_all; + } + if (out_len != vecs->c_size) { + pr_err("alg: rsa: encrypt test failed. Invalid output len\n"); + err = -EINVAL; + goto free_all; + } + /* verify that encrypted message is equal to expected */ + if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { + pr_err("alg: rsa: encrypt test failed. Invalid output\n"); + err = -EINVAL; + goto free_all; + } + /* Don't invoke decrypt for vectors with public key */ + if (vecs->public_key_vec) { + err = 0; + goto free_all; + } + outbuf_dec = kzalloc(out_len_max, GFP_KERNEL); + if (!outbuf_dec) { + err = -ENOMEM; + goto free_all; + } + init_completion(&result.completion); + akcipher_request_set_crypt(req, outbuf_enc, outbuf_dec, vecs->c_size, + out_len); + + /* Run RSA decrypt - m = c^d mod n;*/ + err = wait_async_op(&result, crypto_akcipher_decrypt(req)); + if (err) { + pr_err("alg: rsa: decrypt test failed. 
err %d\n", err); + goto free_all; + } + out_len = req->dst_len; + if (out_len != vecs->m_size) { + pr_err("alg: rsa: decrypt test failed. Invalid output len\n"); + err = -EINVAL; + goto free_all; + } + /* verify that decrypted message is equal to the original msg */ + if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) { + pr_err("alg: rsa: decrypt test failed. Invalid output\n"); + err = -EINVAL; + } +free_all: + kfree(outbuf_dec); + kfree(outbuf_enc); +free_req: + akcipher_request_free(req); + return err; +} + +static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs, + unsigned int tcount) +{ + int ret, i; + + for (i = 0; i < tcount; i++) { + ret = do_test_rsa(tfm, vecs++); + if (ret) { + pr_err("alg: rsa: test failed on vector %d, err=%d\n", + i + 1, ret); + return ret; + } + } + return 0; +} + +static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, + struct akcipher_testvec *vecs, unsigned int tcount) +{ + if (strncmp(alg, "rsa", 3) == 0) + return test_rsa(tfm, vecs, tcount); + + return 0; +} + +static int alg_test_akcipher(const struct alg_test_desc *desc, + const char *driver, u32 type, u32 mask) +{ + struct crypto_akcipher *tfm; + int err = 0; + + tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask); + if (IS_ERR(tfm)) { + pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + if (desc->suite.akcipher.vecs) + err = test_akcipher(tfm, desc->alg, desc->suite.akcipher.vecs, + desc->suite.akcipher.count); + + crypto_free_akcipher(tfm); + return err; +} + static int alg_test_null(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -3452,6 +3600,16 @@ static const struct alg_test_desc alg_test_descs[] = { .count = RMD320_TEST_VECTORS } } + }, { + .alg = "rsa", + .test = alg_test_akcipher, + .fips_allowed = 1, + .suite = { + .akcipher = { + .vecs = rsa_tv_template, + .count = RSA_TEST_VECTORS + } + } }, { .alg = "salsa20", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 35f37bcb..868edf11 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -125,8 +125,195 @@ struct drbg_testvec { size_t expectedlen; }; +struct akcipher_testvec { + unsigned char *key; + unsigned char *m; + unsigned char *c; + unsigned int key_len; + unsigned int m_size; + unsigned int c_size; + bool public_key_vec; +}; + static char zeroed_string[48]; +/* + * RSA test vectors. Borrowed from openSSL. 
+ */ +#ifdef CONFIG_CRYPTO_FIPS +#define RSA_TEST_VECTORS 2 +#else +#define RSA_TEST_VECTORS 4 +#endif +static struct akcipher_testvec rsa_tv_template[] = { + { +#ifndef CONFIG_CRYPTO_FIPS + .key = + "\x30\x81\x88" /* sequence of 136 bytes */ + "\x02\x41" /* modulus - integer of 65 bytes */ + "\x00\xAA\x36\xAB\xCE\x88\xAC\xFD\xFF\x55\x52\x3C\x7F\xC4\x52\x3F" + "\x90\xEF\xA0\x0D\xF3\x77\x4A\x25\x9F\x2E\x62\xB4\xC5\xD9\x9C\xB5" + "\xAD\xB3\x00\xA0\x28\x5E\x53\x01\x93\x0E\x0C\x70\xFB\x68\x76\x93" + "\x9C\xE6\x16\xCE\x62\x4A\x11\xE0\x08\x6D\x34\x1E\xBC\xAC\xA0\xA1" + "\xF5" + "\x02\x01\x11" /* public key - integer of 1 byte */ + "\x02\x40" /* private key - integer of 64 bytes */ + "\x0A\x03\x37\x48\x62\x64\x87\x69\x5F\x5F\x30\xBC\x38\xB9\x8B\x44" + "\xC2\xCD\x2D\xFF\x43\x40\x98\xCD\x20\xD8\xA1\x38\xD0\x90\xBF\x64" + "\x79\x7C\x3F\xA7\xA2\xCD\xCB\x3C\xD1\xE0\xBD\xBA\x26\x54\xB4\xF9" + "\xDF\x8E\x8A\xE5\x9D\x73\x3D\x9F\x33\xB3\x01\x62\x4A\xFD\x1D\x51", + .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", + .c = + "\x63\x1c\xcd\x7b\xe1\x7e\xe4\xde\xc9\xa8\x89\xa1\x74\xcb\x3c\x63" + "\x7d\x24\xec\x83\xc3\x15\xe4\x7f\x73\x05\x34\xd1\xec\x22\xbb\x8a" + "\x5e\x32\x39\x6d\xc1\x1d\x7d\x50\x3b\x9f\x7a\xad\xf0\x2e\x25\x53" + "\x9f\x6e\xbd\x4c\x55\x84\x0c\x9b\xcf\x1a\x4b\x51\x1e\x9e\x0c\x06", + .key_len = 139, + .m_size = 8, + .c_size = 64, + }, { + .key = + "\x30\x82\x01\x0B" /* sequence of 267 bytes */ + "\x02\x81\x81" /* modulus - integer of 129 bytes */ + "\x00\xBB\xF8\x2F\x09\x06\x82\xCE\x9C\x23\x38\xAC\x2B\x9D\xA8\x71" + "\xF7\x36\x8D\x07\xEE\xD4\x10\x43\xA4\x40\xD6\xB6\xF0\x74\x54\xF5" + "\x1F\xB8\xDF\xBA\xAF\x03\x5C\x02\xAB\x61\xEA\x48\xCE\xEB\x6F\xCD" + "\x48\x76\xED\x52\x0D\x60\xE1\xEC\x46\x19\x71\x9D\x8A\x5B\x8B\x80" + "\x7F\xAF\xB8\xE0\xA3\xDF\xC7\x37\x72\x3E\xE6\xB4\xB7\xD9\x3A\x25" + "\x84\xEE\x6A\x64\x9D\x06\x09\x53\x74\x88\x34\xB2\x45\x45\x98\x39" + "\x4E\xE0\xAA\xB1\x2D\x7B\x61\xA5\x1F\x52\x7A\x9A\x41\xF6\xC1\x68" + "\x7F\xE2\x53\x72\x98\xCA\x2A\x8F\x59\x46\xF8\xE5\xFD\x09\x1D\xBD" + "\xCB" + "\x02\x01\x11" /* public key - integer of 1 byte */ + "\x02\x81\x81" /* private key - integer of 129 bytes */ + "\x00\xA5\xDA\xFC\x53\x41\xFA\xF2\x89\xC4\xB9\x88\xDB\x30\xC1\xCD" + "\xF8\x3F\x31\x25\x1E\x06\x68\xB4\x27\x84\x81\x38\x01\x57\x96\x41" + "\xB2\x94\x10\xB3\xC7\x99\x8D\x6B\xC4\x65\x74\x5E\x5C\x39\x26\x69" + "\xD6\x87\x0D\xA2\xC0\x82\xA9\x39\xE3\x7F\xDC\xB8\x2E\xC9\x3E\xDA" + "\xC9\x7F\xF3\xAD\x59\x50\xAC\xCF\xBC\x11\x1C\x76\xF1\xA9\x52\x94" + "\x44\xE5\x6A\xAF\x68\xC5\x6C\x09\x2C\xD3\x8D\xC3\xBE\xF5\xD2\x0A" + "\x93\x99\x26\xED\x4F\x74\xA1\x3E\xDD\xFB\xE1\xA1\xCE\xCC\x48\x94" + "\xAF\x94\x28\xC2\xB7\xB8\x88\x3F\xE4\x46\x3A\x4B\xC8\x5B\x1C\xB3" + "\xC1", + .key_len = 271, + .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", + .c = + "\x74\x1b\x55\xac\x47\xb5\x08\x0a\x6e\x2b\x2d\xf7\x94\xb8\x8a\x95" + "\xed\xa3\x6b\xc9\x29\xee\xb2\x2c\x80\xc3\x39\x3b\x8c\x62\x45\x72" + "\xc2\x7f\x74\x81\x91\x68\x44\x48\x5a\xdc\xa0\x7e\xa7\x0b\x05\x7f" + "\x0e\xa0\x6c\xe5\x8f\x19\x4d\xce\x98\x47\x5f\xbd\x5f\xfe\xe5\x34" + "\x59\x89\xaf\xf0\xba\x44\xd7\xf1\x1a\x50\x72\xef\x5e\x4a\xb6\xb7" + "\x54\x34\xd1\xc4\x83\x09\xdf\x0f\x91\x5f\x7d\x91\x70\x2f\xd4\x13" + "\xcc\x5e\xa4\x6c\xc3\x4d\x28\xef\xda\xaf\xec\x14\x92\xfc\xa3\x75" + "\x13\xb4\xc1\xa1\x11\xfc\x40\x2f\x4c\x9d\xdf\x16\x76\x11\x20\x6b", + .m_size = 8, + .c_size = 128, + }, { +#endif + .key = + "\x30\x82\x02\x0D" /* sequence of 525 bytes */ + "\x02\x82\x01\x00" /* modulus - integer of 256 bytes */ + "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" + 
"\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA" + "\xC6\x67\xFF\x1D\x1E\x3C\x1D\xC1\xB5\x5F\x6C\xC0\xB2\x07\x3A\x6D" + "\x41\xE4\x25\x99\xAC\xFC\xD2\x0F\x02\xD3\xD1\x54\x06\x1A\x51\x77" + "\xBD\xB6\xBF\xEA\xA7\x5C\x06\xA9\x5D\x69\x84\x45\xD7\xF5\x05\xBA" + "\x47\xF0\x1B\xD7\x2B\x24\xEC\xCB\x9B\x1B\x10\x8D\x81\xA0\xBE\xB1" + "\x8C\x33\xE4\x36\xB8\x43\xEB\x19\x2A\x81\x8D\xDE\x81\x0A\x99\x48" + "\xB6\xF6\xBC\xCD\x49\x34\x3A\x8F\x26\x94\xE3\x28\x82\x1A\x7C\x8F" + "\x59\x9F\x45\xE8\x5D\x1A\x45\x76\x04\x56\x05\xA1\xD0\x1B\x8C\x77" + "\x6D\xAF\x53\xFA\x71\xE2\x67\xE0\x9A\xFE\x03\xA9\x85\xD2\xC9\xAA" + "\xBA\x2A\xBC\xF4\xA0\x08\xF5\x13\x98\x13\x5D\xF0\xD9\x33\x34\x2A" + "\x61\xC3\x89\x55\xF0\xAE\x1A\x9C\x22\xEE\x19\x05\x8D\x32\xFE\xEC" + "\x9C\x84\xBA\xB7\xF9\x6C\x3A\x4F\x07\xFC\x45\xEB\x12\xE5\x7B\xFD" + "\x55\xE6\x29\x69\xD1\xC2\xE8\xB9\x78\x59\xF6\x79\x10\xC6\x4E\xEB" + "\x6A\x5E\xB9\x9A\xC7\xC4\x5B\x63\xDA\xA3\x3F\x5E\x92\x7A\x81\x5E" + "\xD6\xB0\xE2\x62\x8F\x74\x26\xC2\x0C\xD3\x9A\x17\x47\xE6\x8E\xAB" + "\x02\x03\x01\x00\x01" /* public key - integer of 3 bytes */ + "\x02\x82\x01\x00" /* private key - integer of 256 bytes */ + "\x52\x41\xF4\xDA\x7B\xB7\x59\x55\xCA\xD4\x2F\x0F\x3A\xCB\xA4\x0D" + "\x93\x6C\xCC\x9D\xC1\xB2\xFB\xFD\xAE\x40\x31\xAC\x69\x52\x21\x92" + "\xB3\x27\xDF\xEA\xEE\x2C\x82\xBB\xF7\x40\x32\xD5\x14\xC4\x94\x12" + "\xEC\xB8\x1F\xCA\x59\xE3\xC1\x78\xF3\x85\xD8\x47\xA5\xD7\x02\x1A" + "\x65\x79\x97\x0D\x24\xF4\xF0\x67\x6E\x75\x2D\xBF\x10\x3D\xA8\x7D" + "\xEF\x7F\x60\xE4\xE6\x05\x82\x89\x5D\xDF\xC6\xD2\x6C\x07\x91\x33" + "\x98\x42\xF0\x02\x00\x25\x38\xC5\x85\x69\x8A\x7D\x2F\x95\x6C\x43" + "\x9A\xB8\x81\xE2\xD0\x07\x35\xAA\x05\x41\xC9\x1E\xAF\xE4\x04\x3B" + "\x19\xB8\x73\xA2\xAC\x4B\x1E\x66\x48\xD8\x72\x1F\xAC\xF6\xCB\xBC" + "\x90\x09\xCA\xEC\x0C\xDC\xF9\x2C\xD7\xEB\xAE\xA3\xA4\x47\xD7\x33" + "\x2F\x8A\xCA\xBC\x5E\xF0\x77\xE4\x97\x98\x97\xC7\x10\x91\x7D\x2A" + "\xA6\xFF\x46\x83\x97\xDE\xE9\xE2\x17\x03\x06\x14\xE2\xD7\xB1\x1D" + "\x77\xAF\x51\x27\x5B\x5E\x69\xB8\x81\xE6\x11\xC5\x43\x23\x81\x04" + "\x62\xFF\xE9\x46\xB8\xD8\x44\xDB\xA5\xCC\x31\x54\x34\xCE\x3E\x82" + "\xD6\xBF\x7A\x0B\x64\x21\x6D\x88\x7E\x5B\x45\x12\x1E\x63\x8D\x49" + "\xA7\x1D\xD9\x1E\x06\xCD\xE8\xBA\x2C\x8C\x69\x32\xEA\xBE\x60\x71", + .key_len = 529, + .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", + .c = + "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe" + "\xcf\x49\xbc\x48\xd3\x6c\x8f\x0a\x0e\xc1\x73\xbd\x7b\x55\x79\x36" + "\x0e\xa1\x87\x88\xb9\x2c\x90\xa6\x53\x5e\xe9\xef\xc4\xe2\x4d\xdd" + "\xf7\xa6\x69\x82\x3f\x56\xa4\x7b\xfb\x62\xe0\xae\xb8\xd3\x04\xb3" + "\xac\x5a\x15\x2a\xe3\x19\x9b\x03\x9a\x0b\x41\xda\x64\xec\x0a\x69" + "\xfc\xf2\x10\x92\xf3\xc1\xbf\x84\x7f\xfd\x2c\xae\xc8\xb5\xf6\x41" + "\x70\xc5\x47\x03\x8a\xf8\xff\x6f\x3f\xd2\x6f\x09\xb4\x22\xf3\x30" + "\xbe\xa9\x85\xcb\x9c\x8d\xf9\x8f\xeb\x32\x91\xa2\x25\x84\x8f\xf5" + "\xdc\xc7\x06\x9c\x2d\xe5\x11\x2c\x09\x09\x87\x09\xa9\xf6\x33\x73" + "\x90\xf1\x60\xf2\x65\xdd\x30\xa5\x66\xce\x62\x7b\xd0\xf8\x2d\x3d" + "\x19\x82\x77\xe3\x0a\x5f\x75\x2f\x8e\xb1\xe5\xe8\x91\x35\x1b\x3b" + "\x33\xb7\x66\x92\xd1\xf2\x8e\x6f\xe5\x75\x0c\xad\x36\xfb\x4e\xd0" + "\x66\x61\xbd\x49\xfe\xf4\x1a\xa2\x2b\x49\xfe\x03\x4c\x74\x47\x8d" + "\x9a\x66\xb2\x49\x46\x4d\x77\xea\x33\x4d\x6b\x3c\xb4\x49\x4a\xc6" + "\x7d\x3d\xb5\xb9\x56\x41\x15\x67\x0f\x94\x3c\x93\x65\x27\xe0\x21" + "\x5d\x59\xc3\x62\xd5\xa6\xda\x38\x26\x22\x5e\x34\x1c\x94\xaf\x98", + .m_size = 8, + .c_size = 256, + }, { + .key = + "\x30\x82\x01\x09" /* sequence of 265 bytes */ + "\x02\x82\x01\x00" 
/* modulus - integer of 256 bytes */ + "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" + "\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA" + "\xC6\x67\xFF\x1D\x1E\x3C\x1D\xC1\xB5\x5F\x6C\xC0\xB2\x07\x3A\x6D" + "\x41\xE4\x25\x99\xAC\xFC\xD2\x0F\x02\xD3\xD1\x54\x06\x1A\x51\x77" + "\xBD\xB6\xBF\xEA\xA7\x5C\x06\xA9\x5D\x69\x84\x45\xD7\xF5\x05\xBA" + "\x47\xF0\x1B\xD7\x2B\x24\xEC\xCB\x9B\x1B\x10\x8D\x81\xA0\xBE\xB1" + "\x8C\x33\xE4\x36\xB8\x43\xEB\x19\x2A\x81\x8D\xDE\x81\x0A\x99\x48" + "\xB6\xF6\xBC\xCD\x49\x34\x3A\x8F\x26\x94\xE3\x28\x82\x1A\x7C\x8F" + "\x59\x9F\x45\xE8\x5D\x1A\x45\x76\x04\x56\x05\xA1\xD0\x1B\x8C\x77" + "\x6D\xAF\x53\xFA\x71\xE2\x67\xE0\x9A\xFE\x03\xA9\x85\xD2\xC9\xAA" + "\xBA\x2A\xBC\xF4\xA0\x08\xF5\x13\x98\x13\x5D\xF0\xD9\x33\x34\x2A" + "\x61\xC3\x89\x55\xF0\xAE\x1A\x9C\x22\xEE\x19\x05\x8D\x32\xFE\xEC" + "\x9C\x84\xBA\xB7\xF9\x6C\x3A\x4F\x07\xFC\x45\xEB\x12\xE5\x7B\xFD" + "\x55\xE6\x29\x69\xD1\xC2\xE8\xB9\x78\x59\xF6\x79\x10\xC6\x4E\xEB" + "\x6A\x5E\xB9\x9A\xC7\xC4\x5B\x63\xDA\xA3\x3F\x5E\x92\x7A\x81\x5E" + "\xD6\xB0\xE2\x62\x8F\x74\x26\xC2\x0C\xD3\x9A\x17\x47\xE6\x8E\xAB" + "\x02\x03\x01\x00\x01", /* public key - integer of 3 bytes */ + .key_len = 269, + .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", + .c = + "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe" + "\xcf\x49\xbc\x48\xd3\x6c\x8f\x0a\x0e\xc1\x73\xbd\x7b\x55\x79\x36" + "\x0e\xa1\x87\x88\xb9\x2c\x90\xa6\x53\x5e\xe9\xef\xc4\xe2\x4d\xdd" + "\xf7\xa6\x69\x82\x3f\x56\xa4\x7b\xfb\x62\xe0\xae\xb8\xd3\x04\xb3" + "\xac\x5a\x15\x2a\xe3\x19\x9b\x03\x9a\x0b\x41\xda\x64\xec\x0a\x69" + "\xfc\xf2\x10\x92\xf3\xc1\xbf\x84\x7f\xfd\x2c\xae\xc8\xb5\xf6\x41" + "\x70\xc5\x47\x03\x8a\xf8\xff\x6f\x3f\xd2\x6f\x09\xb4\x22\xf3\x30" + "\xbe\xa9\x85\xcb\x9c\x8d\xf9\x8f\xeb\x32\x91\xa2\x25\x84\x8f\xf5" + "\xdc\xc7\x06\x9c\x2d\xe5\x11\x2c\x09\x09\x87\x09\xa9\xf6\x33\x73" + "\x90\xf1\x60\xf2\x65\xdd\x30\xa5\x66\xce\x62\x7b\xd0\xf8\x2d\x3d" + "\x19\x82\x77\xe3\x0a\x5f\x75\x2f\x8e\xb1\xe5\xe8\x91\x35\x1b\x3b" + "\x33\xb7\x66\x92\xd1\xf2\x8e\x6f\xe5\x75\x0c\xad\x36\xfb\x4e\xd0" + "\x66\x61\xbd\x49\xfe\xf4\x1a\xa2\x2b\x49\xfe\x03\x4c\x74\x47\x8d" + "\x9a\x66\xb2\x49\x46\x4d\x77\xea\x33\x4d\x6b\x3c\xb4\x49\x4a\xc6" + "\x7d\x3d\xb5\xb9\x56\x41\x15\x67\x0f\x94\x3c\x93\x65\x27\xe0\x21" + "\x5d\x59\xc3\x62\xd5\xa6\xda\x38\x26\x22\x5e\x34\x1c\x94\xaf\x98", + .m_size = 8, + .c_size = 256, + .public_key_vec = true, + } +}; + /* * MD4 test vectors from RFC1320 */ -- cgit v1.2.3 From fb0cf19afc46b3fccfe0b2c86e97253844fc62aa Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 19 Jun 2015 10:27:39 -0700 Subject: crypto: rsa - fix invalid select for AKCIPHER Should be CRYPTO_AKCIPHER instead of AKCIPHER Reported-by: Andreas Ruprecht Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto/Kconfig') diff --git a/crypto/Kconfig b/crypto/Kconfig index 6c79ef0e..b4cfc575 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -102,7 +102,7 @@ config CRYPTO_AKCIPHER config CRYPTO_RSA tristate "RSA algorithm" - select AKCIPHER + select CRYPTO_AKCIPHER select MPILIB select ASN1 help -- cgit v1.2.3
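For reference, rsa_parse_key() above expects the key as a BER blob in the layout described by crypto/rsakey.asn1, a SEQUENCE of the three INTEGERs n, e and d; the long keys in the testmgr.h vectors follow exactly that layout. The fragment below is a hypothetical toy encoding added only to make the structure easier to read - the values are invented and far too small to be a usable RSA key, and the array name is made up for illustration:

/*
 * Toy RsaKey blob matching crypto/rsakey.asn1
 * (RsaKey ::= SEQUENCE { n INTEGER, e INTEGER, d INTEGER }).
 * Illustrative values only; not a valid RSA key.
 */
static const u8 toy_rsa_key[] = {
	0x30, 0x0a,             /* SEQUENCE, 10 content bytes        */
	0x02, 0x02, 0x0b, 0xad, /* INTEGER n, 2 bytes -> rsa_get_n() */
	0x02, 0x01, 0x11,       /* INTEGER e, 1 byte  -> rsa_get_e() */
	0x02, 0x01, 0x01,       /* INTEGER d, 1 byte  -> rsa_get_d() */
};

/* would be consumed by: crypto_akcipher_setkey(tfm, toy_rsa_key, sizeof(toy_rsa_key)); */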
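The testmgr code above exercises the new akcipher interface end to end; the following minimal sketch shows how a kernel caller might drive the generic "rsa" implementation in the same way. It assumes the raw-buffer akcipher_request_set_crypt() signature used by testmgr.c in this series, assumes the operation completes synchronously (no completion callback handling), and the function and buffer names are invented for illustration - it is not part of the patches:

/* Hypothetical one-shot RSA encrypt via the "rsa" akcipher (sketch only). */
static int example_rsa_encrypt(const void *ber_key, unsigned int key_len,
			       void *msg, unsigned int msg_len,
			       void *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key is a BER blob in the rsakey.asn1 layout shown above */
	err = crypto_akcipher_setkey(tfm, ber_key, key_len);
	if (err)
		goto free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	/* src and dst are plain buffers in this version of the API */
	akcipher_request_set_crypt(req, msg, out, msg_len, out_len);

	/* rsa_enc() returns -EOVERFLOW and updates req->dst_len if out is too small */
	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}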