From 95a23ceed19aa967a07aba164784f5bc559cb2c3 Mon Sep 17 00:00:00 2001 From: Nick Terrell Date: Fri, 30 Mar 2018 12:14:53 -0700 Subject: crypto: zstd - Add zstd support Adds zstd support to crypto and scompress. Only supports the default level. Previously we held off on this patch, since there weren't any users. Now zram is ready for zstd support, but depends on CONFIG_CRYPTO_ZSTD, which isn't defined until this patch is in. I also see a patch adding zstd to pstore [0], which depends on crypto zstd. [0] lkml.kernel.org/r/9c9416b2dff19f05fb4c35879aaa83d11ff72c92.1521626182.git.geliangtang@gmail.com Signed-off-by: Nick Terrell Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto/Makefile') diff --git a/crypto/Makefile b/crypto/Makefile index 3a5f0161..065423d6 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -137,6 +137,7 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o +obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o ecdh_generic-y := ecc.o ecdh_generic-y += ecdh.o -- cgit v1.2.3 From cc695b2e62a68ca148d05d1b4042eee07ec65d6a Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 11 May 2018 14:12:49 +0200 Subject: crypto: aegis - Add generic AEGIS AEAD implementations This patch adds the generic implementation of the AEGIS family of AEAD algorithms (AEGIS-128, AEGIS-128L, and AEGIS-256). The original authors of AEGIS are Hongjun Wu and Bart Preneel. At the time of writing, AEGIS is one of the finalists in CAESAR, an open competition intended to select a portfolio of alternatives to the problematic AES-GCM: https://competitions.cr.yp.to/caesar-submissions.html https://competitions.cr.yp.to/round3/aegisv11.pdf Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu --- crypto/Kconfig | 21 +++ crypto/Makefile | 3 + crypto/aegis.h | 80 ++++++++ crypto/aegis128.c | 463 ++++++++++++++++++++++++++++++++++++++++++++++ crypto/aegis128l.c | 527 +++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/aegis256.c | 478 ++++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 1572 insertions(+) create mode 100644 crypto/aegis.h create mode 100644 crypto/aegis128.c create mode 100644 crypto/aegis128l.c create mode 100644 crypto/aegis256.c (limited to 'crypto/Makefile') diff --git a/crypto/Kconfig b/crypto/Kconfig index a5c5f7bb..48856238 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -289,6 +289,27 @@ config CRYPTO_CHACHA20POLY1305 with the Poly1305 authenticator. It is defined in RFC7539 for use in IETF protocols. +config CRYPTO_AEGIS128 + tristate "AEGIS-128 AEAD algorithm" + select CRYPTO_AEAD + select CRYPTO_AES # for AES S-box tables + help + Support for the AEGIS-128 dedicated AEAD algorithm. + +config CRYPTO_AEGIS128L + tristate "AEGIS-128L AEAD algorithm" + select CRYPTO_AEAD + select CRYPTO_AES # for AES S-box tables + help + Support for the AEGIS-128L dedicated AEAD algorithm. + +config CRYPTO_AEGIS256 + tristate "AEGIS-256 AEAD algorithm" + select CRYPTO_AEAD + select CRYPTO_AES # for AES S-box tables + help + Support for the AEGIS-256 dedicated AEAD algorithm. 
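[Illustrative note, not part of the patch: the Kconfig entries above only build the generic AEGIS implementations; in-kernel users reach them through the standard AEAD API by algorithm name. The sketch below shows that call pattern for "aegis128". It assumes a caller that already has a 16-byte key, a 16-byte nonce and source/destination scatterlists covering assoclen + cryptlen bytes (the destination with room for the 16-byte tag); error handling is abbreviated and the function name is hypothetical.]

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch only: encrypt one buffer with the generic "aegis128" AEAD. */
static int example_aegis128_encrypt(struct scatterlist *sg_src,
				    struct scatterlist *sg_dst,
				    unsigned int assoclen,
				    unsigned int cryptlen,
				    u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("aegis128", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg_src, sg_dst, cryptlen, iv);

	/*
	 * The generic implementation completes synchronously; an async
	 * driver could return -EINPROGRESS, which a real caller must
	 * handle via the request callback.
	 */
	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

[The same pattern applies to "aegis128l" and, with a 32-byte key and nonce, to "aegis256".]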
+ config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD diff --git a/crypto/Makefile b/crypto/Makefile index 065423d6..f2008d49 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -86,6 +86,9 @@ obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o obj-$(CONFIG_CRYPTO_GCM) += gcm.o obj-$(CONFIG_CRYPTO_CCM) += ccm.o obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o +obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o +obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o +obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o diff --git a/crypto/aegis.h b/crypto/aegis.h new file mode 100644 index 00000000..f1c6900d --- /dev/null +++ b/crypto/aegis.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * AEGIS common definitions + * + * Copyright (c) 2018 Ondrej Mosnacek + * Copyright (c) 2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#ifndef _CRYPTO_AEGIS_H +#define _CRYPTO_AEGIS_H + +#include +#include + +#define AEGIS_BLOCK_SIZE 16 + +union aegis_block { + __le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)]; + u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)]; + u8 bytes[AEGIS_BLOCK_SIZE]; +}; + +#define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block)) +#define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN) + +static const union aegis_block crypto_aegis_const[2] = { + { .words64 = { + cpu_to_le64(U64_C(0x0d08050302010100)), + cpu_to_le64(U64_C(0x6279e99059372215)), + } }, + { .words64 = { + cpu_to_le64(U64_C(0xf12fc26d55183ddb)), + cpu_to_le64(U64_C(0xdd28b57342311120)), + } }, +}; + +static void crypto_aegis_block_xor(union aegis_block *dst, + const union aegis_block *src) +{ + dst->words64[0] ^= src->words64[0]; + dst->words64[1] ^= src->words64[1]; +} + +static void crypto_aegis_block_and(union aegis_block *dst, + const union aegis_block *src) +{ + dst->words64[0] &= src->words64[0]; + dst->words64[1] &= src->words64[1]; +} + +static void crypto_aegis_aesenc(union aegis_block *dst, + const union aegis_block *src, + const union aegis_block *key) +{ + u32 *d = dst->words32; + const u8 *s = src->bytes; + const u32 *k = key->words32; + const u32 *t0 = crypto_ft_tab[0]; + const u32 *t1 = crypto_ft_tab[1]; + const u32 *t2 = crypto_ft_tab[2]; + const u32 *t3 = crypto_ft_tab[3]; + u32 d0, d1, d2, d3; + + d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0]; + d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1]; + d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2]; + d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3]; + + d[0] = d0; + d[1] = d1; + d[2] = d2; + d[3] = d3; +} + +#endif /* _CRYPTO_AEGIS_H */ diff --git a/crypto/aegis128.c b/crypto/aegis128.c new file mode 100644 index 00000000..38271303 --- /dev/null +++ b/crypto/aegis128.c @@ -0,0 +1,463 @@ +/* + * The AEGIS-128 Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aegis.h" + +#define AEGIS128_NONCE_SIZE 16 +#define AEGIS128_STATE_BLOCKS 5 +#define AEGIS128_KEY_SIZE 16 +#define AEGIS128_MIN_AUTH_SIZE 8 +#define AEGIS128_MAX_AUTH_SIZE 16 + +struct aegis_state { + union aegis_block blocks[AEGIS128_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key; +}; + +struct aegis128_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static void crypto_aegis128_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1]; + for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis128_update_a(struct aegis_state *state, + const union aegis_block *msg) +{ + crypto_aegis128_update(state); + crypto_aegis_block_xor(&state->blocks[0], msg); +} + +static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg) +{ + crypto_aegis128_update(state); + crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis128_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv; + unsigned int i; + + key_iv = *key; + crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE); + + state->blocks[0] = key_iv; + state->blocks[1] = crypto_aegis_const[1]; + state->blocks[2] = crypto_aegis_const[0]; + state->blocks[3] = *key; + state->blocks[4] = *key; + + crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]); + + for (i = 0; i < 5; i++) { + crypto_aegis128_update_a(state, key); + crypto_aegis128_update_a(state, &key_iv); + } +} + +static void crypto_aegis128_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_block *src_blk = + (const union aegis_block *)src; + + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_a(state, src_blk); + + size -= AEGIS_BLOCK_SIZE; + src_blk++; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis128_update_u(state, src); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + } + } +} + +static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + 
crypto_aegis128_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + + crypto_aegis128_update_a(state, &msg); + + crypto_aegis_block_xor(&msg, &tmp); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis128_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis128_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&msg, &tmp); + + memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); + + crypto_aegis128_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128_ad(state, src, left); + src += left & ~(AEGIS_BLOCK_SIZE - 1); + left &= AEGIS_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); + crypto_aegis128_update_a(state, &buf); + } +} + +static void crypto_aegis128_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis128_ops *ops) +{ + struct skcipher_walk walk; + u8 *src, *dst; + unsigned int chunksize; + + ops->skcipher_walk_init(&walk, req, false); + + while 
(walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops->crypt_chunk(state, dst, src, chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +static void crypto_aegis128_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_block tmp; + unsigned int i; + + tmp.words64[0] = cpu_to_le64(assocbits); + tmp.words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp, &state->blocks[3]); + + for (i = 0; i < 7; i++) + crypto_aegis128_update_a(state, &tmp); + + for (i = 0; i < AEGIS128_STATE_BLOCKS; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS128_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE); + return 0; +} + +static int crypto_aegis128_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis128_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis128_init(&state, &ctx->key, req->iv); + crypto_aegis128_process_ad(&state, req->src, req->assoclen); + crypto_aegis128_process_crypt(&state, req, ops); + crypto_aegis128_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis128_encrypt(struct aead_request *req) +{ + static const struct aegis128_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis128_crypt(req, &tag, cryptlen, &ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis128_decrypt(struct aead_request *req) +{ + static const struct aegis128_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128_decrypt_chunk, + }; + static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + crypto_aegis128_crypt(req, &tag, cryptlen, &ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} + +static int crypto_aegis128_init_tfm(struct crypto_aead *tfm) +{ + return 0; +} + +static void crypto_aegis128_exit_tfm(struct crypto_aead *tfm) +{ +} + +static struct aead_alg crypto_aegis128_alg = { + .setkey = crypto_aegis128_setkey, + .setauthsize = crypto_aegis128_setauthsize, + .encrypt = crypto_aegis128_encrypt, + .decrypt = crypto_aegis128_decrypt, + .init = crypto_aegis128_init_tfm, + .exit = crypto_aegis128_exit_tfm, + + .ivsize = AEGIS128_NONCE_SIZE, + .maxauthsize = AEGIS128_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis128", + .cra_driver_name = "aegis128-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis128_module_init(void) +{ + return crypto_register_aead(&crypto_aegis128_alg); +} + +static void __exit crypto_aegis128_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis128_alg); +} + +module_init(crypto_aegis128_module_init); +module_exit(crypto_aegis128_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis128"); +MODULE_ALIAS_CRYPTO("aegis128-generic"); diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c new file mode 100644 index 00000000..0cc1a752 --- /dev/null +++ b/crypto/aegis128l.c @@ -0,0 +1,527 @@ +/* + * The AEGIS-128L Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aegis.h" + +#define AEGIS128L_CHUNK_BLOCKS 2 +#define AEGIS128L_CHUNK_SIZE (AEGIS128L_CHUNK_BLOCKS * AEGIS_BLOCK_SIZE) +#define AEGIS128L_NONCE_SIZE 16 +#define AEGIS128L_STATE_BLOCKS 8 +#define AEGIS128L_KEY_SIZE 16 +#define AEGIS128L_MIN_AUTH_SIZE 8 +#define AEGIS128L_MAX_AUTH_SIZE 16 + +union aegis_chunk { + union aegis_block blocks[AEGIS128L_CHUNK_BLOCKS]; + u8 bytes[AEGIS128L_CHUNK_SIZE]; +}; + +struct aegis_state { + union aegis_block blocks[AEGIS128L_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key; +}; + +struct aegis128l_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static void crypto_aegis128l_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS128L_STATE_BLOCKS - 1]; + for (i = AEGIS128L_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis128l_update_a(struct aegis_state *state, + const union aegis_chunk *msg) +{ + crypto_aegis128l_update(state); + crypto_aegis_block_xor(&state->blocks[0], &msg->blocks[0]); + crypto_aegis_block_xor(&state->blocks[4], &msg->blocks[1]); +} + +static void crypto_aegis128l_update_u(struct aegis_state *state, + const void *msg) +{ + crypto_aegis128l_update(state); + crypto_xor(state->blocks[0].bytes, msg + 0 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + crypto_xor(state->blocks[4].bytes, msg + 1 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis128l_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv; + union aegis_chunk chunk; + unsigned int i; + + memcpy(chunk.blocks[0].bytes, iv, AEGIS_BLOCK_SIZE); + chunk.blocks[1] = *key; + + key_iv = *key; + crypto_aegis_block_xor(&key_iv, &chunk.blocks[0]); + + state->blocks[0] = key_iv; + state->blocks[1] = crypto_aegis_const[1]; + state->blocks[2] = crypto_aegis_const[0]; + state->blocks[3] = crypto_aegis_const[1]; + state->blocks[4] = key_iv; + state->blocks[5] = *key; + state->blocks[6] = *key; + state->blocks[7] = *key; + + crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[6], &crypto_aegis_const[1]); + crypto_aegis_block_xor(&state->blocks[7], &crypto_aegis_const[0]); + + for (i = 0; i < 10; i++) { + crypto_aegis128l_update_a(state, &chunk); + } +} + +static void crypto_aegis128l_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_chunk *src_chunk = + (const union aegis_chunk *)src; + + while (size >= AEGIS128L_CHUNK_SIZE) { + crypto_aegis128l_update_a(state, src_chunk); + + size -= AEGIS128L_CHUNK_SIZE; + src_chunk += 1; + } + } else { + while (size >= AEGIS128L_CHUNK_SIZE) { + crypto_aegis128l_update_u(state, src); + + size -= AEGIS128L_CHUNK_SIZE; + src += AEGIS128L_CHUNK_SIZE; + } + } +} + +static void crypto_aegis128l_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_chunk tmp; + union aegis_block *tmp0 = &tmp.blocks[0]; + union aegis_block *tmp1 = &tmp.blocks[1]; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS128L_CHUNK_SIZE) { + 
union aegis_chunk *dst_blk = + (union aegis_chunk *)dst; + const union aegis_chunk *src_blk = + (const union aegis_chunk *)src; + + *tmp0 = state->blocks[2]; + crypto_aegis_block_and(tmp0, &state->blocks[3]); + crypto_aegis_block_xor(tmp0, &state->blocks[6]); + crypto_aegis_block_xor(tmp0, &state->blocks[1]); + crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]); + + *tmp1 = state->blocks[6]; + crypto_aegis_block_and(tmp1, &state->blocks[7]); + crypto_aegis_block_xor(tmp1, &state->blocks[5]); + crypto_aegis_block_xor(tmp1, &state->blocks[2]); + crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]); + + crypto_aegis128l_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= AEGIS128L_CHUNK_SIZE; + src += AEGIS128L_CHUNK_SIZE; + dst += AEGIS128L_CHUNK_SIZE; + } + } else { + while (size >= AEGIS128L_CHUNK_SIZE) { + *tmp0 = state->blocks[2]; + crypto_aegis_block_and(tmp0, &state->blocks[3]); + crypto_aegis_block_xor(tmp0, &state->blocks[6]); + crypto_aegis_block_xor(tmp0, &state->blocks[1]); + crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + + *tmp1 = state->blocks[6]; + crypto_aegis_block_and(tmp1, &state->blocks[7]); + crypto_aegis_block_xor(tmp1, &state->blocks[5]); + crypto_aegis_block_xor(tmp1, &state->blocks[2]); + crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + + crypto_aegis128l_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE); + + size -= AEGIS128L_CHUNK_SIZE; + src += AEGIS128L_CHUNK_SIZE; + dst += AEGIS128L_CHUNK_SIZE; + } + } + + if (size > 0) { + union aegis_chunk msg = {}; + memcpy(msg.bytes, src, size); + + *tmp0 = state->blocks[2]; + crypto_aegis_block_and(tmp0, &state->blocks[3]); + crypto_aegis_block_xor(tmp0, &state->blocks[6]); + crypto_aegis_block_xor(tmp0, &state->blocks[1]); + + *tmp1 = state->blocks[6]; + crypto_aegis_block_and(tmp1, &state->blocks[7]); + crypto_aegis_block_xor(tmp1, &state->blocks[5]); + crypto_aegis_block_xor(tmp1, &state->blocks[2]); + + crypto_aegis128l_update_a(state, &msg); + + crypto_aegis_block_xor(&msg.blocks[0], tmp0); + crypto_aegis_block_xor(&msg.blocks[1], tmp1); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128l_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_chunk tmp; + union aegis_block *tmp0 = &tmp.blocks[0]; + union aegis_block *tmp1 = &tmp.blocks[1]; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS128L_CHUNK_SIZE) { + union aegis_chunk *dst_blk = + (union aegis_chunk *)dst; + const union aegis_chunk *src_blk = + (const union aegis_chunk *)src; + + *tmp0 = state->blocks[2]; + crypto_aegis_block_and(tmp0, &state->blocks[3]); + crypto_aegis_block_xor(tmp0, &state->blocks[6]); + crypto_aegis_block_xor(tmp0, &state->blocks[1]); + crypto_aegis_block_xor(tmp0, &src_blk->blocks[0]); + + *tmp1 = state->blocks[6]; + crypto_aegis_block_and(tmp1, &state->blocks[7]); + crypto_aegis_block_xor(tmp1, &state->blocks[5]); + crypto_aegis_block_xor(tmp1, &state->blocks[2]); + crypto_aegis_block_xor(tmp1, &src_blk->blocks[1]); + + crypto_aegis128l_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS128L_CHUNK_SIZE; + src += AEGIS128L_CHUNK_SIZE; + dst += AEGIS128L_CHUNK_SIZE; + } + } else { + while (size >= AEGIS128L_CHUNK_SIZE) { + *tmp0 = state->blocks[2]; + crypto_aegis_block_and(tmp0, &state->blocks[3]); + crypto_aegis_block_xor(tmp0, &state->blocks[6]); + crypto_aegis_block_xor(tmp0, &state->blocks[1]); + crypto_xor(tmp0->bytes, src + 0 * AEGIS_BLOCK_SIZE, + 
AEGIS_BLOCK_SIZE); + + *tmp1 = state->blocks[6]; + crypto_aegis_block_and(tmp1, &state->blocks[7]); + crypto_aegis_block_xor(tmp1, &state->blocks[5]); + crypto_aegis_block_xor(tmp1, &state->blocks[2]); + crypto_xor(tmp1->bytes, src + 1 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + + crypto_aegis128l_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS128L_CHUNK_SIZE); + + size -= AEGIS128L_CHUNK_SIZE; + src += AEGIS128L_CHUNK_SIZE; + dst += AEGIS128L_CHUNK_SIZE; + } + } + + if (size > 0) { + union aegis_chunk msg = {}; + memcpy(msg.bytes, src, size); + + *tmp0 = state->blocks[2]; + crypto_aegis_block_and(tmp0, &state->blocks[3]); + crypto_aegis_block_xor(tmp0, &state->blocks[6]); + crypto_aegis_block_xor(tmp0, &state->blocks[1]); + crypto_aegis_block_xor(&msg.blocks[0], tmp0); + + *tmp1 = state->blocks[6]; + crypto_aegis_block_and(tmp1, &state->blocks[7]); + crypto_aegis_block_xor(tmp1, &state->blocks[5]); + crypto_aegis_block_xor(tmp1, &state->blocks[2]); + crypto_aegis_block_xor(&msg.blocks[1], tmp1); + + memset(msg.bytes + size, 0, AEGIS128L_CHUNK_SIZE - size); + + crypto_aegis128l_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis128l_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_chunk buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS128L_CHUNK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS128L_CHUNK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis128l_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis128l_ad(state, src, left); + src += left & ~(AEGIS128L_CHUNK_SIZE - 1); + left &= AEGIS128L_CHUNK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS128L_CHUNK_SIZE - pos); + crypto_aegis128l_update_a(state, &buf); + } +} + +static void crypto_aegis128l_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis128l_ops *ops) +{ + struct skcipher_walk walk; + u8 *src, *dst; + unsigned int chunksize; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops->crypt_chunk(state, dst, src, chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +static void crypto_aegis128l_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_chunk tmp; + unsigned int i; + + tmp.blocks[0].words64[0] = cpu_to_le64(assocbits); + tmp.blocks[0].words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp.blocks[0], &state->blocks[2]); + + tmp.blocks[1] = tmp.blocks[0]; + for (i = 0; i < 7; i++) + crypto_aegis128l_update_a(state, &tmp); + + for (i = 0; i < 7; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis128l_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS128L_KEY_SIZE) { + 
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, AEGIS128L_KEY_SIZE); + return 0; +} + +static int crypto_aegis128l_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS128L_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS128L_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis128l_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis128l_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis128l_init(&state, &ctx->key, req->iv); + crypto_aegis128l_process_ad(&state, req->src, req->assoclen); + crypto_aegis128l_process_crypt(&state, req, ops); + crypto_aegis128l_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis128l_encrypt(struct aead_request *req) +{ + static const struct aegis128l_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis128l_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis128l_crypt(req, &tag, cryptlen, &ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis128l_decrypt(struct aead_request *req) +{ + static const struct aegis128l_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis128l_decrypt_chunk, + }; + static const u8 zeros[AEGIS128L_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + crypto_aegis128l_crypt(req, &tag, cryptlen, &ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} + +static int crypto_aegis128l_init_tfm(struct crypto_aead *tfm) +{ + return 0; +} + +static void crypto_aegis128l_exit_tfm(struct crypto_aead *tfm) +{ +} + +static struct aead_alg crypto_aegis128l_alg = { + .setkey = crypto_aegis128l_setkey, + .setauthsize = crypto_aegis128l_setauthsize, + .encrypt = crypto_aegis128l_encrypt, + .decrypt = crypto_aegis128l_decrypt, + .init = crypto_aegis128l_init_tfm, + .exit = crypto_aegis128l_exit_tfm, + + .ivsize = AEGIS128L_NONCE_SIZE, + .maxauthsize = AEGIS128L_MAX_AUTH_SIZE, + .chunksize = AEGIS128L_CHUNK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis128l", + .cra_driver_name = "aegis128l-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis128l_module_init(void) +{ + return crypto_register_aead(&crypto_aegis128l_alg); +} + +static void __exit crypto_aegis128l_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis128l_alg); +} + +module_init(crypto_aegis128l_module_init); +module_exit(crypto_aegis128l_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-128L AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis128l"); +MODULE_ALIAS_CRYPTO("aegis128l-generic"); diff --git a/crypto/aegis256.c b/crypto/aegis256.c new file mode 100644 index 00000000..a489d741 --- /dev/null +++ b/crypto/aegis256.c @@ -0,0 +1,478 @@ +/* + * The AEGIS-256 Authenticated-Encryption Algorithm + * + * Copyright (c) 2017-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "aegis.h" + +#define AEGIS256_NONCE_SIZE 32 +#define AEGIS256_STATE_BLOCKS 6 +#define AEGIS256_KEY_SIZE 32 +#define AEGIS256_MIN_AUTH_SIZE 8 +#define AEGIS256_MAX_AUTH_SIZE 16 + +struct aegis_state { + union aegis_block blocks[AEGIS256_STATE_BLOCKS]; +}; + +struct aegis_ctx { + union aegis_block key[AEGIS256_KEY_SIZE / AEGIS_BLOCK_SIZE]; +}; + +struct aegis256_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +}; + +static void crypto_aegis256_update(struct aegis_state *state) +{ + union aegis_block tmp; + unsigned int i; + + tmp = state->blocks[AEGIS256_STATE_BLOCKS - 1]; + for (i = AEGIS256_STATE_BLOCKS - 1; i > 0; i--) + crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1], + &state->blocks[i]); + crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]); +} + +static void crypto_aegis256_update_a(struct aegis_state *state, + const union aegis_block *msg) +{ + crypto_aegis256_update(state); + crypto_aegis_block_xor(&state->blocks[0], msg); +} + +static void crypto_aegis256_update_u(struct aegis_state *state, const void *msg) +{ + crypto_aegis256_update(state); + crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE); +} + +static void crypto_aegis256_init(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv) +{ + union aegis_block key_iv[2]; + unsigned int i; + + key_iv[0] = key[0]; + key_iv[1] = key[1]; + crypto_xor(key_iv[0].bytes, iv + 0 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + crypto_xor(key_iv[1].bytes, iv + 1 * AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + + state->blocks[0] = key_iv[0]; + state->blocks[1] = key_iv[1]; + state->blocks[2] = crypto_aegis_const[1]; + state->blocks[3] = crypto_aegis_const[0]; + state->blocks[4] = key[0]; + state->blocks[5] = key[1]; + + crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[0]); + crypto_aegis_block_xor(&state->blocks[5], &crypto_aegis_const[1]); + + for (i = 0; i < 4; i++) { + crypto_aegis256_update_a(state, &key[0]); + crypto_aegis256_update_a(state, &key[1]); + crypto_aegis256_update_a(state, &key_iv[0]); + crypto_aegis256_update_a(state, &key_iv[1]); + } +} + +static void crypto_aegis256_ad(struct aegis_state *state, + const u8 *src, unsigned int size) +{ + if (AEGIS_ALIGNED(src)) { + const union aegis_block *src_blk = + (const union aegis_block *)src; + + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis256_update_a(state, src_blk); + + size -= AEGIS_BLOCK_SIZE; + src_blk++; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + crypto_aegis256_update_u(state, src); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + } + } +} + +static void crypto_aegis256_encrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[5]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis256_update_a(state, src_blk); + + *dst_blk = tmp; + + size -= 
AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[5]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis256_update_u(state, src); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[5]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + + crypto_aegis256_update_a(state, &msg); + + crypto_aegis_block_xor(&msg, &tmp); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis256_decrypt_chunk(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + union aegis_block tmp; + + if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) { + while (size >= AEGIS_BLOCK_SIZE) { + union aegis_block *dst_blk = + (union aegis_block *)dst; + const union aegis_block *src_blk = + (const union aegis_block *)src; + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[5]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&tmp, src_blk); + + crypto_aegis256_update_a(state, &tmp); + + *dst_blk = tmp; + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } else { + while (size >= AEGIS_BLOCK_SIZE) { + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[5]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE); + + crypto_aegis256_update_a(state, &tmp); + + memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE); + + size -= AEGIS_BLOCK_SIZE; + src += AEGIS_BLOCK_SIZE; + dst += AEGIS_BLOCK_SIZE; + } + } + + if (size > 0) { + union aegis_block msg = {}; + memcpy(msg.bytes, src, size); + + tmp = state->blocks[2]; + crypto_aegis_block_and(&tmp, &state->blocks[3]); + crypto_aegis_block_xor(&tmp, &state->blocks[5]); + crypto_aegis_block_xor(&tmp, &state->blocks[4]); + crypto_aegis_block_xor(&tmp, &state->blocks[1]); + crypto_aegis_block_xor(&msg, &tmp); + + memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size); + + crypto_aegis256_update_a(state, &msg); + + memcpy(dst, msg.bytes, size); + } +} + +static void crypto_aegis256_process_ad(struct aegis_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + union aegis_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= AEGIS_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = AEGIS_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + crypto_aegis256_update_a(state, &buf); + pos = 0; + left -= fill; + src += fill; + } + + crypto_aegis256_ad(state, src, left); + src += left & ~(AEGIS_BLOCK_SIZE - 1); 
+ left &= AEGIS_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos); + crypto_aegis256_update_a(state, &buf); + } +} + +static void crypto_aegis256_process_crypt(struct aegis_state *state, + struct aead_request *req, + const struct aegis256_ops *ops) +{ + struct skcipher_walk walk; + u8 *src, *dst; + unsigned int chunksize; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops->crypt_chunk(state, dst, src, chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +static void crypto_aegis256_final(struct aegis_state *state, + union aegis_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + union aegis_block tmp; + unsigned int i; + + tmp.words64[0] = cpu_to_le64(assocbits); + tmp.words64[1] = cpu_to_le64(cryptbits); + + crypto_aegis_block_xor(&tmp, &state->blocks[3]); + + for (i = 0; i < 7; i++) + crypto_aegis256_update_a(state, &tmp); + + for (i = 0; i < AEGIS256_STATE_BLOCKS; i++) + crypto_aegis_block_xor(tag_xor, &state->blocks[i]); +} + +static int crypto_aegis256_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct aegis_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != AEGIS256_KEY_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key[0].bytes, key, AEGIS_BLOCK_SIZE); + memcpy(ctx->key[1].bytes, key + AEGIS_BLOCK_SIZE, + AEGIS_BLOCK_SIZE); + return 0; +} + +static int crypto_aegis256_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + if (authsize > AEGIS256_MAX_AUTH_SIZE) + return -EINVAL; + if (authsize < AEGIS256_MIN_AUTH_SIZE) + return -EINVAL; + return 0; +} + +static void crypto_aegis256_crypt(struct aead_request *req, + union aegis_block *tag_xor, + unsigned int cryptlen, + const struct aegis256_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct aegis_ctx *ctx = crypto_aead_ctx(tfm); + struct aegis_state state; + + crypto_aegis256_init(&state, ctx->key, req->iv); + crypto_aegis256_process_ad(&state, req->src, req->assoclen); + crypto_aegis256_process_crypt(&state, req, ops); + crypto_aegis256_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_aegis256_encrypt(struct aead_request *req) +{ + static const struct aegis256_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_aegis256_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_aegis256_crypt(req, &tag, cryptlen, &ops); + + scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen, + authsize, 1); + return 0; +} + +static int crypto_aegis256_decrypt(struct aead_request *req) +{ + static const struct aegis256_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_aegis256_decrypt_chunk, + }; + static const u8 zeros[AEGIS256_MAX_AUTH_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union aegis_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + 
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen, + authsize, 0); + + crypto_aegis256_crypt(req, &tag, cryptlen, &ops); + + return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0; +} + +static int crypto_aegis256_init_tfm(struct crypto_aead *tfm) +{ + return 0; +} + +static void crypto_aegis256_exit_tfm(struct crypto_aead *tfm) +{ +} + +static struct aead_alg crypto_aegis256_alg = { + .setkey = crypto_aegis256_setkey, + .setauthsize = crypto_aegis256_setauthsize, + .encrypt = crypto_aegis256_encrypt, + .decrypt = crypto_aegis256_decrypt, + .init = crypto_aegis256_init_tfm, + .exit = crypto_aegis256_exit_tfm, + + .ivsize = AEGIS256_NONCE_SIZE, + .maxauthsize = AEGIS256_MAX_AUTH_SIZE, + .chunksize = AEGIS_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aegis_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "aegis256", + .cra_driver_name = "aegis256-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_aegis256_module_init(void) +{ + return crypto_register_aead(&crypto_aegis256_alg); +} + +static void __exit crypto_aegis256_module_exit(void) +{ + crypto_unregister_aead(&crypto_aegis256_alg); +} + +module_init(crypto_aegis256_module_init); +module_exit(crypto_aegis256_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("AEGIS-256 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("aegis256"); +MODULE_ALIAS_CRYPTO("aegis256-generic"); -- cgit v1.2.3 From 4cc215a8b72d5acd158cda9789eb499cda062fc7 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 11 May 2018 14:19:09 +0200 Subject: crypto: morus - Add generic MORUS AEAD implementations This patch adds the generic implementation of the MORUS family of AEAD algorithms (MORUS-640 and MORUS-1280). The original authors of MORUS are Hongjun Wu and Tao Huang. At the time of writing, MORUS is one of the finalists in CAESAR, an open competition intended to select a portfolio of alternatives to the problematic AES-GCM: https://competitions.cr.yp.to/caesar-submissions.html https://competitions.cr.yp.to/round3/morusv2.pdf Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu --- crypto/Kconfig | 12 ++ crypto/Makefile | 2 + crypto/morus1280.c | 549 +++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/morus640.c | 544 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1107 insertions(+) create mode 100644 crypto/morus1280.c create mode 100644 crypto/morus640.c (limited to 'crypto/Makefile') diff --git a/crypto/Kconfig b/crypto/Kconfig index d8d123ea..7c53547f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -334,6 +334,18 @@ config CRYPTO_AEGIS256_AESNI_SSE2 help AESNI+SSE2 implementation of the AEGSI-256 dedicated AEAD algorithm. +config CRYPTO_MORUS640 + tristate "MORUS-640 AEAD algorithm" + select CRYPTO_AEAD + help + Support for the MORUS-640 dedicated AEAD algorithm. + +config CRYPTO_MORUS1280 + tristate "MORUS-1280 AEAD algorithm" + select CRYPTO_AEAD + help + Support for the MORUS-1280 dedicated AEAD algorithm. 
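[Illustrative note, not part of the patch: both the AEGIS and MORUS generic implementations in this series absorb associated data through a scatterlist walk that buffers partial blocks until a full block is available (crypto_aegis128_process_ad(), crypto_morus1280_process_ad() and friends). The fragment below is a simplified, standalone restatement of that buffering pattern with hypothetical names, intended only to clarify the control flow.]

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16	/* AEGIS block size; MORUS-1280 uses 32-byte blocks */

/* Hypothetical per-message context: the absorbed-but-unprocessed tail. */
struct ad_buffer {
	uint8_t bytes[BLOCK_SIZE];
	unsigned int pos;	/* how many bytes of 'bytes' are filled */
};

/* Stand-in for crypto_aegis128_update_a(), crypto_morus1280_update(), ... */
typedef void (*absorb_block_fn)(void *state, const uint8_t *block);

/*
 * Feed one chunk of associated data (one scatterlist segment in the
 * kernel code). Whole blocks are absorbed directly from 'src'; any
 * leftover tail is staged in 'buf' and absorbed once it fills up, or
 * zero-padded and absorbed by ad_finish() at the end of the AD.
 */
static void ad_feed(void *state, struct ad_buffer *buf,
		    const uint8_t *src, size_t len, absorb_block_fn absorb)
{
	if (buf->pos + len >= BLOCK_SIZE) {
		if (buf->pos > 0) {
			size_t fill = BLOCK_SIZE - buf->pos;

			memcpy(buf->bytes + buf->pos, src, fill);
			absorb(state, buf->bytes);
			buf->pos = 0;
			src += fill;
			len -= fill;
		}
		while (len >= BLOCK_SIZE) {
			absorb(state, src);
			src += BLOCK_SIZE;
			len -= BLOCK_SIZE;
		}
	}
	memcpy(buf->bytes + buf->pos, src, len);
	buf->pos += len;
}

static void ad_finish(void *state, struct ad_buffer *buf,
		      absorb_block_fn absorb)
{
	if (buf->pos > 0) {
		memset(buf->bytes + buf->pos, 0, BLOCK_SIZE - buf->pos);
		absorb(state, buf->bytes);
		buf->pos = 0;
	}
}

[In the kernel versions the scatterwalk_* helpers handle mapping and advancing through the scatterlist segments; the buffering logic itself is the same.]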
+ config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD diff --git a/crypto/Makefile b/crypto/Makefile index f2008d49..6d1d40ee 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -89,6 +89,8 @@ obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o obj-$(CONFIG_CRYPTO_AEGIS128) += aegis128.o obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o +obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o +obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o diff --git a/crypto/morus1280.c b/crypto/morus1280.c new file mode 100644 index 00000000..6180b255 --- /dev/null +++ b/crypto/morus1280.c @@ -0,0 +1,549 @@ +/* + * The MORUS-1280 Authenticated-Encryption Algorithm + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MORUS1280_WORD_SIZE 8 +#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE) +#define MORUS1280_BLOCK_ALIGN (__alignof__(__le64)) +#define MORUS1280_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS1280_BLOCK_ALIGN) + +struct morus1280_block { + u64 words[MORUS_BLOCK_WORDS]; +}; + +union morus1280_block_in { + __le64 words[MORUS_BLOCK_WORDS]; + u8 bytes[MORUS1280_BLOCK_SIZE]; +}; + +struct morus1280_state { + struct morus1280_block s[MORUS_STATE_BLOCKS]; +}; + +struct morus1280_ctx { + struct morus1280_block key; +}; + +struct morus1280_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct morus1280_state *state, + u8 *dst, const u8 *src, unsigned int size); +}; + +static const struct morus1280_block crypto_morus1280_const[1] = { + { .words = { + U64_C(0x0d08050302010100), + U64_C(0x6279e99059372215), + U64_C(0xf12fc26d55183ddb), + U64_C(0xdd28b57342311120), + } }, +}; + +static void crypto_morus1280_round(struct morus1280_block *b0, + struct morus1280_block *b1, + struct morus1280_block *b2, + struct morus1280_block *b3, + struct morus1280_block *b4, + const struct morus1280_block *m, + unsigned int b, unsigned int w) +{ + unsigned int i; + struct morus1280_block tmp; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + b0->words[i] ^= b1->words[i] & b2->words[i]; + b0->words[i] ^= b3->words[i]; + b0->words[i] ^= m->words[i]; + b0->words[i] = rol64(b0->words[i], b); + } + + tmp = *b3; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i]; +} + +static void crypto_morus1280_update(struct morus1280_state *state, + const struct morus1280_block *m) +{ + static const struct morus1280_block z = {}; + + struct morus1280_block *s = state->s; + + crypto_morus1280_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 13, 1); + crypto_morus1280_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 46, 2); + crypto_morus1280_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 38, 3); + crypto_morus1280_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 7, 2); + crypto_morus1280_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 4, 1); +} + +static void crypto_morus1280_load_a(struct 
morus1280_block *dst, const u8 *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + dst->words[i] = le64_to_cpu(*(const __le64 *)src); + src += MORUS1280_WORD_SIZE; + } +} + +static void crypto_morus1280_load_u(struct morus1280_block *dst, const u8 *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + dst->words[i] = get_unaligned_le64(src); + src += MORUS1280_WORD_SIZE; + } +} + +static void crypto_morus1280_load(struct morus1280_block *dst, const u8 *src) +{ + if (MORUS1280_ALIGNED(src)) + crypto_morus1280_load_a(dst, src); + else + crypto_morus1280_load_u(dst, src); +} + +static void crypto_morus1280_store_a(u8 *dst, const struct morus1280_block *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + *(__le64 *)dst = cpu_to_le64(src->words[i]); + dst += MORUS1280_WORD_SIZE; + } +} + +static void crypto_morus1280_store_u(u8 *dst, const struct morus1280_block *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + put_unaligned_le64(src->words[i], dst); + dst += MORUS1280_WORD_SIZE; + } +} + +static void crypto_morus1280_store(u8 *dst, const struct morus1280_block *src) +{ + if (MORUS1280_ALIGNED(dst)) + crypto_morus1280_store_a(dst, src); + else + crypto_morus1280_store_u(dst, src); +} + +static void crypto_morus1280_ad(struct morus1280_state *state, const u8 *src, + unsigned int size) +{ + struct morus1280_block m; + + if (MORUS1280_ALIGNED(src)) { + while (size >= MORUS1280_BLOCK_SIZE) { + crypto_morus1280_load_a(&m, src); + crypto_morus1280_update(state, &m); + + size -= MORUS1280_BLOCK_SIZE; + src += MORUS1280_BLOCK_SIZE; + } + } else { + while (size >= MORUS1280_BLOCK_SIZE) { + crypto_morus1280_load_u(&m, src); + crypto_morus1280_update(state, &m); + + size -= MORUS1280_BLOCK_SIZE; + src += MORUS1280_BLOCK_SIZE; + } + } +} + +static void crypto_morus1280_core(const struct morus1280_state *state, + struct morus1280_block *blk) +{ + unsigned int i; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i]; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + blk->words[i] ^= state->s[0].words[i]; + blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i]; + } +} + +static void crypto_morus1280_encrypt_chunk(struct morus1280_state *state, + u8 *dst, const u8 *src, + unsigned int size) +{ + struct morus1280_block c, m; + + if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) { + while (size >= MORUS1280_BLOCK_SIZE) { + crypto_morus1280_load_a(&m, src); + c = m; + crypto_morus1280_core(state, &c); + crypto_morus1280_store_a(dst, &c); + crypto_morus1280_update(state, &m); + + src += MORUS1280_BLOCK_SIZE; + dst += MORUS1280_BLOCK_SIZE; + size -= MORUS1280_BLOCK_SIZE; + } + } else { + while (size >= MORUS1280_BLOCK_SIZE) { + crypto_morus1280_load_u(&m, src); + c = m; + crypto_morus1280_core(state, &c); + crypto_morus1280_store_u(dst, &c); + crypto_morus1280_update(state, &m); + + src += MORUS1280_BLOCK_SIZE; + dst += MORUS1280_BLOCK_SIZE; + size -= MORUS1280_BLOCK_SIZE; + } + } + + if (size > 0) { + union morus1280_block_in tail; + + memcpy(tail.bytes, src, size); + memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size); + + crypto_morus1280_load_a(&m, tail.bytes); + c = m; + crypto_morus1280_core(state, &c); + crypto_morus1280_store_a(tail.bytes, &c); + crypto_morus1280_update(state, &m); + + memcpy(dst, tail.bytes, size); + } +} + +static void crypto_morus1280_decrypt_chunk(struct morus1280_state *state, + u8 *dst, const u8 *src, + unsigned int size) +{ + struct 
morus1280_block m; + + if (MORUS1280_ALIGNED(src) && MORUS1280_ALIGNED(dst)) { + while (size >= MORUS1280_BLOCK_SIZE) { + crypto_morus1280_load_a(&m, src); + crypto_morus1280_core(state, &m); + crypto_morus1280_store_a(dst, &m); + crypto_morus1280_update(state, &m); + + src += MORUS1280_BLOCK_SIZE; + dst += MORUS1280_BLOCK_SIZE; + size -= MORUS1280_BLOCK_SIZE; + } + } else { + while (size >= MORUS1280_BLOCK_SIZE) { + crypto_morus1280_load_u(&m, src); + crypto_morus1280_core(state, &m); + crypto_morus1280_store_u(dst, &m); + crypto_morus1280_update(state, &m); + + src += MORUS1280_BLOCK_SIZE; + dst += MORUS1280_BLOCK_SIZE; + size -= MORUS1280_BLOCK_SIZE; + } + } + + if (size > 0) { + union morus1280_block_in tail; + + memcpy(tail.bytes, src, size); + memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size); + + crypto_morus1280_load_a(&m, tail.bytes); + crypto_morus1280_core(state, &m); + crypto_morus1280_store_a(tail.bytes, &m); + memset(tail.bytes + size, 0, MORUS1280_BLOCK_SIZE - size); + crypto_morus1280_load_a(&m, tail.bytes); + crypto_morus1280_update(state, &m); + + memcpy(dst, tail.bytes, size); + } +} + +static void crypto_morus1280_init(struct morus1280_state *state, + const struct morus1280_block *key, + const u8 *iv) +{ + static const struct morus1280_block z = {}; + + union morus1280_block_in tmp; + unsigned int i; + + memcpy(tmp.bytes, iv, MORUS_NONCE_SIZE); + memset(tmp.bytes + MORUS_NONCE_SIZE, 0, + MORUS1280_BLOCK_SIZE - MORUS_NONCE_SIZE); + + crypto_morus1280_load(&state->s[0], tmp.bytes); + state->s[1] = *key; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + state->s[2].words[i] = U64_C(0xFFFFFFFFFFFFFFFF); + state->s[3] = z; + state->s[4] = crypto_morus1280_const[0]; + + for (i = 0; i < 16; i++) + crypto_morus1280_update(state, &z); + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + state->s[1].words[i] ^= key->words[i]; +} + +static void crypto_morus1280_process_ad(struct morus1280_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + struct morus1280_block m; + union morus1280_block_in buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= MORUS1280_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = MORUS1280_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + + crypto_morus1280_load_a(&m, buf.bytes); + crypto_morus1280_update(state, &m); + + pos = 0; + left -= fill; + src += fill; + } + + crypto_morus1280_ad(state, src, left); + src += left & ~(MORUS1280_BLOCK_SIZE - 1); + left &= MORUS1280_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos); + + crypto_morus1280_load_a(&m, buf.bytes); + crypto_morus1280_update(state, &m); + } +} + +static void crypto_morus1280_process_crypt(struct morus1280_state *state, + struct aead_request *req, + const struct morus1280_ops *ops) +{ + struct skcipher_walk walk; + u8 *dst; + const u8 *src; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + ops->crypt_chunk(state, dst, src, walk.nbytes); + + skcipher_walk_done(&walk, 0); + } +} + +static void 
crypto_morus1280_final(struct morus1280_state *state, + struct morus1280_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + struct morus1280_block tmp; + unsigned int i; + + tmp.words[0] = cpu_to_le64(assocbits); + tmp.words[1] = cpu_to_le64(cryptbits); + tmp.words[2] = 0; + tmp.words[3] = 0; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + state->s[4].words[i] ^= state->s[0].words[i]; + + for (i = 0; i < 10; i++) + crypto_morus1280_update(state, &tmp); + + crypto_morus1280_core(state, tag_xor); +} + +static int crypto_morus1280_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct morus1280_ctx *ctx = crypto_aead_ctx(aead); + union morus1280_block_in tmp; + + if (keylen == MORUS1280_BLOCK_SIZE) + crypto_morus1280_load(&ctx->key, key); + else if (keylen == MORUS1280_BLOCK_SIZE / 2) { + memcpy(tmp.bytes, key, keylen); + memcpy(tmp.bytes + keylen, key, keylen); + + crypto_morus1280_load(&ctx->key, tmp.bytes); + } else { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return 0; +} + +static int crypto_morus1280_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL; +} + +static void crypto_morus1280_crypt(struct aead_request *req, + struct morus1280_block *tag_xor, + unsigned int cryptlen, + const struct morus1280_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); + struct morus1280_state state; + + crypto_morus1280_init(&state, &ctx->key, req->iv); + crypto_morus1280_process_ad(&state, req->src, req->assoclen); + crypto_morus1280_process_crypt(&state, req, ops); + crypto_morus1280_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_morus1280_encrypt(struct aead_request *req) +{ + static const struct morus1280_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_morus1280_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus1280_block tag = {}; + union morus1280_block_in tag_out; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_morus1280_crypt(req, &tag, cryptlen, &ops); + crypto_morus1280_store(tag_out.bytes, &tag); + + scatterwalk_map_and_copy(tag_out.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} + +static int crypto_morus1280_decrypt(struct aead_request *req) +{ + static const struct morus1280_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_morus1280_decrypt_chunk, + }; + static const u8 zeros[MORUS1280_BLOCK_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union morus1280_block_in tag_in; + struct morus1280_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag_in.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_morus1280_load(&tag, tag_in.bytes); + crypto_morus1280_crypt(req, &tag, cryptlen, &ops); + crypto_morus1280_store(tag_in.bytes, &tag); + + return crypto_memneq(tag_in.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} + +static int crypto_morus1280_init_tfm(struct crypto_aead *tfm) +{ + return 0; +} + +static void crypto_morus1280_exit_tfm(struct crypto_aead *tfm) +{ +} + +static struct aead_alg crypto_morus1280_alg = { + .setkey = crypto_morus1280_setkey, + .setauthsize = crypto_morus1280_setauthsize, + .encrypt = crypto_morus1280_encrypt, + .decrypt = crypto_morus1280_decrypt, + .init = crypto_morus1280_init_tfm, + .exit = crypto_morus1280_exit_tfm, + + .ivsize = MORUS_NONCE_SIZE, + .maxauthsize = MORUS_MAX_AUTH_SIZE, + .chunksize = MORUS1280_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct morus1280_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "morus1280", + .cra_driver_name = "morus1280-generic", + + .cra_module = THIS_MODULE, + } +}; + + +static int __init crypto_morus1280_module_init(void) +{ + return crypto_register_aead(&crypto_morus1280_alg); +} + +static void __exit crypto_morus1280_module_exit(void) +{ + crypto_unregister_aead(&crypto_morus1280_alg); +} + +module_init(crypto_morus1280_module_init); +module_exit(crypto_morus1280_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("MORUS-1280 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("morus1280"); +MODULE_ALIAS_CRYPTO("morus1280-generic"); diff --git a/crypto/morus640.c b/crypto/morus640.c new file mode 100644 index 00000000..9fbcde30 --- /dev/null +++ b/crypto/morus640.c @@ -0,0 +1,544 @@ +/* + * The MORUS-640 Authenticated-Encryption Algorithm + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MORUS640_WORD_SIZE 4 +#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE) +#define MORUS640_BLOCK_ALIGN (__alignof__(__le32)) +#define MORUS640_ALIGNED(p) IS_ALIGNED((uintptr_t)p, MORUS640_BLOCK_ALIGN) + +struct morus640_block { + u32 words[MORUS_BLOCK_WORDS]; +}; + +union morus640_block_in { + __le32 words[MORUS_BLOCK_WORDS]; + u8 bytes[MORUS640_BLOCK_SIZE]; +}; + +struct morus640_state { + struct morus640_block s[MORUS_STATE_BLOCKS]; +}; + +struct morus640_ctx { + struct morus640_block key; +}; + +struct morus640_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_chunk)(struct morus640_state *state, + u8 *dst, const u8 *src, unsigned int size); +}; + +static const struct morus640_block crypto_morus640_const[2] = { + { .words = { + U32_C(0x02010100), + U32_C(0x0d080503), + U32_C(0x59372215), + U32_C(0x6279e990), + } }, + { .words = { + U32_C(0x55183ddb), + U32_C(0xf12fc26d), + U32_C(0x42311120), + U32_C(0xdd28b573), + } }, +}; + +static void crypto_morus640_round(struct morus640_block *b0, + struct morus640_block *b1, + struct morus640_block *b2, + struct morus640_block *b3, + struct morus640_block *b4, + const struct morus640_block *m, + unsigned int b, unsigned int w) +{ + unsigned int i; + struct morus640_block tmp; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + b0->words[i] ^= b1->words[i] & b2->words[i]; + b0->words[i] ^= b3->words[i]; + b0->words[i] ^= m->words[i]; + b0->words[i] = rol32(b0->words[i], b); + } + + tmp = *b3; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + b3->words[(i + w) % MORUS_BLOCK_WORDS] = tmp.words[i]; +} + +static void crypto_morus640_update(struct morus640_state *state, + const struct morus640_block *m) +{ + static const struct morus640_block z = {}; + + struct morus640_block *s = state->s; + + crypto_morus640_round(&s[0], &s[1], &s[2], &s[3], &s[4], &z, 5, 1); + crypto_morus640_round(&s[1], &s[2], &s[3], &s[4], &s[0], m, 31, 2); + crypto_morus640_round(&s[2], &s[3], &s[4], &s[0], &s[1], m, 7, 3); + crypto_morus640_round(&s[3], &s[4], &s[0], &s[1], &s[2], m, 22, 2); + crypto_morus640_round(&s[4], &s[0], &s[1], &s[2], &s[3], m, 13, 1); +} + +static void crypto_morus640_load_a(struct morus640_block *dst, const u8 *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + dst->words[i] = le32_to_cpu(*(const __le32 *)src); + src += MORUS640_WORD_SIZE; + } +} + +static void crypto_morus640_load_u(struct morus640_block *dst, const u8 *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + dst->words[i] = get_unaligned_le32(src); + src += MORUS640_WORD_SIZE; + } +} + +static void crypto_morus640_load(struct morus640_block *dst, const u8 *src) +{ + if (MORUS640_ALIGNED(src)) + crypto_morus640_load_a(dst, src); + else + crypto_morus640_load_u(dst, src); +} + +static void crypto_morus640_store_a(u8 *dst, const struct morus640_block *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + *(__le32 *)dst = cpu_to_le32(src->words[i]); + dst += MORUS640_WORD_SIZE; + } +} + +static void crypto_morus640_store_u(u8 *dst, const struct morus640_block *src) +{ + unsigned int i; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + put_unaligned_le32(src->words[i], dst); + dst += MORUS640_WORD_SIZE; + } +} + +static void crypto_morus640_store(u8 *dst, const struct morus640_block *src) +{ + if (MORUS640_ALIGNED(dst)) + 
crypto_morus640_store_a(dst, src); + else + crypto_morus640_store_u(dst, src); +} + +static void crypto_morus640_ad(struct morus640_state *state, const u8 *src, + unsigned int size) +{ + struct morus640_block m; + + if (MORUS640_ALIGNED(src)) { + while (size >= MORUS640_BLOCK_SIZE) { + crypto_morus640_load_a(&m, src); + crypto_morus640_update(state, &m); + + size -= MORUS640_BLOCK_SIZE; + src += MORUS640_BLOCK_SIZE; + } + } else { + while (size >= MORUS640_BLOCK_SIZE) { + crypto_morus640_load_u(&m, src); + crypto_morus640_update(state, &m); + + size -= MORUS640_BLOCK_SIZE; + src += MORUS640_BLOCK_SIZE; + } + } +} + +static void crypto_morus640_core(const struct morus640_state *state, + struct morus640_block *blk) +{ + unsigned int i; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + blk->words[(i + 3) % MORUS_BLOCK_WORDS] ^= state->s[1].words[i]; + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) { + blk->words[i] ^= state->s[0].words[i]; + blk->words[i] ^= state->s[2].words[i] & state->s[3].words[i]; + } +} + +static void crypto_morus640_encrypt_chunk(struct morus640_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + struct morus640_block c, m; + + if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) { + while (size >= MORUS640_BLOCK_SIZE) { + crypto_morus640_load_a(&m, src); + c = m; + crypto_morus640_core(state, &c); + crypto_morus640_store_a(dst, &c); + crypto_morus640_update(state, &m); + + src += MORUS640_BLOCK_SIZE; + dst += MORUS640_BLOCK_SIZE; + size -= MORUS640_BLOCK_SIZE; + } + } else { + while (size >= MORUS640_BLOCK_SIZE) { + crypto_morus640_load_u(&m, src); + c = m; + crypto_morus640_core(state, &c); + crypto_morus640_store_u(dst, &c); + crypto_morus640_update(state, &m); + + src += MORUS640_BLOCK_SIZE; + dst += MORUS640_BLOCK_SIZE; + size -= MORUS640_BLOCK_SIZE; + } + } + + if (size > 0) { + union morus640_block_in tail; + + memcpy(tail.bytes, src, size); + memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); + + crypto_morus640_load_a(&m, tail.bytes); + c = m; + crypto_morus640_core(state, &c); + crypto_morus640_store_a(tail.bytes, &c); + crypto_morus640_update(state, &m); + + memcpy(dst, tail.bytes, size); + } +} + +static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst, + const u8 *src, unsigned int size) +{ + struct morus640_block m; + + if (MORUS640_ALIGNED(src) && MORUS640_ALIGNED(dst)) { + while (size >= MORUS640_BLOCK_SIZE) { + crypto_morus640_load_a(&m, src); + crypto_morus640_core(state, &m); + crypto_morus640_store_a(dst, &m); + crypto_morus640_update(state, &m); + + src += MORUS640_BLOCK_SIZE; + dst += MORUS640_BLOCK_SIZE; + size -= MORUS640_BLOCK_SIZE; + } + } else { + while (size >= MORUS640_BLOCK_SIZE) { + crypto_morus640_load_u(&m, src); + crypto_morus640_core(state, &m); + crypto_morus640_store_u(dst, &m); + crypto_morus640_update(state, &m); + + src += MORUS640_BLOCK_SIZE; + dst += MORUS640_BLOCK_SIZE; + size -= MORUS640_BLOCK_SIZE; + } + } + + if (size > 0) { + union morus640_block_in tail; + + memcpy(tail.bytes, src, size); + + crypto_morus640_load_a(&m, src); + crypto_morus640_core(state, &m); + crypto_morus640_store_a(tail.bytes, &m); + memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size); + crypto_morus640_load_a(&m, tail.bytes); + crypto_morus640_update(state, &m); + + memcpy(dst, tail.bytes, size); + } +} + +static void crypto_morus640_init(struct morus640_state *state, + const struct morus640_block *key, + const u8 *iv) +{ + static const struct morus640_block z = {}; + + unsigned int i; + + 
crypto_morus640_load(&state->s[0], iv); + state->s[1] = *key; + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + state->s[2].words[i] = U32_C(0xFFFFFFFF); + state->s[3] = crypto_morus640_const[0]; + state->s[4] = crypto_morus640_const[1]; + + for (i = 0; i < 16; i++) + crypto_morus640_update(state, &z); + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + state->s[1].words[i] ^= key->words[i]; +} + +static void crypto_morus640_process_ad(struct morus640_state *state, + struct scatterlist *sg_src, + unsigned int assoclen) +{ + struct scatter_walk walk; + struct morus640_block m; + union morus640_block_in buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= MORUS640_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = MORUS640_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + + crypto_morus640_load_a(&m, buf.bytes); + crypto_morus640_update(state, &m); + + pos = 0; + left -= fill; + src += fill; + } + + crypto_morus640_ad(state, src, left); + src += left & ~(MORUS640_BLOCK_SIZE - 1); + left &= MORUS640_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos); + + crypto_morus640_load_a(&m, buf.bytes); + crypto_morus640_update(state, &m); + } +} + +static void crypto_morus640_process_crypt(struct morus640_state *state, + struct aead_request *req, + const struct morus640_ops *ops) +{ + struct skcipher_walk walk; + u8 *dst; + const u8 *src; + + ops->skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + ops->crypt_chunk(state, dst, src, walk.nbytes); + + skcipher_walk_done(&walk, 0); + } +} + +static void crypto_morus640_final(struct morus640_state *state, + struct morus640_block *tag_xor, + u64 assoclen, u64 cryptlen) +{ + u64 assocbits = assoclen * 8; + u64 cryptbits = cryptlen * 8; + + u32 assocbits_lo = (u32)assocbits; + u32 assocbits_hi = (u32)(assocbits >> 32); + u32 cryptbits_lo = (u32)cryptbits; + u32 cryptbits_hi = (u32)(cryptbits >> 32); + + struct morus640_block tmp; + unsigned int i; + + tmp.words[0] = cpu_to_le32(assocbits_lo); + tmp.words[1] = cpu_to_le32(assocbits_hi); + tmp.words[2] = cpu_to_le32(cryptbits_lo); + tmp.words[3] = cpu_to_le32(cryptbits_hi); + + for (i = 0; i < MORUS_BLOCK_WORDS; i++) + state->s[4].words[i] ^= state->s[0].words[i]; + + for (i = 0; i < 10; i++) + crypto_morus640_update(state, &tmp); + + crypto_morus640_core(state, tag_xor); +} + +static int crypto_morus640_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct morus640_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != MORUS640_BLOCK_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + crypto_morus640_load(&ctx->key, key); + return 0; +} + +static int crypto_morus640_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return (authsize <= MORUS_MAX_AUTH_SIZE) ? 
0 : -EINVAL; +} + +static void crypto_morus640_crypt(struct aead_request *req, + struct morus640_block *tag_xor, + unsigned int cryptlen, + const struct morus640_ops *ops) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus640_ctx *ctx = crypto_aead_ctx(tfm); + struct morus640_state state; + + crypto_morus640_init(&state, &ctx->key, req->iv); + crypto_morus640_process_ad(&state, req->src, req->assoclen); + crypto_morus640_process_crypt(&state, req, ops); + crypto_morus640_final(&state, tag_xor, req->assoclen, cryptlen); +} + +static int crypto_morus640_encrypt(struct aead_request *req) +{ + static const struct morus640_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_chunk = crypto_morus640_encrypt_chunk, + }; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus640_block tag = {}; + union morus640_block_in tag_out; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_morus640_crypt(req, &tag, cryptlen, &ops); + crypto_morus640_store(tag_out.bytes, &tag); + + scatterwalk_map_and_copy(tag_out.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} + +static int crypto_morus640_decrypt(struct aead_request *req) +{ + static const struct morus640_ops ops = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_chunk = crypto_morus640_decrypt_chunk, + }; + static const u8 zeros[MORUS640_BLOCK_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + union morus640_block_in tag_in; + struct morus640_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag_in.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_morus640_load(&tag, tag_in.bytes); + crypto_morus640_crypt(req, &tag, cryptlen, &ops); + crypto_morus640_store(tag_in.bytes, &tag); + + return crypto_memneq(tag_in.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} + +static int crypto_morus640_init_tfm(struct crypto_aead *tfm) +{ + return 0; +} + +static void crypto_morus640_exit_tfm(struct crypto_aead *tfm) +{ +} + +static struct aead_alg crypto_morus640_alg = { + .setkey = crypto_morus640_setkey, + .setauthsize = crypto_morus640_setauthsize, + .encrypt = crypto_morus640_encrypt, + .decrypt = crypto_morus640_decrypt, + .init = crypto_morus640_init_tfm, + .exit = crypto_morus640_exit_tfm, + + .ivsize = MORUS_NONCE_SIZE, + .maxauthsize = MORUS_MAX_AUTH_SIZE, + .chunksize = MORUS640_BLOCK_SIZE, + + .base = { + .cra_flags = CRYPTO_ALG_TYPE_AEAD, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct morus640_ctx), + .cra_alignmask = 0, + + .cra_priority = 100, + + .cra_name = "morus640", + .cra_driver_name = "morus640-generic", + + .cra_module = THIS_MODULE, + } +}; + +static int __init crypto_morus640_module_init(void) +{ + return crypto_register_aead(&crypto_morus640_alg); +} + +static void __exit crypto_morus640_module_exit(void) +{ + crypto_unregister_aead(&crypto_morus640_alg); +} + +module_init(crypto_morus640_module_init); +module_exit(crypto_morus640_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("MORUS-640 AEAD algorithm"); +MODULE_ALIAS_CRYPTO("morus640"); +MODULE_ALIAS_CRYPTO("morus640-generic"); -- cgit v1.2.3 From 093e2758759f7bf57f7c0c74d7212e7b9fc55efc Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 11 May 2018 14:19:11 +0200 Subject: crypto: morus - Add common SIMD glue code for MORUS This patch adds a common glue code for optimized implementations of MORUS AEAD algorithms. Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu --- crypto/Kconfig | 16 +++ crypto/Makefile | 2 + crypto/morus1280_glue.c | 302 ++++++++++++++++++++++++++++++++++++++++++++++++ crypto/morus640_glue.c | 298 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 618 insertions(+) create mode 100644 crypto/morus1280_glue.c create mode 100644 crypto/morus640_glue.c (limited to 'crypto/Makefile') diff --git a/crypto/Kconfig b/crypto/Kconfig index 7c53547f..4761667f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -340,12 +340,28 @@ config CRYPTO_MORUS640 help Support for the MORUS-640 dedicated AEAD algorithm. +config CRYPTO_MORUS640_GLUE + tristate "MORUS-640 AEAD algorithm (glue for SIMD optimizations)" + select CRYPTO_AEAD + select CRYPTO_CRYPTD + help + Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD + algorithm. + config CRYPTO_MORUS1280 tristate "MORUS-1280 AEAD algorithm" select CRYPTO_AEAD help Support for the MORUS-1280 dedicated AEAD algorithm. +config CRYPTO_MORUS1280_GLUE + tristate "MORUS-1280 AEAD algorithm (glue for SIMD optimizations)" + select CRYPTO_AEAD + select CRYPTO_CRYPTD + help + Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD + algorithm. 
+ config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD diff --git a/crypto/Makefile b/crypto/Makefile index 6d1d40ee..68a7c546 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -91,6 +91,8 @@ obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o +obj-$(CONFIG_CRYPTO_MORUS640_GLUE) += morus640_glue.o +obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o diff --git a/crypto/morus1280_glue.c b/crypto/morus1280_glue.c new file mode 100644 index 00000000..ce1e5c34 --- /dev/null +++ b/crypto/morus1280_glue.c @@ -0,0 +1,302 @@ +/* + * The MORUS-1280 Authenticated-Encryption Algorithm + * Common glue skeleton + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct morus1280_state { + struct morus1280_block s[MORUS_STATE_BLOCKS]; +}; + +struct morus1280_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, const void *src, void *dst, + unsigned int length); + void (*crypt_tail)(void *state, const void *src, void *dst, + unsigned int length); +}; + +static void crypto_morus1280_glue_process_ad( + struct morus1280_state *state, + const struct morus1280_glue_ops *ops, + struct scatterlist *sg_src, unsigned int assoclen) +{ + struct scatter_walk walk; + struct morus1280_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= MORUS1280_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = MORUS1280_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE); + pos = 0; + left -= fill; + src += fill; + } + + ops->ad(state, src, left); + src += left & ~(MORUS1280_BLOCK_SIZE - 1); + left &= MORUS1280_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos); + ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE); + } +} + +static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, + struct morus1280_ops ops, + struct aead_request *req) +{ + struct skcipher_walk walk; + u8 *cursor_src, *cursor_dst; + unsigned int chunksize, base; + + ops.skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + cursor_src = walk.src.virt.addr; + cursor_dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); + + base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); + cursor_src += base; + cursor_dst += base; + chunksize &= MORUS1280_BLOCK_SIZE - 1; + + if 
(chunksize > 0) + ops.crypt_tail(state, cursor_src, cursor_dst, + chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct morus1280_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen == MORUS1280_BLOCK_SIZE) { + memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE); + } else if (keylen == MORUS1280_BLOCK_SIZE / 2) { + memcpy(ctx->key.bytes, key, keylen); + memcpy(ctx->key.bytes + keylen, key, keylen); + } else { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey); + +int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize); + +static void crypto_morus1280_glue_crypt(struct aead_request *req, + struct morus1280_ops ops, + unsigned int cryptlen, + struct morus1280_block *tag_xor) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); + struct morus1280_state state; + + kernel_fpu_begin(); + + ctx->ops->init(&state, &ctx->key, req->iv); + crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); + crypto_morus1280_glue_process_crypt(&state, ops, req); + ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); + + kernel_fpu_end(); +} + +int crypto_morus1280_glue_encrypt(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); + struct morus1280_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_blocks = ctx->ops->enc, + .crypt_tail = ctx->ops->enc_tail, + }; + + struct morus1280_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag); + + scatterwalk_map_and_copy(tag.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt); + +int crypto_morus1280_glue_decrypt(struct aead_request *req) +{ + static const u8 zeros[MORUS1280_BLOCK_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); + struct morus1280_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_blocks = ctx->ops->dec, + .crypt_tail = ctx->ops->dec_tail, + }; + + struct morus1280_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag); + + return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt); + +void crypto_morus1280_glue_init_ops(struct crypto_aead *aead, + const struct morus1280_glue_ops *ops) +{ + struct morus1280_ctx *ctx = crypto_aead_ctx(aead); + ctx->ops = ops; +} +EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops); + +int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey); + +int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize); + +int cryptd_morus1280_glue_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt); + +int cryptd_morus1280_glue_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt); + +int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + const char *name = crypto_aead_alg(aead)->base.cra_driver_name; + char internal_name[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name) + >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm); + +void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} +EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for optimizations"); diff --git a/crypto/morus640_glue.c b/crypto/morus640_glue.c new file mode 100644 index 00000000..c7e788cf --- /dev/null +++ b/crypto/morus640_glue.c @@ -0,0 +1,298 @@ +/* + * The MORUS-640 Authenticated-Encryption Algorithm + * Common glue skeleton + * + * Copyright (c) 2016-2018 Ondrej Mosnacek + * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct morus640_state { + struct morus640_block s[MORUS_STATE_BLOCKS]; +}; + +struct morus640_ops { + int (*skcipher_walk_init)(struct skcipher_walk *walk, + struct aead_request *req, bool atomic); + + void (*crypt_blocks)(void *state, const void *src, void *dst, + unsigned int length); + void (*crypt_tail)(void *state, const void *src, void *dst, + unsigned int length); +}; + +static void crypto_morus640_glue_process_ad( + struct morus640_state *state, + const struct morus640_glue_ops *ops, + struct scatterlist *sg_src, unsigned int assoclen) +{ + struct scatter_walk walk; + struct morus640_block buf; + unsigned int pos = 0; + + scatterwalk_start(&walk, sg_src); + while (assoclen != 0) { + unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int left = size; + void *mapped = scatterwalk_map(&walk); + const u8 *src = (const u8 *)mapped; + + if (pos + size >= MORUS640_BLOCK_SIZE) { + if (pos > 0) { + unsigned int fill = MORUS640_BLOCK_SIZE - pos; + memcpy(buf.bytes + pos, src, fill); + ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE); + pos = 0; + left -= fill; + src += fill; + } + + ops->ad(state, src, left); + src += left & ~(MORUS640_BLOCK_SIZE - 1); + left &= MORUS640_BLOCK_SIZE - 1; + } + + memcpy(buf.bytes + pos, src, left); + + pos += left; + assoclen -= size; + scatterwalk_unmap(mapped); + scatterwalk_advance(&walk, size); + scatterwalk_done(&walk, 0, assoclen); + } + + if (pos > 0) { + memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos); + ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE); + } +} + +static void crypto_morus640_glue_process_crypt(struct morus640_state *state, + struct morus640_ops ops, + struct aead_request *req) +{ + struct skcipher_walk walk; + u8 *cursor_src, *cursor_dst; + unsigned int chunksize, base; + + ops.skcipher_walk_init(&walk, req, false); + + while (walk.nbytes) { + cursor_src = walk.src.virt.addr; + cursor_dst = walk.dst.virt.addr; + chunksize = walk.nbytes; + + ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); + + base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); + cursor_src += base; + cursor_dst += base; + chunksize &= MORUS640_BLOCK_SIZE - 1; + + if (chunksize > 0) + ops.crypt_tail(state, cursor_src, cursor_dst, + chunksize); + + skcipher_walk_done(&walk, 0); + } +} + +int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct morus640_ctx *ctx = crypto_aead_ctx(aead); + + if (keylen != MORUS640_BLOCK_SIZE) { + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey); + +int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return (authsize <= MORUS_MAX_AUTH_SIZE) ? 
0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize); + +static void crypto_morus640_glue_crypt(struct aead_request *req, + struct morus640_ops ops, + unsigned int cryptlen, + struct morus640_block *tag_xor) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus640_ctx *ctx = crypto_aead_ctx(tfm); + struct morus640_state state; + + kernel_fpu_begin(); + + ctx->ops->init(&state, &ctx->key, req->iv); + crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); + crypto_morus640_glue_process_crypt(&state, ops, req); + ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); + + kernel_fpu_end(); +} + +int crypto_morus640_glue_encrypt(struct aead_request *req) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus640_ctx *ctx = crypto_aead_ctx(tfm); + struct morus640_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_encrypt, + .crypt_blocks = ctx->ops->enc, + .crypt_tail = ctx->ops->enc_tail, + }; + + struct morus640_block tag = {}; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen; + + crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag); + + scatterwalk_map_and_copy(tag.bytes, req->dst, + req->assoclen + cryptlen, authsize, 1); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt); + +int crypto_morus640_glue_decrypt(struct aead_request *req) +{ + static const u8 zeros[MORUS640_BLOCK_SIZE] = {}; + + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct morus640_ctx *ctx = crypto_aead_ctx(tfm); + struct morus640_ops OPS = { + .skcipher_walk_init = skcipher_walk_aead_decrypt, + .crypt_blocks = ctx->ops->dec, + .crypt_tail = ctx->ops->dec_tail, + }; + + struct morus640_block tag; + unsigned int authsize = crypto_aead_authsize(tfm); + unsigned int cryptlen = req->cryptlen - authsize; + + scatterwalk_map_and_copy(tag.bytes, req->src, + req->assoclen + cryptlen, authsize, 0); + + crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag); + + return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt); + +void crypto_morus640_glue_init_ops(struct crypto_aead *aead, + const struct morus640_glue_ops *ops) +{ + struct morus640_ctx *ctx = crypto_aead_ctx(aead); + ctx->ops = ops; +} +EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops); + +int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey); + +int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead, + unsigned int authsize) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize); + +int cryptd_morus640_glue_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_encrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt); + +int cryptd_morus640_glue_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + struct cryptd_aead *cryptd_tfm = *ctx; + + aead = &cryptd_tfm->base; + if (irq_fpu_usable() && (!in_atomic() || + !cryptd_aead_queued(cryptd_tfm))) + aead = cryptd_aead_child(cryptd_tfm); + + aead_request_set_tfm(req, aead); + + return crypto_aead_decrypt(req); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt); + +int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + const char *name = crypto_aead_alg(aead)->base.cra_driver_name; + char internal_name[CRYPTO_MAX_ALG_NAME]; + + if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name) + >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + return 0; +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm); + +void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} +EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ondrej Mosnacek "); +MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for optimizations"); -- cgit v1.2.3 From 3b373894f0cdb1d7d75b2b9460a47dac88651b99 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Mon, 21 May 2018 21:41:51 +0200 Subject: crypto: morus - Mark MORUS SIMD glue as x86-specific Commit 093e2758759f ("crypto: morus - Add common SIMD glue code for MORUS") accidentally considered the glue code to be usable by different architectures, but it is only usable on x86, since it relies on x86-specific FPU handling (kernel_fpu_begin()/kernel_fpu_end(), irq_fpu_usable()) and on cryptd. This patch moves it under arch/x86/crypto, adds 'depends on X86' to the Kconfig options, and removes the prompt so that these internal options are hidden from the user.
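For reference, an architecture-specific driver built on this glue fills in a morus1280_glue_ops structure and hands it to crypto_morus1280_glue_init_ops() from its init callback; the request-level setkey/setauthsize/encrypt/decrypt entry points then come straight from the glue. The following fragment is only an illustrative sketch of that registration pattern and is not part of this patch: the crypto_morus1280_sse2_* helper names and the <crypto/morus1280_glue.h> header location are assumptions, while the callback signatures are inferred from the calls the glue code makes.

#include <linux/linkage.h>
#include <crypto/internal/aead.h>
#include <crypto/morus1280_glue.h>	/* assumed header providing morus1280_glue_ops */

/* Assumed assembly entry points; signatures mirror how the glue invokes them. */
asmlinkage void crypto_morus1280_sse2_init(void *state, const void *key, const void *iv);
asmlinkage void crypto_morus1280_sse2_ad(void *state, const void *data, unsigned int length);
asmlinkage void crypto_morus1280_sse2_enc(void *state, const void *src, void *dst, unsigned int length);
asmlinkage void crypto_morus1280_sse2_enc_tail(void *state, const void *src, void *dst, unsigned int length);
asmlinkage void crypto_morus1280_sse2_dec(void *state, const void *src, void *dst, unsigned int length);
asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src, void *dst, unsigned int length);
asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor, u64 assoclen, u64 cryptlen);

static const struct morus1280_glue_ops crypto_morus1280_sse2_ops = {
	.init     = crypto_morus1280_sse2_init,
	.ad       = crypto_morus1280_sse2_ad,
	.enc      = crypto_morus1280_sse2_enc,
	.enc_tail = crypto_morus1280_sse2_enc_tail,
	.dec      = crypto_morus1280_sse2_dec,
	.dec_tail = crypto_morus1280_sse2_dec_tail,
	.final    = crypto_morus1280_sse2_final,
};

static int crypto_morus1280_sse2_init_tfm(struct crypto_aead *tfm)
{
	/* Route the generic glue entry points to the SIMD callbacks. */
	crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_sse2_ops);
	return 0;
}

static struct aead_alg crypto_morus1280_sse2_alg = {
	.setkey      = crypto_morus1280_glue_setkey,
	.setauthsize = crypto_morus1280_glue_setauthsize,
	.encrypt     = crypto_morus1280_glue_encrypt,
	.decrypt     = crypto_morus1280_glue_decrypt,
	.init        = crypto_morus1280_sse2_init_tfm,
	/* .ivsize, .maxauthsize, .chunksize and .base filled in as for the generic alg */
};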
Reported-by: kbuild test robot Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu --- crypto/Kconfig | 6 +- crypto/Makefile | 2 - crypto/morus1280_glue.c | 302 ------------------------------------------------ crypto/morus640_glue.c | 298 ----------------------------------------------- 4 files changed, 4 insertions(+), 604 deletions(-) delete mode 100644 crypto/morus1280_glue.c delete mode 100644 crypto/morus640_glue.c (limited to 'crypto/Makefile') diff --git a/crypto/Kconfig b/crypto/Kconfig index 75f5efde..30d54a56 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -341,7 +341,8 @@ config CRYPTO_MORUS640 Support for the MORUS-640 dedicated AEAD algorithm. config CRYPTO_MORUS640_GLUE - tristate "MORUS-640 AEAD algorithm (glue for SIMD optimizations)" + tristate + depends on X86 select CRYPTO_AEAD select CRYPTO_CRYPTD help @@ -363,7 +364,8 @@ config CRYPTO_MORUS1280 Support for the MORUS-1280 dedicated AEAD algorithm. config CRYPTO_MORUS1280_GLUE - tristate "MORUS-1280 AEAD algorithm (glue for SIMD optimizations)" + tristate + depends on X86 select CRYPTO_AEAD select CRYPTO_CRYPTD help diff --git a/crypto/Makefile b/crypto/Makefile index 68a7c546..6d1d40ee 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -91,8 +91,6 @@ obj-$(CONFIG_CRYPTO_AEGIS128L) += aegis128l.o obj-$(CONFIG_CRYPTO_AEGIS256) += aegis256.o obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o -obj-$(CONFIG_CRYPTO_MORUS640_GLUE) += morus640_glue.o -obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o diff --git a/crypto/morus1280_glue.c b/crypto/morus1280_glue.c deleted file mode 100644 index ce1e5c34..00000000 --- a/crypto/morus1280_glue.c +++ /dev/null @@ -1,302 +0,0 @@ -/* - * The MORUS-1280 Authenticated-Encryption Algorithm - * Common glue skeleton - * - * Copyright (c) 2016-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct morus1280_state { - struct morus1280_block s[MORUS_STATE_BLOCKS]; -}; - -struct morus1280_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_blocks)(void *state, const void *src, void *dst, - unsigned int length); - void (*crypt_tail)(void *state, const void *src, void *dst, - unsigned int length); -}; - -static void crypto_morus1280_glue_process_ad( - struct morus1280_state *state, - const struct morus1280_glue_ops *ops, - struct scatterlist *sg_src, unsigned int assoclen) -{ - struct scatter_walk walk; - struct morus1280_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= MORUS1280_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = MORUS1280_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE); - pos = 0; - left -= fill; - src += fill; - } - - ops->ad(state, src, left); - src += left & ~(MORUS1280_BLOCK_SIZE - 1); - left &= MORUS1280_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos); - ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE); - } -} - -static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, - struct morus1280_ops ops, - struct aead_request *req) -{ - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS1280_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); - - skcipher_walk_done(&walk, 0); - } -} - -int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct morus1280_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen == MORUS1280_BLOCK_SIZE) { - memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE); - } else if (keylen == MORUS1280_BLOCK_SIZE / 2) { - memcpy(ctx->key.bytes, key, keylen); - memcpy(ctx->key.bytes + keylen, key, keylen); - } else { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey); - -int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - return (authsize <= MORUS_MAX_AUTH_SIZE) ? 
0 : -EINVAL; -} -EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize); - -static void crypto_morus1280_glue_crypt(struct aead_request *req, - struct morus1280_ops ops, - unsigned int cryptlen, - struct morus1280_block *tag_xor) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); - struct morus1280_state state; - - kernel_fpu_begin(); - - ctx->ops->init(&state, &ctx->key, req->iv); - crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus1280_glue_process_crypt(&state, ops, req); - ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); - - kernel_fpu_end(); -} - -int crypto_morus1280_glue_encrypt(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); - struct morus1280_ops OPS = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_blocks = ctx->ops->enc, - .crypt_tail = ctx->ops->enc_tail, - }; - - struct morus1280_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag); - - scatterwalk_map_and_copy(tag.bytes, req->dst, - req->assoclen + cryptlen, authsize, 1); - return 0; -} -EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt); - -int crypto_morus1280_glue_decrypt(struct aead_request *req) -{ - static const u8 zeros[MORUS1280_BLOCK_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); - struct morus1280_ops OPS = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_blocks = ctx->ops->dec, - .crypt_tail = ctx->ops->dec_tail, - }; - - struct morus1280_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, - req->assoclen + cryptlen, authsize, 0); - - crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} -EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt); - -void crypto_morus1280_glue_init_ops(struct crypto_aead *aead, - const struct morus1280_glue_ops *ops) -{ - struct morus1280_ctx *ctx = crypto_aead_ctx(aead); - ctx->ops = ops; -} -EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops); - -int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); -} -EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey); - -int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead, - unsigned int authsize) -{ - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); -} -EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize); - -int cryptd_morus1280_glue_encrypt(struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - aead = &cryptd_tfm->base; - if (irq_fpu_usable() && (!in_atomic() || - !cryptd_aead_queued(cryptd_tfm))) - aead = cryptd_aead_child(cryptd_tfm); - - aead_request_set_tfm(req, aead); - - return crypto_aead_encrypt(req); -} -EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt); - -int cryptd_morus1280_glue_decrypt(struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - aead = &cryptd_tfm->base; - if (irq_fpu_usable() && (!in_atomic() || - !cryptd_aead_queued(cryptd_tfm))) - aead = cryptd_aead_child(cryptd_tfm); - - aead_request_set_tfm(req, aead); - - return crypto_aead_decrypt(req); -} -EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt); - -int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead) -{ - struct cryptd_aead *cryptd_tfm; - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - const char *name = crypto_aead_alg(aead)->base.cra_driver_name; - char internal_name[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name) - >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - - *ctx = cryptd_tfm; - crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); - return 0; -} -EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm); - -void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead) -{ - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - - cryptd_free_aead(*ctx); -} -EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for optimizations"); diff --git a/crypto/morus640_glue.c b/crypto/morus640_glue.c deleted file mode 100644 index c7e788cf..00000000 --- a/crypto/morus640_glue.c +++ /dev/null @@ -1,298 +0,0 @@ -/* - * The MORUS-640 Authenticated-Encryption Algorithm - * Common glue skeleton - * - * Copyright (c) 2016-2018 Ondrej Mosnacek - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct morus640_state { - struct morus640_block s[MORUS_STATE_BLOCKS]; -}; - -struct morus640_ops { - int (*skcipher_walk_init)(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); - - void (*crypt_blocks)(void *state, const void *src, void *dst, - unsigned int length); - void (*crypt_tail)(void *state, const void *src, void *dst, - unsigned int length); -}; - -static void crypto_morus640_glue_process_ad( - struct morus640_state *state, - const struct morus640_glue_ops *ops, - struct scatterlist *sg_src, unsigned int assoclen) -{ - struct scatter_walk walk; - struct morus640_block buf; - unsigned int pos = 0; - - scatterwalk_start(&walk, sg_src); - while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); - unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; - - if (pos + size >= MORUS640_BLOCK_SIZE) { - if (pos > 0) { - unsigned int fill = MORUS640_BLOCK_SIZE - pos; - memcpy(buf.bytes + pos, src, fill); - ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE); - pos = 0; - left -= fill; - src += fill; - } - - ops->ad(state, src, left); - src += left & ~(MORUS640_BLOCK_SIZE - 1); - left &= MORUS640_BLOCK_SIZE - 1; - } - - memcpy(buf.bytes + pos, src, left); - - pos += left; - assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); - } - - if (pos > 0) { - memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos); - ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE); - } -} - -static void crypto_morus640_glue_process_crypt(struct morus640_state *state, - struct morus640_ops ops, - struct aead_request *req) -{ - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS640_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); - - skcipher_walk_done(&walk, 0); - } -} - -int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct morus640_ctx *ctx = crypto_aead_ctx(aead); - - if (keylen != MORUS640_BLOCK_SIZE) { - crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE); - return 0; -} -EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey); - -int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - return (authsize <= MORUS_MAX_AUTH_SIZE) ? 
0 : -EINVAL; -} -EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize); - -static void crypto_morus640_glue_crypt(struct aead_request *req, - struct morus640_ops ops, - unsigned int cryptlen, - struct morus640_block *tag_xor) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus640_ctx *ctx = crypto_aead_ctx(tfm); - struct morus640_state state; - - kernel_fpu_begin(); - - ctx->ops->init(&state, &ctx->key, req->iv); - crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus640_glue_process_crypt(&state, ops, req); - ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); - - kernel_fpu_end(); -} - -int crypto_morus640_glue_encrypt(struct aead_request *req) -{ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus640_ctx *ctx = crypto_aead_ctx(tfm); - struct morus640_ops OPS = { - .skcipher_walk_init = skcipher_walk_aead_encrypt, - .crypt_blocks = ctx->ops->enc, - .crypt_tail = ctx->ops->enc_tail, - }; - - struct morus640_block tag = {}; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen; - - crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag); - - scatterwalk_map_and_copy(tag.bytes, req->dst, - req->assoclen + cryptlen, authsize, 1); - return 0; -} -EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt); - -int crypto_morus640_glue_decrypt(struct aead_request *req) -{ - static const u8 zeros[MORUS640_BLOCK_SIZE] = {}; - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct morus640_ctx *ctx = crypto_aead_ctx(tfm); - struct morus640_ops OPS = { - .skcipher_walk_init = skcipher_walk_aead_decrypt, - .crypt_blocks = ctx->ops->dec, - .crypt_tail = ctx->ops->dec_tail, - }; - - struct morus640_block tag; - unsigned int authsize = crypto_aead_authsize(tfm); - unsigned int cryptlen = req->cryptlen - authsize; - - scatterwalk_map_and_copy(tag.bytes, req->src, - req->assoclen + cryptlen, authsize, 0); - - crypto_morus640_glue_crypt(req, OPS, cryptlen, &tag); - - return crypto_memneq(tag.bytes, zeros, authsize) ? 
-EBADMSG : 0; -} -EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt); - -void crypto_morus640_glue_init_ops(struct crypto_aead *aead, - const struct morus640_glue_ops *ops) -{ - struct morus640_ctx *ctx = crypto_aead_ctx(aead); - ctx->ops = ops; -} -EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops); - -int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen) -{ - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); -} -EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey); - -int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead, - unsigned int authsize) -{ - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); -} -EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize); - -int cryptd_morus640_glue_encrypt(struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - aead = &cryptd_tfm->base; - if (irq_fpu_usable() && (!in_atomic() || - !cryptd_aead_queued(cryptd_tfm))) - aead = cryptd_aead_child(cryptd_tfm); - - aead_request_set_tfm(req, aead); - - return crypto_aead_encrypt(req); -} -EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt); - -int cryptd_morus640_glue_decrypt(struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - struct cryptd_aead *cryptd_tfm = *ctx; - - aead = &cryptd_tfm->base; - if (irq_fpu_usable() && (!in_atomic() || - !cryptd_aead_queued(cryptd_tfm))) - aead = cryptd_aead_child(cryptd_tfm); - - aead_request_set_tfm(req, aead); - - return crypto_aead_decrypt(req); -} -EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt); - -int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead) -{ - struct cryptd_aead *cryptd_tfm; - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - const char *name = crypto_aead_alg(aead)->base.cra_driver_name; - char internal_name[CRYPTO_MAX_ALG_NAME]; - - if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name) - >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - - *ctx = cryptd_tfm; - crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); - return 0; -} -EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm); - -void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead) -{ - struct cryptd_aead **ctx = crypto_aead_ctx(aead); - - cryptd_free_aead(*ctx); -} -EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ondrej Mosnacek "); -MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for optimizations"); -- cgit v1.2.3
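Once the generic modules above are loaded (or an architecture-optimized variant registers under the same "morus640"/"morus1280" cra_name), the ciphers are consumed through the regular in-kernel AEAD API. The sketch below is a minimal, synchronous encryption example and is not taken from the patches above; it assumes the 16-byte key, nonce and tag sizes implied by the setkey/setauthsize/ivsize definitions, abbreviates error handling, and uses stack buffers only for brevity (real callers should not place scatterlist buffers on the stack).

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int morus640_encrypt_example(void)
{
	u8 key[16] = { 0 };		/* example key material */
	u8 iv[16] = { 0 };		/* per-message nonce */
	u8 buf[64 + 16] = { 0 };	/* plaintext followed by room for the 16-byte tag */
	struct scatterlist sg;
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("morus640", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);			/* no associated data */
	aead_request_set_crypt(req, &sg, &sg, 64, iv);	/* encrypt 64 bytes in place */

	if (!err)
		err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
	crypto_free_aead(tfm);
	return err;
}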