Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig | 14
-rw-r--r--  crypto/Makefile | 2
-rw-r--r--  crypto/af_alg.c | 696
-rw-r--r--  crypto/ahash.c | 41
-rw-r--r--  crypto/algapi.c | 31
-rw-r--r--  crypto/algboss.c | 1
-rw-r--r--  crypto/algif_aead.c | 866
-rw-r--r--  crypto/algif_hash.c | 30
-rw-r--r--  crypto/algif_skcipher.c | 830
-rw-r--r--  crypto/api.c | 13
-rw-r--r--  crypto/asymmetric_keys/Kconfig | 1
-rw-r--r--  crypto/asymmetric_keys/Makefile | 1
-rw-r--r--  crypto/asymmetric_keys/asymmetric_type.c | 4
-rw-r--r--  crypto/asymmetric_keys/pkcs7_key_type.c | 1
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 8
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 30
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 1
-rw-r--r--  crypto/async_tx/Kconfig | 1
-rw-r--r--  crypto/async_tx/Makefile | 1
-rw-r--r--  crypto/authencesn.c | 5
-rw-r--r--  crypto/ccm.c | 4
-rw-r--r--  crypto/chacha20_generic.c | 9
-rw-r--r--  crypto/cryptd.c | 4
-rw-r--r--  crypto/ctr.c | 3
-rw-r--r--  crypto/cts.c | 6
-rw-r--r--  crypto/dh.c | 36
-rw-r--r--  crypto/dh_helper.c | 20
-rw-r--r--  crypto/drbg.c | 44
-rw-r--r--  crypto/ecc_curve_defs.h | 1
-rw-r--r--  crypto/ecdh.c | 57
-rw-r--r--  crypto/ecdh_helper.c | 2
-rw-r--r--  crypto/gcm.c | 55
-rw-r--r--  crypto/gf128mul.c | 13
-rw-r--r--  crypto/keywrap.c | 84
-rw-r--r--  crypto/lrw.c | 17
-rw-r--r--  crypto/pcbc.c | 12
-rw-r--r--  crypto/ripemd.h | 1
-rw-r--r--  crypto/rmd128.c | 2
-rw-r--r--  crypto/rmd160.c | 2
-rw-r--r--  crypto/rmd256.c | 2
-rw-r--r--  crypto/rmd320.c | 2
-rw-r--r--  crypto/rng.c | 6
-rw-r--r--  crypto/rsa-pkcs1pad.c | 16
-rw-r--r--  crypto/scompress.c | 55
-rw-r--r--  crypto/serpent_generic.c | 77
-rw-r--r--  crypto/shash.c | 10
-rw-r--r--  crypto/skcipher.c | 17
-rw-r--r--  crypto/sm3_generic.c | 210
-rw-r--r--  crypto/tcrypt.c | 217
-rw-r--r--  crypto/testmgr.c | 210
-rw-r--r--  crypto/testmgr.h | 74
-rw-r--r--  crypto/xor.c | 7
-rw-r--r--  crypto/xts.c | 14
53 files changed, 1894 insertions, 1972 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index caa770e5..f7911963 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Generic algorithms support
#
@@ -859,6 +860,17 @@ config CRYPTO_SHA3
References:
http://keccak.noekeon.org/
+config CRYPTO_SM3
+ tristate "SM3 digest algorithm"
+ select CRYPTO_HASH
+ help
+ SM3 secure hash function as defined by OSCCA GM/T 0004-2012 (SM3).
+ It is part of the Chinese Commercial Cryptography suite.
+
+ References:
+ http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+ https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
select CRYPTO_HASH
@@ -1753,6 +1765,8 @@ config CRYPTO_USER_API_AEAD
tristate "User-space interface for AEAD cipher algorithms"
depends on NET
select CRYPTO_AEAD
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_NULL
select CRYPTO_USER_API
help
This option enables the user-space interface for AEAD
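
For context, the CRYPTO_USER_API_AEAD option above exposes AEAD transforms to user space through AF_ALG sockets. The sketch below (not part of this patch) shows the socket setup that interface expects, assuming gcm(aes) is available; the all-zero key and the 16-byte tag size are placeholders, and error handling is omitted.

/* Illustrative user-space setup for an AF_ALG AEAD socket. */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* assumed to be available */
	};
	unsigned char key[16] = { 0 };		/* placeholder key */
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* Key and authentication tag size are configured on the tfm socket. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

	/* Data is exchanged over the accepted operation socket. */
	opfd = accept(tfmfd, NULL, 0);

	/* sendmsg()/recvmsg() on opfd drive the cipher operation; see the
	 * sketch after the af_alg.c hunk below. */

	close(opfd);
	close(tfmfd);
	return 0;
}
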
diff --git a/crypto/Makefile b/crypto/Makefile
index d41f0331..d674884b 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Cryptographic API
#
@@ -70,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 92a3d540..85cea9de 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
+#include <linux/sched/signal.h>
#include <linux/security.h>
struct alg_type_list {
@@ -480,32 +481,695 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
}
EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
+/**
+ * af_alg_alloc_tsgl - allocate the TX SGL
+ *
+ * @sk socket of connection to user space
+ * @return: 0 upon success, < 0 upon error
+ */
+int af_alg_alloc_tsgl(struct sock *sk)
{
- switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&completion->completion);
- reinit_completion(&completion->completion);
- err = completion->err;
- break;
- };
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ struct scatterlist *sg = NULL;
+
+ sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
+ if (!list_empty(&ctx->tsgl_list))
+ sg = sgl->sg;
+
+ if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+ sgl = sock_kmalloc(sk, sizeof(*sgl) +
+ sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+ GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+ if (sg)
+ sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+ list_add_tail(&sgl->list, &ctx->tsgl_list);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
+
+/**
+ * af_alg_count_tsgl - Count number of TX SG entries
+ *
+ * The counting starts from the beginning of the SGL to @bytes. If
+ * an offset is provided, the counting of the SG entries starts at the offset.
+ *
+ * @sk socket of connection to user space
+ * @bytes Count the number of SG entries holding given number of bytes.
+ * @offset Start the counting of SG entries from the given offset.
+ * @return Number of TX SG entries found given the constraints
+ */
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl, *tmp;
+ unsigned int i;
+ unsigned int sgl_count = 0;
+
+ if (!bytes)
+ return 0;
+
+ list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
+ struct scatterlist *sg = sgl->sg;
+
+ for (i = 0; i < sgl->cur; i++) {
+ size_t bytes_count;
+
+ /* Skip offset */
+ if (offset >= sg[i].length) {
+ offset -= sg[i].length;
+ bytes -= sg[i].length;
+ continue;
+ }
+
+ bytes_count = sg[i].length - offset;
+
+ offset = 0;
+ sgl_count++;
+
+ /* If we have seen requested number of bytes, stop */
+ if (bytes_count >= bytes)
+ return sgl_count;
+
+ bytes -= bytes_count;
+ }
+ }
+
+ return sgl_count;
+}
+EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
+
+/**
+ * af_alg_pull_tsgl - Release the specified buffers from TX SGL
+ *
+ * If @dst is non-null, reassign the pages to dst. The caller must release
+ * the pages. If @dst_offset is given only reassign the pages to @dst starting
+ * at the @dst_offset (byte). The caller must ensure that @dst is large
+ * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ *
+ * @sk socket of connection to user space
+ * @used Number of bytes to pull from TX SGL
+ * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+ * caller must release the buffers in dst.
+ * @dst_offset Reassign the TX SGL from given offset. All buffers before
+ * reaching the offset are released.
+ */
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+ size_t dst_offset)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ struct scatterlist *sg;
+ unsigned int i, j = 0;
+
+ while (!list_empty(&ctx->tsgl_list)) {
+ sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
+ list);
+ sg = sgl->sg;
+
+ for (i = 0; i < sgl->cur; i++) {
+ size_t plen = min_t(size_t, used, sg[i].length);
+ struct page *page = sg_page(sg + i);
+
+ if (!page)
+ continue;
+
+ /*
+ * Assumption: caller created af_alg_count_tsgl(len)
+ * SG entries in dst.
+ */
+ if (dst) {
+ if (dst_offset >= plen) {
+ /* discard page before offset */
+ dst_offset -= plen;
+ } else {
+ /* reassign page to dst after offset */
+ get_page(page);
+ sg_set_page(dst + j, page,
+ plen - dst_offset,
+ sg[i].offset + dst_offset);
+ dst_offset = 0;
+ j++;
+ }
+ }
+
+ sg[i].length -= plen;
+ sg[i].offset += plen;
+
+ used -= plen;
+ ctx->used -= plen;
+
+ if (sg[i].length)
+ return;
+
+ put_page(page);
+ sg_assign_page(sg + i, NULL);
+ }
+
+ list_del(&sgl->list);
+ sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
+ (MAX_SGL_ENTS + 1));
+ }
+
+ if (!ctx->used)
+ ctx->merge = 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
+
+/**
+ * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
+ *
+ * @areq Request holding the TX and RX SGL
+ */
+void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
+{
+ struct sock *sk = areq->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_rsgl *rsgl, *tmp;
+ struct scatterlist *tsgl;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
+ ctx->rcvused -= rsgl->sg_num_bytes;
+ af_alg_free_sg(&rsgl->sgl);
+ list_del(&rsgl->list);
+ if (rsgl != &areq->first_rsgl)
+ sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+ }
+
+ tsgl = areq->tsgl;
+ for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+ if (!sg_page(sg))
+ continue;
+ put_page(sg_page(sg));
+ }
+
+ if (areq->tsgl && areq->tsgl_entries)
+ sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
+}
+EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
+
+/**
+ * af_alg_wait_for_wmem - wait for availability of writable memory
+ *
+ * @sk socket of connection to user space
+ * @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @return 0 when writable memory is available, < 0 upon error
+ */
+int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int err = -ERESTARTSYS;
+ long timeout;
+
+ if (flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
+ err = 0;
+ break;
+ }
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
+EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);
-void af_alg_complete(struct crypto_async_request *req, int err)
+/**
+ * af_alg_wmem_wakeup - wakeup caller when writable memory is available
+ *
+ * @sk socket of connection to user space
+ */
+void af_alg_wmem_wakeup(struct sock *sk)
{
- struct af_alg_completion *completion = req->data;
+ struct socket_wq *wq;
- if (err == -EINPROGRESS)
+ if (!af_alg_writable(sk))
return;
- completion->err = err;
- complete(&completion->completion);
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
+
+/**
+ * af_alg_wait_for_data - wait for availability of TX data
+ *
+ * @sk socket of connection to user space
+ * @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @return 0 when data is available, < 0 upon error
+ */
+int af_alg_wait_for_data(struct sock *sk, unsigned flags)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ long timeout;
+ int err = -ERESTARTSYS;
+
+ if (flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
+ &wait)) {
+ err = 0;
+ break;
+ }
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
+
+/**
+ * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
+ *
+ * @sk socket of connection to user space
+ */
+
+void af_alg_data_wakeup(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct socket_wq *wq;
+
+ if (!ctx->used)
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(af_alg_data_wakeup);
+
+/**
+ * af_alg_sendmsg - implementation of sendmsg system call handler
+ *
+ * The sendmsg system call handler obtains the user data and stores it
+ * in ctx->tsgl_list. This implies allocation of the required number of
+ * struct af_alg_tsgl.
+ *
+ * In addition, the ctx is filled with the information sent via CMSG.
+ *
+ * @sock socket of connection to user space
+ * @msg message from user space
+ * @size size of message from user space
+ * @ivsize the size of the IV for the cipher operation to verify that the
+ * user-space-provided IV has the right size
+ * @return the number of bytes copied upon success, < 0 upon error
+ */
+int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ unsigned int ivsize)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ struct af_alg_control con = {};
+ long copied = 0;
+ bool enc = 0;
+ bool init = 0;
+ int err = 0;
+
+ if (msg->msg_controllen) {
+ err = af_alg_cmsg_send(msg, &con);
+ if (err)
+ return err;
+
+ init = 1;
+ switch (con.op) {
+ case ALG_OP_ENCRYPT:
+ enc = 1;
+ break;
+ case ALG_OP_DECRYPT:
+ enc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (con.iv && con.iv->ivlen != ivsize)
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ if (init) {
+ ctx->enc = enc;
+ if (con.iv)
+ memcpy(ctx->iv, con.iv->iv, ivsize);
+
+ ctx->aead_assoclen = con.aead_assoclen;
+ }
+
+ while (size) {
+ struct scatterlist *sg;
+ size_t len = size;
+ size_t plen;
+
+ /* use the existing memory in an allocated page */
+ if (ctx->merge) {
+ sgl = list_entry(ctx->tsgl_list.prev,
+ struct af_alg_tsgl, list);
+ sg = sgl->sg + sgl->cur - 1;
+ len = min_t(size_t, len,
+ PAGE_SIZE - sg->offset - sg->length);
+
+ err = memcpy_from_msg(page_address(sg_page(sg)) +
+ sg->offset + sg->length,
+ msg, len);
+ if (err)
+ goto unlock;
+
+ sg->length += len;
+ ctx->merge = (sg->offset + sg->length) &
+ (PAGE_SIZE - 1);
+
+ ctx->used += len;
+ copied += len;
+ size -= len;
+ continue;
+ }
+
+ if (!af_alg_writable(sk)) {
+ err = af_alg_wait_for_wmem(sk, msg->msg_flags);
+ if (err)
+ goto unlock;
+ }
+
+ /* allocate a new page */
+ len = min_t(unsigned long, len, af_alg_sndbuf(sk));
+
+ err = af_alg_alloc_tsgl(sk);
+ if (err)
+ goto unlock;
+
+ sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
+ list);
+ sg = sgl->sg;
+ if (sgl->cur)
+ sg_unmark_end(sg + sgl->cur - 1);
+
+ do {
+ unsigned int i = sgl->cur;
+
+ plen = min_t(size_t, len, PAGE_SIZE);
+
+ sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+ if (!sg_page(sg + i)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = memcpy_from_msg(page_address(sg_page(sg + i)),
+ msg, plen);
+ if (err) {
+ __free_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ goto unlock;
+ }
+
+ sg[i].length = plen;
+ len -= plen;
+ ctx->used += plen;
+ copied += plen;
+ size -= plen;
+ sgl->cur++;
+ } while (len && sgl->cur < MAX_SGL_ENTS);
+
+ if (!size)
+ sg_mark_end(sg + sgl->cur - 1);
+
+ ctx->merge = plen & (PAGE_SIZE - 1);
+ }
+
+ err = 0;
+
+ ctx->more = msg->msg_flags & MSG_MORE;
+
+unlock:
+ af_alg_data_wakeup(sk);
+ release_sock(sk);
+
+ return copied ?: err;
+}
+EXPORT_SYMBOL_GPL(af_alg_sendmsg);
+
+/**
+ * af_alg_sendpage - sendpage system call handler
+ *
+ * This is a generic implementation of sendpage to fill ctx->tsgl_list.
+ */
+ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ int err = -EINVAL;
+
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+
+ if (!size)
+ goto done;
+
+ if (!af_alg_writable(sk)) {
+ err = af_alg_wait_for_wmem(sk, flags);
+ if (err)
+ goto unlock;
+ }
+
+ err = af_alg_alloc_tsgl(sk);
+ if (err)
+ goto unlock;
+
+ ctx->merge = 0;
+ sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
+
+ if (sgl->cur)
+ sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+ sg_mark_end(sgl->sg + sgl->cur);
+
+ get_page(page);
+ sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+ sgl->cur++;
+ ctx->used += size;
+
+done:
+ ctx->more = flags & MSG_MORE;
+
+unlock:
+ af_alg_data_wakeup(sk);
+ release_sock(sk);
+
+ return err ?: size;
+}
+EXPORT_SYMBOL_GPL(af_alg_sendpage);
+
+/**
+ * af_alg_async_cb - AIO callback handler
+ *
+ * This handler cleans up the struct af_alg_async_req upon completion of the
+ * AIO operation.
+ *
+ * The number of bytes to be generated with the AIO operation must be set
+ * in areq->outlen before the AIO callback handler is invoked.
+ */
+void af_alg_async_cb(struct crypto_async_request *_req, int err)
+{
+ struct af_alg_async_req *areq = _req->data;
+ struct sock *sk = areq->sk;
+ struct kiocb *iocb = areq->iocb;
+ unsigned int resultlen;
+
+ lock_sock(sk);
+
+ /* Buffer size written by crypto operation. */
+ resultlen = areq->outlen;
+
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
+ __sock_put(sk);
+
+ iocb->ki_complete(iocb, err ? err : resultlen, 0);
+
+ release_sock(sk);
+}
+EXPORT_SYMBOL_GPL(af_alg_async_cb);
+
+/**
+ * af_alg_poll - poll system call handler
+ */
+unsigned int af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ unsigned int mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ if (!ctx->more || ctx->used)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (af_alg_writable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+EXPORT_SYMBOL_GPL(af_alg_poll);
+
+/**
+ * af_alg_alloc_areq - allocate struct af_alg_async_req
+ *
+ * @sk socket of connection to user space
+ * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
+ * @return allocated data structure or ERR_PTR upon error
+ */
+struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ unsigned int areqlen)
+{
+ struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+
+ if (unlikely(!areq))
+ return ERR_PTR(-ENOMEM);
+
+ areq->areqlen = areqlen;
+ areq->sk = sk;
+ areq->last_rsgl = NULL;
+ INIT_LIST_HEAD(&areq->rsgl_list);
+ areq->tsgl = NULL;
+ areq->tsgl_entries = 0;
+
+ return areq;
+}
+EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
+
+/**
+ * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
+ * operation
+ *
+ * @sk socket of connection to user space
+ * @msg user space message
+ * @flags flags used to invoke recvmsg with
+ * @areq instance of the cryptographic request that will hold the RX SGL
+ * @maxsize maximum number of bytes to be pulled from user space
+ * @outlen number of bytes in the RX SGL
+ * @return 0 on success, < 0 upon error
+ */
+int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
+ struct af_alg_async_req *areq, size_t maxsize,
+ size_t *outlen)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ size_t len = 0;
+
+ while (maxsize > len && msg_data_left(msg)) {
+ struct af_alg_rsgl *rsgl;
+ size_t seglen;
+ int err;
+
+ /* limit the amount of readable buffers */
+ if (!af_alg_readable(sk))
+ break;
+
+ if (!ctx->used) {
+ err = af_alg_wait_for_data(sk, flags);
+ if (err)
+ return err;
+ }
+
+ seglen = min_t(size_t, (maxsize - len),
+ msg_data_left(msg));
+
+ if (list_empty(&areq->rsgl_list)) {
+ rsgl = &areq->first_rsgl;
+ } else {
+ rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
+ if (unlikely(!rsgl))
+ return -ENOMEM;
+ }
+
+ rsgl->sgl.npages = 0;
+ list_add_tail(&rsgl->list, &areq->rsgl_list);
+
+ /* make one iovec available as scatterlist */
+ err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ if (err < 0)
+ return err;
+
+ /* chain the new scatterlist with previous one */
+ if (areq->last_rsgl)
+ af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
+
+ areq->last_rsgl = rsgl;
+ len += err;
+ ctx->rcvused += err;
+ rsgl->sg_num_bytes = err;
+ iov_iter_advance(&msg->msg_iter, err);
+ }
+
+ *outlen = len;
+ return 0;
}
-EXPORT_SYMBOL_GPL(af_alg_complete);
+EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
static int __init af_alg_init(void)
{
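
The af_alg_sendmsg() and af_alg_get_rsgl() helpers above carry the kernel side of the sendmsg/recvmsg data path. A user-space sketch of one encryption request against an already set-up AEAD operation socket follows (not part of this patch; opfd, the 12-byte IV, 8 bytes of AAD and 64 bytes of plaintext are illustrative assumptions, error handling is omitted).

/* Illustrative sendmsg()/read() round trip on an AF_ALG AEAD socket. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

static ssize_t aead_encrypt_once(int opfd, const unsigned char iv[12],
				 const unsigned char aad[8],
				 const unsigned char pt[64],
				 unsigned char out[8 + 64 + 16])
{
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 12) +
		  CMSG_SPACE(sizeof(__u32))] = { 0 };
	unsigned char txbuf[8 + 64];			/* AAD || PT */
	struct iovec iov = { .iov_base = txbuf, .iov_len = sizeof(txbuf) };
	struct msghdr msg = {
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;

	memcpy(txbuf, aad, 8);
	memcpy(txbuf + 8, pt, 64);

	/* ALG_SET_OP: request encryption (parsed by af_alg_cmsg_send()). */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_OP;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* ALG_SET_IV: ivlen must match the cipher IV size checked in
	 * af_alg_sendmsg(). */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_IV;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(struct af_alg_iv) + 12);
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = 12;
	memcpy(alg_iv->iv, iv, 12);

	/* ALG_SET_AEAD_ASSOCLEN: the first 8 bytes of the TX data are AAD. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = 8;

	sendmsg(opfd, &msg, 0);

	/* For encryption the result is AAD || CT || tag, so the RX buffer
	 * must cover all three parts. */
	return read(opfd, out, 8 + 64 + 16);
}
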
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 826cd7ab..3a35d67d 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
return err;
err = op(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
req->base.complete = ahash_def_finup_done2;
err = crypto_ahash_reqtfm(req)->final(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
return err;
err = tfm->update(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
@@ -588,6 +582,35 @@ int crypto_unregister_ahash(struct ahash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
+int crypto_register_ahashes(struct ahash_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_ahash(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_ahash(&algs[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_ahashes);
+
+void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
+
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst)
{
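
The new crypto_register_ahashes()/crypto_unregister_ahashes() helpers let a driver register a whole array of hash variants in one call. A minimal sketch of the intended use (the my_sha_algs[] array and module names are hypothetical and would be defined elsewhere in such a driver):

/* Illustrative batch registration of ahash algorithms from a driver. */
#include <linux/module.h>
#include <crypto/internal/hash.h>

extern struct ahash_alg my_sha_algs[2];	/* hypothetical array defined elsewhere */

static int __init my_hash_mod_init(void)
{
	/* On failure the helper unwinds the entries already registered
	 * and returns the error code. */
	return crypto_register_ahashes(my_sha_algs, ARRAY_SIZE(my_sha_algs));
}

static void __exit my_hash_mod_exit(void)
{
	crypto_unregister_ahashes(my_sha_algs, ARRAY_SIZE(my_sha_algs));
}

module_init(my_hash_mod_init);
module_exit(my_hash_mod_exit);
MODULE_LICENSE("GPL");
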
diff --git a/crypto/algapi.c b/crypto/algapi.c
index e4cc7615..60d7366e 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
- err = -EBUSY;
- if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -ENOSPC;
goto out;
+ }
+ err = -EBUSY;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
@@ -975,13 +977,15 @@ void crypto_inc(u8 *a, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_inc);
-void __crypto_xor(u8 *dst, const u8 *src, unsigned int len)
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
int relalign = 0;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
int size = sizeof(unsigned long);
- int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1);
+ int d = (((unsigned long)dst ^ (unsigned long)src1) |
+ ((unsigned long)dst ^ (unsigned long)src2)) &
+ (size - 1);
relalign = d ? 1 << __ffs(d) : size;
@@ -992,34 +996,37 @@ void __crypto_xor(u8 *dst, const u8 *src, unsigned int len)
* process the remainder of the input using optimal strides.
*/
while (((unsigned long)dst & (relalign - 1)) && len > 0) {
- *dst++ ^= *src++;
+ *dst++ = *src1++ ^ *src2++;
len--;
}
}
while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
- *(u64 *)dst ^= *(u64 *)src;
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
dst += 8;
- src += 8;
+ src1 += 8;
+ src2 += 8;
len -= 8;
}
while (len >= 4 && !(relalign & 3)) {
- *(u32 *)dst ^= *(u32 *)src;
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
dst += 4;
- src += 4;
+ src1 += 4;
+ src2 += 4;
len -= 4;
}
while (len >= 2 && !(relalign & 1)) {
- *(u16 *)dst ^= *(u16 *)src;
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
dst += 2;
- src += 2;
+ src1 += 2;
+ src2 += 2;
len -= 2;
}
while (len--)
- *dst++ ^= *src++;
+ *dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);
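
__crypto_xor() now takes two source operands, so callers can XOR into a distinct destination without copying one operand first. A minimal sketch of the intended use, assuming the crypto_xor_cpy() inline wrapper in <crypto/algapi.h> that accompanies this interface change:

/* Illustrative use of the three-operand XOR from a cipher mode helper. */
#include <crypto/algapi.h>

static void xor_keystream(u8 *dst, const u8 *src, const u8 *keystream,
			  unsigned int len)
{
	/* Previously: memcpy(dst, src, len); crypto_xor(dst, keystream, len); */
	crypto_xor_cpy(dst, src, keystream, len);
}
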
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 960d8548..5e6df2a0 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -122,7 +122,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
int notnum = 0;
name = ++p;
- len = 0;
for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
notnum |= !isdigit(*p);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index be117495..aacae083 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -5,88 +5,56 @@
*
* This file provides the user-space API for AEAD ciphers.
*
- * This file is derived from algif_skcipher.c.
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
+ *
+ * The following concept of the memory management is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request are released. The extracted TX SGL parts are released together with
+ * the RX SGL.
*/
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
+#include <crypto/skcipher.h>
+#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
-#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
-struct aead_sg_list {
- unsigned int cur;
- struct scatterlist sg[ALG_MAX_PAGES];
-};
-
-struct aead_async_rsgl {
- struct af_alg_sgl sgl;
- struct list_head list;
-};
-
-struct aead_async_req {
- struct scatterlist *tsgl;
- struct aead_async_rsgl first_rsgl;
- struct list_head list;
- struct kiocb *iocb;
- struct sock *sk;
- unsigned int tsgls;
- char iv[];
-};
-
struct aead_tfm {
struct crypto_aead *aead;
bool has_key;
+ struct crypto_skcipher *null_tfm;
};
-struct aead_ctx {
- struct aead_sg_list tsgl;
- struct aead_async_rsgl first_rsgl;
- struct list_head list;
-
- void *iv;
-
- struct af_alg_completion completion;
-
- unsigned long used;
-
- unsigned int len;
- bool more;
- bool merge;
- bool enc;
-
- size_t aead_assoclen;
- struct aead_request aead_req;
-};
-
-static inline int aead_sndbuf(struct sock *sk)
+static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
-
- return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
- ctx->used, 0);
-}
-
-static inline bool aead_writable(struct sock *sk)
-{
- return PAGE_SIZE <= aead_sndbuf(sk);
-}
-
-static inline bool aead_sufficient_data(struct aead_ctx *ctx)
-{
- unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ unsigned int as = crypto_aead_authsize(tfm);
/*
* The minimum amount of memory needed for an AEAD cipher is
@@ -95,484 +63,58 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
-static void aead_reset_ctx(struct aead_ctx *ctx)
-{
- struct aead_sg_list *sgl = &ctx->tsgl;
-
- sg_init_table(sgl->sg, ALG_MAX_PAGES);
- sgl->cur = 0;
- ctx->used = 0;
- ctx->more = 0;
- ctx->merge = 0;
-}
-
-static void aead_put_sgl(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct scatterlist *sg = sgl->sg;
- unsigned int i;
-
- for (i = 0; i < sgl->cur; i++) {
- if (!sg_page(sg + i))
- continue;
-
- put_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- }
- aead_reset_ctx(ctx);
-}
-
-static void aead_wmem_wakeup(struct sock *sk)
-{
- struct socket_wq *wq;
-
- if (!aead_writable(sk))
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- rcu_read_unlock();
-}
-
-static int aead_wait_for_data(struct sock *sk, unsigned flags)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- long timeout;
- int err = -ERESTARTSYS;
-
- if (flags & MSG_DONTWAIT)
- return -EAGAIN;
-
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- add_wait_queue(sk_sleep(sk), &wait);
- for (;;) {
- if (signal_pending(current))
- break;
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
- err = 0;
- break;
- }
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- return err;
-}
-
-static void aead_data_wakeup(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct socket_wq *wq;
-
- if (ctx->more)
- return;
- if (!ctx->used)
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- rcu_read_unlock();
-}
-
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned ivsize =
- crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct af_alg_control con = {};
- long copied = 0;
- bool enc = 0;
- bool init = 0;
- int err = -EINVAL;
-
- if (msg->msg_controllen) {
- err = af_alg_cmsg_send(msg, &con);
- if (err)
- return err;
-
- init = 1;
- switch (con.op) {
- case ALG_OP_ENCRYPT:
- enc = 1;
- break;
- case ALG_OP_DECRYPT:
- enc = 0;
- break;
- default:
- return -EINVAL;
- }
-
- if (con.iv && con.iv->ivlen != ivsize)
- return -EINVAL;
- }
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (init) {
- ctx->enc = enc;
- if (con.iv)
- memcpy(ctx->iv, con.iv->iv, ivsize);
-
- ctx->aead_assoclen = con.aead_assoclen;
- }
-
- while (size) {
- size_t len = size;
- struct scatterlist *sg = NULL;
-
- /* use the existing memory in an allocated page */
- if (ctx->merge) {
- sg = sgl->sg + sgl->cur - 1;
- len = min_t(unsigned long, len,
- PAGE_SIZE - sg->offset - sg->length);
- err = memcpy_from_msg(page_address(sg_page(sg)) +
- sg->offset + sg->length,
- msg, len);
- if (err)
- goto unlock;
-
- sg->length += len;
- ctx->merge = (sg->offset + sg->length) &
- (PAGE_SIZE - 1);
-
- ctx->used += len;
- copied += len;
- size -= len;
- continue;
- }
-
- if (!aead_writable(sk)) {
- /* user space sent too much data */
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- goto unlock;
- }
-
- /* allocate a new page */
- len = min_t(unsigned long, size, aead_sndbuf(sk));
- while (len) {
- size_t plen = 0;
-
- if (sgl->cur >= ALG_MAX_PAGES) {
- aead_put_sgl(sk);
- err = -E2BIG;
- goto unlock;
- }
-
- sg = sgl->sg + sgl->cur;
- plen = min_t(size_t, len, PAGE_SIZE);
-
- sg_assign_page(sg, alloc_page(GFP_KERNEL));
- err = -ENOMEM;
- if (!sg_page(sg))
- goto unlock;
-
- err = memcpy_from_msg(page_address(sg_page(sg)),
- msg, plen);
- if (err) {
- __free_page(sg_page(sg));
- sg_assign_page(sg, NULL);
- goto unlock;
- }
-
- sg->offset = 0;
- sg->length = plen;
- len -= plen;
- ctx->used += plen;
- copied += plen;
- sgl->cur++;
- size -= plen;
- ctx->merge = plen & (PAGE_SIZE - 1);
- }
- }
-
- err = 0;
-
- ctx->more = msg->msg_flags & MSG_MORE;
- if (!ctx->more && !aead_sufficient_data(ctx)) {
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- }
-
-unlock:
- aead_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: copied;
-}
-
-static ssize_t aead_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct aead_sg_list *sgl = &ctx->tsgl;
- int err = -EINVAL;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
- if (sgl->cur >= ALG_MAX_PAGES)
- return -E2BIG;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (!size)
- goto done;
-
- if (!aead_writable(sk)) {
- /* user space sent too much data */
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- goto unlock;
- }
-
- ctx->merge = 0;
-
- get_page(page);
- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
- sgl->cur++;
- ctx->used += size;
-
- err = 0;
-
-done:
- ctx->more = flags & MSG_MORE;
- if (!ctx->more && !aead_sufficient_data(ctx)) {
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- }
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
-unlock:
- aead_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: size;
+ return af_alg_sendmsg(sock, msg, size, ivsize);
}
-#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
- ((char *)req + sizeof(struct aead_request) + \
- crypto_aead_reqsize(tfm))
-
- #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
- crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
- sizeof(struct aead_request)
-
-static void aead_async_cb(struct crypto_async_request *_req, int err)
+static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+ struct scatterlist *src,
+ struct scatterlist *dst, unsigned int len)
{
- struct aead_request *req = _req->data;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
- struct sock *sk = areq->sk;
- struct scatterlist *sg = areq->tsgl;
- struct aead_async_rsgl *rsgl;
- struct kiocb *iocb = areq->iocb;
- unsigned int i, reqlen = GET_REQ_SIZE(tfm);
-
- list_for_each_entry(rsgl, &areq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &areq->first_rsgl)
- sock_kfree_s(sk, rsgl, sizeof(*rsgl));
- }
+ SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
- for (i = 0; i < areq->tsgls; i++)
- put_page(sg_page(sg + i));
+ skcipher_request_set_tfm(skreq, null_tfm);
+ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, src, dst, len, NULL);
- sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
- sock_kfree_s(sk, req, reqlen);
- __sock_put(sk);
- iocb->ki_complete(iocb, err, err);
+ return crypto_skcipher_encrypt(skreq);
}
-static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
- int flags)
+static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
- struct aead_async_req *areq;
- struct aead_request *req = NULL;
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_skcipher *null_tfm = aeadc->null_tfm;
unsigned int as = crypto_aead_authsize(tfm);
- unsigned int i, reqlen = GET_REQ_SIZE(tfm);
- int err = -ENOMEM;
- unsigned long used;
- size_t outlen = 0;
- size_t usedpages = 0;
-
- lock_sock(sk);
- if (ctx->more) {
- err = aead_wait_for_data(sk, flags);
- if (err)
- goto unlock;
- }
-
- if (!aead_sufficient_data(ctx))
- goto unlock;
-
- used = ctx->used;
- if (ctx->enc)
- outlen = used + as;
- else
- outlen = used - as;
-
- req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
- if (unlikely(!req))
- goto unlock;
-
- areq = GET_ASYM_REQ(req, tfm);
- memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
- INIT_LIST_HEAD(&areq->list);
- areq->iocb = msg->msg_iocb;
- areq->sk = sk;
- memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
- aead_request_set_tfm(req, tfm);
- aead_request_set_ad(req, ctx->aead_assoclen);
- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- aead_async_cb, req);
- used -= ctx->aead_assoclen;
-
- /* take over all tx sgls from ctx */
- areq->tsgl = sock_kmalloc(sk,
- sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
- GFP_KERNEL);
- if (unlikely(!areq->tsgl))
- goto free;
-
- sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
- for (i = 0; i < sgl->cur; i++)
- sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
- sgl->sg[i].length, sgl->sg[i].offset);
-
- areq->tsgls = sgl->cur;
-
- /* create rx sgls */
- while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
- size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
- (outlen - usedpages));
-
- if (list_empty(&areq->list)) {
- rsgl = &areq->first_rsgl;
-
- } else {
- rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
- if (unlikely(!rsgl)) {
- err = -ENOMEM;
- goto free;
- }
- }
- rsgl->sgl.npages = 0;
- list_add_tail(&rsgl->list, &areq->list);
-
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
- if (err < 0)
- goto free;
-
- usedpages += err;
-
- /* chain the new scatterlist with previous one */
- if (last_rsgl)
- af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
- last_rsgl = rsgl;
-
- iov_iter_advance(&msg->msg_iter, err);
- }
-
- /* ensure output buffer is sufficiently large */
- if (usedpages < outlen) {
- err = -EINVAL;
- goto unlock;
- }
-
- aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
- areq->iv);
- err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
- if (err) {
- if (err == -EINPROGRESS) {
- sock_hold(sk);
- err = -EIOCBQUEUED;
- aead_reset_ctx(ctx);
- goto unlock;
- } else if (err == -EBADMSG) {
- aead_put_sgl(sk);
- }
- goto free;
- }
- aead_put_sgl(sk);
-
-free:
- list_for_each_entry(rsgl, &areq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &areq->first_rsgl)
- sock_kfree_s(sk, rsgl, sizeof(*rsgl));
- }
- if (areq->tsgl)
- sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
- if (req)
- sock_kfree_s(sk, req, reqlen);
-unlock:
- aead_wmem_wakeup(sk);
- release_sock(sk);
- return err ? err : outlen;
-}
-
-static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct aead_async_rsgl *last_rsgl = NULL;
- struct aead_async_rsgl *rsgl, *tmp;
- int err = -EINVAL;
- unsigned long used = 0;
- size_t outlen = 0;
- size_t usedpages = 0;
-
- lock_sock(sk);
+ struct af_alg_async_req *areq;
+ struct af_alg_tsgl *tsgl;
+ struct scatterlist *src;
+ int err = 0;
+ size_t used = 0; /* [in] TX bufs to be en/decrypted */
+ size_t outlen = 0; /* [out] RX bufs produced by kernel */
+ size_t usedpages = 0; /* [in] RX bufs to be used from user */
+ size_t processed = 0; /* [in] TX bufs to be consumed */
/*
- * Please see documentation of aead_request_set_crypt for the
- * description of the AEAD memory structure expected from the caller.
+ * Data length provided by caller via sendmsg/sendpage that has not
+ * yet been processed.
*/
-
- if (ctx->more) {
- err = aead_wait_for_data(sk, flags);
- if (err)
- goto unlock;
- }
-
- /* data length provided by caller via sendmsg/sendpage */
used = ctx->used;
/*
@@ -584,8 +126,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
* the error message in sendmsg/sendpage and still call recvmsg. This
* check here protects the kernel integrity.
*/
- if (!aead_sufficient_data(ctx))
- goto unlock;
+ if (!aead_sufficient_data(sk))
+ return -EINVAL;
/*
* Calculate the minimum output buffer size holding the result of the
@@ -606,104 +148,191 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
*/
used -= ctx->aead_assoclen;
- /* convert iovecs of output buffers into scatterlists */
- while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
- size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
- (outlen - usedpages));
-
- if (list_empty(&ctx->list)) {
- rsgl = &ctx->first_rsgl;
- } else {
- rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
- if (unlikely(!rsgl)) {
- err = -ENOMEM;
- goto unlock;
- }
- }
- rsgl->sgl.npages = 0;
- list_add_tail(&rsgl->list, &ctx->list);
+ /* Allocate cipher request for current operation. */
+ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+ crypto_aead_reqsize(tfm));
+ if (IS_ERR(areq))
+ return PTR_ERR(areq);
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
- if (err < 0)
- goto unlock;
- usedpages += err;
- /* chain the new scatterlist with previous one */
- if (last_rsgl)
- af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+ /* convert iovecs of output buffers into RX SGL */
+ err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
+ if (err)
+ goto free;
- last_rsgl = rsgl;
+ /*
+ * Ensure output buffer is sufficiently large. If the caller provides
+ * less buffer space, only use the relative required input size. This
+ * allows AIO operation where the caller sent all data to be processed
+ * and the AIO operation performs the operation on the different chunks
+ * of the input data.
+ */
+ if (usedpages < outlen) {
+ size_t less = outlen - usedpages;
- iov_iter_advance(&msg->msg_iter, err);
+ if (used < less) {
+ err = -EINVAL;
+ goto free;
+ }
+ used -= less;
+ outlen -= less;
}
- /* ensure output buffer is sufficiently large */
- if (usedpages < outlen) {
- err = -EINVAL;
- goto unlock;
- }
+ processed = used + ctx->aead_assoclen;
+ tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
- sg_mark_end(sgl->sg + sgl->cur - 1);
- aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
- used, ctx->iv);
- aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
+ /*
+ * Copy of AAD from source to destination
+ *
+ * The AAD is copied to the destination buffer without change. Even
+ * when user space uses an in-place cipher operation, the kernel
+ * will copy the data as it does not see whether such an in-place operation
+ * is initiated.
+ *
+ * For efficiency, the following implementation ensures that the
+ * ciphers are invoked to perform a crypto operation in-place. This
+ * is achieved by memory management specified as follows.
+ */
- err = af_alg_wait_for_completion(ctx->enc ?
- crypto_aead_encrypt(&ctx->aead_req) :
- crypto_aead_decrypt(&ctx->aead_req),
- &ctx->completion);
+ /* Use the RX SGL as source (and destination) for crypto op. */
+ src = areq->first_rsgl.sgl.sg;
+
+ if (ctx->enc) {
+ /*
+ * Encryption operation - The in-place cipher operation is
+ * achieved by the following operation:
+ *
+ * TX SGL: AAD || PT
+ * | |
+ * | copy |
+ * v v
+ * RX SGL: AAD || PT || Tag
+ */
+ err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ areq->first_rsgl.sgl.sg, processed);
+ if (err)
+ goto free;
+ af_alg_pull_tsgl(sk, processed, NULL, 0);
+ } else {
+ /*
+ * Decryption operation - To achieve an in-place cipher
+ * operation, the following SGL structure is used:
+ *
+ * TX SGL: AAD || CT || Tag
+ * | | ^
+ * | copy | | Create SGL link.
+ * v v |
+ * RX SGL: AAD || CT ----+
+ */
+
+ /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+ err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ areq->first_rsgl.sgl.sg, outlen);
+ if (err)
+ goto free;
- if (err) {
- /* EBADMSG implies a valid cipher operation took place */
- if (err == -EBADMSG)
- aead_put_sgl(sk);
+ /* Create TX SGL for tag and chain it to RX SGL. */
+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
+ processed - as);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
+ areq->tsgl_entries,
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
+ goto free;
+ }
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+
+ /* Release TX SGL, except for tag data and reassign tag data. */
+ af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
+
+ /* chain the areq TX SGL holding the tag with RX SGL */
+ if (usedpages) {
+ /* RX SGL present */
+ struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+
+ sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+ sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
+ areq->tsgl);
+ } else
+ /* no RX SGL present (e.g. authentication only) */
+ src = areq->tsgl;
+ }
- goto unlock;
+ /* Initialize the crypto operation */
+ aead_request_set_crypt(&areq->cra_u.aead_req, src,
+ areq->first_rsgl.sgl.sg, used, ctx->iv);
+ aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
+ aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
+
+ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+ /* AIO operation */
+ areq->iocb = msg->msg_iocb;
+ aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_async_cb, areq);
+ err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+ crypto_aead_decrypt(&areq->cra_u.aead_req);
+ } else {
+ /* Synchronous operation */
+ aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
+ crypto_aead_encrypt(&areq->cra_u.aead_req) :
+ crypto_aead_decrypt(&areq->cra_u.aead_req),
+ &ctx->wait);
}
- aead_put_sgl(sk);
- err = 0;
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS) {
+ sock_hold(sk);
-unlock:
- list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- list_del(&rsgl->list);
- if (rsgl != &ctx->first_rsgl)
- sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+ /* Remember output size that will be generated. */
+ areq->outlen = outlen;
+
+ return -EIOCBQUEUED;
}
- INIT_LIST_HEAD(&ctx->list);
- aead_wmem_wakeup(sk);
- release_sock(sk);
- return err ? err : outlen;
-}
+free:
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
-static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
- int flags)
-{
- return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
- aead_recvmsg_async(sock, msg, flags) :
- aead_recvmsg_sync(sock, msg, flags);
+ return err ? err : outlen;
}
-static unsigned int aead_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned int mask;
-
- sock_poll_wait(file, sk_sleep(sk), wait);
- mask = 0;
+ int ret = 0;
- if (!ctx->more)
- mask |= POLLIN | POLLRDNORM;
+ lock_sock(sk);
+ while (msg_data_left(msg)) {
+ int err = _aead_recvmsg(sock, msg, ignored, flags);
+
+ /*
+ * This error covers -EIOCBQUEUED which implies that we can
+ * only handle one AIO request. If the caller wants to have
+ * multiple AIO requests in parallel, he must make multiple
+ * separate AIO calls.
+ *
+ * Also return the error if no data has been processed so far.
+ */
+ if (err <= 0) {
+ if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
+ ret = err;
+ goto out;
+ }
- if (aead_writable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ ret += err;
+ }
- return mask;
+out:
+ af_alg_wmem_wakeup(sk);
+ release_sock(sk);
+ return ret;
}
static struct proto_ops algif_aead_ops = {
@@ -723,9 +352,9 @@ static struct proto_ops algif_aead_ops = {
.release = af_alg_release,
.sendmsg = aead_sendmsg,
- .sendpage = aead_sendpage,
+ .sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
- .poll = aead_poll,
+ .poll = af_alg_poll,
};
static int aead_check_key(struct socket *sock)
@@ -787,7 +416,7 @@ static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
if (err)
return err;
- return aead_sendpage(sock, page, offset, size, flags);
+ return af_alg_sendpage(sock, page, offset, size, flags);
}
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
@@ -821,13 +450,14 @@ static struct proto_ops algif_aead_ops_nokey = {
.sendmsg = aead_sendmsg_nokey,
.sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
- .poll = aead_poll,
+ .poll = af_alg_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
{
struct aead_tfm *tfm;
struct crypto_aead *aead;
+ struct crypto_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
@@ -839,7 +469,15 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
return ERR_CAST(aead);
}
+ null_tfm = crypto_get_default_null_skcipher2();
+ if (IS_ERR(null_tfm)) {
+ crypto_free_aead(aead);
+ kfree(tfm);
+ return ERR_CAST(null_tfm);
+ }
+
tfm->aead = aead;
+ tfm->null_tfm = null_tfm;
return tfm;
}
@@ -873,12 +511,15 @@ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
static void aead_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned int ivlen = crypto_aead_ivsize(
- crypto_aead_reqtfm(&ctx->aead_req));
-
- WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
- aead_put_sgl(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
+
+ af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ crypto_put_default_null_skcipher2();
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
@@ -886,11 +527,11 @@ static void aead_sock_destruct(struct sock *sk)
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
- struct aead_ctx *ctx;
+ struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
- unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+ unsigned int len = sizeof(*ctx);
unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
@@ -905,23 +546,18 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
}
memset(ctx->iv, 0, ivlen);
+ INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
+ ctx->rcvused = 0;
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- ctx->tsgl.cur = 0;
ctx->aead_assoclen = 0;
- af_alg_init_completion(&ctx->completion);
- sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
- INIT_LIST_HEAD(&ctx->list);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
- aead_request_set_tfm(&ctx->aead_req, aead);
- aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
-
sk->sk_destruct = aead_sock_destruct;
return 0;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5e92bd27..76d2e716 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -26,7 +26,7 @@ struct hash_ctx {
u8 *result;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
unsigned int len;
bool more;
@@ -88,8 +88,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if ((msg->msg_flags & MSG_MORE))
hash_free_result(sk, ctx);
- err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
if (err)
goto unlock;
}
@@ -110,8 +109,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
- err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+ &ctx->wait);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
@@ -129,8 +128,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
}
unlock:
@@ -171,7 +170,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
}
@@ -179,7 +178,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
err = crypto_ahash_update(&ctx->req);
}
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
@@ -215,17 +214,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
if (!result && !ctx->more) {
- err = af_alg_wait_for_completion(
- crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
if (!result || ctx->more) {
ctx->more = 0;
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
@@ -476,13 +474,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
+ crypto_req_done, &ctx->wait);
sk->sk_destruct = hash_sock_destruct;
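
The algif_hash conversion above replaces af_alg_wait_for_completion()/af_alg_complete() with the generic crypto_wait_req()/crypto_req_done() helpers from <linux/crypto.h>. A minimal sketch of that wait pattern for a synchronous digest (function name and parameters are illustrative):

/* Illustrative synchronous ahash digest using the crypto_wait_req() pattern. */
#include <linux/crypto.h>
#include <crypto/hash.h>

static int digest_one_sgl(struct crypto_ahash *tfm, struct scatterlist *sg,
			  unsigned int len, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the
	 * final status delivered through crypto_req_done(). */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return err;
}
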
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 43839b00..9954b078 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -10,6 +10,21 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
+ * The following concept of the memory management is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request are released. The extracted TX SGL parts are released together with
+ * the RX SGL.
*/
#include <crypto/scatterwalk.h>
@@ -18,279 +33,16 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
-#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_sg_list {
- struct list_head list;
-
- int cur;
-
- struct scatterlist sg[0];
-};
-
struct skcipher_tfm {
struct crypto_skcipher *skcipher;
bool has_key;
};
-struct skcipher_ctx {
- struct list_head tsgl;
- struct af_alg_sgl rsgl;
-
- void *iv;
-
- struct af_alg_completion completion;
-
- atomic_t inflight;
- size_t used;
-
- unsigned int len;
- bool more;
- bool merge;
- bool enc;
-
- struct skcipher_request req;
-};
-
-struct skcipher_async_rsgl {
- struct af_alg_sgl sgl;
- struct list_head list;
-};
-
-struct skcipher_async_req {
- struct kiocb *iocb;
- struct skcipher_async_rsgl first_sgl;
- struct list_head list;
- struct scatterlist *tsg;
- atomic_t *inflight;
- struct skcipher_request req;
-};
-
-#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
- sizeof(struct scatterlist) - 1)
-
-static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
-{
- struct skcipher_async_rsgl *rsgl, *tmp;
- struct scatterlist *sgl;
- struct scatterlist *sg;
- int i, n;
-
- list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &sreq->first_sgl)
- kfree(rsgl);
- }
- sgl = sreq->tsg;
- n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
-
- kfree(sreq->tsg);
-}
-
-static void skcipher_async_cb(struct crypto_async_request *req, int err)
-{
- struct skcipher_async_req *sreq = req->data;
- struct kiocb *iocb = sreq->iocb;
-
- atomic_dec(sreq->inflight);
- skcipher_free_async_sgls(sreq);
- kzfree(sreq);
- iocb->ki_complete(iocb, err, err);
-}
-
-static inline int skcipher_sndbuf(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
-
- return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
- ctx->used, 0);
-}
-
-static inline bool skcipher_writable(struct sock *sk)
-{
- return PAGE_SIZE <= skcipher_sndbuf(sk);
-}
-
-static int skcipher_alloc_sgl(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg = NULL;
-
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
- if (!list_empty(&ctx->tsgl))
- sg = sgl->sg;
-
- if (!sg || sgl->cur >= MAX_SGL_ENTS) {
- sgl = sock_kmalloc(sk, sizeof(*sgl) +
- sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
- GFP_KERNEL);
- if (!sgl)
- return -ENOMEM;
-
- sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
- sgl->cur = 0;
-
- if (sg)
- sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
-
- list_add_tail(&sgl->list, &ctx->tsgl);
- }
-
- return 0;
-}
-
-static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int i;
-
- while (!list_empty(&ctx->tsgl)) {
- sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
- list);
- sg = sgl->sg;
-
- for (i = 0; i < sgl->cur; i++) {
- size_t plen = min_t(size_t, used, sg[i].length);
-
- if (!sg_page(sg + i))
- continue;
-
- sg[i].length -= plen;
- sg[i].offset += plen;
-
- used -= plen;
- ctx->used -= plen;
-
- if (sg[i].length)
- return;
- if (put)
- put_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- }
-
- list_del(&sgl->list);
- sock_kfree_s(sk, sgl,
- sizeof(*sgl) + sizeof(sgl->sg[0]) *
- (MAX_SGL_ENTS + 1));
- }
-
- if (!ctx->used)
- ctx->merge = 0;
-}
-
-static void skcipher_free_sgl(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
-
- skcipher_pull_sgl(sk, ctx->used, 1);
-}
-
-static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err = -ERESTARTSYS;
- long timeout;
-
- if (flags & MSG_DONTWAIT)
- return -EAGAIN;
-
- sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- for (;;) {
- if (signal_pending(current))
- break;
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
- err = 0;
- break;
- }
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- return err;
-}
-
-static void skcipher_wmem_wakeup(struct sock *sk)
-{
- struct socket_wq *wq;
-
- if (!skcipher_writable(sk))
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- rcu_read_unlock();
-}
-
-static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- long timeout;
- int err = -ERESTARTSYS;
-
- if (flags & MSG_DONTWAIT) {
- return -EAGAIN;
- }
-
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- for (;;) {
- if (signal_pending(current))
- break;
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
- err = 0;
- break;
- }
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- return err;
-}
-
-static void skcipher_data_wakeup(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct socket_wq *wq;
-
- if (!ctx->used)
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- rcu_read_unlock();
-}
-
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
@@ -298,445 +50,142 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
- struct skcipher_sg_list *sgl;
- struct af_alg_control con = {};
- long copied = 0;
- bool enc = 0;
- bool init = 0;
- int err;
- int i;
-
- if (msg->msg_controllen) {
- err = af_alg_cmsg_send(msg, &con);
- if (err)
- return err;
-
- init = 1;
- switch (con.op) {
- case ALG_OP_ENCRYPT:
- enc = 1;
- break;
- case ALG_OP_DECRYPT:
- enc = 0;
- break;
- default:
- return -EINVAL;
- }
-
- if (con.iv && con.iv->ivlen != ivsize)
- return -EINVAL;
- }
-
- err = -EINVAL;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (init) {
- ctx->enc = enc;
- if (con.iv)
- memcpy(ctx->iv, con.iv->iv, ivsize);
- }
-
- while (size) {
- struct scatterlist *sg;
- unsigned long len = size;
- size_t plen;
-
- if (ctx->merge) {
- sgl = list_entry(ctx->tsgl.prev,
- struct skcipher_sg_list, list);
- sg = sgl->sg + sgl->cur - 1;
- len = min_t(unsigned long, len,
- PAGE_SIZE - sg->offset - sg->length);
-
- err = memcpy_from_msg(page_address(sg_page(sg)) +
- sg->offset + sg->length,
- msg, len);
- if (err)
- goto unlock;
-
- sg->length += len;
- ctx->merge = (sg->offset + sg->length) &
- (PAGE_SIZE - 1);
-
- ctx->used += len;
- copied += len;
- size -= len;
- continue;
- }
- if (!skcipher_writable(sk)) {
- err = skcipher_wait_for_wmem(sk, msg->msg_flags);
- if (err)
- goto unlock;
- }
-
- len = min_t(unsigned long, len, skcipher_sndbuf(sk));
-
- err = skcipher_alloc_sgl(sk);
- if (err)
- goto unlock;
-
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
- sg = sgl->sg;
- if (sgl->cur)
- sg_unmark_end(sg + sgl->cur - 1);
- do {
- i = sgl->cur;
- plen = min_t(size_t, len, PAGE_SIZE);
-
- sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
- err = -ENOMEM;
- if (!sg_page(sg + i))
- goto unlock;
-
- err = memcpy_from_msg(page_address(sg_page(sg + i)),
- msg, plen);
- if (err) {
- __free_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- goto unlock;
- }
-
- sg[i].length = plen;
- len -= plen;
- ctx->used += plen;
- copied += plen;
- size -= plen;
- sgl->cur++;
- } while (len && sgl->cur < MAX_SGL_ENTS);
-
- if (!size)
- sg_mark_end(sg + sgl->cur - 1);
-
- ctx->merge = plen & (PAGE_SIZE - 1);
- }
-
- err = 0;
-
- ctx->more = msg->msg_flags & MSG_MORE;
-
-unlock:
- skcipher_data_wakeup(sk);
- release_sock(sk);
-
- return copied ?: err;
+ return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
- int err = -EINVAL;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (!size)
- goto done;
-
- if (!skcipher_writable(sk)) {
- err = skcipher_wait_for_wmem(sk, flags);
- if (err)
- goto unlock;
- }
-
- err = skcipher_alloc_sgl(sk);
- if (err)
- goto unlock;
-
- ctx->merge = 0;
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
-
- if (sgl->cur)
- sg_unmark_end(sgl->sg + sgl->cur - 1);
-
- sg_mark_end(sgl->sg + sgl->cur);
- get_page(page);
- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
- sgl->cur++;
- ctx->used += size;
-
-done:
- ctx->more = flags & MSG_MORE;
-
-unlock:
- skcipher_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: size;
-}
-
-static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
-{
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int nents = 0;
-
- list_for_each_entry(sgl, &ctx->tsgl, list) {
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- nents += sg_nents(sg);
- }
- return nents;
-}
-
-static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
- int flags)
+static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
+ struct af_alg_ctx *ctx = ask->private;
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- struct skcipher_async_req *sreq;
- struct skcipher_request *req;
- struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents;
- unsigned int reqsize = crypto_skcipher_reqsize(tfm);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
- int err = -ENOMEM;
- bool mark = false;
- char *iv;
-
- sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
- if (unlikely(!sreq))
- goto out;
-
- req = &sreq->req;
- iv = (char *)(req + 1) + reqsize;
- sreq->iocb = msg->msg_iocb;
- INIT_LIST_HEAD(&sreq->list);
- sreq->inflight = &ctx->inflight;
+ unsigned int bs = crypto_skcipher_blocksize(tfm);
+ struct af_alg_async_req *areq;
+ int err = 0;
+ size_t len = 0;
- lock_sock(sk);
- tx_nents = skcipher_all_sg_nents(ctx);
- sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg))
- goto unlock;
- sg_init_table(sreq->tsg, tx_nents);
- memcpy(iv, ctx->iv, ivsize);
- skcipher_request_set_tfm(req, tfm);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- skcipher_async_cb, sreq);
-
- while (iov_iter_count(&msg->msg_iter)) {
- struct skcipher_async_rsgl *rsgl;
- int used;
-
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto free;
- }
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- used = min_t(unsigned long, ctx->used,
- iov_iter_count(&msg->msg_iter));
- used = min_t(unsigned long, used, sg->length);
-
- if (txbufs == tx_nents) {
- struct scatterlist *tmp;
- int x;
- /* Ran out of tx slots in async request
- * need to expand */
- tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
- GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto free;
- }
-
- sg_init_table(tmp, tx_nents * 2);
- for (x = 0; x < tx_nents; x++)
- sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
- sreq->tsg[x].length,
- sreq->tsg[x].offset);
- kfree(sreq->tsg);
- sreq->tsg = tmp;
- tx_nents *= 2;
- mark = true;
- }
- /* Need to take over the tx sgl from ctx
- * to the asynch req - these sgls will be freed later */
- sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
- sg->offset);
-
- if (list_empty(&sreq->list)) {
- rsgl = &sreq->first_sgl;
- list_add_tail(&rsgl->list, &sreq->list);
- } else {
- rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
- if (!rsgl) {
- err = -ENOMEM;
- goto free;
- }
- list_add_tail(&rsgl->list, &sreq->list);
- }
+ /* Allocate cipher request for current operation. */
+ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+ crypto_skcipher_reqsize(tfm));
+ if (IS_ERR(areq))
+ return PTR_ERR(areq);
- used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
- err = used;
- if (used < 0)
- goto free;
- if (last_rsgl)
- af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
- last_rsgl = rsgl;
- len += used;
- skcipher_pull_sgl(sk, used, 0);
- iov_iter_advance(&msg->msg_iter, used);
+ /* convert iovecs of output buffers into RX SGL */
+ err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
+ if (err)
+ goto free;
+
+ /* Process only as many RX buffers as we have TX data for */
+ if (len > ctx->used)
+ len = ctx->used;
+
+ /*
+ * If more buffers are to be expected to be processed, process only
+ * full block size buffers.
+ */
+ if (ctx->more || len < ctx->used)
+ len -= len % bs;
+
+ /*
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
+ areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
+ goto free;
+ }
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+
+ /* Initialize the crypto operation */
+ skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
+ skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
+ areq->first_rsgl.sgl.sg, len, ctx->iv);
+
+ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+ /* AIO operation */
+ areq->iocb = msg->msg_iocb;
+ skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ af_alg_async_cb, areq);
+ err = ctx->enc ?
+ crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+ crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+ } else {
+ /* Synchronous operation */
+ skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
+ crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+ crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
+ &ctx->wait);
}
- if (mark)
- sg_mark_end(sreq->tsg + txbufs - 1);
-
- skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, iv);
- err = ctx->enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ /* AIO operation in progress */
if (err == -EINPROGRESS) {
- atomic_inc(&ctx->inflight);
- err = -EIOCBQUEUED;
- sreq = NULL;
- goto unlock;
+ sock_hold(sk);
+
+ /* Remember output size that will be generated. */
+ areq->outlen = len;
+
+ return -EIOCBQUEUED;
}
+
free:
- skcipher_free_async_sgls(sreq);
-unlock:
- skcipher_wmem_wakeup(sk);
- release_sock(sk);
- kzfree(sreq);
-out:
- return err;
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
+
+ return err ? err : len;
}
-static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
- int flags)
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct sock *psk = ask->parent;
- struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
- unsigned bs = crypto_skcipher_blocksize(tfm);
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int err = -EAGAIN;
- int used;
- long copied = 0;
+ int ret = 0;
lock_sock(sk);
while (msg_data_left(msg)) {
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto unlock;
+ int err = _skcipher_recvmsg(sock, msg, ignored, flags);
+
+ /*
+ * This error covers -EIOCBQUEUED which implies that we can
+ * only handle one AIO request. If the caller wants to have
+ * multiple AIO requests in parallel, he must make multiple
+ * separate AIO calls.
+ *
+ * Also return the error if no data has been processed so far.
+ */
+ if (err <= 0) {
+ if (err == -EIOCBQUEUED || !ret)
+ ret = err;
+ goto out;
}
- used = min_t(unsigned long, ctx->used, msg_data_left(msg));
-
- used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
- err = used;
- if (err < 0)
- goto unlock;
-
- if (ctx->more || used < ctx->used)
- used -= used % bs;
-
- err = -EINVAL;
- if (!used)
- goto free;
-
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
- ctx->iv);
-
- err = af_alg_wait_for_completion(
- ctx->enc ?
- crypto_skcipher_encrypt(&ctx->req) :
- crypto_skcipher_decrypt(&ctx->req),
- &ctx->completion);
-
-free:
- af_alg_free_sg(&ctx->rsgl);
-
- if (err)
- goto unlock;
-
- copied += used;
- skcipher_pull_sgl(sk, used, 1);
- iov_iter_advance(&msg->msg_iter, used);
+ ret += err;
}
- err = 0;
-
-unlock:
- skcipher_wmem_wakeup(sk);
+out:
+ af_alg_wmem_wakeup(sk);
release_sock(sk);
-
- return copied ?: err;
-}
-
-static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t ignored, int flags)
-{
- return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
- skcipher_recvmsg_async(sock, msg, flags) :
- skcipher_recvmsg_sync(sock, msg, flags);
+ return ret;
}
-static unsigned int skcipher_poll(struct file *file, struct socket *sock,
- poll_table *wait)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- unsigned int mask;
-
- sock_poll_wait(file, sk_sleep(sk), wait);
- mask = 0;
-
- if (ctx->used)
- mask |= POLLIN | POLLRDNORM;
-
- if (skcipher_writable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
-
- return mask;
-}
static struct proto_ops algif_skcipher_ops = {
.family = PF_ALG,
@@ -755,9 +204,9 @@ static struct proto_ops algif_skcipher_ops = {
.release = af_alg_release,
.sendmsg = skcipher_sendmsg,
- .sendpage = skcipher_sendpage,
+ .sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
- .poll = skcipher_poll,
+ .poll = af_alg_poll,
};
static int skcipher_check_key(struct socket *sock)
@@ -819,7 +268,7 @@ static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
if (err)
return err;
- return skcipher_sendpage(sock, page, offset, size, flags);
+ return af_alg_sendpage(sock, page, offset, size, flags);
}
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
@@ -853,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
.sendmsg = skcipher_sendmsg_nokey,
.sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
- .poll = skcipher_poll,
+ .poll = af_alg_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
@@ -895,26 +344,16 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
return err;
}
-static void skcipher_wait(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- int ctr = 0;
-
- while (atomic_read(&ctx->inflight) && ctr++ < 100)
- msleep(100);
-}
-
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
-
- if (atomic_read(&ctx->inflight))
- skcipher_wait(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
- skcipher_free_sgl(sk);
+ af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
@@ -922,11 +361,11 @@ static void skcipher_sock_destruct(struct sock *sk)
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
- struct skcipher_ctx *ctx;
+ struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_tfm *tfm = private;
struct crypto_skcipher *skcipher = tfm->skcipher;
- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
+ unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -941,22 +380,17 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
- INIT_LIST_HEAD(&ctx->tsgl);
+ INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
+ ctx->rcvused = 0;
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- atomic_set(&ctx->inflight, 0);
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
- skcipher_request_set_tfm(&ctx->req, skcipher);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
-
sk->sk_destruct = skcipher_sock_destruct;
return 0;
diff --git a/crypto/api.c b/crypto/api.c
index 941cd4c6..2a2479d1 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -24,6 +24,7 @@
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/completion.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+ struct crypto_wait *wait = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ wait->err = err;
+ complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
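The new completion helper pairs with crypto_init_wait() and crypto_wait_req(), which this series adds alongside it in <linux/crypto.h>. A minimal sketch of the synchronous-wait pattern that replaces the open-coded completions throughout this patch; the "cbc(aes)" transform, 16-byte key and single buffer are placeholders chosen only for the example.

/* Sketch: issue a request and wait for it with crypto_req_done(). */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int example_encrypt(u8 *buf, unsigned int len, u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	crypto_init_wait(&wait);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the
	 * final status delivered through crypto_req_done(). */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

The same three calls (init the wait, register crypto_req_done as callback, wrap the submit in crypto_wait_req) are what the conversions in algif_hash.c, gcm.c, drbg.c and public_key.c in this patch boil down to.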
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 331f6baf..f3702e53 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig ASYMMETRIC_KEY_TYPE
bool "Asymmetric (public-key cryptographic) key type"
depends on KEYS
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index 6516855b..4719aad5 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for asymmetric cryptographic keys
#
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index e4b0ed38..39aecad2 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
+ BUG_ON(!id_0 && !id_1);
+
if (id_0) {
lookup = id_0->data;
len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
if (id_0 && id_1) {
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
- if (!kids->id[0]) {
+ if (!kids->id[1]) {
pr_debug("First ID matches, but second is missing\n");
goto reject;
}
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index 1063b644..e284d9cb 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -19,6 +19,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PKCS#7 testing key type");
+MODULE_AUTHOR("Red Hat, Inc.");
static unsigned pkcs7_usage;
module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO);
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd864..c1ca1e86 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -19,6 +20,10 @@
#include "pkcs7_parser.h"
#include "pkcs7-asn1.h"
+MODULE_DESCRIPTION("PKCS#7 parser");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
struct pkcs7_parse_context {
struct pkcs7_message *msg; /* Message being constructed */
struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */
@@ -88,6 +93,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
bool want = false;
sinfo = msg->signed_infos;
+ if (!sinfo)
+ goto inconsistent;
+
if (sinfo->authattrs) {
want = true;
msg->have_authattrs = true;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 3cd6e12c..bc3035ef 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -22,6 +22,8 @@
#include <crypto/public_key.h>
#include <crypto/akcipher.h>
+MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
+MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
/*
@@ -57,29 +59,13 @@ static void public_key_destroy(void *payload0, void *payload3)
public_key_signature_free(payload3);
}
-struct public_key_completion {
- struct completion completion;
- int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
- struct public_key_completion *compl = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- compl->err = err;
- complete(&compl->completion);
-}
-
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- struct public_key_completion compl;
+ struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
struct scatterlist sig_sg, digest_sg;
@@ -131,20 +117,16 @@ int public_key_verify_signature(const struct public_key *pkey,
sg_init_one(&digest_sg, output, outlen);
akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
outlen);
- init_completion(&compl.completion);
+ crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- public_key_verify_done, &compl);
+ crypto_req_done, &cwait);
/* Perform the verification calculation. This doesn't actually do the
* verification, but rather calculates the hash expected by the
* signature and returns that to us.
*/
- ret = crypto_akcipher_verify(req);
- if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
- wait_for_completion(&compl.completion);
- ret = compl.err;
- }
+ ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
if (ret < 0)
goto out_free_output;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index eea71dc9..c9013582 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -275,4 +275,5 @@ module_init(x509_key_init);
module_exit(x509_key_exit);
MODULE_DESCRIPTION("X.509 certificate parser");
+MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index f38a58ae..89bafa2e 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config ASYNC_CORE
tristate
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile
index 462e4abb..056e4824 100644
--- a/crypto/async_tx/Makefile
+++ b/crypto/async_tx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ASYNC_CORE) += async_tx.o
obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
obj-$(CONFIG_ASYNC_XOR) += async_xor.o
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 6f8f6b86..0cf5fefd 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
+ if (!authsize)
+ goto decrypt;
+
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
+decrypt:
+
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1ce37ae0..0a083342 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
- u8 *iv = req->iv;
+ u8 *iv = pctx->idata;
int err;
cryptlen -= authsize;
@@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
if (req->src != req->dst)
dst = pctx->dst;
+ memcpy(iv, req->iv, 16);
+
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req);
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 8b3c04d6..4a45fa48 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req)
crypto_chacha20_init(state, ctx, walk.iv);
while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
+ nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
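The loop above follows the generic skcipher_walk contract: when a step yields less than the total, only a stride-aligned amount may be consumed and the remainder is handed back to the walk. A hedged sketch of that pattern in a hypothetical cipher implementation (example_docrypt() is an invented per-chunk helper, not a real API):

/* Sketch of the stride-aligned walk pattern used by the chacha20 fix. */
#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>

static void example_docrypt(u8 *dst, const u8 *src, unsigned int bytes);

static int example_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		/* Only the final chunk may be shorter than the walk stride */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		example_docrypt(walk.dst.virt.addr, walk.src.virt.addr,
				nbytes);

		/* Hand any unprocessed remainder back to the walk */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}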
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0508c48a..bd43cf5b 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;
- bool may_backlog;
cpu = get_cpu();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
- may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
- if (err == -EBUSY && !may_backlog)
+ if (err == -ENOSPC)
goto out_put_cpu;
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 477d9226..854d924f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -65,8 +65,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
unsigned int nbytes = walk->nbytes;
crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, bsize);
}
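The crypto_xor_cpy() helper used here (and in the pcbc.c hunks further below) writes the XOR of two sources straight into the destination, removing the memcpy()+crypto_xor() pair. A minimal sketch of the equivalence, assuming only the declarations from <crypto/algapi.h>; the buffer names are arbitrary:

/* Sketch: both helpers leave the same bytes in dst. */
#include <crypto/algapi.h>
#include <linux/string.h>

static void old_way(u8 *dst, u8 *keystream, const u8 *src, unsigned int n)
{
	crypto_xor(keystream, src, n);	/* keystream ^= src (clobbers it) */
	memcpy(dst, keystream, n);
}

static void new_way(u8 *dst, const u8 *keystream, const u8 *src,
		    unsigned int n)
{
	crypto_xor_cpy(dst, keystream, src, n);	/* dst = keystream ^ src */
}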
diff --git a/crypto/cts.c b/crypto/cts.c
index 243f591d..4773c188 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
goto out;
err = cts_cbc_encrypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
goto out;
err = cts_cbc_decrypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
diff --git a/crypto/dh.c b/crypto/dh.c
index b1032a5c..5659fe7f 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -21,19 +21,12 @@ struct dh_ctx {
MPI xa;
};
-static inline void dh_clear_params(struct dh_ctx *ctx)
+static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
mpi_free(ctx->g);
- ctx->p = NULL;
- ctx->g = NULL;
-}
-
-static void dh_free_ctx(struct dh_ctx *ctx)
-{
- dh_clear_params(ctx);
mpi_free(ctx->xa);
- ctx->xa = NULL;
+ memset(ctx, 0, sizeof(*ctx));
}
/*
@@ -60,9 +53,6 @@ static int dh_check_params_length(unsigned int p_len)
static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
{
- if (unlikely(!params->p || !params->g))
- return -EINVAL;
-
if (dh_check_params_length(params->p_size << 3))
return -EINVAL;
@@ -71,10 +61,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
return -EINVAL;
ctx->g = mpi_read_raw_data(params->g, params->g_size);
- if (!ctx->g) {
- mpi_free(ctx->p);
+ if (!ctx->g)
return -EINVAL;
- }
return 0;
}
@@ -86,21 +74,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct dh params;
/* Free the old MPI key if any */
- dh_free_ctx(ctx);
+ dh_clear_ctx(ctx);
if (crypto_dh_decode_key(buf, len, &params) < 0)
- return -EINVAL;
+ goto err_clear_ctx;
if (dh_set_params(ctx, &params) < 0)
- return -EINVAL;
+ goto err_clear_ctx;
ctx->xa = mpi_read_raw_data(params.key, params.key_size);
- if (!ctx->xa) {
- dh_clear_params(ctx);
- return -EINVAL;
- }
+ if (!ctx->xa)
+ goto err_clear_ctx;
return 0;
+
+err_clear_ctx:
+ dh_clear_ctx(ctx);
+ return -EINVAL;
}
static int dh_compute_value(struct kpp_request *req)
@@ -158,7 +148,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
- dh_free_ctx(ctx);
+ dh_clear_ctx(ctx);
}
static struct kpp_alg dh = {
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 8ba8a3f8..24fdb2ec 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -28,12 +28,12 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
return src + size;
}
-static inline int dh_data_size(const struct dh *p)
+static inline unsigned int dh_data_size(const struct dh *p)
{
return p->key_size + p->p_size + p->g_size;
}
-int crypto_dh_key_len(const struct dh *p)
+unsigned int crypto_dh_key_len(const struct dh *p)
{
return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
}
@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
+ /*
+ * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
+ * some drivers assume otherwise.
+ */
+ if (params->key_size > params->p_size ||
+ params->g_size > params->p_size)
+ return -EINVAL;
+
/* Don't allocate memory. Set pointers to data within
* the given buffer
*/
@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
params->p = (void *)(ptr + params->key_size);
params->g = (void *)(ptr + params->key_size + params->p_size);
+ /*
+ * Don't permit 'p' to be 0. It's not a prime number, and it's subject
+ * to corner cases such as 'mod 0' being undefined or
+ * crypto_kpp_maxsize() returning 0.
+ */
+ if (memchr_inv(params->p, 0, params->p_size) == NULL)
+ return -EINVAL;
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
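The two new checks reject obviously malformed parameter sets before any MPI work is done. A standalone sketch of the same validation; dh_params_sane() is a hypothetical helper written only to restate the rules, not part of the patch:

/* Hypothetical helper mirroring the sanity checks added above. */
#include <linux/string.h>
#include <crypto/dh.h>

static int dh_params_sane(const struct dh *params)
{
	/* 'key' and 'g' must not be longer than the modulus 'p' */
	if (params->key_size > params->p_size ||
	    params->g_size > params->p_size)
		return -EINVAL;

	/* an all-zero 'p' would make the later 'mod p' operations undefined */
	if (memchr_inv(params->p, 0, params->p_size) == NULL)
		return -EINVAL;

	return 0;
}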
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 633a88e9..4faa2781 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
- kzfree(drbg->V);
- drbg->Vbuf = NULL;
- kzfree(drbg->C);
- drbg->Cbuf = NULL;
+ kzfree(drbg->Vbuf);
+ drbg->V = NULL;
+ kzfree(drbg->Cbuf);
+ drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
return 0;
}
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
- struct drbg_state *drbg = req->data;
-
- if (error == -EINPROGRESS)
- return;
- drbg->ctr_async_err = error;
- complete(&drbg->ctr_completion);
-}
-
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
- init_completion(&drbg->ctr_completion);
+ crypto_init_wait(&drbg->ctr_wait);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return -ENOMEM;
}
drbg->ctr_req = req;
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- drbg_skcipher_cb, drbg);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
- ret = crypto_skcipher_encrypt(drbg->ctr_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&drbg->ctr_completion);
- if (!drbg->ctr_async_err) {
- reinit_completion(&drbg->ctr_completion);
- break;
- }
- default:
+ ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+ &drbg->ctr_wait);
+ if (ret)
goto out;
- }
- init_completion(&drbg->ctr_completion);
+
+ crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 03ae5f71..b80f45da 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CRYTO_ECC_CURVE_DEFS_H
#define _CRYTO_ECC_CURVE_DEFS_H
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 61c77089..3aca0933 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -20,8 +20,6 @@ struct ecdh_ctx {
unsigned int curve_id;
unsigned int ndigits;
u64 private_key[ECC_MAX_DIGITS];
- u64 public_key[2 * ECC_MAX_DIGITS];
- u64 shared_secret[ECC_MAX_DIGITS];
};
static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
@@ -70,41 +68,58 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
static int ecdh_compute_value(struct kpp_request *req)
{
- int ret = 0;
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
- size_t copied, nbytes;
+ u64 *public_key;
+ u64 *shared_secret = NULL;
void *buf;
+ size_t copied, nbytes, public_key_sz;
+ int ret = -ENOMEM;
nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ /* Public part is a point thus it has both coordinates */
+ public_key_sz = 2 * nbytes;
+
+ public_key = kmalloc(public_key_sz, GFP_KERNEL);
+ if (!public_key)
+ return -ENOMEM;
if (req->src) {
- copied = sg_copy_to_buffer(req->src, 1, ctx->public_key,
- 2 * nbytes);
- if (copied != 2 * nbytes)
- return -EINVAL;
+ shared_secret = kmalloc(nbytes, GFP_KERNEL);
+ if (!shared_secret)
+ goto free_pubkey;
+
+ copied = sg_copy_to_buffer(req->src, 1, public_key,
+ public_key_sz);
+ if (copied != public_key_sz) {
+ ret = -EINVAL;
+ goto free_all;
+ }
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
- ctx->private_key,
- ctx->public_key,
- ctx->shared_secret);
+ ctx->private_key, public_key,
+ shared_secret);
- buf = ctx->shared_secret;
+ buf = shared_secret;
} else {
ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits,
- ctx->private_key, ctx->public_key);
- buf = ctx->public_key;
- /* Public part is a point thus it has both coordinates */
- nbytes *= 2;
+ ctx->private_key, public_key);
+ buf = public_key;
+ nbytes = public_key_sz;
}
if (ret < 0)
- return ret;
+ goto free_all;
copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
if (copied != nbytes)
- return -EINVAL;
+ ret = -EINVAL;
+ /* fall through */
+free_all:
+ kzfree(shared_secret);
+free_pubkey:
+ kfree(public_key);
return ret;
}
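For reference, the buffers that are now heap-allocated and freed above belong to a single kpp_request round trip. A hedged sketch of how a kernel caller might drive the public-key path (curve choice, buffer sizes and the synchronous wait are illustrative assumptions; the caller is assumed to supply a valid P-256 private key):

/* Sketch only: generate an ECDH public key through the kpp API. */
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_ecdh_pubkey(char *privkey, unsigned int privkey_len,
			       u8 *out, unsigned int outlen)
{
	struct ecdh p = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key = privkey,
		.key_size = privkey_len,
	};
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist dst;
	struct crypto_wait wait;
	unsigned int secret_len;
	u8 *secret = NULL;
	int err;

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	secret_len = crypto_ecdh_key_len(&p);
	secret = kmalloc(secret_len, GFP_KERNEL);
	if (!secret) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_ecdh_encode_key(secret, secret_len, &p);
	if (err)
		goto out;

	err = crypto_kpp_set_secret(tfm, secret, secret_len);
	if (err)
		goto out;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	crypto_init_wait(&wait);
	sg_init_one(&dst, out, outlen);	/* outlen >= crypto_kpp_maxsize(tfm) */
	kpp_request_set_input(req, NULL, 0);	/* no peer key: make pub key */
	kpp_request_set_output(req, &dst, outlen);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				 crypto_req_done, &wait);

	err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
out:
	kpp_request_free(req);
	kzfree(secret);
	crypto_free_kpp(tfm);
	return err;
}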
@@ -116,17 +131,11 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
}
-static void no_exit_tfm(struct crypto_kpp *tfm)
-{
- return;
-}
-
static struct kpp_alg ecdh = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
- .exit = no_exit_tfm,
.base = {
.cra_name = "ecdh",
.cra_driver_name = "ecdh-generic",
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index f05bea5f..d3af8e8b 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -28,7 +28,7 @@ static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
return src + sz;
}
-int crypto_ecdh_key_len(const struct ecdh *params)
+unsigned int crypto_ecdh_key_len(const struct ecdh *params)
{
return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 3841b5ea..8589681f 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -14,9 +14,9 @@
#include <crypto/internal/hash.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
#include <crypto/hash.h>
#include "internal.h"
-#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -78,11 +78,6 @@ struct crypto_gcm_req_priv_ctx {
} u;
};
-struct crypto_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
static struct {
u8 buf[16];
struct scatterlist sg;
@@ -98,17 +93,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct crypto_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
@@ -119,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
be128 hash;
u8 iv[16];
- struct crypto_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -140,21 +124,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_gcm_setkey_done,
- &data->result);
+ crypto_req_done,
+ &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
sizeof(data->hash), data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- wait_for_completion(&data->result.completion);
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
@@ -197,8 +178,8 @@ static void crypto_gcm_init_common(struct aead_request *req)
struct scatterlist *sg;
memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
- memcpy(pctx->iv, req->iv, 12);
- memcpy(pctx->iv + 12, &counter, 4);
+ memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
+ memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);
sg_init_table(pctx->src, 3);
sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
@@ -695,7 +676,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
- inst->alg.ivsize = 12;
+ inst->alg.ivsize = GCM_AES_IV_SIZE;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
inst->alg.maxauthsize = 16;
inst->alg.init = crypto_gcm_init_tfm;
@@ -832,20 +813,20 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
crypto_aead_alignmask(child) + 1);
- scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+ scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
memcpy(iv, ctx->nonce, 4);
memcpy(iv + 4, req->iv, 8);
sg_init_table(rctx->src, 3);
- sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+ sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
if (sg != rctx->src + 1)
sg_chain(rctx->src, 2, sg);
if (req->src != req->dst) {
sg_init_table(rctx->dst, 3);
- sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+ sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
if (sg != rctx->dst + 1)
sg_chain(rctx->dst, 2, sg);
@@ -957,7 +938,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
err = -EINVAL;
/* Underlying IV size must be 12. */
- if (crypto_aead_alg_ivsize(alg) != 12)
+ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
goto out_drop_alg;
/* Not a stream cipher? */
@@ -980,7 +961,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
- inst->alg.ivsize = 8;
+ inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -1134,7 +1115,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
tfm,
sizeof(struct crypto_rfc4543_req_ctx) +
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
- align + 12);
+ align + GCM_AES_IV_SIZE);
return 0;
@@ -1199,7 +1180,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
err = -EINVAL;
/* Underlying IV size must be 12. */
- if (crypto_aead_alg_ivsize(alg) != 12)
+ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
goto out_drop_alg;
/* Not a stream cipher? */
@@ -1222,7 +1203,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
- inst->alg.ivsize = 8;
+ inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index dc012129..24e60195 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -156,6 +156,19 @@ static void gf128mul_x8_bbe(be128 *x)
x->b = cpu_to_be64((b << 8) ^ _tt);
}
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ r->a = cpu_to_le64((a << 8) | (b >> 56));
+ r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
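gf128mul_x8_ble() advances a little-endian GF(2^128) element by x^8 in one step. A small illustrative sketch of using it to move a tweak forward by whole bytes, i.e. eight doublings at a time; the helper below is an example, not an existing API:

/* Illustrative only: t = t * x^(8*n) via the new helper. */
#include <crypto/gf128mul.h>

static void example_tweak_skip_bytes(le128 *t, unsigned int n)
{
	while (n--)
		gf128mul_x8_ble(t, t);	/* in-place: t = t * x^8 */
}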
+
void gf128mul_lle(be128 *r, const be128 *b)
{
be128 p[8];
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 72014f96..744e3513 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -93,18 +93,10 @@ struct crypto_kw_ctx {
struct crypto_kw_block {
#define SEMIBSIZE 8
- u8 A[SEMIBSIZE];
- u8 R[SEMIBSIZE];
+ __be64 A;
+ __be64 R;
};
-/* convert 64 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
-{
- __be64 *a = (__be64 *)buf;
-
- *a = cpu_to_be64(val);
-}
-
/*
* Fast forward the SGL to the "end" length minus SEMIBSIZE.
* The start in the SGL defined by the fast-forward is returned with
@@ -139,17 +131,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
-
- unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
- crypto_cipher_alignmask(child));
- unsigned int i;
-
- u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
- struct crypto_kw_block *block = (struct crypto_kw_block *)
- PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
- u64 t = 6 * ((nbytes) >> 3);
+ struct crypto_kw_block block;
struct scatterlist *lsrc, *ldst;
+ u64 t = 6 * ((nbytes) >> 3);
+ unsigned int i;
int ret = 0;
/*
@@ -160,7 +145,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
return -EINVAL;
/* Place the IV into block A */
- memcpy(block->A, desc->info, SEMIBSIZE);
+ memcpy(&block.A, desc->info, SEMIBSIZE);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -171,32 +156,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
ldst = dst;
for (i = 0; i < 6; i++) {
- u8 tbe_buffer[SEMIBSIZE + alignmask];
- /* alignment for the crypto_xor and the _to_be64 operation */
- u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
- unsigned int tmp_nbytes = nbytes;
struct scatter_walk src_walk, dst_walk;
+ unsigned int tmp_nbytes = nbytes;
while (tmp_nbytes) {
/* move pointer by tmp_nbytes in the SGL */
crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
/* get the source block */
- scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
- /* perform KW operation: get counter as byte string */
- crypto_kw_cpu_to_be64(t, tbe);
/* perform KW operation: modify IV with counter */
- crypto_xor(block->A, tbe, SEMIBSIZE);
+ block.A ^= cpu_to_be64(t);
t--;
/* perform KW operation: decrypt block */
- crypto_cipher_decrypt_one(child, (u8*)block,
- (u8*)block);
+ crypto_cipher_decrypt_one(child, (u8*)&block,
+ (u8*)&block);
/* move pointer by tmp_nbytes in the SGL */
crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
/* Copy block->R into place */
- scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
tmp_nbytes -= SEMIBSIZE;
@@ -208,11 +188,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
}
/* Perform authentication check */
- if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
- SEMIBSIZE))
+ if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
ret = -EBADMSG;
- memzero_explicit(block, sizeof(struct crypto_kw_block));
+ memzero_explicit(&block, sizeof(struct crypto_kw_block));
return ret;
}
@@ -224,17 +203,10 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
-
- unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
- crypto_cipher_alignmask(child));
- unsigned int i;
-
- u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
- struct crypto_kw_block *block = (struct crypto_kw_block *)
- PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
- u64 t = 1;
+ struct crypto_kw_block block;
struct scatterlist *lsrc, *ldst;
+ u64 t = 1;
+ unsigned int i;
/*
* Require at least 2 semiblocks (note, the 3rd semiblock that is
@@ -249,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* Place the predefined IV into block A -- for encrypt, the caller
* does not need to provide an IV, but he needs to fetch the final IV.
*/
- memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);
+ block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -260,30 +232,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
ldst = dst;
for (i = 0; i < 6; i++) {
- u8 tbe_buffer[SEMIBSIZE + alignmask];
- u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
- unsigned int tmp_nbytes = nbytes;
struct scatter_walk src_walk, dst_walk;
+ unsigned int tmp_nbytes = nbytes;
scatterwalk_start(&src_walk, lsrc);
scatterwalk_start(&dst_walk, ldst);
while (tmp_nbytes) {
/* get the source block */
- scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: encrypt block */
- crypto_cipher_encrypt_one(child, (u8 *)block,
- (u8 *)block);
- /* perform KW operation: get counter as byte string */
- crypto_kw_cpu_to_be64(t, tbe);
+ crypto_cipher_encrypt_one(child, (u8 *)&block,
+ (u8 *)&block);
/* perform KW operation: modify IV with counter */
- crypto_xor(block->A, tbe, SEMIBSIZE);
+ block.A ^= cpu_to_be64(t);
t++;
/* Copy block->R into place */
- scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
tmp_nbytes -= SEMIBSIZE;
@@ -295,9 +263,9 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
}
/* establish the IV for the caller to pick up */
- memcpy(desc->info, block->A, SEMIBSIZE);
+ memcpy(desc->info, &block.A, SEMIBSIZE);
- memzero_explicit(block, sizeof(struct crypto_kw_block));
+ memzero_explicit(&block, sizeof(struct crypto_kw_block));
return 0;
}
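Because block.A is now stored as a __be64, XORing it with cpu_to_be64(t) is byte-for-byte identical to the removed crypto_kw_cpu_to_be64()+crypto_xor() sequence. A tiny sketch of that equivalence, purely for illustration:

/* Sketch: both helpers leave the same eight bytes in memory. */
#include <asm/byteorder.h>
#include <crypto/algapi.h>
#include <linux/string.h>

static void old_xor_counter(u8 A[8], u64 t)
{
	__be64 tbe = cpu_to_be64(t);

	crypto_xor(A, (u8 *)&tbe, 8);	/* A ^= big-endian(t), bytewise */
}

static void new_xor_counter(__be64 *A, u64 t)
{
	*A ^= cpu_to_be64(t);		/* same result, no temp buffer */
}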
diff --git a/crypto/lrw.c b/crypto/lrw.c
index a8bfae44..cbbd7c50 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
crypto_skcipher_encrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
crypto_skcipher_decrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -610,9 +606,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ecb_name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
- }
+ "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_drop_spawn;
+ }
+ } else
+ goto err_drop_spawn;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 29dd2b4a..d9e45a95 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -55,8 +55,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
do {
crypto_xor(iv, src, bsize);
crypto_cipher_encrypt_one(tfm, dst, iv);
- memcpy(iv, dst, bsize);
- crypto_xor(iv, src, bsize);
+ crypto_xor_cpy(iv, dst, src, bsize);
src += bsize;
dst += bsize;
@@ -79,8 +78,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
memcpy(tmpbuf, src, bsize);
crypto_xor(iv, src, bsize);
crypto_cipher_encrypt_one(tfm, src, iv);
- memcpy(iv, tmpbuf, bsize);
- crypto_xor(iv, src, bsize);
+ crypto_xor_cpy(iv, tmpbuf, src, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
@@ -127,8 +125,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
do {
crypto_cipher_decrypt_one(tfm, dst, src);
crypto_xor(dst, iv, bsize);
- memcpy(iv, src, bsize);
- crypto_xor(iv, dst, bsize);
+ crypto_xor_cpy(iv, dst, src, bsize);
src += bsize;
dst += bsize;
@@ -153,8 +150,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
memcpy(tmpbuf, src, bsize);
crypto_cipher_decrypt_one(tfm, src, src);
crypto_xor(src, iv, bsize);
- memcpy(iv, tmpbuf, bsize);
- crypto_xor(iv, src, bsize);
+ crypto_xor_cpy(iv, src, tmpbuf, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
diff --git a/crypto/ripemd.h b/crypto/ripemd.h
index c57a2d4c..93edbf52 100644
--- a/crypto/ripemd.h
+++ b/crypto/ripemd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common values for RIPEMD algorithms
*/
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 049486ed..40e053b9 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -213,8 +213,6 @@ static void rmd128_transform(u32 *state, const __le32 *in)
state[2] = state[3] + aa + bbb;
state[3] = state[0] + bb + ccc;
state[0] = ddd;
-
- return;
}
static int rmd128_init(struct shash_desc *desc)
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index de585e51..5f3e6ea3 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -256,8 +256,6 @@ static void rmd160_transform(u32 *state, const __le32 *in)
state[3] = state[4] + aa + bbb;
state[4] = state[0] + bb + ccc;
state[0] = ddd;
-
- return;
}
static int rmd160_init(struct shash_desc *desc)
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 4ec02a75..f50c025c 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -228,8 +228,6 @@ static void rmd256_transform(u32 *state, const __le32 *in)
state[5] += bbb;
state[6] += ccc;
state[7] += ddd;
-
- return;
}
static int rmd256_init(struct shash_desc *desc)
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 770f2cb3..e1315e48 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -275,8 +275,6 @@ static void rmd320_transform(u32 *state, const __le32 *in)
state[7] += ccc;
state[8] += ddd;
state[9] += eee;
-
- return;
}
static int rmd320_init(struct shash_desc *desc)
diff --git a/crypto/rng.c b/crypto/rng.c
index 5e846924..b4a61866 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -43,12 +43,14 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
if (!buf)
return -ENOMEM;
- get_random_bytes(buf, slen);
+ err = get_random_bytes_wait(buf, slen);
+ if (err)
+ goto out;
seed = buf;
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-
+out:
kzfree(buf);
return err;
}
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 407c64bd..2908f93c 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
ctx->key_size);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_decrypt_complete(req, err);
return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_sign(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
ctx->key_size);
err = crypto_akcipher_verify(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
return err;
diff --git a/crypto/scompress.c b/crypto/scompress.c
index ae1d3cf2..2075e2c4 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
seq_puts(m, "type : scomp\n");
}
-static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
-{
- return 0;
-}
-
static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
int i;
@@ -125,12 +120,26 @@ static int crypto_scomp_alloc_all_scratches(void)
if (!scomp_src_scratches)
return -ENOMEM;
scomp_dst_scratches = crypto_scomp_alloc_scratches();
- if (!scomp_dst_scratches)
+ if (!scomp_dst_scratches) {
+ crypto_scomp_free_scratches(scomp_src_scratches);
+ scomp_src_scratches = NULL;
return -ENOMEM;
+ }
}
return 0;
}
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+ int ret;
+
+ mutex_lock(&scomp_lock);
+ ret = crypto_scomp_alloc_all_scratches();
+ mutex_unlock(&scomp_lock);
+
+ return ret;
+}
+
static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
int i, n;
@@ -211,9 +220,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
scratch_dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
- req->dst = crypto_scomp_sg_alloc(req->dlen,
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- GFP_KERNEL : GFP_ATOMIC);
+ req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
if (!req->dst)
goto out;
}
@@ -240,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
crypto_free_scomp(*ctx);
+
+ mutex_lock(&scomp_lock);
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
}
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
@@ -316,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg)
{
struct crypto_alg *base = &alg->base;
- int ret = -ENOMEM;
-
- mutex_lock(&scomp_lock);
- if (crypto_scomp_alloc_all_scratches())
- goto error;
base->cra_type = &crypto_scomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
- ret = crypto_register_alg(base);
- if (ret)
- goto error;
-
- mutex_unlock(&scomp_lock);
- return ret;
-
-error:
- crypto_scomp_free_all_scratches();
- mutex_unlock(&scomp_lock);
- return ret;
+ return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
int crypto_unregister_scomp(struct scomp_alg *alg)
{
- int ret;
-
- mutex_lock(&scomp_lock);
- ret = crypto_unregister_alg(&alg->base);
- crypto_scomp_free_all_scratches();
- mutex_unlock(&scomp_lock);
-
- return ret;
+ return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 94970a79..7c3382fa 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -229,6 +229,46 @@
x4 ^= x2; \
})
+static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
+{
+ k += 100;
+ S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
+ S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
+ S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
+ S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
+ S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
+ S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
+ S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
+ S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
+ S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
+ S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
+ S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
+ S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
+ S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
+ S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
+ k -= 50;
+ S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
+ S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
+ S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
+ S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
+ S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
+ S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
+ S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
+ S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
+ S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
+ S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
+ S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
+ k -= 50;
+ S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
+ S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
+ S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
+ S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
+ S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
+ S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
+ S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
+ S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
+}
+
int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -395,42 +435,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
keyiter(k[23], r1, r0, r3, 131, 31);
/* Apply S-boxes */
-
- S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
- S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
- S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
- S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
- S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
- S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
- S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
- S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
- S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
- S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
- S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
- S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
- S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
- S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
- k -= 50;
- S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
- S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
- S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
- S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
- S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
- S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
- S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
- S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
- S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
- S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
- S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
- k -= 50;
- S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
- S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
- S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
- S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
- S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
- S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
- S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
- S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
+ __serpent_setkey_sbox(r0, r1, r2, r3, r4, ctx->expkey);
return 0;
}
diff --git a/crypto/shash.c b/crypto/shash.c
index 5e31c8d7..325a14da 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
int err;
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- buffer = kmalloc(absize, GFP_KERNEL);
+ buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
- struct scatterlist *sg = req->src;
- unsigned int offset = sg->offset;
unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
int err;
- if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+ if (nbytes &&
+ (sg = req->src, offset = sg->offset,
+ nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4faa0fd5..d5692e35 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
static int skcipher_walk_first(struct skcipher_walk *walk)
{
- walk->nbytes = 0;
-
if (WARN_ON_ONCE(in_irq()))
return -EDEADLK;
- if (unlikely(!walk->total))
- return 0;
-
walk->buffer = NULL;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ walk->total = req->cryptlen;
+ walk->nbytes = 0;
+
+ if (unlikely(!walk->total))
+ return 0;
+
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- walk->total = req->cryptlen;
walk->iv = req->iv;
walk->oiv = req->iv;
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int err;
+ walk->nbytes = 0;
+
+ if (unlikely(!walk->total))
+ return 0;
+
walk->flags &= ~SKCIPHER_WALK_PHYS;
scatterwalk_start(&walk->in, req->src);
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
new file mode 100644
index 00000000..9e823d99
--- /dev/null
+++ b/crypto/sm3_generic.c
@@ -0,0 +1,210 @@
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
+ * described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
+ 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+ 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+ 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+ 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
+};
+EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
+
+static inline u32 p0(u32 x)
+{
+ return x ^ rol32(x, 9) ^ rol32(x, 17);
+}
+
+static inline u32 p1(u32 x)
+{
+ return x ^ rol32(x, 15) ^ rol32(x, 23);
+}
+
+static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)
+{
+ return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
+}
+
+static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
+{
+ return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
+}
+
+static inline u32 t(unsigned int n)
+{
+ return (n < 16) ? SM3_T1 : SM3_T2;
+}
+
+static void sm3_expand(u32 *t, u32 *w, u32 *wt)
+{
+ int i;
+ unsigned int tmp;
+
+ /* load the input */
+ for (i = 0; i <= 15; i++)
+ w[i] = get_unaligned_be32((__u32 *)t + i);
+
+ for (i = 16; i <= 67; i++) {
+ tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
+ w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
+ }
+
+ for (i = 0; i <= 63; i++)
+ wt[i] = w[i] ^ w[i + 4];
+}
+
+static void sm3_compress(u32 *w, u32 *wt, u32 *m)
+{
+ u32 ss1;
+ u32 ss2;
+ u32 tt1;
+ u32 tt2;
+ u32 a, b, c, d, e, f, g, h;
+ int i;
+
+ a = m[0];
+ b = m[1];
+ c = m[2];
+ d = m[3];
+ e = m[4];
+ f = m[5];
+ g = m[6];
+ h = m[7];
+
+ for (i = 0; i <= 63; i++) {
+
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+
+ ss2 = ss1 ^ rol32(a, 12);
+
+ tt1 = ff(i, a, b, c) + d + ss2 + *wt;
+ wt++;
+
+ tt2 = gg(i, e, f, g) + h + ss1 + *w;
+ w++;
+
+ d = c;
+ c = rol32(b, 9);
+ b = a;
+ a = tt1;
+ h = g;
+ g = rol32(f, 19);
+ f = e;
+ e = p0(tt2);
+ }
+
+ m[0] = a ^ m[0];
+ m[1] = b ^ m[1];
+ m[2] = c ^ m[2];
+ m[3] = d ^ m[3];
+ m[4] = e ^ m[4];
+ m[5] = f ^ m[5];
+ m[6] = g ^ m[6];
+ m[7] = h ^ m[7];
+
+ a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
+}
+
+static void sm3_transform(struct sm3_state *sst, u8 const *src)
+{
+ unsigned int w[68];
+ unsigned int wt[64];
+
+ sm3_expand((u32 *)src, w, wt);
+ sm3_compress(w, wt, sst->state);
+
+ memzero_explicit(w, sizeof(w));
+ memzero_explicit(wt, sizeof(wt));
+}
+
+static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src,
+ int blocks)
+{
+ while (blocks--) {
+ sm3_transform(sst, src);
+ src += SM3_BLOCK_SIZE;
+ }
+}
+
+int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+}
+EXPORT_SYMBOL(crypto_sm3_update);
+
+static int sm3_final(struct shash_desc *desc, u8 *out)
+{
+ sm3_base_do_finalize(desc, sm3_generic_block_fn);
+ return sm3_base_finish(desc, out);
+}
+
+int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
+{
+ sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+ return sm3_final(desc, hash);
+}
+EXPORT_SYMBOL(crypto_sm3_finup);
+
+static struct shash_alg sm3_alg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .init = sm3_base_init,
+ .update = crypto_sm3_update,
+ .final = sm3_final,
+ .finup = crypto_sm3_finup,
+ .descsize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sm3_generic_mod_init(void)
+{
+ return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_generic_mod_fini(void)
+{
+ crypto_unregister_shash(&sm3_alg);
+}
+
+module_init(sm3_generic_mod_init);
+module_exit(sm3_generic_mod_fini);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
+
+MODULE_ALIAS_CRYPTO("sm3");
+MODULE_ALIAS_CRYPTO("sm3-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 0dd6a432..9267cbdb 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -70,7 +70,7 @@ static int mode;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
- "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
+ "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
@@ -79,34 +79,11 @@ static char *check[] = {
NULL
};
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
- struct tcrypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
+ struct crypto_wait *wait = req->base.data;
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
- reinit_completion(&tr->completion);
- }
-
- return ret;
+ return crypto_wait_req(ret, wait);
}
static int test_aead_jiffies(struct aead_request *req, int enc,
@@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
char *axbuf[XBUFSIZE];
unsigned int *b_size;
unsigned int iv_len;
- struct tcrypt_result result;
+ struct crypto_wait wait;
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
if (!iv)
@@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out_notfm;
}
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
@@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
i = 0;
do {
@@ -340,7 +317,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
sg_init_aead(sg, xbuf,
- *b_size + (enc ? authsize : 0));
+ *b_size + (enc ? 0 : authsize));
sg_init_aead(sgout, xoutbuf,
*b_size + (enc ? authsize : 0));
@@ -348,7 +325,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
sg_set_buf(&sg[0], assoc, aad_size);
sg_set_buf(&sgout[0], assoc, aad_size);
- aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+ aead_request_set_crypt(req, sg, sgout,
+ *b_size + (enc ? 0 : authsize),
+ iv);
aead_request_set_ad(req, aad_size);
if (secs)
@@ -381,7 +360,6 @@ out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
kfree(iv);
- return;
}
static void test_hash_sg_init(struct scatterlist *sg)
@@ -397,21 +375,16 @@ static void test_hash_sg_init(struct scatterlist *sg)
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
+ struct crypto_wait *wait = req->base.data;
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
- return ret;
+ return crypto_wait_req(ret, wait);
}
struct test_mb_ahash_data {
struct scatterlist sg[TVMEMSIZE];
char result[64];
struct ahash_request *req;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
@@ -440,7 +413,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (testmgr_alloc_buf(data[i].xbuf))
goto out;
- init_completion(&data[i].tresult.completion);
+ crypto_init_wait(&data[i].wait);
data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
@@ -449,8 +422,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
goto out;
}
- ahash_request_set_callback(data[i].req, 0,
- tcrypt_complete, &data[i].tresult);
+ ahash_request_set_callback(data[i].req, 0, crypto_req_done,
+ &data[i].wait);
test_hash_sg_init(data[i].sg);
}
@@ -492,16 +465,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (ret)
break;
- complete(&data[k].tresult.completion);
- data[k].tresult.err = 0;
+ crypto_req_done(&data[k].req->base, 0);
}
for (j = 0; j < k; j++) {
- struct tcrypt_result *tr = &data[j].tresult;
+ struct crypto_wait *wait = &data[j].wait;
+ int wait_ret;
- wait_for_completion(&tr->completion);
- if (tr->err)
- ret = tr->err;
+ wait_ret = crypto_wait_req(-EINPROGRESS, wait);
+ if (wait_ret)
+ ret = wait_ret;
}
end = get_cycles();
@@ -679,7 +652,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
struct hash_speed *speed, unsigned mask)
{
struct scatterlist sg[TVMEMSIZE];
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
struct ahash_request *req;
struct crypto_ahash *tfm;
char *output;
@@ -708,9 +681,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
goto out;
}
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
if (!output)
@@ -765,15 +738,9 @@ static void test_hash_speed(const char *algo, unsigned int secs,
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
-
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
+ struct crypto_wait *wait = req->base.data;
- return ret;
+ return crypto_wait_req(ret, wait);
}
static int test_acipher_jiffies(struct skcipher_request *req, int enc,
@@ -853,7 +820,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
unsigned int tcount, u8 *keysize, bool async)
{
unsigned int ret, i, j, k, iv_len;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
const char *key;
char iv[128];
struct skcipher_request *req;
@@ -866,7 +833,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
else
e = "decryption";
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
@@ -887,7 +854,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
i = 0;
do {
@@ -1269,6 +1236,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
ret += tcrypt_test("sha3-512");
break;
+ case 52:
+ ret += tcrypt_test("sm3");
+ break;
+
case 100:
ret += tcrypt_test("hmac(md5)");
break;
@@ -1404,9 +1375,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
@@ -1603,115 +1574,116 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
-
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
break;
}
-
/* fall through */
-
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 310:
test_hash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 311:
test_hash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 312:
test_hash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 320:
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 323:
test_hash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 324:
test_hash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 325:
test_hash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
+ case 326:
+ test_hash_speed("sm3", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ /* fall through */
case 399:
break;
@@ -1720,106 +1692,107 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_ahash_speed(alg, sec, generic_hash_speed_template);
break;
}
-
/* fall through */
-
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 419:
test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 420:
test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
-
+ /* fall through */
case 421:
test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 422:
test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 423:
test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 424:
test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
+ case 425:
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+ /* fall through */
case 499:
break;
@@ -1837,9 +1810,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
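
[Editor's note] The tcrypt conversion above, and the testmgr one below, replace the open-coded tcrypt_result/completion pair with the generic crypto_wait helpers: crypto_req_done() is installed as the request callback and crypto_wait_req() folds -EINPROGRESS and -EBUSY into a synchronous wait, returning the final status. A condensed sketch of the idiom both files converge on, with illustrative names:

	#include <linux/crypto.h>
	#include <crypto/skcipher.h>

	/* Illustrative synchronous wrapper around one async skcipher request. */
	static int run_one_request_sketch(struct skcipher_request *req, int enc)
	{
		DECLARE_CRYPTO_WAIT(wait);

		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);

		return crypto_wait_req(enc ? crypto_skcipher_encrypt(req)
					   : crypto_skcipher_decrypt(req), &wait);
	}
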
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 7125ba38..29d7020b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
#define ENCRYPT 1
#define DECRYPT 0
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
struct aead_test_suite {
struct {
const struct aead_testvec *vecs;
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len)
buf, len, false);
}
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
- struct tcrypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
-static int wait_async_op(struct tcrypt_result *tr, int ret)
-{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
- return ret;
-}
-
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
- const char *algo, char *result, struct tcrypt_result *tresult)
+ const char *algo, char *result, struct crypto_wait *wait)
{
char *state;
struct ahash_request *req;
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq,
}
ahash_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, tresult);
+ crypto_req_done, wait);
memcpy(hash_buff, template->plaintext + temp,
template->tap[k]);
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
- ret = wait_async_op(tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
*preq = req;
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm,
char *result;
char *key;
struct ahash_request *req;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
void *hash_buff;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm,
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out_noreq;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm,
ahash_request_set_crypt(req, sg, result, template[i].psize);
if (use_digest) {
- ret = wait_async_op(&tresult, crypto_ahash_digest(req));
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&tresult.completion);
- reinit_completion(&tresult.completion);
- ret = tresult.err;
- if (!ret)
- break;
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed "
- "on chunking test %d for %s: "
- "ret=%d\n", j, algo, -ret);
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
+ j, algo, -ret);
goto out;
}
@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm,
for (k = 1; k < template[i].np; k++) {
ret = ahash_partial_update(&req, tfm, &template[i],
hash_buff, k, temp, &sg[0], algo, result,
- &tresult);
+ &wait);
if (ret) {
pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm,
}
temp += template[i].tap[k];
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
struct scatterlist *sg;
struct scatterlist *sgout;
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int authsize, iv_len;
void *input;
void *output;
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
iv_len = crypto_aead_ivsize(tfm);
@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
struct scatterlist sg[8];
struct scatterlist sgout[8];
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
void *data;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm,
int ret;
struct scatterlist src, dst;
struct acomp_req *req;
- struct tcrypt_result result;
+ struct crypto_wait wait;
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!output)
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_compress(req));
+ ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm,
dlen = COMP_BUF_SIZE;
sg_init_one(&src, output, ilen);
sg_init_one(&dst, decomp_out, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
acomp_request_set_params(req, &src, &dst, ilen, dlen);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
void *a_public = NULL;
void *a_ss = NULL;
void *shared_secret = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max;
int err = -ENOMEM;
struct scatterlist src, dst;
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
if (!req)
return err;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
if (err < 0)
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
/* Compute party A's public key */
- err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+ err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
if (err) {
pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
alg, err);
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->b_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
if (err) {
pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
alg, err);
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->expected_a_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result,
- crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
+ &wait);
if (err) {
pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
alg, err);
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
if (!req)
goto free_xbuf;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
if (vecs->public_key_vec)
err = crypto_akcipher_set_pub_key(tfm, vecs->key,
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature generation */
- crypto_akcipher_sign(req) :
- /* Run asymmetric encrypt */
- crypto_akcipher_encrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature generation */
+ crypto_akcipher_sign(req) :
+ /* Run asymmetric encrypt */
+ crypto_akcipher_encrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature verification */
- crypto_akcipher_verify(req) :
- /* Run asymmetric decrypt */
- crypto_akcipher_decrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature verification */
+ crypto_akcipher_verify(req) :
+ /* Run asymmetric decrypt */
+ crypto_akcipher_decrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
@@ -3500,6 +3428,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sha512_tv_template)
}
}, {
+ .alg = "sm3",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(sm3_tv_template)
+ }
+ }, {
.alg = "tgr128",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6ceb0e27..a714b629 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1497,6 +1497,73 @@ static const struct hash_testvec crct10dif_tv_template[] = {
}
};
+/* Example vectors below taken from
+ * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+ *
+ * The rest taken from
+ * https://github.com/adamws/oscca-sm3
+ */
+static const struct hash_testvec sm3_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+ .digest = (u8 *)(u8 []) {
+ 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+ 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+ 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+ 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B }
+ }, {
+ .plaintext = "a",
+ .psize = 1,
+ .digest = (u8 *)(u8 []) {
+ 0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29,
+ 0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C,
+ 0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82,
+ 0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 }
+ }, {
+ /* A.1. Example 1 */
+ .plaintext = "abc",
+ .psize = 3,
+ .digest = (u8 *)(u8 []) {
+ 0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9,
+ 0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2,
+ 0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2,
+ 0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 }
+ }, {
+ .plaintext = "abcdefghijklmnopqrstuvwxyz",
+ .psize = 26,
+ .digest = (u8 *)(u8 []) {
+ 0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC,
+ 0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4,
+ 0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63,
+ 0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 }
+ }, {
+ /* A.1. Example 2 */
+ .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab"
+ "cdabcdabcdabcdabcd",
+ .psize = 64,
+ .digest = (u8 *)(u8 []) {
+ 0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1,
+ 0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D,
+ 0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65,
+ 0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 }
+ }, {
+ .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcd",
+ .psize = 256,
+ .digest = (u8 *)(u8 []) {
+ 0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91,
+ 0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF,
+ 0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9,
+ 0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 }
+ }
+};
+
/*
* SHA1 test vectors from from FIPS PUB 180-1
* Long vector from CAVS 5.0
@@ -32675,6 +32742,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
"\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
"\xc4\xfd\x80\x6c\x22\xf2\x21",
.rlen = 375,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 375 - 20, 4, 16 },
+
}, { /* RFC7539 A.2. Test Vector #3 */
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -33049,6 +33120,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
"\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
"\x98",
.rlen = 1281,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 1200, 1, 80 },
},
};
diff --git a/crypto/xor.c b/crypto/xor.c
index 263af9fb..bce9fe7a 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -122,12 +122,7 @@ calibrate_xor_blocks(void)
goto out;
}
- /*
- * Note: Since the memory is not actually used for _anything_ but to
- * test the XOR speed, we don't really want kmemcheck to warn about
- * reading uninitialized bytes here.
- */
- b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
+ b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (!b1) {
printk(KERN_WARNING "xor: Yikes! No memory available.\n");
return -ENOMEM;
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a8..f317c48b 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
crypto_skcipher_encrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
crypto_skcipher_decrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -554,8 +550,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx->name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
+ "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_drop_spawn;
+ }
} else
goto err_drop_spawn;