From 2746659a4f2827c47df630ecc7b5ef88f3fea804 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Date: Tue, 26 Jan 2016 17:15:03 +0900
Subject: crypto: compress - remove unused pcomp interface

It is unused now, so remove it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Makefile | 2 --
 1 file changed, 2 deletions(-)

(limited to 'crypto/Makefile')

diff --git a/crypto/Makefile b/crypto/Makefile
index 2acdbbd3..ffe18c9c 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -28,7 +28,6 @@ crypto_hash-y += ahash.o
 crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
-obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
@@ -99,7 +98,6 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
-obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
 obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
-- cgit v1.2.3

From 07e57ab519259a1983f1636edf5404e1a64067af Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Fri, 29 Jan 2016 18:20:17 +0800
Subject: crypto: crc32 - Rename generic implementation

The generic crc32 implementation is currently called crc32.  This is a
problem because it clashes with the lib implementation of crc32.

This patch renames the crypto crc32 to crc32_generic so that it is
consistent with crc32c.  An alias for the driver is also added.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Makefile        |   2 +-
 crypto/crc32.c         | 159 ------------------------------------------------
 crypto/crc32_generic.c | 160 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 161 insertions(+), 160 deletions(-)
 delete mode 100644 crypto/crc32.c
 create mode 100644 crypto/crc32_generic.c

(limited to 'crypto/Makefile')

diff --git a/crypto/Makefile b/crypto/Makefile
index ffe18c9c..059de1bb 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
diff --git a/crypto/crc32.c b/crypto/crc32.c
deleted file mode 100644
index 187ded28..00000000
--- a/crypto/crc32.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- */
-
-/*
- * This is crypto api shash wrappers to crc32_le.
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/crc32.h>
-
-#define CHKSUM_BLOCK_SIZE	1
-#define CHKSUM_DIGEST_SIZE	4
-
-static u32 __crc32_le(u32 crc, unsigned char const *p, size_t len)
-{
-	return crc32_le(crc, p, len);
-}
-
-/** No default init with ~0 */
-static int crc32_cra_init(struct crypto_tfm *tfm)
-{
-	u32 *key = crypto_tfm_ctx(tfm);
-
-	*key = 0;
-
-	return 0;
-}
-
-
-/*
- * Setting the seed allows arbitrary accumulators and flexible XOR policy
- * If your algorithm starts with ~0, then XOR with ~0 before you set
- * the seed.
- */
-static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
-			unsigned int keylen)
-{
-	u32 *mctx = crypto_shash_ctx(hash);
-
-	if (keylen != sizeof(u32)) {
-		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-	*mctx = le32_to_cpup((__le32 *)key);
-	return 0;
-}
-
-static int crc32_init(struct shash_desc *desc)
-{
-	u32 *mctx = crypto_shash_ctx(desc->tfm);
-	u32 *crcp = shash_desc_ctx(desc);
-
-	*crcp = *mctx;
-
-	return 0;
-}
-
-static int crc32_update(struct shash_desc *desc, const u8 *data,
-			unsigned int len)
-{
-	u32 *crcp = shash_desc_ctx(desc);
-
-	*crcp = __crc32_le(*crcp, data, len);
-	return 0;
-}
-
-/* No final XOR 0xFFFFFFFF, like crc32_le */
-static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
-			 u8 *out)
-{
-	*(__le32 *)out = cpu_to_le32(__crc32_le(*crcp, data, len));
-	return 0;
-}
-
-static int crc32_finup(struct shash_desc *desc, const u8 *data,
-		       unsigned int len, u8 *out)
-{
-	return __crc32_finup(shash_desc_ctx(desc), data, len, out);
-}
-
-static int crc32_final(struct shash_desc *desc, u8 *out)
-{
-	u32 *crcp = shash_desc_ctx(desc);
-
-	*(__le32 *)out = cpu_to_le32p(crcp);
-	return 0;
-}
-
-static int crc32_digest(struct shash_desc *desc, const u8 *data,
-			unsigned int len, u8 *out)
-{
-	return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len,
-			     out);
-}
-static struct shash_alg alg = {
-	.setkey		= crc32_setkey,
-	.init		= crc32_init,
-	.update		= crc32_update,
-	.final		= crc32_final,
-	.finup		= crc32_finup,
-	.digest		= crc32_digest,
-	.descsize	= sizeof(u32),
-	.digestsize	= CHKSUM_DIGEST_SIZE,
-	.base		= {
-		.cra_name		= "crc32",
-		.cra_driver_name	= "crc32-table",
-		.cra_priority		= 100,
-		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(u32),
-		.cra_module		= THIS_MODULE,
-		.cra_init		= crc32_cra_init,
-	}
-};
-
-static int __init crc32_mod_init(void)
-{
-	return crypto_register_shash(&alg);
-}
-
-static void __exit crc32_mod_fini(void)
-{
-	crypto_unregister_shash(&alg);
-}
-
-module_init(crc32_mod_init);
-module_exit(crc32_mod_fini);
-
-MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
-MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crc32");
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
new file mode 100644
index 00000000..aa2a25fc
--- /dev/null
+++ b/crypto/crc32_generic.c
@@ -0,0 +1,160 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ */
+
+/*
+ * This is crypto api shash wrappers to crc32_le.
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/crc32.h>
+
+#define CHKSUM_BLOCK_SIZE	1
+#define CHKSUM_DIGEST_SIZE	4
+
+static u32 __crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+	return crc32_le(crc, p, len);
+}
+
+/** No default init with ~0 */
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = 0;
+
+	return 0;
+}
+
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
+			unsigned int keylen)
+{
+	u32 *mctx = crypto_shash_ctx(hash);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	*mctx = le32_to_cpup((__le32 *)key);
+	return 0;
+}
+
+static int crc32_init(struct shash_desc *desc)
+{
+	u32 *mctx = crypto_shash_ctx(desc->tfm);
+	u32 *crcp = shash_desc_ctx(desc);
+
+	*crcp = *mctx;
+
+	return 0;
+}
+
+static int crc32_update(struct shash_desc *desc, const u8 *data,
+			unsigned int len)
+{
+	u32 *crcp = shash_desc_ctx(desc);
+
+	*crcp = __crc32_le(*crcp, data, len);
+	return 0;
+}
+
+/* No final XOR 0xFFFFFFFF, like crc32_le */
+static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
+			 u8 *out)
+{
+	*(__le32 *)out = cpu_to_le32(__crc32_le(*crcp, data, len));
+	return 0;
+}
+
+static int crc32_finup(struct shash_desc *desc, const u8 *data,
+		       unsigned int len, u8 *out)
+{
+	return __crc32_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crcp = shash_desc_ctx(desc);
+
+	*(__le32 *)out = cpu_to_le32p(crcp);
+	return 0;
+}
+
+static int crc32_digest(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len,
+			     out);
+}
+static struct shash_alg alg = {
+	.setkey		= crc32_setkey,
+	.init		= crc32_init,
+	.update		= crc32_update,
+	.final		= crc32_final,
+	.finup		= crc32_finup,
+	.digest		= crc32_digest,
+	.descsize	= sizeof(u32),
+	.digestsize	= CHKSUM_DIGEST_SIZE,
+	.base		= {
+		.cra_name		= "crc32",
+		.cra_driver_name	= "crc32-generic",
+		.cra_priority		= 100,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(u32),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= crc32_cra_init,
+	}
+};
+
+static int __init crc32_mod_init(void)
+{
+	return crypto_register_shash(&alg);
+}
+
+static void __exit crc32_mod_fini(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(crc32_mod_init);
+module_exit(crc32_mod_fini);
+
+MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
+MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-generic");
-- cgit v1.2.3
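[Editor's note] The rename above changes only the driver name ("crc32-table"
becomes "crc32-generic") and the file name; the algorithm name "crc32" stays
the same, so existing users are unaffected. Below is a minimal sketch of how
kernel code reaches this implementation through the shash API. crc32_demo()
is a hypothetical helper for illustration, not part of the patch:

#include <crypto/hash.h>

static int crc32_demo(const u8 *buf, unsigned int len, u8 out[4])
{
	struct crypto_shash *tfm;
	int err;

	/* Resolved by cra_name "crc32"; the generic driver backs it. */
	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* Seed defaults to 0, see crc32_cra_init() above. */
		err = crypto_shash_digest(desc, buf, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}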
From 0a3eb652fbcb75d3d1977f94831426b0c52456a8 Mon Sep 17 00:00:00 2001
From: Baolin Wang <baolin.wang@linaro.org>
Date: Tue, 26 Jan 2016 20:25:39 +0800
Subject: crypto: engine - Introduce the block request crypto engine framework

Right now, block cipher drivers each have to implement and maintain their
own queue/thread for processing requests. The existing helpers cover only
the queue itself (crypto_enqueue_request() and crypto_dequeue_request());
they do not help with the mechanics of driving the hardware (running a
request immediately, DMA-mapping it, or providing a thread to process the
queue), even though little of that code should vary from device to device.

This patch provides a mechanism that drivers can use to push requests to
the hardware as it becomes free. The framework is patterned on the SPI
queueing code, where the same approach has worked out well:

https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/drivers/spi/spi.c?id=ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0

Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig         |   3 +
 crypto/Makefile        |   1 +
 crypto/crypto_engine.c | 355 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 359 insertions(+)
 create mode 100644 crypto/crypto_engine.c

(limited to 'crypto/Makefile')

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 099f1f1b..f6bfdda1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -217,6 +217,9 @@ config CRYPTO_GLUE_HELPER_X86
 	depends on X86
 	select CRYPTO_ALGAPI
 
+config CRYPTO_ENGINE
+	tristate
+
 comment "Authenticated Encryption with Associated Data"
 
 config CRYPTO_CCM
diff --git a/crypto/Makefile b/crypto/Makefile
index 059de1bb..4f4ef7ea 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -7,6 +7,7 @@ crypto-y := api.o cipher.o compress.o memneq.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
+obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
new file mode 100644
index 00000000..a55c82dd
--- /dev/null
+++ b/crypto/crypto_engine.c
@@ -0,0 +1,355 @@
+/*
+ * Handle async block request by crypto hardware engine.
+ *
+ * Copyright (C) 2016 Linaro, Inc.
+ *
+ * Author: Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include "internal.h"
+
+#define CRYPTO_ENGINE_MAX_QLEN 10
+
+void crypto_finalize_request(struct crypto_engine *engine,
+			     struct ablkcipher_request *req, int err);
+
+/**
+ * crypto_pump_requests - dequeue one request from engine queue to process
+ * @engine: the hardware engine
+ * @in_kthread: true if we are in the context of the request pump thread
+ *
+ * This function checks if there is any request in the engine queue that
+ * needs processing and, if so, calls out to the driver to initialize the
+ * hardware and handle the request.
+ */
+static void crypto_pump_requests(struct crypto_engine *engine,
+				 bool in_kthread)
+{
+	struct crypto_async_request *async_req, *backlog;
+	struct ablkcipher_request *req;
+	unsigned long flags;
+	bool was_busy = false;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	/* Make sure we are not already running a request */
+	if (engine->cur_req)
+		goto out;
+
+	/* If another context is idling then defer */
+	if (engine->idling) {
+		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+		goto out;
+	}
+
+	/* Check if the engine queue is idle */
+	if (!crypto_queue_len(&engine->queue) || !engine->running) {
+		if (!engine->busy)
+			goto out;
+
+		/* Only do teardown in the thread */
+		if (!in_kthread) {
+			queue_kthread_work(&engine->kworker,
+					   &engine->pump_requests);
+			goto out;
+		}
+
+		engine->busy = false;
+		engine->idling = true;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+		if (engine->unprepare_crypt_hardware &&
+		    engine->unprepare_crypt_hardware(engine))
+			pr_err("failed to unprepare crypt hardware\n");
+
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->idling = false;
+		goto out;
+	}
+
+	/* Get the first request from the engine queue to handle */
+	backlog = crypto_get_backlog(&engine->queue);
+	async_req = crypto_dequeue_request(&engine->queue);
+	if (!async_req)
+		goto out;
+
+	req = ablkcipher_request_cast(async_req);
+
+	engine->cur_req = req;
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	if (engine->busy)
+		was_busy = true;
+	else
+		engine->busy = true;
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	/* At this point we hold a request that needs to be processed */
+	if (!was_busy && engine->prepare_crypt_hardware) {
+		ret = engine->prepare_crypt_hardware(engine);
+		if (ret) {
+			pr_err("failed to prepare crypt hardware\n");
+			goto req_err;
+		}
+	}
+
+	if (engine->prepare_request) {
+		ret = engine->prepare_request(engine, engine->cur_req);
+		if (ret) {
+			pr_err("failed to prepare request: %d\n", ret);
+			goto req_err;
+		}
+		engine->cur_req_prepared = true;
+	}
+
+	ret = engine->crypt_one_request(engine, engine->cur_req);
+	if (ret) {
+		pr_err("failed to crypt one request from queue\n");
+		goto req_err;
+	}
+	return;
+
+req_err:
+	crypto_finalize_request(engine, engine->cur_req, ret);
+	return;
+
+out:
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+}
+
+static void crypto_pump_work(struct kthread_work *work)
+{
+	struct crypto_engine *engine =
+		container_of(work, struct crypto_engine, pump_requests);
+
+	crypto_pump_requests(engine, true);
+}
+
+/**
+ * crypto_transfer_request - transfer the new request into the engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be listed into the engine queue
+ */
+int crypto_transfer_request(struct crypto_engine *engine,
+			    struct ablkcipher_request *req, bool need_pump)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	if (!engine->running) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		return -ESHUTDOWN;
+	}
+
+	ret = ablkcipher_enqueue_request(&engine->queue, req);
+
+	if (!engine->busy && need_pump)
+		queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request);
+
+/**
+ * crypto_transfer_request_to_engine - transfer one request into the
+ * engine queue
+ * @engine: the hardware engine
+ * @req: the request that needs to be listed into the engine queue
+ */
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+				      struct ablkcipher_request *req)
+{
+	return crypto_transfer_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+
+/**
+ * crypto_finalize_request - finalize one request once it is done
+ * @engine: the hardware engine
+ * @req: the request that needs to be finalized
+ * @err: error number
+ */
+void crypto_finalize_request(struct crypto_engine *engine,
+			     struct ablkcipher_request *req, int err)
+{
+	unsigned long flags;
+	bool finalize_cur_req = false;
+	int ret;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+	if (engine->cur_req == req)
+		finalize_cur_req = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (finalize_cur_req) {
+		if (engine->cur_req_prepared && engine->unprepare_request) {
+			ret = engine->unprepare_request(engine, req);
+			if (ret)
+				pr_err("failed to unprepare request\n");
+		}
+
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		engine->cur_req = NULL;
+		engine->cur_req_prepared = false;
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
+
+	req->base.complete(&req->base, err);
+
+	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_request);
+
+/**
+ * crypto_engine_start - start the hardware engine
+ * @engine: the hardware engine that needs to be started
+ *
+ * Return 0 on success, else an error code.
+ */
+int crypto_engine_start(struct crypto_engine *engine)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	if (engine->running || engine->busy) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		return -EBUSY;
+	}
+
+	engine->running = true;
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_start);
+
+/**
+ * crypto_engine_stop - stop the hardware engine
+ * @engine: the hardware engine that needs to be stopped
+ *
+ * Return 0 on success, else an error code.
+ */
+int crypto_engine_stop(struct crypto_engine *engine)
+{
+	unsigned long flags;
+	unsigned limit = 500;
+	int ret = 0;
+
+	spin_lock_irqsave(&engine->queue_lock, flags);
+
+	/*
+	 * If the engine queue is not empty or the engine is busy, wait
+	 * for a while so that the queued requests can be pumped out.
+	 */
+	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+		msleep(20);
+		spin_lock_irqsave(&engine->queue_lock, flags);
+	}
+
+	if (crypto_queue_len(&engine->queue) || engine->busy)
+		ret = -EBUSY;
+	else
+		engine->running = false;
+
+	spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+	if (ret)
+		pr_warn("could not stop engine\n");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_stop);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device to which the hardware engine is attached
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	struct crypto_engine *engine;
+
+	if (!dev)
+		return NULL;
+
+	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
+	if (!engine)
+		return NULL;
+
+	engine->rt = rt;
+	engine->running = false;
+	engine->busy = false;
+	engine->idling = false;
+	engine->cur_req_prepared = false;
+	engine->priv_data = dev;
+	snprintf(engine->name, sizeof(engine->name),
+		 "%s-engine", dev_name(dev));
+
+	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+	spin_lock_init(&engine->queue_lock);
+
+	init_kthread_worker(&engine->kworker);
+	engine->kworker_task = kthread_run(kthread_worker_fn,
+					   &engine->kworker, "%s",
+					   engine->name);
+	if (IS_ERR(engine->kworker_task)) {
+		dev_err(dev, "failed to create crypto request pump task\n");
+		return NULL;
+	}
+	init_kthread_work(&engine->pump_requests, crypto_pump_work);
+
+	if (engine->rt) {
+		dev_info(dev, "will run requests pump with realtime priority\n");
+		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+	}
+
+	return engine;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
+
+/**
+ * crypto_engine_exit - free the hardware engine's resources on exit
+ * @engine: the hardware engine that needs to be freed
+ *
+ * Return 0 for success.
+ */
+int crypto_engine_exit(struct crypto_engine *engine)
+{
+	int ret;
+
+	ret = crypto_engine_stop(engine);
+	if (ret)
+		return ret;
+
+	flush_kthread_worker(&engine->kworker);
+	kthread_stop(engine->kworker_task);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto hardware engine framework");
-- cgit v1.2.3
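[Editor's note] A sketch of how a hardware driver is expected to sit on top
of this framework: allocate and start an engine, feed requests into its
queue from the algorithm's encrypt/decrypt path, and finalize them from the
completion interrupt. All my_* names are hypothetical, and the struct
crypto_engine callback fields (prepare_request, crypt_one_request,
priv_data, ...) are declared in a companion header change that falls
outside this Makefile-limited view:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <crypto/algapi.h>

struct my_dev {				/* hypothetical driver state */
	struct crypto_engine *engine;
	struct ablkcipher_request *cur_req;
};

static int my_prepare_req(struct crypto_engine *engine,
			  struct ablkcipher_request *req)
{
	/* DMA-map buffers, program the key, etc. */
	return 0;
}

static int my_do_one_req(struct crypto_engine *engine,
			 struct ablkcipher_request *req)
{
	struct my_dev *dd = engine->priv_data;

	dd->cur_req = req;
	/* Kick the hardware; completion is signalled by the IRQ below. */
	return 0;
}

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *dd = data;

	/* Hand the finished request back so the queue is pumped again. */
	crypto_finalize_request(dd->engine, dd->cur_req, 0);
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, struct my_dev *dd)
{
	dd->engine = crypto_engine_alloc_init(dev, true);
	if (!dd->engine)
		return -ENOMEM;

	dd->engine->priv_data = dd;
	dd->engine->prepare_request = my_prepare_req;
	dd->engine->crypt_one_request = my_do_one_req;

	return crypto_engine_start(dd->engine);
}

/* The ablkcipher .encrypt handler just queues into the engine. */
static int my_encrypt(struct ablkcipher_request *req)
{
	struct my_dev *dd = my_dev_from_req(req);	/* hypothetical */

	return crypto_transfer_request_to_engine(dd->engine, req);
}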