path: root/crypto/crypto_engine.c
author	Herbert Xu <herbert@gondor.apana.org.au>	2023-08-13 14:54:32 +0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2023-08-18 17:01:10 +0800
commit	e3cee839ef8886c26207127015653214c7a28b24 (patch)
tree	c02fc8941ce48ec20b997b680ed376dbd1478b87 /crypto/crypto_engine.c
parent	61e2a6b7e0ec1c74fbd2951cb4254884ce5a2ad9 (diff)
crypto: engine - Remove prepare/unprepare request
The prepare and unprepare request callbacks in crypto_engine are superfluous: the same work can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
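For driver authors, the practical effect (a hedged sketch, not code from this patch) is that whatever work used to sit in the prepare_request/unprepare_request hooks now runs at the top and bottom of the driver's own do_one_request callback. In the sketch below only the do_one_request signature comes from the crypto_engine API; foo_map_buffers(), foo_run_hw() and foo_unmap_buffers() are hypothetical driver helpers.

/*
 * Hedged sketch: the former ->prepare_request()/->unprepare_request()
 * steps folded into the driver's ->do_one_request() callback.
 * The foo_* helpers are placeholders, not real kernel APIs.
 */
static int foo_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct crypto_async_request *req = areq;
	int ret;

	ret = foo_map_buffers(req);	/* what ->prepare_request() used to do */
	if (ret)
		return ret;

	ret = foo_run_hw(engine, req);	/* the actual crypto operation */

	foo_unmap_buffers(req);		/* what ->unprepare_request() used to do */
	return ret;
}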
Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r--	crypto/crypto_engine.c	42
1 file changed, 1 insertion(+), 41 deletions(-)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 74fcc089..17f79555 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -26,9 +26,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
 				    struct crypto_async_request *req, int err)
 {
 	unsigned long flags;
-	bool finalize_req = false;
-	int ret;
-	struct crypto_engine_ctx *enginectx;
 
 	/*
 	 * If hardware cannot enqueue more requests
@@ -38,21 +35,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
 	if (!engine->retry_support) {
 		spin_lock_irqsave(&engine->queue_lock, flags);
 		if (engine->cur_req == req) {
-			finalize_req = true;
 			engine->cur_req = NULL;
 		}
 		spin_unlock_irqrestore(&engine->queue_lock, flags);
 	}
 
-	if (finalize_req || engine->retry_support) {
-		enginectx = crypto_tfm_ctx(req->tfm);
-		if (enginectx->op.prepare_request &&
-		    enginectx->op.unprepare_request) {
-			ret = enginectx->op.unprepare_request(engine, req);
-			if (ret)
-				dev_err(engine->dev, "failed to unprepare request\n");
-		}
-	}
 	lockdep_assert_in_softirq();
 	crypto_request_complete(req, err);
@@ -141,20 +128,12 @@ start_request:
 		ret = engine->prepare_crypt_hardware(engine);
 		if (ret) {
 			dev_err(engine->dev, "failed to prepare crypt hardware\n");
-			goto req_err_2;
+			goto req_err_1;
 		}
 	}
 
 	enginectx = crypto_tfm_ctx(async_req->tfm);
 
-	if (enginectx->op.prepare_request) {
-		ret = enginectx->op.prepare_request(engine, async_req);
-		if (ret) {
-			dev_err(engine->dev, "failed to prepare request: %d\n",
-				ret);
-			goto req_err_2;
-		}
-	}
 	if (!enginectx->op.do_one_request) {
 		dev_err(engine->dev, "failed to do request\n");
 		ret = -EINVAL;
@@ -177,18 +156,6 @@ start_request:
 				ret);
 			goto req_err_1;
 		}
-		/*
-		 * If retry mechanism is supported,
-		 * unprepare current request and
-		 * enqueue it back into crypto-engine queue.
-		 */
-		if (enginectx->op.unprepare_request) {
-			ret = enginectx->op.unprepare_request(engine,
-							      async_req);
-			if (ret)
-				dev_err(engine->dev,
-					"failed to unprepare request\n");
-		}
 		spin_lock_irqsave(&engine->queue_lock, flags);
 		/*
 		 * If hardware was unable to execute request, enqueue it
@@ -204,13 +171,6 @@ start_request:
 	goto retry;
 
 req_err_1:
-	if (enginectx->op.unprepare_request) {
-		ret = enginectx->op.unprepare_request(engine, async_req);
-		if (ret)
-			dev_err(engine->dev, "failed to unprepare request\n");
-	}
-
-req_err_2:
 	crypto_request_complete(async_req, ret);
 
 retry:
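For completeness, a hedged before/after sketch of the driver-facing side, assuming the matching include/crypto/engine.h change in this series drops the two callbacks from struct crypto_engine_op (ctx and the foo_* handlers are hypothetical):

/* Before this series: three callbacks wired up at tfm init time. */
ctx->enginectx.op.prepare_request = foo_prepare_request;
ctx->enginectx.op.unprepare_request = foo_unprepare_request;
ctx->enginectx.op.do_one_request = foo_do_one_request;

/* After: only do_one_request remains; it does its own setup and teardown. */
ctx->enginectx.op.do_one_request = foo_do_one_request;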