2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/crypto/crypto_engine.c
@@ -1,19 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Handle async block request by crypto hardware engine.
  *
  * Copyright (C) 2016 Linaro, Inc.
  *
  * Author: Baolin Wang <baolin.wang@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
  */
 
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <crypto/engine.h>
 #include <uapi/linux/sched/types.h>
 #include "internal.h"
@@ -27,32 +23,36 @@
  * @err: error number
  */
 static void crypto_finalize_request(struct crypto_engine *engine,
-                             struct crypto_async_request *req, int err)
+                                    struct crypto_async_request *req, int err)
 {
         unsigned long flags;
-        bool finalize_cur_req = false;
+        bool finalize_req = false;
         int ret;
         struct crypto_engine_ctx *enginectx;
 
-        spin_lock_irqsave(&engine->queue_lock, flags);
-        if (engine->cur_req == req)
-                finalize_cur_req = true;
-        spin_unlock_irqrestore(&engine->queue_lock, flags);
+        /*
+         * If hardware cannot enqueue more requests
+         * and retry mechanism is not supported
+         * make sure we are completing the current request
+         */
+        if (!engine->retry_support) {
+                spin_lock_irqsave(&engine->queue_lock, flags);
+                if (engine->cur_req == req) {
+                        finalize_req = true;
+                        engine->cur_req = NULL;
+                }
+                spin_unlock_irqrestore(&engine->queue_lock, flags);
+        }
 
-        if (finalize_cur_req) {
+        if (finalize_req || engine->retry_support) {
                 enginectx = crypto_tfm_ctx(req->tfm);
-                if (engine->cur_req_prepared &&
+                if (enginectx->op.prepare_request &&
                     enginectx->op.unprepare_request) {
                         ret = enginectx->op.unprepare_request(engine, req);
                         if (ret)
                                 dev_err(engine->dev, "failed to unprepare request\n");
                 }
-                spin_lock_irqsave(&engine->queue_lock, flags);
-                engine->cur_req = NULL;
-                engine->cur_req_prepared = false;
-                spin_unlock_irqrestore(&engine->queue_lock, flags);
         }
-
         req->complete(req, err);
 
         kthread_queue_work(engine->kworker, &engine->pump_requests);
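Note on the hunk above: crypto_finalize_request() is static, so drivers go through the per-type wrappers such as crypto_finalize_skcipher_request(), which now also cover retry-capable engines that keep no cur_req. A minimal sketch of a driver-side completion path (struct my_dev, my_irq_done() and the current_req field are hypothetical names, not part of this patch):

static void my_irq_done(struct my_dev *dd, int err)
{
        struct skcipher_request *req = dd->current_req;  /* hypothetical field */

        /* Lets the engine unprepare the request and pump the next one. */
        crypto_finalize_skcipher_request(dd->engine, req, err);
}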
@@ -79,7 +79,7 @@
         spin_lock_irqsave(&engine->queue_lock, flags);
 
         /* Make sure we are not already running a request */
-        if (engine->cur_req)
+        if (!engine->retry_support && engine->cur_req)
                 goto out;
 
         /* If another context is idling then defer */
....@@ -113,13 +113,21 @@
113113 goto out;
114114 }
115115
116
+start_request:
116117 /* Get the fist request from the engine queue to handle */
117118 backlog = crypto_get_backlog(&engine->queue);
118119 async_req = crypto_dequeue_request(&engine->queue);
119120 if (!async_req)
120121 goto out;
121122
122
- engine->cur_req = async_req;
123
+ /*
124
+ * If hardware doesn't support the retry mechanism,
125
+ * keep track of the request we are processing now.
126
+ * We'll need it on completion (crypto_finalize_request).
127
+ */
128
+ if (!engine->retry_support)
129
+ engine->cur_req = async_req;
130
+
123131 if (backlog)
124132 backlog->complete(backlog, -EINPROGRESS);
125133
@@ -135,7 +143,7 @@
                 ret = engine->prepare_crypt_hardware(engine);
                 if (ret) {
                         dev_err(engine->dev, "failed to prepare crypt hardware\n");
-                        goto req_err;
+                        goto req_err_2;
                 }
         }
 
@@ -146,28 +154,90 @@
                 if (ret) {
                         dev_err(engine->dev, "failed to prepare request: %d\n",
                                 ret);
-                        goto req_err;
+                        goto req_err_2;
                 }
-                engine->cur_req_prepared = true;
         }
         if (!enginectx->op.do_one_request) {
                 dev_err(engine->dev, "failed to do request\n");
                 ret = -EINVAL;
-                goto req_err;
+                goto req_err_1;
         }
-        ret = enginectx->op.do_one_request(engine, async_req);
-        if (ret) {
-                dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
-                goto req_err;
-        }
-        return;
 
-req_err:
-        crypto_finalize_request(engine, async_req, ret);
+        ret = enginectx->op.do_one_request(engine, async_req);
+
+        /* Request unsuccessfully executed by hardware */
+        if (ret < 0) {
+                /*
+                 * If hardware queue is full (-ENOSPC), requeue request
+                 * regardless of backlog flag.
+                 * Otherwise, unprepare and complete the request.
+                 */
+                if (!engine->retry_support ||
+                    (ret != -ENOSPC)) {
+                        dev_err(engine->dev,
+                                "Failed to do one request from queue: %d\n",
+                                ret);
+                        goto req_err_1;
+                }
+                /*
+                 * If retry mechanism is supported,
+                 * unprepare current request and
+                 * enqueue it back into crypto-engine queue.
+                 */
+                if (enginectx->op.unprepare_request) {
+                        ret = enginectx->op.unprepare_request(engine,
+                                                              async_req);
+                        if (ret)
+                                dev_err(engine->dev,
+                                        "failed to unprepare request\n");
+                }
+                spin_lock_irqsave(&engine->queue_lock, flags);
+                /*
+                 * If hardware was unable to execute request, enqueue it
+                 * back in front of crypto-engine queue, to keep the order
+                 * of requests.
+                 */
+                crypto_enqueue_request_head(&engine->queue, async_req);
+
+                kthread_queue_work(engine->kworker, &engine->pump_requests);
+                goto out;
+        }
+
+        goto retry;
+
+req_err_1:
+        if (enginectx->op.unprepare_request) {
+                ret = enginectx->op.unprepare_request(engine, async_req);
+                if (ret)
+                        dev_err(engine->dev, "failed to unprepare request\n");
+        }
+
+req_err_2:
+        async_req->complete(async_req, ret);
+
+retry:
+        /* If retry mechanism is supported, send new requests to engine */
+        if (engine->retry_support) {
+                spin_lock_irqsave(&engine->queue_lock, flags);
+                goto start_request;
+        }
         return;
 
 out:
         spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+        /*
+         * Batch requests is possible only if
+         * hardware can enqueue multiple requests
+         */
+        if (engine->do_batch_requests) {
+                ret = engine->do_batch_requests(engine);
+                if (ret)
+                        dev_err(engine->dev, "failed to do batch requests: %d\n",
+                                ret);
+        }
+
+        return;
 }
 
 static void crypto_pump_work(struct kthread_work *work)
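The hunk above establishes a contract for drivers that set retry_support: do_one_request() returns -ENOSPC when the hardware queue is momentarily full, so the engine unprepares the request, puts it back at the head of its queue and retries later; any other negative value completes the request with that error. A rough, hedged sketch of such a callback (my_dev_from_engine(), my_hw_queue_full() and my_hw_submit() are made-up helpers):

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
        struct skcipher_request *req = container_of(areq,
                                                    struct skcipher_request,
                                                    base);
        struct my_dev *dd = my_dev_from_engine(engine);  /* hypothetical */

        if (my_hw_queue_full(dd))                         /* hypothetical */
                return -ENOSPC; /* engine re-queues req at the head */

        /* 0 means accepted; completion is reported later from the IRQ path. */
        return my_hw_submit(dd, req);                     /* hypothetical */
}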
@@ -217,20 +287,6 @@
 {
         return crypto_transfer_request(engine, req, true);
 }
-
-/**
- * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
- * to list into the engine queue
- * @engine: the hardware engine
- * @req: the request need to be listed into the engine queue
- * TODO: Remove this function when skcipher conversion is finished
- */
-int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
-                                                 struct ablkcipher_request *req)
-{
-        return crypto_transfer_request_to_engine(engine, &req->base);
-}
-EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
 
 /**
  * crypto_transfer_aead_request_to_engine - transfer one aead_request
@@ -283,21 +339,6 @@
         return crypto_transfer_request_to_engine(engine, &req->base);
 }
 EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
-
-/**
- * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
- * the request is done
- * @engine: the hardware engine
- * @req: the request need to be finalized
- * @err: error number
- * TODO: Remove this function when skcipher conversion is finished
- */
-void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
-                                        struct ablkcipher_request *req, int err)
-{
-        return crypto_finalize_request(engine, &req->base, err);
-}
-EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
 
 /**
  * crypto_finalize_aead_request - finalize one aead_request if
@@ -420,17 +461,28 @@
 EXPORT_SYMBOL_GPL(crypto_engine_stop);
 
 /**
- * crypto_engine_alloc_init - allocate crypto hardware engine structure and
- * initialize it.
+ * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
+ * and initialize it by setting the maximum number of entries in the software
+ * crypto-engine queue.
  * @dev: the device attached with one hardware engine
+ * @retry_support: whether hardware has support for retry mechanism
+ * @cbk_do_batch: pointer to a callback function to be invoked when executing
+ *                a batch of requests.
+ *                This has the form:
+ *                callback(struct crypto_engine *engine)
+ *                where:
+ *                @engine: the crypto engine structure.
  * @rt: whether this queue is set to run as a realtime task
+ * @qlen: maximum size of the crypto-engine queue
  *
  * This must be called from context that can sleep.
  * Return: the crypto engine structure on success, else NULL.
  */
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+                                                       bool retry_support,
+                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
+                                                       bool rt, int qlen)
 {
-        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
         struct crypto_engine *engine;
 
         if (!dev)
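A hedged example of calling the new constructor from a driver probe path; struct my_dev, my_do_batch and the queue length of 128 are assumptions for illustration, not values taken from this patch (my_do_batch itself is sketched after the next hunk):

static int my_engine_setup(struct my_dev *dd, struct device *dev)
{
        dd->engine = crypto_engine_alloc_init_and_set(dev,
                                                      true,        /* retry_support */
                                                      my_do_batch, /* cbk_do_batch */
                                                      false,       /* rt */
                                                      128);        /* qlen */
        if (!dd->engine)
                return -ENOMEM;

        /* Start pumping requests once the hardware is ready. */
        return crypto_engine_start(dd->engine);
}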
@@ -445,12 +497,18 @@
         engine->running = false;
         engine->busy = false;
         engine->idling = false;
-        engine->cur_req_prepared = false;
+        engine->retry_support = retry_support;
         engine->priv_data = dev;
+        /*
+         * Batch requests is possible only if
+         * hardware has support for retry mechanism.
+         */
+        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
+
         snprintf(engine->name, sizeof(engine->name),
                  "%s-engine", dev_name(dev));
 
-        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+        crypto_init_queue(&engine->queue, qlen);
         spin_lock_init(&engine->queue_lock);
 
         engine->kworker = kthread_create_worker(0, "%s", engine->name);
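Since do_batch_requests is only wired up when retry_support is true, a batching driver pairs the two. A minimal sketch of the my_do_batch callback referenced earlier (my_dev_from_engine() and my_hw_kick() are hypothetical helpers):

static int my_do_batch(struct crypto_engine *engine)
{
        struct my_dev *dd = my_dev_from_engine(engine);   /* hypothetical */

        /* Flush everything queued since the last pump in one go. */
        return my_hw_kick(dd);                             /* hypothetical */
}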
@@ -462,11 +520,27 @@
 
         if (engine->rt) {
                 dev_info(dev, "will run requests pump with realtime priority\n");
-                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
+                sched_set_fifo(engine->kworker->task);
         }
 
         return engine;
 }
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached with one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+                                                CRYPTO_ENGINE_MAX_QLEN);
+}
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
 
 /**