2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
+#include <linux/nospec.h>
 #include "cc_driver.h"
 #include "cc_buffer_mgr.h"
 #include "cc_request_mgr.h"
-#include "cc_ivgen.h"
 #include "cc_pm.h"
 
 #define CC_MAX_POLL_ITER 10
@@ -51,10 +51,37 @@
 	bool notif;
 };
 
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
 static void comp_handler(unsigned long devarg);
 #ifdef COMP_IN_WQ
 static void comp_work_handler(struct work_struct *work);
 #endif
+
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+	return cc_cpp_int_masks[alg][slot];
+}
 
 void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 {
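Note on the new cc_cpp_int_mask() helper above: array_index_nospec() clamps a possibly mispredicted index before the dependent table load, closing the Spectre-v1 window. Below is a minimal userspace sketch of the same masking idea; index_mask_nospec() and the table are hypothetical stand-ins, loosely modeled on the kernel's generic fallback.

#include <stdio.h>

/* Rough rendition of the kernel's generic array_index_mask_nospec():
 * yields ~0UL when index < size and 0 otherwise, using only data
 * dependencies, so there is no bounds-check branch for the CPU to
 * speculate past. (Assumes arithmetic right shift of a negative
 * long, as the kernel does of its supported compilers.) */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

#define NUM_SLOTS 8UL

int main(void)
{
	unsigned long masks[NUM_SLOTS] = { 1, 2, 4, 8, 16, 32, 64, 128 };
	unsigned long slot = 12;	/* out of range, as an attacker might arrange */

	slot &= index_mask_nospec(slot, NUM_SLOTS);	/* forced to 0 */
	printf("masks[%lu] = %lu\n", slot, masks[slot]);
	return 0;
}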
@@ -80,7 +107,7 @@
 	/* Kill tasklet */
 	tasklet_kill(&req_mgr_h->comptask);
 #endif
-	kzfree(req_mgr_h);
+	kfree_sensitive(req_mgr_h);
 	drvdata->request_mgr_handle = NULL;
 }
 
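The kzfree() to kfree_sensitive() change above is the tree-wide rename of the same primitive: scrub a buffer before freeing it so key material does not linger in freed memory. A hedged userspace sketch of the pattern follows; free_sensitive() is a hypothetical helper, whereas the kernel version derives the length itself and uses memzero_explicit() to survive dead-store elimination.

#include <stdlib.h>
#include <string.h>

/* Scrub-then-free, in the spirit of kfree_sensitive(). A plain
 * memset() before free() can legally be optimized away; the kernel
 * uses memzero_explicit() for this reason. */
static void free_sensitive(void *p, size_t len)
{
	if (!p)
		return;
	memset(p, 0, len);	/* illustrative; use a non-elidable variant */
	free(p);
}

int main(void)
{
	char *key = malloc(32);

	if (!key)
		return 1;
	memcpy(key, "0123456789abcdef0123456789abcdef", 32);
	free_sensitive(key, 32);	/* key bytes wiped before release */
	return 0;
}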
@@ -179,12 +206,13 @@
 	}
 }
 
-/*!
- * Completion will take place if and only if user requested completion
- * by cc_send_sync_request().
+/**
+ * request_mgr_complete() - Completion will take place if and only if user
+ * requested completion by cc_send_sync_request().
  *
- * \param dev
- * \param dx_compl_h The completion event to signal
+ * @dev: Device pointer
+ * @dx_compl_h: The completion event to signal
+ * @dummy: unused error code
  */
 static void request_mgr_complete(struct device *dev, void *dx_compl_h,
 				 int dummy)
@@ -202,7 +230,7 @@
 	struct device *dev = drvdata_to_dev(drvdata);
 
 	/* SW queue is checked only once as it will not
-	 * be chaned during the poll because the spinlock_bh
+	 * be changed during the poll because the spinlock_bh
 	 * is held by the thread
 	 */
 	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
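The queue-full test above relies on MAX_REQUEST_QUEUE_SIZE being a power of two: indices wrap with a cheap AND instead of a modulo, and one slot is sacrificed to distinguish full from empty. A standalone sketch of the same arithmetic (names are hypothetical, not the driver's):

#include <assert.h>

#define QUEUE_SIZE 8u	/* must be a power of two for the mask to work */

struct ring { unsigned int head, tail; };

/* Full when advancing head would collide with tail: at most
 * QUEUE_SIZE - 1 entries are ever stored. */
static int ring_full(const struct ring *r)
{
	return ((r->head + 1) & (QUEUE_SIZE - 1)) == r->tail;
}

/* Occupancy, mirroring the used_sw_slots computation further down. */
static unsigned int ring_used(const struct ring *r)
{
	return (r->head - r->tail) & (QUEUE_SIZE - 1);
}

int main(void)
{
	struct ring r = { .head = 7, .tail = 0 };

	assert(ring_full(&r) && ring_used(&r) == 7);
	r.tail = 2;	/* consumer caught up by two entries */
	assert(!ring_full(&r) && ring_used(&r) == 5);
	return 0;
}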
@@ -237,51 +265,26 @@
 	return -ENOSPC;
 }
 
-/*!
- * Enqueue caller request to crypto hardware.
+/**
+ * cc_do_send_request() - Enqueue caller request to crypto hardware.
  * Need to be called with HW lock held and PM running
  *
- * \param drvdata
- * \param cc_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param add_comp If "true": add an artificial dout DMA to mark completion
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @add_comp: If "true": add an artificial dout DMA to mark completion
  *
- * \return int Returns -EINPROGRESS or error code
  */
-static int cc_do_send_request(struct cc_drvdata *drvdata,
-			      struct cc_crypto_req *cc_req,
-			      struct cc_hw_desc *desc, unsigned int len,
-			      bool add_comp, bool ivgen)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+			       struct cc_crypto_req *cc_req,
+			       struct cc_hw_desc *desc, unsigned int len,
+			       bool add_comp)
 {
 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 	unsigned int used_sw_slots;
-	unsigned int iv_seq_len = 0;
 	unsigned int total_seq_len = len; /*initial sequence length*/
-	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
 	struct device *dev = drvdata_to_dev(drvdata);
-	int rc;
-
-	if (ivgen) {
-		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
-			cc_req->ivgen_dma_addr_len,
-			&cc_req->ivgen_dma_addr[0],
-			&cc_req->ivgen_dma_addr[1],
-			&cc_req->ivgen_dma_addr[2],
-			cc_req->ivgen_size);
-
-		/* Acquire IV from pool */
-		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
-			       cc_req->ivgen_dma_addr_len,
-			       cc_req->ivgen_size, iv_seq, &iv_seq_len);
-
-		if (rc) {
-			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
-			return rc;
-		}
-
-		total_seq_len += iv_seq_len;
-	}
 
 	used_sw_slots = ((req_mgr_h->req_queue_head -
 			  req_mgr_h->req_queue_tail) &
@@ -293,20 +296,17 @@
 	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
 	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
 				    (MAX_REQUEST_QUEUE_SIZE - 1);
-	/* TODO: Use circ_buf.h ? */
 
 	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
 
 	/*
 	 * We are about to push command to the HW via the command registers
-	 * that may refernece hsot memory. We need to issue a memory barrier
-	 * to make sure there are no outstnading memory writes
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
 	 */
 	wmb();
 
 	/* STAT_PHASE_4: Push sequence */
-	if (ivgen)
-		enqueue_seq(drvdata, iv_seq, iv_seq_len);
 
 	enqueue_seq(drvdata, desc, len);
 
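The corrected comment above states the contract: every write that builds the descriptor in host memory must be visible before the register write that hands it to the engine. A rough C11 analogue of the wmb()-before-doorbell pattern follows; desc_ring and doorbell are hypothetical, and real MMIO would need the kernel's accessors.

#include <stdatomic.h>
#include <stdint.h>

struct hw_desc { uint32_t word[6]; };

static struct hw_desc desc_ring[64];	/* hypothetically DMA-visible memory */
static uint32_t fake_mmio;
static volatile uint32_t *doorbell = &fake_mmio;	/* stand-in for a device register */

static void push_desc(unsigned int slot, const struct hw_desc *d)
{
	desc_ring[slot] = *d;	/* plain stores build the descriptor */

	/* The release fence plays the role of wmb(): no descriptor store
	 * may be reordered after the doorbell write below. */
	atomic_thread_fence(memory_order_release);

	*doorbell = slot;	/* device may now fetch a complete descriptor */
}

int main(void)
{
	struct hw_desc d = { .word = { 1, 2, 3, 4, 5, 6 } };

	push_desc(0, &d);
	return 0;
}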
@@ -326,19 +326,18 @@
 		/* Update the free slots in HW queue */
 		req_mgr_h->q_free_slots -= total_seq_len;
 	}
-
-	/* Operation still in process */
-	return -EINPROGRESS;
 }
 
 static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
 			       struct cc_bl_item *bli)
 {
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
 
 	spin_lock_bh(&mgr->bl_lock);
 	list_add_tail(&bli->list, &mgr->backlog);
 	++mgr->bl_len;
+	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
 	spin_unlock_bh(&mgr->bl_lock);
 	tasklet_schedule(&mgr->comptask);
 }
@@ -348,9 +347,7 @@
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 	struct cc_bl_item *bli;
 	struct cc_crypto_req *creq;
-	struct crypto_async_request *req;
-	bool ivgen;
-	unsigned int total_len;
+	void *req;
 	struct device *dev = drvdata_to_dev(drvdata);
 	int rc;
 
@@ -358,29 +355,29 @@
 
 	while (mgr->bl_len) {
 		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
 		spin_unlock(&mgr->bl_lock);
 
+
 		creq = &bli->creq;
-		req = (struct crypto_async_request *)creq->user_arg;
+		req = creq->user_arg;
 
 		/*
 		 * Notify the request we're moving out of the backlog
 		 * but only if we haven't done so already.
 		 */
 		if (!bli->notif) {
-			req->complete(req, -EINPROGRESS);
+			creq->user_cb(dev, req, -EINPROGRESS);
 			bli->notif = true;
 		}
 
-		ivgen = !!creq->ivgen_dma_addr_len;
-		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
-
 		spin_lock(&mgr->hw_lock);
 
-		rc = cc_queues_status(drvdata, mgr, total_len);
+		rc = cc_queues_status(drvdata, mgr, bli->len);
 		if (rc) {
 			/*
-			 * There is still not room in the FIFO for
+			 * There is still no room in the FIFO for
 			 * this request. Bail out. We'll return here
 			 * on the next completion irq.
 			 */
@@ -388,15 +385,9 @@
 			return;
 		}
 
-		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
-					bli->len, false, ivgen);
-
+		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+				   false);
 		spin_unlock(&mgr->hw_lock);
-
-		if (rc != -EINPROGRESS) {
-			cc_pm_put_suspend(dev);
-			creq->user_cb(dev, req, rc);
-		}
 
 		/* Remove ourselves from the backlog list */
 		spin_lock(&mgr->bl_lock);
@@ -414,8 +405,6 @@
 {
 	int rc;
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
-	bool ivgen = !!cc_req->ivgen_dma_addr_len;
-	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
 	struct device *dev = drvdata_to_dev(drvdata);
 	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 	gfp_t flags = cc_gfp_flags(req);
@@ -423,12 +412,12 @@
 
 	rc = cc_pm_get(dev);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
 		return rc;
 	}
 
 	spin_lock_bh(&mgr->hw_lock);
-	rc = cc_queues_status(drvdata, mgr, total_len);
+	rc = cc_queues_status(drvdata, mgr, len);
 
 #ifdef CC_DEBUG_FORCE_BACKLOG
 	if (backlog_ok)
@@ -452,9 +441,10 @@
 		return -EBUSY;
 	}
 
-	if (!rc)
-		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
-					ivgen);
+	if (!rc) {
+		cc_do_send_request(drvdata, cc_req, desc, len, false);
+		rc = -EINPROGRESS;
+	}
 
 	spin_unlock_bh(&mgr->hw_lock);
 	return rc;
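With cc_do_send_request() now void, the function above synthesizes the -EINPROGRESS itself, keeping the Linux crypto API convention for async submission. A small sketch of how a caller reads those codes; classify() is illustrative, not driver code.

#include <errno.h>
#include <stdio.h>

/* Async-submit convention: the request belongs to the driver only on
 * -EINPROGRESS, or on -EBUSY when CRYPTO_TFM_REQ_MAY_BACKLOG was set
 * (it then sits in the backlog and completes later). */
static const char *classify(int rc)
{
	switch (rc) {
	case -EINPROGRESS:
		return "enqueued; completion callback will fire";
	case -EBUSY:
		return "backlogged; -EINPROGRESS notification follows";
	default:
		return "error; request was not taken";
	}
}

int main(void)
{
	printf("%d: %s\n", -EINPROGRESS, classify(-EINPROGRESS));
	printf("%d: %s\n", -EBUSY, classify(-EBUSY));
	return 0;
}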
@@ -474,7 +464,7 @@
 
 	rc = cc_pm_get(dev);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
 		return rc;
 	}
 
@@ -486,36 +476,28 @@
 			break;
 
 		spin_unlock_bh(&mgr->hw_lock);
-		if (rc != -EAGAIN) {
-			cc_pm_put_suspend(dev);
-			return rc;
-		}
 		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
 		reinit_completion(&drvdata->hw_queue_avail);
 	}
 
-	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+	cc_do_send_request(drvdata, cc_req, desc, len, true);
 	spin_unlock_bh(&mgr->hw_lock);
-
-	if (rc != -EINPROGRESS) {
-		cc_pm_put_suspend(dev);
-		return rc;
-	}
-
 	wait_for_completion(&cc_req->seq_compl);
 	return 0;
 }
 
-/*!
- * Enqueue caller request to crypto hardware during init process.
- * assume this function is not called in middle of a flow,
+/**
+ * send_request_init() - Enqueue caller request to crypto hardware during init
+ * process.
+ * Assume this function is not called in the middle of a flow,
  * since we set QUEUE_LAST_IND flag in the last descriptor.
  *
- * \param drvdata
- * \param desc The crypto sequence
- * \param len The crypto sequence length
+ * @drvdata: Associated device driver context
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
  *
- * \return int Returns "0" upon success
+ * Return:
+ * Returns "0" upon success
 */
 int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
 		      unsigned int len)
@@ -534,8 +516,8 @@
 
 	/*
 	 * We are about to push command to the HW via the command registers
-	 * that may refernece hsot memory. We need to issue a memory barrier
-	 * to make sure there are no outstnading memory writes
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
 	 */
 	wmb();
 	enqueue_seq(drvdata, desc, len);
@@ -579,6 +561,8 @@
 		drvdata->request_mgr_handle;
 	unsigned int *tail = &request_mgr_handle->req_queue_tail;
 	unsigned int *head = &request_mgr_handle->req_queue_head;
+	int rc;
+	u32 mask;
 
 	while (request_mgr_handle->axi_completed) {
 		request_mgr_handle->axi_completed--;
@@ -596,8 +580,22 @@
 
 		cc_req = &request_mgr_handle->req_queue[*tail];
 
+		if (cc_req->cpp.is_cpp) {
+
+			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+				cc_req->cpp.slot, cc_req->cpp.alg);
+			mask = cc_cpp_int_mask(cc_req->cpp.alg,
+					       cc_req->cpp.slot);
+			rc = (drvdata->irq & mask ? -EPERM : 0);
+			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+				drvdata->irq, rc);
+		} else {
+			dev_dbg(dev, "None CPP request completion\n");
+			rc = 0;
+		}
+
 		if (cc_req->user_cb)
-			cc_req->user_cb(dev, cc_req->user_arg, 0);
+			cc_req->user_cb(dev, cc_req->user_arg, rc);
 		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
 		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
 		dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,45 +616,48 @@
 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
 	struct cc_req_mgr_handle *request_mgr_handle =
 		drvdata->request_mgr_handle;
-
+	struct device *dev = drvdata_to_dev(drvdata);
 	u32 irq;
 
-	irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+	dev_dbg(dev, "Completion handler called!\n");
+	irq = (drvdata->irq & drvdata->comp_mask);
 
-	if (irq & CC_COMP_IRQ_MASK) {
-		/* To avoid the interrupt from firing as we unmask it,
-		 * we clear it now
-		 */
-		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+	/* To avoid the interrupt from firing as we unmask it,
+	 * we clear it now
+	 */
+	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
 
-		/* Avoid race with above clear: Test completion counter
-		 * once more
-		 */
-		request_mgr_handle->axi_completed +=
-			cc_axi_comp_count(drvdata);
+	/* Avoid race with above clear: Test completion counter once more */
 
-		while (request_mgr_handle->axi_completed) {
-			do {
-				proc_completions(drvdata);
-				/* At this point (after proc_completions()),
-				 * request_mgr_handle->axi_completed is 0.
-				 */
-				request_mgr_handle->axi_completed =
-					cc_axi_comp_count(drvdata);
-			} while (request_mgr_handle->axi_completed > 0);
+	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
 
-			cc_iowrite(drvdata, CC_REG(HOST_ICR),
-				   CC_COMP_IRQ_MASK);
+	dev_dbg(dev, "AXI completion after updated: %d\n",
+		request_mgr_handle->axi_completed);
 
+	while (request_mgr_handle->axi_completed) {
+		do {
+			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+			irq = (drvdata->irq & drvdata->comp_mask);
+			proc_completions(drvdata);
+
+			/* At this point (after proc_completions()),
+			 * request_mgr_handle->axi_completed is 0.
+			 */
 			request_mgr_handle->axi_completed +=
-				cc_axi_comp_count(drvdata);
-		}
+				cc_axi_comp_count(drvdata);
+		} while (request_mgr_handle->axi_completed > 0);
+
+		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
 	}
-	/* after verifing that there is nothing to do,
+
+	/* after verifying that there is nothing to do,
 	 * unmask AXI completion interrupt
 	 */
 	cc_iowrite(drvdata, CC_REG(HOST_IMR),
-		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
 
 	cc_proc_backlog(drvdata);
+	dev_dbg(dev, "Comp. handler done.\n");
 }
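The reworked comp_handler() above follows the classic clear, process, re-check, unmask shape: ack the cause bits first (so a new event re-pends the interrupt), drain completions, re-read the counter after draining, and only then unmask. A generic sketch of that shape with hypothetical stub accessors standing in for cc_ioread()/cc_iowrite() and cc_axi_comp_count():

typedef unsigned int u32;

/* Hypothetical device model, a stand-in for the CryptoCell registers. */
static u32 pending_bits, masked_bits, completion_count;

static void ack(u32 bits)	{ pending_bits &= ~bits; }
static void unmask(u32 bits)	{ masked_bits &= ~bits; }
static u32 outstanding(void)	{ return completion_count; }
static void process_one(void)	{ completion_count--; }

static void completion_handler(u32 comp_mask)
{
	u32 cause = pending_bits & comp_mask;

	ack(cause);	/* clear first: a later event pends the IRQ again */

	while (outstanding()) {
		process_one();
		/* Re-check after each drain: an event that arrived while we
		 * were busy must be handled before we unmask below, or it
		 * would be lost behind the masked interrupt. */
	}

	unmask(comp_mask);	/* nothing left to do; let the IRQ fire again */
}

int main(void)
{
	pending_bits = 0x4;
	completion_count = 3;
	completion_handler(0x4);
	return 0;
}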