hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/crypto/rockchip/rk_crypto_core.c
....@@ -272,27 +272,46 @@
272272 static void rk_crypto_irq_timer_handle(struct timer_list *t)
273273 {
274274 struct rk_crypto_dev *rk_dev = from_timer(rk_dev, t, timer);
275
+ unsigned long flags;
276
+
277
+ spin_lock_irqsave(&rk_dev->lock, flags);
275278
276279 rk_dev->err = -ETIMEDOUT;
277280 rk_dev->stat.timeout_cnt++;
281
+
282
+ rk_unload_data(rk_dev);
283
+
284
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
285
+
278286 tasklet_schedule(&rk_dev->done_task);
279287 }
280288
281289 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
282290 {
283291 struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
284
- struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
292
+ struct rk_alg_ctx *alg_ctx;
293
+ unsigned long flags;
285294
286
- spin_lock(&rk_dev->lock);
295
+ spin_lock_irqsave(&rk_dev->lock, flags);
296
+
297
+ /* reset timeout timer */
298
+ start_irq_timer(rk_dev);
299
+
300
+ alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
287301
288302 rk_dev->stat.irq_cnt++;
289303
290304 if (alg_ctx->ops.irq_handle)
291305 alg_ctx->ops.irq_handle(irq, dev_id);
292306
293
- tasklet_schedule(&rk_dev->done_task);
307
+ /* timeout already triggered */
308
+ if (rk_dev->err != -ETIMEDOUT) {
309
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
310
+ tasklet_schedule(&rk_dev->done_task);
311
+ } else {
312
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
313
+ }
294314
295
- spin_unlock(&rk_dev->lock);
296315 return IRQ_HANDLED;
297316 }
298317
....@@ -390,23 +409,22 @@
390409 struct crypto_async_request *async_req, *backlog;
391410 unsigned long flags;
392411
412
+ spin_lock_irqsave(&rk_dev->lock, flags);
393413 if (rk_dev->async_req) {
394414 dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
395
- return;
415
+ goto exit;
396416 }
397417
398418 rk_dev->err = 0;
399
- spin_lock_irqsave(&rk_dev->lock, flags);
419
+
400420 backlog = crypto_get_backlog(&rk_dev->queue);
401421 async_req = crypto_dequeue_request(&rk_dev->queue);
402422
403423 if (!async_req) {
404424 rk_dev->busy = false;
405
- spin_unlock_irqrestore(&rk_dev->lock, flags);
406
- return;
425
+ goto exit;
407426 }
408427 rk_dev->stat.dequeue_cnt++;
409
- spin_unlock_irqrestore(&rk_dev->lock, flags);
410428
411429 if (backlog) {
412430 backlog->complete(backlog, -EINPROGRESS);
....@@ -417,12 +435,26 @@
417435 rk_dev->err = rk_start_op(rk_dev);
418436 if (rk_dev->err)
419437 rk_complete_op(rk_dev, rk_dev->err);
438
+
439
+exit:
440
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
420441 }
421442
422443 static void rk_crypto_done_task_cb(unsigned long data)
423444 {
424445 struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
425
- struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
446
+ struct rk_alg_ctx *alg_ctx;
447
+ unsigned long flags;
448
+
449
+ spin_lock_irqsave(&rk_dev->lock, flags);
450
+
451
+ if (!rk_dev->async_req) {
452
+ dev_err(rk_dev->dev, "done task receive invalid async_req\n");
453
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
454
+ return;
455
+ }
456
+
457
+ alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
426458
427459 rk_dev->stat.done_cnt++;
428460
....@@ -440,9 +472,12 @@
440472 if (rk_dev->err)
441473 goto exit;
442474
475
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
476
+
443477 return;
444478 exit:
445479 rk_complete_op(rk_dev, rk_dev->err);
480
+ spin_unlock_irqrestore(&rk_dev->lock, flags);
446481 }
447482
448483 static struct rk_crypto_algt *rk_crypto_find_algs(struct rk_crypto_dev *rk_dev,
....@@ -663,6 +698,12 @@
663698 static const struct of_device_id crypto_of_id_table[] = {
664699
665700 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ROCKCHIP_V3)
701
+ /* crypto v4 below is the same as crypto-v3 */
702
+ {
703
+ .compatible = "rockchip,crypto-v4",
704
+ .data = (void *)&cryto_v3_soc_data,
705
+ },
706
+
666707 /* crypto v3 in belows */
667708 {
668709 .compatible = "rockchip,crypto-v3",