From f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 01:04:47 +0000
Subject: [PATCH] add driver 5G

---
 kernel/drivers/crypto/rockchip/rk_crypto_core.c |   61 +++++++++++++++++++++++++-----
 1 files changed, 51 insertions(+), 10 deletions(-)

diff --git a/kernel/drivers/crypto/rockchip/rk_crypto_core.c b/kernel/drivers/crypto/rockchip/rk_crypto_core.c
index f25d722..2a9cf2d 100644
--- a/kernel/drivers/crypto/rockchip/rk_crypto_core.c
+++ b/kernel/drivers/crypto/rockchip/rk_crypto_core.c
@@ -272,27 +272,46 @@
 static void rk_crypto_irq_timer_handle(struct timer_list *t)
 {
 	struct rk_crypto_dev *rk_dev = from_timer(rk_dev, t, timer);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
 
 	rk_dev->err = -ETIMEDOUT;
 	rk_dev->stat.timeout_cnt++;
+
+	rk_unload_data(rk_dev);
+
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
+
 	tasklet_schedule(&rk_dev->done_task);
 }
 
 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
 	struct rk_crypto_dev *rk_dev  = platform_get_drvdata(dev_id);
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
 
-	spin_lock(&rk_dev->lock);
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	/* reset timeout timer */
+	start_irq_timer(rk_dev);
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
 	rk_dev->stat.irq_cnt++;
 
 	if (alg_ctx->ops.irq_handle)
 		alg_ctx->ops.irq_handle(irq, dev_id);
 
-	tasklet_schedule(&rk_dev->done_task);
+	/* skip done_task if the timeout handler has already fired */
+	if (rk_dev->err != -ETIMEDOUT) {
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		tasklet_schedule(&rk_dev->done_task);
+	} else {
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+	}
 
-	spin_unlock(&rk_dev->lock);
 	return IRQ_HANDLED;
 }
 
@@ -390,23 +409,22 @@
 	struct crypto_async_request *async_req, *backlog;
 	unsigned long flags;
 
+	spin_lock_irqsave(&rk_dev->lock, flags);
 	if (rk_dev->async_req) {
 		dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
-		return;
+		goto exit;
 	}
 
 	rk_dev->err = 0;
-	spin_lock_irqsave(&rk_dev->lock, flags);
+
 	backlog   = crypto_get_backlog(&rk_dev->queue);
 	async_req = crypto_dequeue_request(&rk_dev->queue);
 
 	if (!async_req) {
 		rk_dev->busy = false;
-		spin_unlock_irqrestore(&rk_dev->lock, flags);
-		return;
+		goto exit;
 	}
 	rk_dev->stat.dequeue_cnt++;
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	if (backlog) {
 		backlog->complete(backlog, -EINPROGRESS);
@@ -417,12 +435,26 @@
 	rk_dev->err = rk_start_op(rk_dev);
 	if (rk_dev->err)
 		rk_complete_op(rk_dev, rk_dev->err);
+
+exit:
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static void rk_crypto_done_task_cb(unsigned long data)
 {
 	struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	if (!rk_dev->async_req) {
+		dev_err(rk_dev->dev, "done task receive invalid async_req\n");
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		return;
+	}
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
 	rk_dev->stat.done_cnt++;
 
@@ -440,9 +472,12 @@
 	if (rk_dev->err)
 		goto exit;
 
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
+
 	return;
 exit:
 	rk_complete_op(rk_dev, rk_dev->err);
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static struct rk_crypto_algt *rk_crypto_find_algs(struct rk_crypto_dev *rk_dev,
@@ -663,6 +698,12 @@
 static const struct of_device_id crypto_of_id_table[] = {
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ROCKCHIP_V3)
+	/* crypto v4 below is handled the same as crypto-v3 */
+	{
+		.compatible = "rockchip,crypto-v4",
+		.data = (void *)&cryto_v3_soc_data,
+	},
+
 	/* crypto v3 in belows */
 	{
 		.compatible = "rockchip,crypto-v3",

--
Gitblit v1.6.2