From e3e12f52b214121840b44c91de5b3e5af5d3eb84 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 06 Nov 2023 03:04:41 +0000
Subject: [PATCH] rk3568: crypto: lock async_req accesses, add crypto-v4 compatible

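Hold rk_dev->lock while rk_dev->async_req is dereferenced in rk_crypto_irq_handle(),
in the request dequeue path and in rk_crypto_done_task_cb(), and funnel the
early-return cases through a single unlock at the exit label so the lock is always
released. rk_crypto_done_task_cb() now also bails out with an error message when it
runs without a pending async_req.

Also add a "rockchip,crypto-v4" OF match entry that reuses the crypto v3 soc data.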
---
 kernel/drivers/crypto/rockchip/rk_crypto_core.c |   38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)

diff --git a/kernel/drivers/crypto/rockchip/rk_crypto_core.c b/kernel/drivers/crypto/rockchip/rk_crypto_core.c
index f25d722..56a50d5 100644
--- a/kernel/drivers/crypto/rockchip/rk_crypto_core.c
+++ b/kernel/drivers/crypto/rockchip/rk_crypto_core.c
@@ -281,9 +281,11 @@
 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
 	struct rk_crypto_dev *rk_dev  = platform_get_drvdata(dev_id);
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
 
 	spin_lock(&rk_dev->lock);
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
 	rk_dev->stat.irq_cnt++;
 
@@ -390,23 +392,22 @@
 	struct crypto_async_request *async_req, *backlog;
 	unsigned long flags;
 
+	spin_lock_irqsave(&rk_dev->lock, flags);
 	if (rk_dev->async_req) {
 		dev_err(rk_dev->dev, "%s: Unexpected crypto paths.\n", __func__);
-		return;
+		goto exit;
 	}
 
 	rk_dev->err = 0;
-	spin_lock_irqsave(&rk_dev->lock, flags);
+
 	backlog   = crypto_get_backlog(&rk_dev->queue);
 	async_req = crypto_dequeue_request(&rk_dev->queue);
 
 	if (!async_req) {
 		rk_dev->busy = false;
-		spin_unlock_irqrestore(&rk_dev->lock, flags);
-		return;
+		goto exit;
 	}
 	rk_dev->stat.dequeue_cnt++;
-	spin_unlock_irqrestore(&rk_dev->lock, flags);
 
 	if (backlog) {
 		backlog->complete(backlog, -EINPROGRESS);
@@ -417,12 +418,26 @@
 	rk_dev->err = rk_start_op(rk_dev);
 	if (rk_dev->err)
 		rk_complete_op(rk_dev, rk_dev->err);
+
+exit:
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static void rk_crypto_done_task_cb(unsigned long data)
 {
 	struct rk_crypto_dev *rk_dev = (struct rk_crypto_dev *)data;
-	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
+	struct rk_alg_ctx *alg_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rk_dev->lock, flags);
+
+	if (!rk_dev->async_req) {
+		dev_err(rk_dev->dev, "done task receive invalid async_req\n");
+		spin_unlock_irqrestore(&rk_dev->lock, flags);
+		return;
+	}
+
+	alg_ctx = rk_alg_ctx_cast(rk_dev->async_req);
 
 	rk_dev->stat.done_cnt++;
 
@@ -440,9 +455,12 @@
 	if (rk_dev->err)
 		goto exit;
 
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
+
 	return;
 exit:
 	rk_complete_op(rk_dev, rk_dev->err);
+	spin_unlock_irqrestore(&rk_dev->lock, flags);
 }
 
 static struct rk_crypto_algt *rk_crypto_find_algs(struct rk_crypto_dev *rk_dev,
@@ -663,6 +681,12 @@
 static const struct of_device_id crypto_of_id_table[] = {
 
 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ROCKCHIP_V3)
+	/* crypto v4 below, same soc data as crypto v3 */
+	{
+		.compatible = "rockchip,crypto-v4",
+		.data = (void *)&cryto_v3_soc_data,
+	},
+
 	/* crypto v3 in belows */
 	{
 		.compatible = "rockchip,crypto-v3",

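Note on the last hunk: the new "rockchip,crypto-v4" entry points at the same soc data
as "rockchip,crypto-v3", so probe code that resolves per-SoC configuration through the
OF match table picks up v4 parts without further changes. Below is a minimal sketch of
that lookup pattern using the standard of_device_get_match_data() helper; the names
rk_crypto_probe() and struct rk_crypto_soc_data are placeholders, since the probe
function and the soc-data type are not shown in this patch.

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct rk_crypto_soc_data;	/* opaque placeholder; the real type lives in the driver */

static int rk_crypto_probe(struct platform_device *pdev)
{
	const struct rk_crypto_soc_data *soc_data;

	/*
	 * Returns the .data pointer of the matching of_device_id entry,
	 * i.e. the same soc data for both the crypto-v3 and the new
	 * crypto-v4 compatible strings.
	 */
	soc_data = of_device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	/* ... use soc_data to set up the crypto engine ... */
	return 0;
}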
--
Gitblit v1.6.2