From 071106ecf68c401173c58808b1cf5f68cc50d390 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 05 Jan 2024 08:39:27 +0000
Subject: [PATCH] soc: qcom: rpmh: rework rpmh_flush() locking and signature
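
rpmh_flush() is called from the sleep code on the last CPU that is
powering down, so rework it for that environment:

- Take the struct rpmh_ctrlr directly instead of looking it up from a
  struct device, and stop exporting the symbol.
- Require callers to have interrupts disabled
  (lockdep_assert_irqs_disabled()).
- Guard the cached requests with cache_lock via spin_trylock(): if the
  lock is contended, another CPU is still up and it is better to
  return -EBUSY and abort than to spin.
- Drop the locking in flush_batch(), since rpmh_flush() now holds
  cache_lock around it.
- Drop the -EAGAIN retry loop and error handling around
  rpmh_rsc_invalidate() when invalidating the TCSes before flushing.
- For sleep/wake requests, stop writing control data from
  __rpmh_write() and only cache the request, spoofing tx_done with
  ret = 0; the data reaches the TCSes later via rpmh_flush().
- Rename the dev parameter of DEFINE_RPMH_MSG_ONSTACK() to device;
  otherwise the designated initializer .dev would itself be
  macro-expanded, breaking send_single()'s new
  DEFINE_RPMH_MSG_ONSTACK(NULL, ...) usage.
- Make rpmh_invalidate() return void, since it always succeeded
  anyway.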
---
kernel/drivers/soc/qcom/rpmh.c | 78 ++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 42 deletions(-)
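
Note: the sketch below (after the cut line, so ignored by git am)
illustrates the new rpmh_flush() calling contract. It assumes a cpu_pm
notifier as the last-CPU power-down hook; struct example_drv and the
callback are hypothetical, and only rpmh_flush() plus the generic
cpu_pm/notifier APIs are real.

	#include <linux/cpu_pm.h>
	#include <linux/notifier.h>

	struct example_drv {			/* hypothetical container */
		struct rpmh_ctrlr ctrlr;
		struct notifier_block pm_nb;
	};

	static int example_cpu_pm_callback(struct notifier_block *nfb,
					   unsigned long action, void *v)
	{
		struct example_drv *drv = container_of(nfb, struct example_drv,
						       pm_nb);

		if (action != CPU_PM_ENTER)
			return NOTIFY_OK;

		/*
		 * cpu_pm notifiers run with interrupts disabled on this
		 * CPU, satisfying lockdep_assert_irqs_disabled() in
		 * rpmh_flush(). A non-zero return (e.g. -EBUSY when
		 * another CPU still holds cache_lock) must abort the
		 * low-power entry rather than spin.
		 */
		if (rpmh_flush(&drv->ctrlr))
			return NOTIFY_BAD;

		return NOTIFY_OK;
	}

Such a notifier would be registered during probe with
cpu_pm_register_notifier(&drv->pm_nb).
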
diff --git a/kernel/drivers/soc/qcom/rpmh.c b/kernel/drivers/soc/qcom/rpmh.c
index 8e19a8b..b61e183 100644
--- a/kernel/drivers/soc/qcom/rpmh.c
+++ b/kernel/drivers/soc/qcom/rpmh.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -23,7 +24,7 @@
#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
-#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \
@@ -33,7 +34,7 @@
}, \
.cmd = { { 0 } }, \
.completion = q, \
- .dev = dev, \
+ .dev = device, \
.needs_free = false, \
}
@@ -195,9 +196,8 @@
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
- ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
- &rpm_msg->msg);
/* Clean up our call by spoofing tx_done */
+ ret = 0;
rpmh_tx_done(&rpm_msg->msg, ret);
}
@@ -298,12 +298,10 @@
{
struct batch_cache_req *req;
const struct rpmh_request *rpm_msg;
- unsigned long flags;
int ret = 0;
int i;
/* Send Sleep/Wake requests to the controller, expect no response */
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_for_each_entry(req, &ctrlr->batch_cache, list) {
for (i = 0; i < req->count; i++) {
rpm_msg = req->rpm_msgs + i;
@@ -313,7 +311,6 @@
break;
}
}
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
return ret;
}
@@ -419,11 +416,10 @@
req->sleep_val != req->wake_val);
}
-static int send_single(const struct device *dev, enum rpmh_state state,
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
- DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
@@ -435,64 +431,64 @@
}
/**
- * rpmh_flush: Flushes the buffered active and sleep sets to TCS
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
*
- * @dev: The device making the request
+ * @ctrlr: Controller making request to flush cached data
*
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
- *
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * Return:
+ * * 0 - Success
+ * * Error code - Otherwise
*/
-int rpmh_flush(const struct device *dev)
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
- int ret;
+ int ret = 0;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
if (!ctrlr->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- return 0;
+ goto exit;
}
/* Invalidate the TCSes first to avoid stale data */
- do {
- ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
- } while (ret == -EAGAIN);
- if (ret)
- return ret;
+ rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
- return ret;
+ goto exit;
- /*
- * Nobody else should be calling this function other than system PM,
- * hence we can run without locks.
- */
list_for_each_entry(p, &ctrlr->cache, list) {
if (!is_req_valid(p)) {
pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
- ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+ p->sleep_val);
if (ret)
- return ret;
- ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
- p->addr, p->wake_val);
+ goto exit;
+ ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+ p->wake_val);
if (ret)
- return ret;
+ goto exit;
}
ctrlr->dirty = false;
- return 0;
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
}
-EXPORT_SYMBOL(rpmh_flush);
/**
* rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
@@ -501,7 +497,7 @@
*
* Invalidate the sleep and wake values in batch_cache.
*/
-int rpmh_invalidate(const struct device *dev)
+void rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
struct batch_cache_req *req, *tmp;
@@ -513,7 +509,5 @@
INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
--
Gitblit v1.6.2