commit 102a0743326a03cd1a1202ceda21e175b7d3575c (2024-02-20)

diff --git a/kernel/drivers/soc/qcom/rpmh.c b/kernel/drivers/soc/qcom/rpmh.c
--- a/kernel/drivers/soc/qcom/rpmh.c
+++ b/kernel/drivers/soc/qcom/rpmh.c
@@ -9,6 +9,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -23,7 +24,7 @@
 
 #define RPMH_TIMEOUT_MS		msecs_to_jiffies(10000)
 
-#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)	\
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
 	struct rpmh_request name = {			\
 		.msg = {				\
 			.state = s,			\
@@ -33,7 +34,7 @@
 		},					\
 		.cmd = { { 0 } },			\
 		.completion = q,			\
-		.dev = dev,				\
+		.dev = device,				\
 		.needs_free = false,			\
 	}
 
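[review note] The parameter rename above looks cosmetic but is most likely load-bearing: the preprocessor substitutes a macro parameter wherever its token appears, including after the '.' of a designated initializer, so with a parameter named dev the old '.dev = dev' rewrites the field name too. Callers that pass an identifier literally spelled dev work by coincidence; send_single() below now passes NULL, which would expand to the invalid '.NULL = NULL'. A standalone userspace sketch of the pitfall (the struct and macro names here are illustrative, not from the driver):

#include <stdio.h>

struct req { const void *dev; };

/*
 * Old shape: the token after '.' matches the parameter name, so the
 * preprocessor rewrites it as well. DEFINE_REQ_OLD(NULL, r) would
 * expand to "struct req r = { .NULL = NULL };" and fail to compile.
 */
#define DEFINE_REQ_OLD(dev, name)    struct req name = { .dev = dev }

/* New shape: the field token can no longer collide with the argument. */
#define DEFINE_REQ_NEW(device, name) struct req name = { .dev = device }

int main(void)
{
        DEFINE_REQ_NEW(NULL, r);        /* expands to .dev = NULL */
        printf("req.dev = %p\n", (void *)r.dev);
        return 0;
}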
@@ -195,9 +196,8 @@
 		WARN_ON(irqs_disabled());
 		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
 	} else {
-		ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
-					       &rpm_msg->msg);
 		/* Clean up our call by spoofing tx_done */
+		ret = 0;
 		rpmh_tx_done(&rpm_msg->msg, ret);
 	}
 
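[review note] Context for the spoofed completion: rpmh_tx_done() is the callback that completes a request's completion object, so invoking it directly with ret = 0 lets synchronous callers proceed immediately for sleep/wake messages that are merely cached, rather than blocking on hardware that was never asked to respond. The shape of such a caller, sketched from DEFINE_RPMH_MSG_ONSTACK and RPMH_TIMEOUT_MS above; __rpmh_write() is my assumed name for the function containing this hunk (the header does not show it), and the body is illustrative rather than quoted from the file:

static int example_sync_write(const struct device *dev, enum rpmh_state state)
{
        DECLARE_COMPLETION_ONSTACK(compl);
        DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
        int ret;

        ret = __rpmh_write(dev, state, &rpm_msg);       /* assumed name */
        if (ret)
                return ret;

        /* rpmh_tx_done(), real or spoofed, signals this completion */
        ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
        return (ret > 0) ? 0 : -ETIMEDOUT;
}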
@@ -298,12 +298,10 @@
 {
 	struct batch_cache_req *req;
 	const struct rpmh_request *rpm_msg;
-	unsigned long flags;
 	int ret = 0;
 	int i;
 
 	/* Send Sleep/Wake requests to the controller, expect no response */
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	list_for_each_entry(req, &ctrlr->batch_cache, list) {
 		for (i = 0; i < req->count; i++) {
 			rpm_msg = req->rpm_msgs + i;
@@ -313,7 +311,6 @@
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
 
 	return ret;
 }
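[review note] Dropping the lock here changes flush_batch()'s contract rather than its behavior: its remaining caller, rpmh_flush() below, now takes ctrlr->cache_lock itself, so re-taking it here would self-deadlock on a non-recursive spinlock. If the new contract should be machine-checked, a lockdep annotation inside the function would do it; this is a hypothetical hardening, not part of the patch (note the patch already pulls in linux/lockdep.h):

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
        /* hypothetical: caller (rpmh_flush) must hold cache_lock */
        lockdep_assert_held(&ctrlr->cache_lock);

        /* ... existing body walks ctrlr->batch_cache as shown above ... */
        return 0;
}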
@@ -419,11 +416,10 @@
 			req->sleep_val != req->wake_val);
 }
 
-static int send_single(const struct device *dev, enum rpmh_state state,
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
 		       u32 addr, u32 data)
 {
-	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
-	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
 
 	/* Wake sets are always complete and sleep sets are not */
 	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
@@ -435,64 +431,64 @@
 }
 
 /**
- * rpmh_flush: Flushes the buffered active and sleep sets to TCS
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
  *
- * @dev: The device making the request
+ * @ctrlr: Controller making request to flush cached data
  *
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
- *
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * Return:
+ * * 0          - Success
+ * * Error code - Otherwise
  */
-int rpmh_flush(const struct device *dev)
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
 {
 	struct cache_req *p;
-	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
-	int ret;
+	int ret = 0;
+
+	lockdep_assert_irqs_disabled();
+
+	/*
+	 * Currently rpmh_flush() is only called when we think we're running
+	 * on the last processor. If the lock is busy it means another
+	 * processor is up and it's better to abort than spin.
+	 */
+	if (!spin_trylock(&ctrlr->cache_lock))
+		return -EBUSY;
 
 	if (!ctrlr->dirty) {
 		pr_debug("Skipping flush, TCS has latest data.\n");
-		return 0;
+		goto exit;
 	}
 
 	/* Invalidate the TCSes first to avoid stale data */
-	do {
-		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
-	} while (ret == -EAGAIN);
-	if (ret)
-		return ret;
+	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
 
 	/* First flush the cached batch requests */
 	ret = flush_batch(ctrlr);
 	if (ret)
-		return ret;
+		goto exit;
 
-	/*
-	 * Nobody else should be calling this function other than system PM,
-	 * hence we can run without locks.
-	 */
 	list_for_each_entry(p, &ctrlr->cache, list) {
 		if (!is_req_valid(p)) {
 			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
 				 __func__, p->addr, p->sleep_val, p->wake_val);
 			continue;
 		}
-		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+				  p->sleep_val);
 		if (ret)
-			return ret;
-		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
-				  p->addr, p->wake_val);
+			goto exit;
+		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+				  p->wake_val);
 		if (ret)
-			return ret;
+			goto exit;
 	}
 
 	ctrlr->dirty = false;
 
-	return 0;
+exit:
+	spin_unlock(&ctrlr->cache_lock);
+	return ret;
 }
-EXPORT_SYMBOL(rpmh_flush);
 
 /**
  * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
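[review note] With the device argument gone and the trylock/-EBUSY semantics in place, the intended caller is CPU power-management code running on what it believes is the last CPU, with interrupts already disabled (hence lockdep_assert_irqs_disabled()). A hedged sketch of such a call site; the notifier framing and the example_get_ctrlr() helper are assumptions, not taken from this patch:

static int example_cpu_pm_notify(struct notifier_block *nb,
                                 unsigned long action, void *v)
{
        struct rpmh_ctrlr *ctrlr = example_get_ctrlr(nb);  /* assumed helper */

        /* CPU_PM notifiers run with interrupts disabled, satisfying
         * the lockdep_assert_irqs_disabled() check in rpmh_flush().
         */
        if (action == CPU_PM_ENTER && rpmh_flush(ctrlr) == -EBUSY)
                return NOTIFY_BAD;      /* another CPU is up; abort low power */

        return NOTIFY_OK;
}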
@@ -501,7 +497,7 @@
  *
  * Invalidate the sleep and wake values in batch_cache.
  */
-int rpmh_invalidate(const struct device *dev)
+void rpmh_invalidate(const struct device *dev)
 {
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
 	struct batch_cache_req *req, *tmp;
@@ -513,7 +509,5 @@
 	INIT_LIST_HEAD(&ctrlr->batch_cache);
 	ctrlr->dirty = true;
 	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(rpmh_invalidate);