.. | ..
9 | 9 | #include &lt;linux/jiffies.h&gt;
10 | 10 | #include &lt;linux/kernel.h&gt;
11 | 11 | #include &lt;linux/list.h&gt;
| 12 | +#include &lt;linux/lockdep.h&gt;
12 | 13 | #include &lt;linux/module.h&gt;
13 | 14 | #include &lt;linux/of.h&gt;
14 | 15 | #include &lt;linux/platform_device.h&gt;
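The new &lt;linux/lockdep.h&gt; include supports the lockdep_assert_irqs_disabled() check that rpmh_flush() gains further down. A minimal sketch of that pattern, with a hypothetical helper name (only lockdep_assert_irqs_disabled() itself is real kernel API):

```c
#include <linux/lockdep.h>

/* Hypothetical helper, not from this patch: with lockdep enabled, warn if
 * this code is ever reached with interrupts still enabled. */
static void must_run_with_irqs_disabled(void)
{
        lockdep_assert_irqs_disabled();

        /* ... work that is only safe while IRQs are masked ... */
}
```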
.. | ..
23 | 24 |
24 | 25 | #define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
25 | 26 |
26 | | -#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
| 27 | +#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
27 | 28 |         struct rpmh_request name = { \
28 | 29 |                 .msg = { \
29 | 30 |                         .state = s, \
.. | ..
33 | 34 |                 }, \
34 | 35 |                 .cmd = { { 0 } }, \
35 | 36 |                 .completion = q, \
36 | | -                .dev = dev, \
| 37 | +                .dev = device, \
37 | 38 |                 .needs_free = false, \
38 | 39 |         }
39 | 40 |
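The macro parameter is renamed from dev to device, presumably so it no longer reads like the struct member .dev it initializes; that distinction matters more now that callers such as send_single() below pass NULL here rather than a device pointer. For reference, that invocation expands roughly as follows (only the fields visible in this hunk are shown; the elided .msg members are assumed):

```c
DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

/* ...expands approximately to: */
struct rpmh_request rpm_msg = {
        .msg = {
                .state = state,
                /* remaining .msg fields elided in the hunk above */
        },
        .cmd = { { 0 } },
        .completion = NULL,
        .dev = NULL,
        .needs_free = false,
};
```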
.. | ..
195 | 196 |                 WARN_ON(irqs_disabled());
196 | 197 |                 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
197 | 198 |         } else {
198 | | -                ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
199 | | -                                               &rpm_msg->msg);
200 | 199 |                 /* Clean up our call by spoofing tx_done */
| 200 | +                ret = 0;
201 | 201 |                 rpmh_tx_done(&rpm_msg->msg, ret);
202 | 202 |         }
203 | 203 |
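With the rpmh_rsc_write_ctrl_data() call removed from this path, a sleep or wake request is only cached at write time (the caching step sits outside this hunk) and does not touch the TCS hardware until rpmh_flush() later writes the sleep/wake sets; the else branch simply reports success and spoofs the completion. A hedged sketch of the resulting flow from a client's point of view (the helper below is hypothetical; rpmh_write(), struct tcs_cmd and RPMH_SLEEP_STATE are existing kernel API):

```c
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

/* Hypothetical client helper: queue a sleep vote.  After this change it only
 * lands in the driver's cache and "completes" immediately via the spoofed
 * tx_done; the actual hardware write happens later, in rpmh_flush(). */
static int example_queue_sleep_vote(const struct device *dev, u32 addr, u32 data)
{
        struct tcs_cmd cmd = {
                .addr = addr,
                .data = data,
        };

        return rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
}
```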
.. | ..
298 | 298 | {
299 | 299 |         struct batch_cache_req *req;
300 | 300 |         const struct rpmh_request *rpm_msg;
301 | | -        unsigned long flags;
302 | 301 |         int ret = 0;
303 | 302 |         int i;
304 | 303 |
305 | 304 |         /* Send Sleep/Wake requests to the controller, expect no response */
306 | | -        spin_lock_irqsave(&ctrlr->cache_lock, flags);
307 | 305 |         list_for_each_entry(req, &ctrlr->batch_cache, list) {
308 | 306 |                 for (i = 0; i < req->count; i++) {
309 | 307 |                         rpm_msg = req->rpm_msgs + i;
.. | ..
313 | 311 |                                 break;
314 | 312 |                 }
315 | 313 |         }
316 | | -        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
317 | 314 |
318 | 315 |         return ret;
319 | 316 | }
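Dropping the irqsave locking here is safe only because the sole remaining caller, rpmh_flush() below, now takes cache_lock itself before walking batch_cache. If one wanted to make that contract checkable (this is not part of the patch), the helper could carry a lockdep annotation, sketched here with the body elided:

```c
/* Hypothetical annotation, not in this diff: assert that the caller
 * (rpmh_flush()) already holds cache_lock around the batch_cache walk. */
static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
        lockdep_assert_held(&ctrlr->cache_lock);

        /* ... walk ctrlr->batch_cache exactly as above, without locking ... */
        return 0;
}
```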
.. | ..
419 | 416 |                 req->sleep_val != req->wake_val);
420 | 417 | }
421 | 418 |
422 | | -static int send_single(const struct device *dev, enum rpmh_state state,
| 419 | +static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
423 | 420 |                        u32 addr, u32 data)
424 | 421 | {
425 | | -        DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
426 | | -        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
| 422 | +        DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
427 | 423 |
428 | 424 |         /* Wake sets are always complete and sleep sets are not */
429 | 425 |         rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
.. | ..
435 | 431 | }
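send_single() now takes the rpmh_ctrlr directly instead of resolving it from a struct device on every call, and builds its on-stack message with a NULL device pointer, which this sleep/wake path has no need to dereference (an assumption based on the rest of the driver, not visible in this hunk). The call shape changes accordingly; both lines below are adapted from the rpmh_flush() hunk that follows:

```c
/* Before: the helper looked up the controller from a device each time. */
ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);

/* After: rpmh_flush() resolves the controller once and passes it down. */
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
```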
436 | 432 |
437 | 433 | /**
438 | | - * rpmh_flush: Flushes the buffered active and sleep sets to TCS
| 434 | + * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
439 | 435 |  *
440 | | - * @dev: The device making the request
| 436 | + * @ctrlr: Controller making request to flush cached data
441 | 437 |  *
442 | | - * Return: -EBUSY if the controller is busy, probably waiting on a response
443 | | - * to a RPMH request sent earlier.
444 | | - *
445 | | - * This function is always called from the sleep code from the last CPU
446 | | - * that is powering down the entire system. Since no other RPMH API would be
447 | | - * executing at this time, it is safe to run lockless.
| 438 | + * Return:
| 439 | + * * 0 - Success
| 440 | + * * Error code - Otherwise
448 | 441 |  */
449 | | -int rpmh_flush(const struct device *dev)
| 442 | +int rpmh_flush(struct rpmh_ctrlr *ctrlr)
450 | 443 | {
451 | 444 |         struct cache_req *p;
452 | | -        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
453 | | -        int ret;
| 445 | +        int ret = 0;
| 446 | +
| 447 | +        lockdep_assert_irqs_disabled();
| 448 | +
| 449 | +        /*
| 450 | +         * Currently rpmh_flush() is only called when we think we're running
| 451 | +         * on the last processor. If the lock is busy it means another
| 452 | +         * processor is up and it's better to abort than spin.
| 453 | +         */
| 454 | +        if (!spin_trylock(&ctrlr->cache_lock))
| 455 | +                return -EBUSY;
454 | 456 |
455 | 457 |         if (!ctrlr->dirty) {
456 | 458 |                 pr_debug("Skipping flush, TCS has latest data.\n");
457 | | -                return 0;
| 459 | +                goto exit;
458 | 460 |         }
459 | 461 |
460 | 462 |         /* Invalidate the TCSes first to avoid stale data */
461 | | -        do {
462 | | -                ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
463 | | -        } while (ret == -EAGAIN);
464 | | -        if (ret)
465 | | -                return ret;
| 463 | +        rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
466 | 464 |
467 | 465 |         /* First flush the cached batch requests */
468 | 466 |         ret = flush_batch(ctrlr);
469 | 467 |         if (ret)
470 | | -                return ret;
| 468 | +                goto exit;
471 | 469 |
472 | | -        /*
473 | | -         * Nobody else should be calling this function other than system PM,
474 | | -         * hence we can run without locks.
475 | | -         */
476 | 470 |         list_for_each_entry(p, &ctrlr->cache, list) {
477 | 471 |                 if (!is_req_valid(p)) {
478 | 472 |                         pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
479 | 473 |                                  __func__, p->addr, p->sleep_val, p->wake_val);
480 | 474 |                         continue;
481 | 475 |                 }
482 | | -                ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
| 476 | +                ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
| 477 | +                                  p->sleep_val);
483 | 478 |                 if (ret)
484 | | -                        return ret;
485 | | -                ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
486 | | -                                  p->addr, p->wake_val);
| 479 | +                        goto exit;
| 480 | +                ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
| 481 | +                                  p->wake_val);
487 | 482 |                 if (ret)
488 | | -                        return ret;
| 483 | +                        goto exit;
489 | 484 |         }
490 | 485 |
491 | 486 |         ctrlr->dirty = false;
492 | 487 |
493 | | -        return 0;
| 488 | +exit:
| 489 | +        spin_unlock(&ctrlr->cache_lock);
| 490 | +        return ret;
494 | 491 | }
495 | | -EXPORT_SYMBOL(rpmh_flush);
496 | 492 |
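With EXPORT_SYMBOL(rpmh_flush) removed, modular client drivers can no longer call rpmh_flush() directly; per the new comment it is meant to be invoked from within the RPMH code, on what is believed to be the last running CPU, with interrupts disabled. A hedged sketch of such a caller (the function name and the notifier-style return values are assumptions, not part of this diff):

```c
#include <linux/notifier.h>

/* Hypothetical "last CPU going down" hook.  rpmh_flush() trylocks
 * cache_lock and returns -EBUSY if another CPU still holds it, in which
 * case the low-power entry should be aborted rather than spun on. */
static int example_flush_on_last_cpu(struct rpmh_ctrlr *ctrlr)
{
        int ret = rpmh_flush(ctrlr);

        return ret ? NOTIFY_BAD : NOTIFY_OK;
}
```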
497 | 493 | /**
498 | 494 |  * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
.. | ..
501 | 497 |  *
502 | 498 |  * Invalidate the sleep and wake values in batch_cache.
503 | 499 |  */
504 | | -int rpmh_invalidate(const struct device *dev)
| 500 | +void rpmh_invalidate(const struct device *dev)
505 | 501 | {
506 | 502 |         struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
507 | 503 |         struct batch_cache_req *req, *tmp;
.. | ..
513 | 509 |         INIT_LIST_HEAD(&ctrlr->batch_cache);
514 | 510 |         ctrlr->dirty = true;
515 | 511 |         spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
516 | | -
517 | | -        return 0;
518 | 512 | }
519 | 513 | EXPORT_SYMBOL(rpmh_invalidate);
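rpmh_invalidate() now returns void: as the removed 'return 0;' suggests, invalidating the local batch_cache has no failure path, so callers can drop their error handling. A before/after sketch of a call site (caller code is illustrative, not from this diff):

```c
/* Before: callers had to check a return value that was always 0 here. */
ret = rpmh_invalidate(dev);
if (ret)
        return ret;

/* After: the call stands alone. */
rpmh_invalidate(dev);
```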