| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * drivers/base/power/runtime.c - Helper functions for device runtime PM |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. |
|---|
| 5 | 6 | * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> |
|---|
| 6 | | - * |
|---|
| 7 | | - * This file is released under the GPLv2. |
|---|
| 8 | 7 | */ |
|---|
| 9 | | - |
|---|
| 10 | 8 | #include <linux/sched/mm.h> |
|---|
| 9 | +#include <linux/ktime.h> |
|---|
| 10 | +#include <linux/hrtimer.h> |
|---|
| 11 | 11 | #include <linux/export.h> |
|---|
| 12 | 12 | #include <linux/pm_runtime.h> |
|---|
| 13 | 13 | #include <linux/pm_wakeirq.h> |
|---|
| .. | .. |
|---|
| 62 | 62 | * runtime_status field is updated, to account the time in the old state |
|---|
| 63 | 63 | * correctly. |
|---|
| 64 | 64 | */ |
|---|
| 65 | | -void update_pm_runtime_accounting(struct device *dev) |
|---|
| 65 | +static void update_pm_runtime_accounting(struct device *dev) |
|---|
| 66 | 66 | { |
|---|
| 67 | | - unsigned long now = jiffies; |
|---|
| 68 | | - unsigned long delta; |
|---|
| 69 | | - |
|---|
| 70 | | - delta = now - dev->power.accounting_timestamp; |
|---|
| 71 | | - |
|---|
| 72 | | - dev->power.accounting_timestamp = now; |
|---|
| 67 | + u64 now, last, delta; |
|---|
| 73 | 68 | |
|---|
| 74 | 69 | if (dev->power.disable_depth > 0) |
|---|
| 75 | 70 | return; |
|---|
| 76 | 71 | |
|---|
| 72 | + last = dev->power.accounting_timestamp; |
|---|
| 73 | + |
|---|
| 74 | + now = ktime_get_mono_fast_ns(); |
|---|
| 75 | + dev->power.accounting_timestamp = now; |
|---|
| 76 | + |
|---|
| 77 | + /* |
|---|
| 78 | + * Because ktime_get_mono_fast_ns() is not monotonic during |
|---|
| 79 | + * timekeeping updates, ensure that 'now' is after the last saved |
|---|
| 80 | + * timestamp. |
|---|
| 81 | + */ |
|---|
| 82 | + if (now < last) |
|---|
| 83 | + return; |
|---|
| 84 | + |
|---|
| 85 | + delta = now - last; |
|---|
| 86 | + |
|---|
| 77 | 87 | if (dev->power.runtime_status == RPM_SUSPENDED) |
|---|
| 78 | | - dev->power.suspended_jiffies += delta; |
|---|
| 88 | + dev->power.suspended_time += delta; |
|---|
| 79 | 89 | else |
|---|
| 80 | | - dev->power.active_jiffies += delta; |
|---|
| 90 | + dev->power.active_time += delta; |
|---|
| 81 | 91 | } |
|---|
| 82 | 92 | |
|---|
| 83 | 93 | static void __update_runtime_status(struct device *dev, enum rpm_status status) |
|---|
| .. | .. |
|---|
| 86 | 96 | dev->power.runtime_status = status; |
|---|
| 87 | 97 | } |
|---|
| 88 | 98 | |
|---|
| 99 | +static u64 rpm_get_accounted_time(struct device *dev, bool suspended) |
|---|
| 100 | +{ |
|---|
| 101 | + u64 time; |
|---|
| 102 | + unsigned long flags; |
|---|
| 103 | + |
|---|
| 104 | + spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| 105 | + |
|---|
| 106 | + update_pm_runtime_accounting(dev); |
|---|
| 107 | + time = suspended ? dev->power.suspended_time : dev->power.active_time; |
|---|
| 108 | + |
|---|
| 109 | + spin_unlock_irqrestore(&dev->power.lock, flags); |
|---|
| 110 | + |
|---|
| 111 | + return time; |
|---|
| 112 | +} |
|---|
| 113 | + |
|---|
| 114 | +u64 pm_runtime_active_time(struct device *dev) |
|---|
| 115 | +{ |
|---|
| 116 | + return rpm_get_accounted_time(dev, false); |
|---|
| 117 | +} |
|---|
| 118 | + |
|---|
| 119 | +u64 pm_runtime_suspended_time(struct device *dev) |
|---|
| 120 | +{ |
|---|
| 121 | + return rpm_get_accounted_time(dev, true); |
|---|
| 122 | +} |
|---|
| 123 | +EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); |
|---|
| 124 | + |
|---|
| 89 | 125 | /** |
|---|
| 90 | 126 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. |
|---|
| 91 | 127 | * @dev: Device to handle. |
|---|
| .. | .. |
|---|
| 93 | 129 | static void pm_runtime_deactivate_timer(struct device *dev) |
|---|
| 94 | 130 | { |
|---|
| 95 | 131 | if (dev->power.timer_expires > 0) { |
|---|
| 96 | | - del_timer(&dev->power.suspend_timer); |
|---|
| 132 | + hrtimer_try_to_cancel(&dev->power.suspend_timer); |
|---|
| 97 | 133 | dev->power.timer_expires = 0; |
|---|
| 98 | 134 | } |
|---|
| 99 | 135 | } |
|---|
| .. | .. |
|---|
| 119 | 155 | * Compute the autosuspend-delay expiration time based on the device's |
|---|
| 120 | 156 | * power.last_busy time. If the delay has already expired or is disabled |
|---|
| 121 | 157 | * (negative) or the power.use_autosuspend flag isn't set, return 0. |
|---|
| 122 | | - * Otherwise return the expiration time in jiffies (adjusted to be nonzero). |
|---|
| 158 | + * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero). |
|---|
| 123 | 159 | * |
|---|
| 124 | 160 | * This function may be called either with or without dev->power.lock held. |
|---|
| 125 | 161 | * Either way it can be racy, since power.last_busy may be updated at any time. |
|---|
| 126 | 162 | */ |
|---|
| 127 | | -unsigned long pm_runtime_autosuspend_expiration(struct device *dev) |
|---|
| 163 | +u64 pm_runtime_autosuspend_expiration(struct device *dev) |
|---|
| 128 | 164 | { |
|---|
| 129 | 165 | int autosuspend_delay; |
|---|
| 130 | | - long elapsed; |
|---|
| 131 | | - unsigned long last_busy; |
|---|
| 132 | | - unsigned long expires = 0; |
|---|
| 166 | + u64 expires; |
|---|
| 133 | 167 | |
|---|
| 134 | 168 | if (!dev->power.use_autosuspend) |
|---|
| 135 | | - goto out; |
|---|
| 169 | + return 0; |
|---|
| 136 | 170 | |
|---|
| 137 | 171 | autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); |
|---|
| 138 | 172 | if (autosuspend_delay < 0) |
|---|
| 139 | | - goto out; |
|---|
| 173 | + return 0; |
|---|
| 140 | 174 | |
|---|
| 141 | | - last_busy = READ_ONCE(dev->power.last_busy); |
|---|
| 142 | | - elapsed = jiffies - last_busy; |
|---|
| 143 | | - if (elapsed < 0) |
|---|
| 144 | | - goto out; /* jiffies has wrapped around. */ |
|---|
| 175 | + expires = READ_ONCE(dev->power.last_busy); |
|---|
| 176 | + expires += (u64)autosuspend_delay * NSEC_PER_MSEC; |
|---|
| 177 | + if (expires > ktime_get_mono_fast_ns()) |
|---|
| 178 | + return expires; /* Expires in the future */ |
|---|
| 145 | 179 | |
|---|
| 146 | | - /* |
|---|
| 147 | | - * If the autosuspend_delay is >= 1 second, align the timer by rounding |
|---|
| 148 | | - * up to the nearest second. |
|---|
| 149 | | - */ |
|---|
| 150 | | - expires = last_busy + msecs_to_jiffies(autosuspend_delay); |
|---|
| 151 | | - if (autosuspend_delay >= 1000) |
|---|
| 152 | | - expires = round_jiffies(expires); |
|---|
| 153 | | - expires += !expires; |
|---|
| 154 | | - if (elapsed >= expires - last_busy) |
|---|
| 155 | | - expires = 0; /* Already expired. */ |
|---|
| 156 | | - |
|---|
| 157 | | - out: |
|---|
| 158 | | - return expires; |
|---|
| 180 | + return 0; |
|---|
| 159 | 181 | } |
|---|
| 160 | 182 | EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); |
|---|
| 161 | 183 | |
|---|
| .. | .. |
|---|
| 253 | 275 | || (dev->power.request_pending |
|---|
| 254 | 276 | && dev->power.request == RPM_REQ_RESUME)) |
|---|
| 255 | 277 | retval = -EAGAIN; |
|---|
| 256 | | - else if (__dev_pm_qos_read_value(dev) == 0) |
|---|
| 278 | + else if (__dev_pm_qos_resume_latency(dev) == 0) |
|---|
| 257 | 279 | retval = -EPERM; |
|---|
| 258 | 280 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
|---|
| 259 | 281 | retval = 1; |
|---|
| .. | .. |
|---|
| 265 | 287 | { |
|---|
| 266 | 288 | struct device_link *link; |
|---|
| 267 | 289 | |
|---|
| 268 | | - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { |
|---|
| 290 | + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, |
|---|
| 291 | + device_links_read_lock_held()) { |
|---|
| 269 | 292 | int retval; |
|---|
| 270 | 293 | |
|---|
| 271 | | - if (!(link->flags & DL_FLAG_PM_RUNTIME) || |
|---|
| 272 | | - READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) |
|---|
| 294 | + if (!(link->flags & DL_FLAG_PM_RUNTIME)) |
|---|
| 273 | 295 | continue; |
|---|
| 274 | 296 | |
|---|
| 275 | 297 | retval = pm_runtime_get_sync(link->supplier); |
|---|
| .. | .. |
|---|
| 283 | 305 | return 0; |
|---|
| 284 | 306 | } |
|---|
| 285 | 307 | |
|---|
| 308 | +/** |
|---|
| 309 | + * pm_runtime_release_supplier - Drop references to device link's supplier. |
|---|
| 310 | + * @link: Target device link. |
|---|
| 311 | + * |
|---|
| 312 | + * Drop all runtime PM references associated with @link to its supplier device. |
|---|
| 313 | + */ |
|---|
| 314 | +void pm_runtime_release_supplier(struct device_link *link) |
|---|
| 315 | +{ |
|---|
| 316 | + struct device *supplier = link->supplier; |
|---|
| 317 | + |
|---|
| 318 | + /* |
|---|
| 319 | + * The additional power.usage_count check is a safety net in case |
|---|
| 320 | + * the rpm_active refcount becomes saturated, in which case |
|---|
| 321 | + * refcount_dec_not_one() would return true forever, but it is not |
|---|
| 322 | + * strictly necessary. |
|---|
| 323 | + */ |
|---|
| 324 | + while (refcount_dec_not_one(&link->rpm_active) && |
|---|
| 325 | + atomic_read(&supplier->power.usage_count) > 0) |
|---|
| 326 | + pm_runtime_put_noidle(supplier); |
|---|
| 327 | +} |
|---|
| 328 | + |
|---|
| 286 | 329 | static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) |
|---|
| 287 | 330 | { |
|---|
| 288 | 331 | struct device_link *link; |
|---|
| 289 | 332 | |
|---|
| 290 | | - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { |
|---|
| 291 | | - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) |
|---|
| 292 | | - continue; |
|---|
| 293 | | - |
|---|
| 294 | | - while (refcount_dec_not_one(&link->rpm_active)) |
|---|
| 295 | | - pm_runtime_put_noidle(link->supplier); |
|---|
| 296 | | - |
|---|
| 333 | + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, |
|---|
| 334 | + device_links_read_lock_held()) { |
|---|
| 335 | + pm_runtime_release_supplier(link); |
|---|
| 297 | 336 | if (try_to_suspend) |
|---|
| 298 | 337 | pm_request_idle(link->supplier); |
|---|
| 299 | 338 | } |
|---|
| .. | .. |
|---|
| 309 | 348 | struct device_link *link; |
|---|
| 310 | 349 | int idx = device_links_read_lock(); |
|---|
| 311 | 350 | |
|---|
| 312 | | - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) |
|---|
| 351 | + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, |
|---|
| 352 | + device_links_read_lock_held()) |
|---|
| 313 | 353 | pm_request_idle(link->supplier); |
|---|
| 314 | 354 | |
|---|
| 315 | 355 | device_links_read_unlock(idx); |
|---|
| .. | .. |
|---|
| 424 | 464 | /* Pending requests need to be canceled. */ |
|---|
| 425 | 465 | dev->power.request = RPM_REQ_NONE; |
|---|
| 426 | 466 | |
|---|
| 427 | | - if (dev->power.no_callbacks) |
|---|
| 467 | + callback = RPM_GET_CALLBACK(dev, runtime_idle); |
|---|
| 468 | + |
|---|
| 469 | + /* If no callback assume success. */ |
|---|
| 470 | + if (!callback || dev->power.no_callbacks) |
|---|
| 428 | 471 | goto out; |
|---|
| 429 | 472 | |
|---|
| 430 | 473 | /* Carry out an asynchronous or a synchronous idle notification. */ |
|---|
| .. | .. |
|---|
| 440 | 483 | |
|---|
| 441 | 484 | dev->power.idle_notification = true; |
|---|
| 442 | 485 | |
|---|
| 443 | | - callback = RPM_GET_CALLBACK(dev, runtime_idle); |
|---|
| 486 | + if (dev->power.irq_safe) |
|---|
| 487 | + spin_unlock(&dev->power.lock); |
|---|
| 488 | + else |
|---|
| 489 | + spin_unlock_irq(&dev->power.lock); |
|---|
| 444 | 490 | |
|---|
| 445 | | - if (callback) |
|---|
| 446 | | - retval = __rpm_callback(callback, dev); |
|---|
| 491 | + retval = callback(dev); |
|---|
| 492 | + |
|---|
| 493 | + if (dev->power.irq_safe) |
|---|
| 494 | + spin_lock(&dev->power.lock); |
|---|
| 495 | + else |
|---|
| 496 | + spin_lock_irq(&dev->power.lock); |
|---|
| 447 | 497 | |
|---|
| 448 | 498 | dev->power.idle_notification = false; |
|---|
| 449 | 499 | wake_up_all(&dev->power.wait_queue); |
|---|
| .. | .. |
|---|
| 520 | 570 | |
|---|
| 521 | 571 | repeat: |
|---|
| 522 | 572 | retval = rpm_check_suspend_allowed(dev); |
|---|
| 523 | | - |
|---|
| 524 | 573 | if (retval < 0) |
|---|
| 525 | | - ; /* Conditions are wrong. */ |
|---|
| 574 | + goto out; /* Conditions are wrong. */ |
|---|
| 526 | 575 | |
|---|
| 527 | 576 | /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ |
|---|
| 528 | | - else if (dev->power.runtime_status == RPM_RESUMING && |
|---|
| 529 | | - !(rpmflags & RPM_ASYNC)) |
|---|
| 577 | + if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) |
|---|
| 530 | 578 | retval = -EAGAIN; |
|---|
| 531 | 579 | if (retval) |
|---|
| 532 | 580 | goto out; |
|---|
| .. | .. |
|---|
| 534 | 582 | /* If the autosuspend_delay time hasn't expired yet, reschedule. */ |
|---|
| 535 | 583 | if ((rpmflags & RPM_AUTO) |
|---|
| 536 | 584 | && dev->power.runtime_status != RPM_SUSPENDING) { |
|---|
| 537 | | - unsigned long expires = pm_runtime_autosuspend_expiration(dev); |
|---|
| 585 | + u64 expires = pm_runtime_autosuspend_expiration(dev); |
|---|
| 538 | 586 | |
|---|
| 539 | 587 | if (expires != 0) { |
|---|
| 540 | 588 | /* Pending requests need to be canceled. */ |
|---|
| .. | .. |
|---|
| 547 | 595 | * expire; pm_suspend_timer_fn() will take care of the |
|---|
| 548 | 596 | * rest. |
|---|
| 549 | 597 | */ |
|---|
| 550 | | - if (!(dev->power.timer_expires && time_before_eq( |
|---|
| 551 | | - dev->power.timer_expires, expires))) { |
|---|
| 598 | + if (!(dev->power.timer_expires && |
|---|
| 599 | + dev->power.timer_expires <= expires)) { |
|---|
| 600 | + /* |
|---|
| 601 | + * We add a slack of 25% to gather wakeups |
|---|
| 602 | + * without sacrificing the granularity. |
|---|
| 603 | + */ |
|---|
| 604 | + u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * |
|---|
| 605 | + (NSEC_PER_MSEC >> 2); |
|---|
| 606 | + |
|---|
| 552 | 607 | dev->power.timer_expires = expires; |
|---|
| 553 | | - mod_timer(&dev->power.suspend_timer, expires); |
|---|
| 608 | + hrtimer_start_range_ns(&dev->power.suspend_timer, |
|---|
| 609 | + ns_to_ktime(expires), |
|---|
| 610 | + slack, |
|---|
| 611 | + HRTIMER_MODE_ABS); |
|---|
| 554 | 612 | } |
|---|
| 555 | 613 | dev->power.timer_autosuspends = 1; |
|---|
| 556 | 614 | goto out; |
|---|
| .. | .. |
|---|
| 617 | 675 | if (retval) |
|---|
| 618 | 676 | goto fail; |
|---|
| 619 | 677 | |
|---|
| 678 | + dev_pm_enable_wake_irq_complete(dev); |
|---|
| 679 | + |
|---|
| 620 | 680 | no_callback: |
|---|
| 621 | 681 | __update_runtime_status(dev, RPM_SUSPENDED); |
|---|
| 622 | 682 | pm_runtime_deactivate_timer(dev); |
|---|
| .. | .. |
|---|
| 662 | 722 | return retval; |
|---|
| 663 | 723 | |
|---|
| 664 | 724 | fail: |
|---|
| 665 | | - dev_pm_disable_wake_irq_check(dev); |
|---|
| 725 | + dev_pm_disable_wake_irq_check(dev, true); |
|---|
| 666 | 726 | __update_runtime_status(dev, RPM_ACTIVE); |
|---|
| 667 | 727 | dev->power.deferred_resume = false; |
|---|
| 668 | 728 | wake_up_all(&dev->power.wait_queue); |
|---|
| .. | .. |
|---|
| 845 | 905 | |
|---|
| 846 | 906 | callback = RPM_GET_CALLBACK(dev, runtime_resume); |
|---|
| 847 | 907 | |
|---|
| 848 | | - dev_pm_disable_wake_irq_check(dev); |
|---|
| 908 | + dev_pm_disable_wake_irq_check(dev, false); |
|---|
| 849 | 909 | retval = rpm_callback(callback, dev); |
|---|
| 850 | 910 | if (retval) { |
|---|
| 851 | 911 | __update_runtime_status(dev, RPM_SUSPENDED); |
|---|
| .. | .. |
|---|
| 925 | 985 | * |
|---|
| 926 | 986 | * Check if the time is right and queue a suspend request. |
|---|
| 927 | 987 | */ |
|---|
| 928 | | -static void pm_suspend_timer_fn(struct timer_list *t) |
|---|
| 988 | +static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) |
|---|
| 929 | 989 | { |
|---|
| 930 | | - struct device *dev = from_timer(dev, t, power.suspend_timer); |
|---|
| 990 | + struct device *dev = container_of(timer, struct device, power.suspend_timer); |
|---|
| 931 | 991 | unsigned long flags; |
|---|
| 932 | | - unsigned long expires; |
|---|
| 992 | + u64 expires; |
|---|
| 933 | 993 | |
|---|
| 934 | 994 | spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| 935 | 995 | |
|---|
| 936 | 996 | expires = dev->power.timer_expires; |
|---|
| 937 | | - /* If 'expire' is after 'jiffies' we've been called too early. */ |
|---|
| 938 | | - if (expires > 0 && !time_after(expires, jiffies)) { |
|---|
| 997 | + /* |
|---|
| 998 | + * If 'expires' is after the current time, we've been called |
|---|
| 999 | + * too early. |
|---|
| 1000 | + */ |
|---|
| 1001 | + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { |
|---|
| 939 | 1002 | dev->power.timer_expires = 0; |
|---|
| 940 | 1003 | rpm_suspend(dev, dev->power.timer_autosuspends ? |
|---|
| 941 | 1004 | (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); |
|---|
| 942 | 1005 | } |
|---|
| 943 | 1006 | |
|---|
| 944 | 1007 | spin_unlock_irqrestore(&dev->power.lock, flags); |
|---|
| 1008 | + |
|---|
| 1009 | + return HRTIMER_NORESTART; |
|---|
| 945 | 1010 | } |
|---|
| 946 | 1011 | |
|---|
| 947 | 1012 | /** |
|---|
| .. | .. |
|---|
| 952 | 1017 | int pm_schedule_suspend(struct device *dev, unsigned int delay) |
|---|
| 953 | 1018 | { |
|---|
| 954 | 1019 | unsigned long flags; |
|---|
| 1020 | + u64 expires; |
|---|
| 955 | 1021 | int retval; |
|---|
| 956 | 1022 | |
|---|
| 957 | 1023 | spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| .. | .. |
|---|
| 968 | 1034 | /* Other scheduled or pending requests need to be canceled. */ |
|---|
| 969 | 1035 | pm_runtime_cancel_pending(dev); |
|---|
| 970 | 1036 | |
|---|
| 971 | | - dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
|---|
| 972 | | - dev->power.timer_expires += !dev->power.timer_expires; |
|---|
| 1037 | + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; |
|---|
| 1038 | + dev->power.timer_expires = expires; |
|---|
| 973 | 1039 | dev->power.timer_autosuspends = 0; |
|---|
| 974 | | - mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
|---|
| 1040 | + hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); |
|---|
| 975 | 1041 | |
|---|
| 976 | 1042 | out: |
|---|
| 977 | 1043 | spin_unlock_irqrestore(&dev->power.lock, flags); |
|---|
| .. | .. |
|---|
| 998 | 1064 | int retval; |
|---|
| 999 | 1065 | |
|---|
| 1000 | 1066 | if (rpmflags & RPM_GET_PUT) { |
|---|
| 1001 | | - if (!atomic_dec_and_test(&dev->power.usage_count)) |
|---|
| 1067 | + if (!atomic_dec_and_test(&dev->power.usage_count)) { |
|---|
| 1068 | + trace_rpm_usage_rcuidle(dev, rpmflags); |
|---|
| 1002 | 1069 | return 0; |
|---|
| 1070 | + } |
|---|
| 1003 | 1071 | } |
|---|
| 1004 | 1072 | |
|---|
| 1005 | 1073 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); |
|---|
| .. | .. |
|---|
| 1030 | 1098 | int retval; |
|---|
| 1031 | 1099 | |
|---|
| 1032 | 1100 | if (rpmflags & RPM_GET_PUT) { |
|---|
| 1033 | | - if (!atomic_dec_and_test(&dev->power.usage_count)) |
|---|
| 1101 | + if (!atomic_dec_and_test(&dev->power.usage_count)) { |
|---|
| 1102 | + trace_rpm_usage_rcuidle(dev, rpmflags); |
|---|
| 1034 | 1103 | return 0; |
|---|
| 1104 | + } |
|---|
| 1035 | 1105 | } |
|---|
| 1036 | 1106 | |
|---|
| 1037 | 1107 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); |
|---|
| .. | .. |
|---|
| 1075 | 1145 | EXPORT_SYMBOL_GPL(__pm_runtime_resume); |
|---|
| 1076 | 1146 | |
|---|
| 1077 | 1147 | /** |
|---|
| 1078 | | - * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter. |
|---|
| 1148 | + * pm_runtime_get_if_active - Conditionally bump up device usage counter. |
|---|
| 1079 | 1149 | * @dev: Device to handle. |
|---|
| 1150 | + * @ign_usage_count: Whether or not to look at the current usage counter value. |
|---|
| 1080 | 1151 | * |
|---|
| 1081 | | - * Return -EINVAL if runtime PM is disabled for the device. |
|---|
| 1152 | + * Return -EINVAL if runtime PM is disabled for @dev. |
|---|
| 1082 | 1153 | * |
|---|
| 1083 | | - * If that's not the case and if the device's runtime PM status is RPM_ACTIVE |
|---|
| 1084 | | - * and the runtime PM usage counter is nonzero, increment the counter and |
|---|
| 1085 | | - * return 1. Otherwise return 0 without changing the counter. |
|---|
| 1154 | + * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either |
|---|
| 1155 | + * @ign_usage_count is %true or the runtime PM usage counter of @dev is not |
|---|
| 1156 | + * zero, increment the usage counter of @dev and return 1. Otherwise, return 0 |
|---|
| 1157 | + * without changing the usage counter. |
|---|
| 1158 | + * |
|---|
| 1159 | + * If @ign_usage_count is %true, this function can be used to prevent suspending |
|---|
| 1160 | + * the device when its runtime PM status is %RPM_ACTIVE. |
|---|
| 1161 | + * |
|---|
| 1162 | + * If @ign_usage_count is %false, this function can be used to prevent |
|---|
| 1163 | + * suspending the device when both its runtime PM status is %RPM_ACTIVE and its |
|---|
| 1164 | + * runtime PM usage counter is not zero. |
|---|
| 1165 | + * |
|---|
| 1166 | + * The caller is responsible for decrementing the runtime PM usage counter of |
|---|
| 1167 | + * @dev after this function has returned a positive value for it. |
|---|
| 1086 | 1168 | */ |
|---|
| 1087 | | -int pm_runtime_get_if_in_use(struct device *dev) |
|---|
| 1169 | +int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) |
|---|
| 1088 | 1170 | { |
|---|
| 1089 | 1171 | unsigned long flags; |
|---|
| 1090 | 1172 | int retval; |
|---|
| 1091 | 1173 | |
|---|
| 1092 | 1174 | spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| 1093 | | - retval = dev->power.disable_depth > 0 ? -EINVAL : |
|---|
| 1094 | | - dev->power.runtime_status == RPM_ACTIVE |
|---|
| 1095 | | - && atomic_inc_not_zero(&dev->power.usage_count); |
|---|
| 1175 | + if (dev->power.disable_depth > 0) { |
|---|
| 1176 | + retval = -EINVAL; |
|---|
| 1177 | + } else if (dev->power.runtime_status != RPM_ACTIVE) { |
|---|
| 1178 | + retval = 0; |
|---|
| 1179 | + } else if (ign_usage_count) { |
|---|
| 1180 | + retval = 1; |
|---|
| 1181 | + atomic_inc(&dev->power.usage_count); |
|---|
| 1182 | + } else { |
|---|
| 1183 | + retval = atomic_inc_not_zero(&dev->power.usage_count); |
|---|
| 1184 | + } |
|---|
| 1185 | + trace_rpm_usage_rcuidle(dev, 0); |
|---|
| 1096 | 1186 | spin_unlock_irqrestore(&dev->power.lock, flags); |
|---|
| 1187 | + |
|---|
| 1097 | 1188 | return retval; |
|---|
| 1098 | 1189 | } |
|---|
| 1099 | | -EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); |
|---|
| 1190 | +EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); |
|---|
| 1100 | 1191 | |
|---|
| 1101 | 1192 | /** |
|---|
| 1102 | 1193 | * __pm_runtime_set_status - Set runtime PM status of a device. |
|---|
| .. | .. |
|---|
| 1114 | 1205 | * and the device parent's counter of unsuspended children is modified to |
|---|
| 1115 | 1206 | * reflect the new status. If the new status is RPM_SUSPENDED, an idle |
|---|
| 1116 | 1207 | * notification request for the parent is submitted. |
|---|
| 1208 | + * |
|---|
| 1209 | + * If @dev has any suppliers (as reflected by device links to them), and @status |
|---|
| 1210 | + * is RPM_ACTIVE, they will be activated upfront and if the activation of one |
|---|
| 1211 | + * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead |
|---|
| 1212 | + * of the @status value) and the suppliers will be deactivated on exit. The |
|---|
| 1213 | + * error returned by the failing supplier activation will be returned in that |
|---|
| 1214 | + * case. |
|---|
| 1117 | 1215 | */ |
|---|
| 1118 | 1216 | int __pm_runtime_set_status(struct device *dev, unsigned int status) |
|---|
| 1119 | 1217 | { |
|---|
| 1120 | 1218 | struct device *parent = dev->parent; |
|---|
| 1121 | | - unsigned long flags; |
|---|
| 1122 | 1219 | bool notify_parent = false; |
|---|
| 1123 | 1220 | int error = 0; |
|---|
| 1124 | 1221 | |
|---|
| 1125 | 1222 | if (status != RPM_ACTIVE && status != RPM_SUSPENDED) |
|---|
| 1126 | 1223 | return -EINVAL; |
|---|
| 1127 | 1224 | |
|---|
| 1128 | | - spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| 1225 | + spin_lock_irq(&dev->power.lock); |
|---|
| 1129 | 1226 | |
|---|
| 1130 | | - if (!dev->power.runtime_error && !dev->power.disable_depth) { |
|---|
| 1227 | + /* |
|---|
| 1228 | + * Prevent PM-runtime from being enabled for the device or return an |
|---|
| 1229 | + * error if it is enabled already and working. |
|---|
| 1230 | + */ |
|---|
| 1231 | + if (dev->power.runtime_error || dev->power.disable_depth) |
|---|
| 1232 | + dev->power.disable_depth++; |
|---|
| 1233 | + else |
|---|
| 1131 | 1234 | error = -EAGAIN; |
|---|
| 1132 | | - goto out; |
|---|
| 1235 | + |
|---|
| 1236 | + spin_unlock_irq(&dev->power.lock); |
|---|
| 1237 | + |
|---|
| 1238 | + if (error) |
|---|
| 1239 | + return error; |
|---|
| 1240 | + |
|---|
| 1241 | + /* |
|---|
| 1242 | + * If the new status is RPM_ACTIVE, the suppliers can be activated |
|---|
| 1243 | + * upfront regardless of the current status, because next time |
|---|
| 1244 | + * rpm_put_suppliers() runs, the rpm_active refcounts of the links |
|---|
| 1245 | + * involved will be dropped down to one anyway. |
|---|
| 1246 | + */ |
|---|
| 1247 | + if (status == RPM_ACTIVE) { |
|---|
| 1248 | + int idx = device_links_read_lock(); |
|---|
| 1249 | + |
|---|
| 1250 | + error = rpm_get_suppliers(dev); |
|---|
| 1251 | + if (error) |
|---|
| 1252 | + status = RPM_SUSPENDED; |
|---|
| 1253 | + |
|---|
| 1254 | + device_links_read_unlock(idx); |
|---|
| 1133 | 1255 | } |
|---|
| 1256 | + |
|---|
| 1257 | + spin_lock_irq(&dev->power.lock); |
|---|
| 1134 | 1258 | |
|---|
| 1135 | 1259 | if (dev->power.runtime_status == status || !parent) |
|---|
| 1136 | 1260 | goto out_set; |
|---|
| .. | .. |
|---|
| 1159 | 1283 | |
|---|
| 1160 | 1284 | spin_unlock(&parent->power.lock); |
|---|
| 1161 | 1285 | |
|---|
| 1162 | | - if (error) |
|---|
| 1286 | + if (error) { |
|---|
| 1287 | + status = RPM_SUSPENDED; |
|---|
| 1163 | 1288 | goto out; |
|---|
| 1289 | + } |
|---|
| 1164 | 1290 | } |
|---|
| 1165 | 1291 | |
|---|
| 1166 | 1292 | out_set: |
|---|
| 1167 | 1293 | __update_runtime_status(dev, status); |
|---|
| 1168 | | - dev->power.runtime_error = 0; |
|---|
| 1294 | + if (!error) |
|---|
| 1295 | + dev->power.runtime_error = 0; |
|---|
| 1296 | + |
|---|
| 1169 | 1297 | out: |
|---|
| 1170 | | - spin_unlock_irqrestore(&dev->power.lock, flags); |
|---|
| 1298 | + spin_unlock_irq(&dev->power.lock); |
|---|
| 1171 | 1299 | |
|---|
| 1172 | 1300 | if (notify_parent) |
|---|
| 1173 | 1301 | pm_request_idle(parent); |
|---|
| 1302 | + |
|---|
| 1303 | + if (status == RPM_SUSPENDED) { |
|---|
| 1304 | + int idx = device_links_read_lock(); |
|---|
| 1305 | + |
|---|
| 1306 | + rpm_put_suppliers(dev); |
|---|
| 1307 | + |
|---|
| 1308 | + device_links_read_unlock(idx); |
|---|
| 1309 | + } |
|---|
| 1310 | + |
|---|
| 1311 | + pm_runtime_enable(dev); |
|---|
| 1174 | 1312 | |
|---|
| 1175 | 1313 | return error; |
|---|
| 1176 | 1314 | } |
|---|
| .. | .. |
|---|
| 1299 | 1437 | pm_runtime_put_noidle(dev); |
|---|
| 1300 | 1438 | } |
|---|
| 1301 | 1439 | |
|---|
| 1440 | + /* Update time accounting before disabling PM-runtime. */ |
|---|
| 1441 | + update_pm_runtime_accounting(dev); |
|---|
| 1442 | + |
|---|
| 1302 | 1443 | if (!dev->power.disable_depth++) |
|---|
| 1303 | 1444 | __pm_runtime_barrier(dev); |
|---|
| 1304 | 1445 | |
|---|
| .. | .. |
|---|
| 1317 | 1458 | |
|---|
| 1318 | 1459 | spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| 1319 | 1460 | |
|---|
| 1320 | | - if (dev->power.disable_depth > 0) |
|---|
| 1461 | + if (dev->power.disable_depth > 0) { |
|---|
| 1321 | 1462 | dev->power.disable_depth--; |
|---|
| 1322 | | - else |
|---|
| 1463 | + |
|---|
| 1464 | + /* About to enable runtime pm, set accounting_timestamp to now */ |
|---|
| 1465 | + if (!dev->power.disable_depth) |
|---|
| 1466 | + dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); |
|---|
| 1467 | + } else { |
|---|
| 1323 | 1468 | dev_warn(dev, "Unbalanced %s!\n", __func__); |
|---|
| 1469 | + } |
|---|
| 1324 | 1470 | |
|---|
| 1325 | 1471 | WARN(!dev->power.disable_depth && |
|---|
| 1326 | 1472 | dev->power.runtime_status == RPM_SUSPENDED && |
|---|
| .. | .. |
|---|
| 1371 | 1517 | dev->power.runtime_auto = true; |
|---|
| 1372 | 1518 | if (atomic_dec_and_test(&dev->power.usage_count)) |
|---|
| 1373 | 1519 | rpm_idle(dev, RPM_AUTO | RPM_ASYNC); |
|---|
| 1520 | + else |
|---|
| 1521 | + trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); |
|---|
| 1374 | 1522 | |
|---|
| 1375 | 1523 | out: |
|---|
| 1376 | 1524 | spin_unlock_irq(&dev->power.lock); |
|---|
| .. | .. |
|---|
| 1438 | 1586 | if (!old_use || old_delay >= 0) { |
|---|
| 1439 | 1587 | atomic_inc(&dev->power.usage_count); |
|---|
| 1440 | 1588 | rpm_resume(dev, 0); |
|---|
| 1589 | + } else { |
|---|
| 1590 | + trace_rpm_usage_rcuidle(dev, 0); |
|---|
| 1441 | 1591 | } |
|---|
| 1442 | 1592 | } |
|---|
| 1443 | 1593 | |
|---|
| .. | .. |
|---|
| 1517 | 1667 | dev->power.request_pending = false; |
|---|
| 1518 | 1668 | dev->power.request = RPM_REQ_NONE; |
|---|
| 1519 | 1669 | dev->power.deferred_resume = false; |
|---|
| 1520 | | - dev->power.accounting_timestamp = jiffies; |
|---|
| 1670 | + dev->power.needs_force_resume = 0; |
|---|
| 1521 | 1671 | INIT_WORK(&dev->power.work, pm_runtime_work); |
|---|
| 1522 | 1672 | |
|---|
| 1523 | 1673 | dev->power.timer_expires = 0; |
|---|
| 1524 | | - timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0); |
|---|
| 1674 | + hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
|---|
| 1675 | + dev->power.suspend_timer.function = pm_suspend_timer_fn; |
|---|
| 1525 | 1676 | |
|---|
| 1526 | 1677 | init_waitqueue_head(&dev->power.wait_queue); |
|---|
| 1527 | 1678 | } |
|---|
| .. | .. |
|---|
| 1556 | 1707 | } |
|---|
| 1557 | 1708 | |
|---|
| 1558 | 1709 | /** |
|---|
| 1559 | | - * pm_runtime_clean_up_links - Prepare links to consumers for driver removal. |
|---|
| 1560 | | - * @dev: Device whose driver is going to be removed. |
|---|
| 1561 | | - * |
|---|
| 1562 | | - * Check links from this device to any consumers and if any of them have active |
|---|
| 1563 | | - * runtime PM references to the device, drop the usage counter of the device |
|---|
| 1564 | | - * (as many times as needed). |
|---|
| 1565 | | - * |
|---|
| 1566 | | - * Links with the DL_FLAG_MANAGED flag unset are ignored. |
|---|
| 1567 | | - * |
|---|
| 1568 | | - * Since the device is guaranteed to be runtime-active at the point this is |
|---|
| 1569 | | - * called, nothing else needs to be done here. |
|---|
| 1570 | | - * |
|---|
| 1571 | | - * Moreover, this is called after device_links_busy() has returned 'false', so |
|---|
| 1572 | | - * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and |
|---|
| 1573 | | - * therefore rpm_active can't be manipulated concurrently. |
|---|
| 1574 | | - */ |
|---|
| 1575 | | -void pm_runtime_clean_up_links(struct device *dev) |
|---|
| 1576 | | -{ |
|---|
| 1577 | | - struct device_link *link; |
|---|
| 1578 | | - int idx; |
|---|
| 1579 | | - |
|---|
| 1580 | | - idx = device_links_read_lock(); |
|---|
| 1581 | | - |
|---|
| 1582 | | - list_for_each_entry_rcu(link, &dev->links.consumers, s_node) { |
|---|
| 1583 | | - if (!(link->flags & DL_FLAG_MANAGED)) |
|---|
| 1584 | | - continue; |
|---|
| 1585 | | - |
|---|
| 1586 | | - while (refcount_dec_not_one(&link->rpm_active)) |
|---|
| 1587 | | - pm_runtime_put_noidle(dev); |
|---|
| 1588 | | - } |
|---|
| 1589 | | - |
|---|
| 1590 | | - device_links_read_unlock(idx); |
|---|
| 1591 | | -} |
|---|
| 1592 | | - |
|---|
| 1593 | | -/** |
|---|
| 1594 | 1710 | * pm_runtime_get_suppliers - Resume and reference-count supplier devices. |
|---|
| 1595 | 1711 | * @dev: Consumer device. |
|---|
| 1596 | 1712 | */ |
|---|
| .. | .. |
|---|
| 1601 | 1717 | |
|---|
| 1602 | 1718 | idx = device_links_read_lock(); |
|---|
| 1603 | 1719 | |
|---|
| 1604 | | - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) |
|---|
| 1720 | + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, |
|---|
| 1721 | + device_links_read_lock_held()) |
|---|
| 1605 | 1722 | if (link->flags & DL_FLAG_PM_RUNTIME) { |
|---|
| 1606 | 1723 | link->supplier_preactivated = true; |
|---|
| 1607 | 1724 | pm_runtime_get_sync(link->supplier); |
|---|
| .. | .. |
|---|
| 1624 | 1741 | |
|---|
| 1625 | 1742 | idx = device_links_read_lock(); |
|---|
| 1626 | 1743 | |
|---|
| 1627 | | - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) |
|---|
| 1744 | + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, |
|---|
| 1745 | + device_links_read_lock_held()) |
|---|
| 1628 | 1746 | if (link->supplier_preactivated) { |
|---|
| 1629 | 1747 | link->supplier_preactivated = false; |
|---|
| 1630 | 1748 | spin_lock_irqsave(&dev->power.lock, flags); |
|---|
| .. | .. |
|---|
| 1645 | 1763 | spin_unlock_irq(&dev->power.lock); |
|---|
| 1646 | 1764 | } |
|---|
| 1647 | 1765 | |
|---|
| 1648 | | -void pm_runtime_drop_link(struct device *dev) |
|---|
| 1766 | +static void pm_runtime_drop_link_count(struct device *dev) |
|---|
| 1649 | 1767 | { |
|---|
| 1650 | 1768 | spin_lock_irq(&dev->power.lock); |
|---|
| 1651 | 1769 | WARN_ON(dev->power.links_count == 0); |
|---|
| 1652 | 1770 | dev->power.links_count--; |
|---|
| 1653 | 1771 | spin_unlock_irq(&dev->power.lock); |
|---|
| 1772 | +} |
|---|
| 1773 | + |
|---|
| 1774 | +/** |
|---|
| 1775 | + * pm_runtime_drop_link - Prepare for device link removal. |
|---|
| 1776 | + * @link: Device link going away. |
|---|
| 1777 | + * |
|---|
| 1778 | + * Drop the link count of the consumer end of @link and decrement the supplier |
|---|
| 1779 | + * device's runtime PM usage counter as many times as needed to drop all of the |
|---|
| 1780 | + * PM runtime reference to it from the consumer. |
|---|
| 1781 | + */ |
|---|
| 1782 | +void pm_runtime_drop_link(struct device_link *link) |
|---|
| 1783 | +{ |
|---|
| 1784 | + if (!(link->flags & DL_FLAG_PM_RUNTIME)) |
|---|
| 1785 | + return; |
|---|
| 1786 | + |
|---|
| 1787 | + pm_runtime_drop_link_count(link->consumer); |
|---|
| 1788 | + pm_runtime_release_supplier(link); |
|---|
| 1789 | + pm_request_idle(link->supplier); |
|---|
| 1654 | 1790 | } |
|---|
| 1655 | 1791 | |
|---|
| 1656 | 1792 | static bool pm_runtime_need_not_resume(struct device *dev) |
|---|
| .. | .. |
|---|
| 1698 | 1834 | * its parent, but set its status to RPM_SUSPENDED anyway in case this |
|---|
| 1699 | 1835 | * function will be called again for it in the meantime. |
|---|
| 1700 | 1836 | */ |
|---|
| 1701 | | - if (pm_runtime_need_not_resume(dev)) |
|---|
| 1837 | + if (pm_runtime_need_not_resume(dev)) { |
|---|
| 1702 | 1838 | pm_runtime_set_suspended(dev); |
|---|
| 1703 | | - else |
|---|
| 1839 | + } else { |
|---|
| 1704 | 1840 | __update_runtime_status(dev, RPM_SUSPENDED); |
|---|
| 1841 | + dev->power.needs_force_resume = 1; |
|---|
| 1842 | + } |
|---|
| 1705 | 1843 | |
|---|
| 1706 | 1844 | return 0; |
|---|
| 1707 | 1845 | |
|---|
| .. | .. |
|---|
| 1728 | 1866 | int (*callback)(struct device *); |
|---|
| 1729 | 1867 | int ret = 0; |
|---|
| 1730 | 1868 | |
|---|
| 1731 | | - if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev)) |
|---|
| 1869 | + if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) |
|---|
| 1732 | 1870 | goto out; |
|---|
| 1733 | 1871 | |
|---|
| 1734 | 1872 | /* |
|---|
| .. | .. |
|---|
| 1747 | 1885 | |
|---|
| 1748 | 1886 | pm_runtime_mark_last_busy(dev); |
|---|
| 1749 | 1887 | out: |
|---|
| 1888 | + dev->power.needs_force_resume = 0; |
|---|
| 1750 | 1889 | pm_runtime_enable(dev); |
|---|
| 1751 | 1890 | return ret; |
|---|
| 1752 | 1891 | } |
|---|