+// SPDX-License-Identifier: GPL-2.0
 /*
  * drivers/base/power/runtime.c - Helper functions for device runtime PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
  */
-
 #include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
---|
...
  * runtime_status field is updated, to account the time in the old state
  * correctly.
  */
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
 {
-        unsigned long now = jiffies;
-        unsigned long delta;
-
-        delta = now - dev->power.accounting_timestamp;
-
-        dev->power.accounting_timestamp = now;
+        u64 now, last, delta;

         if (dev->power.disable_depth > 0)
                 return;

+        last = dev->power.accounting_timestamp;
+
+        now = ktime_get_mono_fast_ns();
+        dev->power.accounting_timestamp = now;
+
+        /*
+         * Because ktime_get_mono_fast_ns() is not monotonic during
+         * timekeeping updates, ensure that 'now' is after the last saved
+         * timestamp.
+         */
+        if (now < last)
+                return;
+
+        delta = now - last;
+
         if (dev->power.runtime_status == RPM_SUSPENDED)
-                dev->power.suspended_jiffies += delta;
+                dev->power.suspended_time += delta;
         else
-                dev->power.active_jiffies += delta;
+                dev->power.active_time += delta;
 }
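The `now < last` guard matters because `ktime_get_mono_fast_ns()` can briefly appear to go backwards across a timekeeping update. A minimal sketch of the same pattern, with hypothetical names, outside the runtime-PM core:

```c
#include <linux/ktime.h>

/* Hypothetical accumulator illustrating the guard used above. */
struct ns_accounting {
	u64 last_stamp;	/* last reading of the fast monotonic clock */
	u64 total_ns;	/* accumulated time */
};

static void ns_accounting_update(struct ns_accounting *acc)
{
	u64 now = ktime_get_mono_fast_ns();

	/* Never accumulate a negative delta from a backward jump. */
	if (now >= acc->last_stamp)
		acc->total_ns += now - acc->last_stamp;

	acc->last_stamp = now;
}
```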
---|

 static void __update_runtime_status(struct device *dev, enum rpm_status status)
---|
...
         dev->power.runtime_status = status;
 }

+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+        u64 time;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dev->power.lock, flags);
+
+        update_pm_runtime_accounting(dev);
+        time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+        spin_unlock_irqrestore(&dev->power.lock, flags);
+
+        return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+        return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+        return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
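For context, a consumer of the newly exported accessor might simply sample it for diagnostics; a sketch with hypothetical driver names:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical debug helper: report time spent runtime-suspended. */
static void foo_report_suspended_time(struct device *dev)
{
	u64 ns = pm_runtime_suspended_time(dev);

	dev_dbg(dev, "suspended for %llu ns so far\n",
		(unsigned long long)ns);
}
```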
---|
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
---|
...
 static void pm_runtime_deactivate_timer(struct device *dev)
 {
         if (dev->power.timer_expires > 0) {
-                del_timer(&dev->power.suspend_timer);
+                hrtimer_try_to_cancel(&dev->power.suspend_timer);
                 dev->power.timer_expires = 0;
         }
 }
---|
...
  * Compute the autosuspend-delay expiration time based on the device's
  * power.last_busy time. If the delay has already expired or is disabled
  * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds.
  *
  * This function may be called either with or without dev->power.lock held.
  * Either way it can be racy, since power.last_busy may be updated at any time.
  */
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
         int autosuspend_delay;
-        long elapsed;
-        unsigned long last_busy;
-        unsigned long expires = 0;
+        u64 expires;

         if (!dev->power.use_autosuspend)
-                goto out;
+                return 0;

         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
         if (autosuspend_delay < 0)
-                goto out;
+                return 0;

-        last_busy = READ_ONCE(dev->power.last_busy);
-        elapsed = jiffies - last_busy;
-        if (elapsed < 0)
-                goto out;	/* jiffies has wrapped around. */
+        expires = READ_ONCE(dev->power.last_busy);
+        expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+        if (expires > ktime_get_mono_fast_ns())
+                return expires;	/* Expires in the future */

-        /*
-         * If the autosuspend_delay is >= 1 second, align the timer by rounding
-         * up to the nearest second.
-         */
-        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
-        if (autosuspend_delay >= 1000)
-                expires = round_jiffies(expires);
-        expires += !expires;
-        if (elapsed >= expires - last_busy)
-                expires = 0;	/* Already expired. */
-
- out:
-        return expires;
+        return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

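The nanosecond arithmetic replaces all of the jiffies wrap-around and rounding logic with a single addition. A worked example with hypothetical numbers:

```c
/*
 * Worked example (hypothetical values, ktime_get_mono_fast_ns() scale):
 *   last_busy         = 5,000,000,000 ns
 *   autosuspend_delay = 2000 ms
 *   expires           = 5,000,000,000 + 2000 * NSEC_PER_MSEC
 *                     = 7,000,000,000 ns
 * If the current time reads 6.5e9 ns, 7,000,000,000 is returned (still
 * in the future); at 7.1e9 ns the delay has expired and 0 is returned.
 */
```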
---|
...
             || (dev->power.request_pending
             && dev->power.request == RPM_REQ_RESUME))
                 retval = -EAGAIN;
-        else if (__dev_pm_qos_read_value(dev) == 0)
+        else if (__dev_pm_qos_resume_latency(dev) == 0)
                 retval = -EPERM;
         else if (dev->power.runtime_status == RPM_SUSPENDED)
                 retval = 1;
---|
...
 {
         struct device_link *link;

-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held()) {
                 int retval;

-                if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
-                    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+                if (!(link->flags & DL_FLAG_PM_RUNTIME))
                         continue;

                 retval = pm_runtime_get_sync(link->supplier);
---|
...
         return 0;
 }

+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device.
+ */
+void pm_runtime_release_supplier(struct device_link *link)
+{
+        struct device *supplier = link->supplier;
+
+        /*
+         * The additional power.usage_count check is a safety net in case
+         * the rpm_active refcount becomes saturated, in which case
+         * refcount_dec_not_one() would return true forever, but it is not
+         * strictly necessary.
+         */
+        while (refcount_dec_not_one(&link->rpm_active) &&
+               atomic_read(&supplier->power.usage_count) > 0)
+                pm_runtime_put_noidle(supplier);
+}
+
 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
         struct device_link *link;

-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
-                if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
-                        continue;
-
-                while (refcount_dec_not_one(&link->rpm_active))
-                        pm_runtime_put_noidle(link->supplier);
-
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held()) {
+                pm_runtime_release_supplier(link);
                 if (try_to_suspend)
                         pm_request_idle(link->supplier);
         }
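The loop shape is worth noting: `refcount_dec_not_one()` decrements and returns true until the count reaches exactly one, so the drain stops at the link's base reference rather than at zero. A minimal illustration of that semantic, separate from the device-link code:

```c
#include <linux/refcount.h>

/* Illustration only: drain a refcount down to its base value of one. */
static void drain_to_one(refcount_t *r)
{
	/*
	 * refcount_dec_not_one() refuses to decrement when the count is
	 * exactly 1, so this loop leaves one base reference in place.
	 * (On a saturated refcount it returns true forever, which is why
	 * the kernel code above pairs it with a usage_count check.)
	 */
	while (refcount_dec_not_one(r))
		;
}
```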
---|
...
         struct device_link *link;
         int idx = device_links_read_lock();

-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held())
                 pm_request_idle(link->supplier);

         device_links_read_unlock(idx);
---|
...

  repeat:
         retval = rpm_check_suspend_allowed(dev);
-
         if (retval < 0)
-                ;	/* Conditions are wrong. */
+                goto out;	/* Conditions are wrong. */

         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
-        else if (dev->power.runtime_status == RPM_RESUMING &&
-            !(rpmflags & RPM_ASYNC))
+        if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
                 retval = -EAGAIN;
         if (retval)
                 goto out;
---|
...
         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
         if ((rpmflags & RPM_AUTO)
             && dev->power.runtime_status != RPM_SUSPENDING) {
-                unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+                u64 expires = pm_runtime_autosuspend_expiration(dev);

                 if (expires != 0) {
                         /* Pending requests need to be canceled. */
---|
...
                          * expire; pm_suspend_timer_fn() will take care of the
                          * rest.
                          */
-                        if (!(dev->power.timer_expires && time_before_eq(
-                            dev->power.timer_expires, expires))) {
+                        if (!(dev->power.timer_expires &&
+                              dev->power.timer_expires <= expires)) {
+                                /*
+                                 * We add a slack of 25% to gather wakeups
+                                 * without sacrificing the granularity.
+                                 */
+                                u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
+                                                (NSEC_PER_MSEC >> 2);
+
                                 dev->power.timer_expires = expires;
-                                mod_timer(&dev->power.suspend_timer, expires);
+                                hrtimer_start_range_ns(&dev->power.suspend_timer,
+                                                       ns_to_ktime(expires),
+                                                       slack,
+                                                       HRTIMER_MODE_ABS);
                         }
                         dev->power.timer_autosuspends = 1;
                         goto out;
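The 25% figure falls out of the shift: `NSEC_PER_MSEC >> 2` is a quarter of a millisecond in nanoseconds, so the slack scales linearly with the delay. For example:

```c
/*
 * Worked example: autosuspend_delay = 100 ms.
 *   slack = 100 * (NSEC_PER_MSEC >> 2)
 *         = 100 * 250,000 ns
 *         = 25,000,000 ns = 25 ms, i.e. 25% of the delay.
 * The hrtimer may then fire anywhere in [expires, expires + 25 ms],
 * letting the core coalesce nearby wakeups.
 */
```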
---|
...
  *
  * Check if the time is right and queue a suspend request.
  */
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 {
-        struct device *dev = from_timer(dev, t, power.suspend_timer);
+        struct device *dev = container_of(timer, struct device, power.suspend_timer);
         unsigned long flags;
-        unsigned long expires;
+        u64 expires;

         spin_lock_irqsave(&dev->power.lock, flags);

         expires = dev->power.timer_expires;
-        /* If 'expire' is after 'jiffies' we've been called too early. */
-        if (expires > 0 && !time_after(expires, jiffies)) {
+        /*
+         * If 'expires' is after the current time, we've been called
+         * too early.
+         */
+        if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
                 dev->power.timer_expires = 0;
                 rpm_suspend(dev, dev->power.timer_autosuspends ?
                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
         }

         spin_unlock_irqrestore(&dev->power.lock, flags);
+
+        return HRTIMER_NORESTART;
 }
---|

 /**
---|
...
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
         unsigned long flags;
+        u64 expires;
         int retval;

         spin_lock_irqsave(&dev->power.lock, flags);
---|
...
         /* Other scheduled or pending requests need to be canceled. */
         pm_runtime_cancel_pending(dev);

-        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-        dev->power.timer_expires += !dev->power.timer_expires;
+        expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+        dev->power.timer_expires = expires;
         dev->power.timer_autosuspends = 0;
-        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+        hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

  out:
         spin_unlock_irqrestore(&dev->power.lock, flags);
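Callers are unaffected by the timer conversion: the delay argument is still in milliseconds. A sketch of typical use, with a hypothetical driver helper:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical example: request a suspend roughly 500 ms from now. */
static void foo_done_with_hw(struct device *dev)
{
	int ret = pm_schedule_suspend(dev, 500);

	if (ret < 0)
		dev_warn(dev, "could not schedule suspend: %d\n", ret);
}
```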
---|
...
         int retval;

         if (rpmflags & RPM_GET_PUT) {
-                if (!atomic_dec_and_test(&dev->power.usage_count))
+                if (!atomic_dec_and_test(&dev->power.usage_count)) {
+                        trace_rpm_usage_rcuidle(dev, rpmflags);
                         return 0;
+                }
         }

         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
---|
...
         int retval;

         if (rpmflags & RPM_GET_PUT) {
-                if (!atomic_dec_and_test(&dev->power.usage_count))
+                if (!atomic_dec_and_test(&dev->power.usage_count)) {
+                        trace_rpm_usage_rcuidle(dev, rpmflags);
                         return 0;
+                }
         }

         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
---|
...
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);

 /**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up device usage counter.
  * @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
  *
- * Return -EINVAL if runtime PM is disabled for the device.
+ * Return -EINVAL if runtime PM is disabled for @dev.
  *
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+ * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+ * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+ * without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+ *
+ * If @ign_usage_count is %false, this function can be used to prevent
+ * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
+ * runtime PM usage counter is not zero.
+ *
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
  */
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
 {
         unsigned long flags;
         int retval;

         spin_lock_irqsave(&dev->power.lock, flags);
-        retval = dev->power.disable_depth > 0 ? -EINVAL :
-                dev->power.runtime_status == RPM_ACTIVE
-                        && atomic_inc_not_zero(&dev->power.usage_count);
+        if (dev->power.disable_depth > 0) {
+                retval = -EINVAL;
+        } else if (dev->power.runtime_status != RPM_ACTIVE) {
+                retval = 0;
+        } else if (ign_usage_count) {
+                retval = 1;
+                atomic_inc(&dev->power.usage_count);
+        } else {
+                retval = atomic_inc_not_zero(&dev->power.usage_count);
+        }
+        trace_rpm_usage_rcuidle(dev, 0);
         spin_unlock_irqrestore(&dev->power.lock, flags);
+
         return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);

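A typical caller only wants to touch the hardware if it is already powered, without resuming it. A sketch under that assumption (the `foo_*` names are hypothetical):

```c
#include <linux/pm_runtime.h>

/* Hypothetical: touch registers only if the device is already active. */
static void foo_flush_if_powered(struct device *dev)
{
	/* ign_usage_count == true: any RPM_ACTIVE device qualifies. */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;		/* disabled, or not RPM_ACTIVE: skip */

	foo_flush_hw(dev);	/* hypothetical register access */

	pm_runtime_put(dev);	/* drop the reference taken above */
}
```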
---|
 /**
  * __pm_runtime_set_status - Set runtime PM status of a device.
---|
...
  * and the device parent's counter of unsuspended children is modified to
  * reflect the new status. If the new status is RPM_SUSPENDED, an idle
  * notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
  */
 int __pm_runtime_set_status(struct device *dev, unsigned int status)
 {
         struct device *parent = dev->parent;
-        unsigned long flags;
         bool notify_parent = false;
         int error = 0;

         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                 return -EINVAL;

-        spin_lock_irqsave(&dev->power.lock, flags);
+        spin_lock_irq(&dev->power.lock);

-        if (!dev->power.runtime_error && !dev->power.disable_depth) {
+        /*
+         * Prevent PM-runtime from being enabled for the device or return an
+         * error if it is enabled already and working.
+         */
+        if (dev->power.runtime_error || dev->power.disable_depth)
+                dev->power.disable_depth++;
+        else
                 error = -EAGAIN;
-                goto out;
+
+        spin_unlock_irq(&dev->power.lock);
+
+        if (error)
+                return error;
+
+        /*
+         * If the new status is RPM_ACTIVE, the suppliers can be activated
+         * upfront regardless of the current status, because next time
+         * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+         * involved will be dropped down to one anyway.
+         */
+        if (status == RPM_ACTIVE) {
+                int idx = device_links_read_lock();
+
+                error = rpm_get_suppliers(dev);
+                if (error)
+                        status = RPM_SUSPENDED;
+
+                device_links_read_unlock(idx);
         }
+
+        spin_lock_irq(&dev->power.lock);

         if (dev->power.runtime_status == status || !parent)
                 goto out_set;
...

                 spin_unlock(&parent->power.lock);

-                if (error)
+                if (error) {
+                        status = RPM_SUSPENDED;
                         goto out;
+                }
         }

  out_set:
         __update_runtime_status(dev, status);
-        dev->power.runtime_error = 0;
+        if (!error)
+                dev->power.runtime_error = 0;
+
  out:
-        spin_unlock_irqrestore(&dev->power.lock, flags);
+        spin_unlock_irq(&dev->power.lock);

         if (notify_parent)
                 pm_request_idle(parent);
+
+        if (status == RPM_SUSPENDED) {
+                int idx = device_links_read_lock();
+
+                rpm_put_suppliers(dev);
+
+                device_links_read_unlock(idx);
+        }
+
+        pm_runtime_enable(dev);

         return error;
 }
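Drivers normally reach this function through the `pm_runtime_set_active()` / `pm_runtime_set_suspended()` wrappers, typically at probe time when the hardware is known to be powered. A sketch of that common pattern (the `foo_*` names are hypothetical, error handling trimmed):

```c
#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
	int ret;

	foo_power_on(dev);	/* hypothetical: hardware is now powered */

	/* pm_runtime_set_active() wraps __pm_runtime_set_status(). */
	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;	/* e.g. a supplier failed to activate */

	pm_runtime_enable(dev);
	return 0;
}
```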
---|
...
                 pm_runtime_put_noidle(dev);
         }

+        /* Update time accounting before disabling PM-runtime. */
+        update_pm_runtime_accounting(dev);
+
         if (!dev->power.disable_depth++)
                 __pm_runtime_barrier(dev);
---|
...

         spin_lock_irqsave(&dev->power.lock, flags);

-        if (dev->power.disable_depth > 0)
+        if (dev->power.disable_depth > 0) {
                 dev->power.disable_depth--;
-        else
+
+                /* About to enable runtime PM, set accounting_timestamp to now */
+                if (!dev->power.disable_depth)
+                        dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+        } else {
                 dev_warn(dev, "Unbalanced %s!\n", __func__);
+        }

         WARN(!dev->power.disable_depth &&
              dev->power.runtime_status == RPM_SUSPENDED &&
---|
1371 | 1505 | dev->power.runtime_auto = true; |
---|
1372 | 1506 | if (atomic_dec_and_test(&dev->power.usage_count)) |
---|
1373 | 1507 | rpm_idle(dev, RPM_AUTO | RPM_ASYNC); |
---|
| 1508 | + else |
---|
| 1509 | + trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); |
---|
1374 | 1510 | |
---|
1375 | 1511 | out: |
---|
1376 | 1512 | spin_unlock_irq(&dev->power.lock); |
---|
...
         if (!old_use || old_delay >= 0) {
                 atomic_inc(&dev->power.usage_count);
                 rpm_resume(dev, 0);
+        } else {
+                trace_rpm_usage_rcuidle(dev, 0);
         }
 }

---|
...
         dev->power.request_pending = false;
         dev->power.request = RPM_REQ_NONE;
         dev->power.deferred_resume = false;
-        dev->power.accounting_timestamp = jiffies;
+        dev->power.needs_force_resume = 0;
         INIT_WORK(&dev->power.work, pm_runtime_work);

         dev->power.timer_expires = 0;
-        timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+        hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+        dev->power.suspend_timer.function = pm_suspend_timer_fn;

         init_waitqueue_head(&dev->power.wait_queue);
 }
---|
...
 }

 /**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
-        struct device_link *link;
-        int idx;
-
-        idx = device_links_read_lock();
-
-        list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
-                if (!(link->flags & DL_FLAG_MANAGED))
-                        continue;
-
-                while (refcount_dec_not_one(&link->rpm_active))
-                        pm_runtime_put_noidle(dev);
-        }
-
-        device_links_read_unlock(idx);
-}
-
-/**
  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
  * @dev: Consumer device.
  */
...

         idx = device_links_read_lock();

-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held())
                 if (link->flags & DL_FLAG_PM_RUNTIME) {
                         link->supplier_preactivated = true;
                         pm_runtime_get_sync(link->supplier);
...

         idx = device_links_read_lock();

-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held())
                 if (link->supplier_preactivated) {
                         link->supplier_preactivated = false;
                         spin_lock_irqsave(&dev->power.lock, flags);
---|
...
         spin_unlock_irq(&dev->power.lock);
 }

-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
 {
         spin_lock_irq(&dev->power.lock);
         WARN_ON(dev->power.links_count == 0);
         dev->power.links_count--;
         spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM-runtime references to it from the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+        if (!(link->flags & DL_FLAG_PM_RUNTIME))
+                return;
+
+        pm_runtime_drop_link_count(link->consumer);
+        pm_runtime_release_supplier(link);
+        pm_request_idle(link->supplier);
 }

 static bool pm_runtime_need_not_resume(struct device *dev)
---|
...
          * its parent, but set its status to RPM_SUSPENDED anyway in case this
          * function will be called again for it in the meantime.
          */
-        if (pm_runtime_need_not_resume(dev))
+        if (pm_runtime_need_not_resume(dev)) {
                 pm_runtime_set_suspended(dev);
-        else
+        } else {
                 __update_runtime_status(dev, RPM_SUSPENDED);
+                dev->power.needs_force_resume = 1;
+        }

         return 0;

---|
...
         int (*callback)(struct device *);
         int ret = 0;

-        if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+        if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
                 goto out;

         /*
...

         pm_runtime_mark_last_busy(dev);
 out:
+        dev->power.needs_force_resume = 0;
         pm_runtime_enable(dev);
         return ret;
 }
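`pm_runtime_force_suspend()` and `pm_runtime_force_resume()` are commonly wired straight into a driver's system sleep callbacks, which is the path the `needs_force_resume` flag is protecting. A sketch of that wiring (the `foo_runtime_*` callbacks are hypothetical):

```c
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Sketch: reuse the runtime PM callbacks for system-wide sleep. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend,	/* hypothetical */
			   foo_runtime_resume,	/* hypothetical */
			   NULL)
};
```

With this arrangement, a device already runtime-suspended at system suspend is left alone, and `needs_force_resume` ensures it is only resumed on wakeup if `pm_runtime_force_suspend()` actually suspended it.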
---|