forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/base/power/runtime.c
@@ -1,13 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * drivers/base/power/runtime.c - Helper functions for device runtime PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
  */
-
 #include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
@@ -62,22 +62,32 @@
  * runtime_status field is updated, to account the time in the old state
  * correctly.
  */
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
 {
-        unsigned long now = jiffies;
-        unsigned long delta;
-
-        delta = now - dev->power.accounting_timestamp;
-
-        dev->power.accounting_timestamp = now;
+        u64 now, last, delta;
 
         if (dev->power.disable_depth > 0)
                 return;
 
+        last = dev->power.accounting_timestamp;
+
+        now = ktime_get_mono_fast_ns();
+        dev->power.accounting_timestamp = now;
+
+        /*
+         * Because ktime_get_mono_fast_ns() is not monotonic during
+         * timekeeping updates, ensure that 'now' is after the last saved
+         * timestamp.
+         */
+        if (now < last)
+                return;
+
+        delta = now - last;
+
         if (dev->power.runtime_status == RPM_SUSPENDED)
-                dev->power.suspended_jiffies += delta;
+                dev->power.suspended_time += delta;
         else
-                dev->power.active_jiffies += delta;
+                dev->power.active_time += delta;
 }
 
 static void __update_runtime_status(struct device *dev, enum rpm_status status)
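
A note on the guard above: ktime_get_mono_fast_ns() can briefly step backwards across a timekeeping update, and with u64 arithmetic "now - last" would then wrap to a huge bogus delta, which is why the new code discards such samples. A minimal userspace sketch of the same clamp pattern (all names here are illustrative, not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for a fast clock that may step back slightly across a
 * timekeeping update, the way ktime_get_mono_fast_ns() can. */
static uint64_t fast_clock_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static uint64_t accounted_ns, last_stamp_ns;

static void update_accounting(void)
{
        uint64_t now = fast_clock_ns();
        uint64_t last = last_stamp_ns;

        last_stamp_ns = now;

        /* Unsigned "now - last" would wrap if the clock stepped back,
         * so drop such samples instead of adding a bogus delta. */
        if (now < last)
                return;

        accounted_ns += now - last;
}

int main(void)
{
        last_stamp_ns = fast_clock_ns();
        update_accounting();
        printf("accounted %llu ns\n", (unsigned long long)accounted_ns);
        return 0;
}
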
@@ -86,6 +96,32 @@
         dev->power.runtime_status = status;
 }
 
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+        u64 time;
+        unsigned long flags;
+
+        spin_lock_irqsave(&dev->power.lock, flags);
+
+        update_pm_runtime_accounting(dev);
+        time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+        spin_unlock_irqrestore(&dev->power.lock, flags);
+
+        return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+        return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+        return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
@@ -93,7 +129,7 @@
 static void pm_runtime_deactivate_timer(struct device *dev)
 {
         if (dev->power.timer_expires > 0) {
-                del_timer(&dev->power.suspend_timer);
+                hrtimer_try_to_cancel(&dev->power.suspend_timer);
                 dev->power.timer_expires = 0;
         }
 }
@@ -119,43 +155,29 @@
  * Compute the autosuspend-delay expiration time based on the device's
  * power.last_busy time. If the delay has already expired or is disabled
  * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
  *
  * This function may be called either with or without dev->power.lock held.
  * Either way it can be racy, since power.last_busy may be updated at any time.
  */
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
         int autosuspend_delay;
-        long elapsed;
-        unsigned long last_busy;
-        unsigned long expires = 0;
+        u64 expires;
 
         if (!dev->power.use_autosuspend)
-                goto out;
+                return 0;
 
         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
         if (autosuspend_delay < 0)
-                goto out;
+                return 0;
 
-        last_busy = READ_ONCE(dev->power.last_busy);
-        elapsed = jiffies - last_busy;
-        if (elapsed < 0)
-                goto out;       /* jiffies has wrapped around. */
+        expires = READ_ONCE(dev->power.last_busy);
+        expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+        if (expires > ktime_get_mono_fast_ns())
+                return expires; /* Expires in the future */
 
-        /*
-         * If the autosuspend_delay is >= 1 second, align the timer by rounding
-         * up to the nearest second.
-         */
-        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
-        if (autosuspend_delay >= 1000)
-                expires = round_jiffies(expires);
-        expires += !expires;
-        if (elapsed >= expires - last_busy)
-                expires = 0;    /* Already expired. */
-
- out:
-        return expires;
+        return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
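
The rewritten pm_runtime_autosuspend_expiration() above reduces to two additions and a comparison, since nanosecond u64 time does not wrap the way jiffies do. A hedged userspace model of that arithmetic (function and parameter names are illustrative):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

/* Model of pm_runtime_autosuspend_expiration(): absolute expiration in ns,
 * or 0 when autosuspend is disabled or the delay has already elapsed. */
static uint64_t autosuspend_expiration(int use_autosuspend,
                                       int autosuspend_delay_ms,
                                       uint64_t last_busy_ns, uint64_t now_ns)
{
        uint64_t expires;

        if (!use_autosuspend || autosuspend_delay_ms < 0)
                return 0;

        expires = last_busy_ns + (uint64_t)autosuspend_delay_ms * NSEC_PER_MSEC;
        return expires > now_ns ? expires : 0;
}

int main(void)
{
        /* 5 ms delay, 2 ms elapsed: expires 5 ms after last_busy. */
        assert(autosuspend_expiration(1, 5, 1000000, 3000000) == 6000000);
        /* Same delay, 7 ms elapsed: already expired, so 0. */
        assert(autosuspend_expiration(1, 5, 1000000, 8000000) == 0);
        return 0;
}
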
@@ -253,7 +275,7 @@
             || (dev->power.request_pending
                         && dev->power.request == RPM_REQ_RESUME))
                 retval = -EAGAIN;
-        else if (__dev_pm_qos_read_value(dev) == 0)
+        else if (__dev_pm_qos_resume_latency(dev) == 0)
                 retval = -EPERM;
         else if (dev->power.runtime_status == RPM_SUSPENDED)
                 retval = 1;
@@ -265,11 +287,11 @@
 {
         struct device_link *link;
 
-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held()) {
                 int retval;
 
-                if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
-                    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+                if (!(link->flags & DL_FLAG_PM_RUNTIME))
                         continue;
 
                 retval = pm_runtime_get_sync(link->supplier);
@@ -283,17 +305,34 @@
         return 0;
 }
 
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device.
+ */
+void pm_runtime_release_supplier(struct device_link *link)
+{
+        struct device *supplier = link->supplier;
+
+        /*
+         * The additional power.usage_count check is a safety net in case
+         * the rpm_active refcount becomes saturated, in which case
+         * refcount_dec_not_one() would return true forever, but it is not
+         * strictly necessary.
+         */
+        while (refcount_dec_not_one(&link->rpm_active) &&
+               atomic_read(&supplier->power.usage_count) > 0)
+                pm_runtime_put_noidle(supplier);
+}
+
 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
         struct device_link *link;
 
-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
-                if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
-                        continue;
-
-                while (refcount_dec_not_one(&link->rpm_active))
-                        pm_runtime_put_noidle(link->supplier);
-
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held()) {
+                pm_runtime_release_supplier(link);
                 if (try_to_suspend)
                         pm_request_idle(link->supplier);
         }
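
The safety-net comment in pm_runtime_release_supplier() deserves unpacking: once a refcount_t saturates, refcount_dec_not_one() keeps returning true forever, so the supplier's usage_count doubles as the loop bound. A rough userspace model of that failure mode (the saturation value is a stand-in; the kernel encodes REFCOUNT_SATURATED differently):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SATURATED UINT32_MAX    /* illustrative stand-in for REFCOUNT_SATURATED */

/* Model of refcount_dec_not_one(): decrement unless the count is 1,
 * but a saturated count stays pinned and always reports "decremented". */
static bool dec_not_one(uint32_t *count)
{
        if (*count == SATURATED)
                return true;
        if (*count == 1)
                return false;
        (*count)--;
        return true;
}

int main(void)
{
        uint32_t rpm_active = SATURATED;
        int usage_count = 3;    /* the loop's safety-net bound */

        /* Without the usage_count check this loop would never end. */
        while (dec_not_one(&rpm_active) && usage_count > 0)
                usage_count--;  /* models pm_runtime_put_noidle() */

        assert(usage_count == 0);
        return 0;
}
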
@@ -309,7 +348,8 @@
         struct device_link *link;
         int idx = device_links_read_lock();
 
-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held())
                 pm_request_idle(link->supplier);
 
         device_links_read_unlock(idx);
@@ -424,7 +464,10 @@
         /* Pending requests need to be canceled. */
         dev->power.request = RPM_REQ_NONE;
 
-        if (dev->power.no_callbacks)
+        callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+        /* If no callback assume success. */
+        if (!callback || dev->power.no_callbacks)
                 goto out;
 
         /* Carry out an asynchronous or a synchronous idle notification. */
@@ -440,10 +483,17 @@
 
         dev->power.idle_notification = true;
 
-        callback = RPM_GET_CALLBACK(dev, runtime_idle);
+        if (dev->power.irq_safe)
+                spin_unlock(&dev->power.lock);
+        else
+                spin_unlock_irq(&dev->power.lock);
 
-        if (callback)
-                retval = __rpm_callback(callback, dev);
+        retval = callback(dev);
+
+        if (dev->power.irq_safe)
+                spin_lock(&dev->power.lock);
+        else
+                spin_lock_irq(&dev->power.lock);
 
         dev->power.idle_notification = false;
         wake_up_all(&dev->power.wait_queue);
@@ -520,13 +570,11 @@
 
 repeat:
         retval = rpm_check_suspend_allowed(dev);
-
         if (retval < 0)
-                ;       /* Conditions are wrong. */
+                goto out;       /* Conditions are wrong. */
 
         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
-        else if (dev->power.runtime_status == RPM_RESUMING &&
-            !(rpmflags & RPM_ASYNC))
+        if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
                 retval = -EAGAIN;
         if (retval)
                 goto out;
@@ -534,7 +582,7 @@
         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
         if ((rpmflags & RPM_AUTO)
             && dev->power.runtime_status != RPM_SUSPENDING) {
-                unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+                u64 expires = pm_runtime_autosuspend_expiration(dev);
 
                 if (expires != 0) {
                         /* Pending requests need to be canceled. */
@@ -547,10 +595,20 @@
                          * expire; pm_suspend_timer_fn() will take care of the
                          * rest.
                          */
-                        if (!(dev->power.timer_expires && time_before_eq(
-                            dev->power.timer_expires, expires))) {
+                        if (!(dev->power.timer_expires &&
+                              dev->power.timer_expires <= expires)) {
+                                /*
+                                 * We add a slack of 25% to gather wakeups
+                                 * without sacrificing the granularity.
+                                 */
+                                u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
+                                                    (NSEC_PER_MSEC >> 2);
+
                                 dev->power.timer_expires = expires;
-                                mod_timer(&dev->power.suspend_timer, expires);
+                                hrtimer_start_range_ns(&dev->power.suspend_timer,
+                                                       ns_to_ktime(expires),
+                                                       slack,
+                                                       HRTIMER_MODE_ABS);
                         }
                         dev->power.timer_autosuspends = 1;
                         goto out;
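
The slack computation above is a units trick: NSEC_PER_MSEC >> 2 is a quarter of a millisecond in nanoseconds, so multiplying it by the delay in milliseconds yields 25% of the delay. A small self-checking sketch (names are illustrative):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

/* Model of the slack computed in rpm_suspend(): a quarter of the
 * autosuspend delay, expressed in nanoseconds. */
static uint64_t autosuspend_slack_ns(int delay_ms)
{
        return (uint64_t)delay_ms * (NSEC_PER_MSEC >> 2);
}

int main(void)
{
        /* A 100 ms delay allows the timer to fire up to 25 ms late. */
        assert(autosuspend_slack_ns(100) == 25 * NSEC_PER_MSEC);
        return 0;
}
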
@@ -617,6 +675,8 @@
         if (retval)
                 goto fail;
 
+        dev_pm_enable_wake_irq_complete(dev);
+
 no_callback:
         __update_runtime_status(dev, RPM_SUSPENDED);
         pm_runtime_deactivate_timer(dev);
@@ -662,7 +722,7 @@
         return retval;
 
 fail:
-        dev_pm_disable_wake_irq_check(dev);
+        dev_pm_disable_wake_irq_check(dev, true);
         __update_runtime_status(dev, RPM_ACTIVE);
         dev->power.deferred_resume = false;
         wake_up_all(&dev->power.wait_queue);
@@ -845,7 +905,7 @@
 
         callback = RPM_GET_CALLBACK(dev, runtime_resume);
 
-        dev_pm_disable_wake_irq_check(dev);
+        dev_pm_disable_wake_irq_check(dev, false);
         retval = rpm_callback(callback, dev);
         if (retval) {
                 __update_runtime_status(dev, RPM_SUSPENDED);
@@ -925,23 +985,28 @@
  *
  * Check if the time is right and queue a suspend request.
  */
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 {
-        struct device *dev = from_timer(dev, t, power.suspend_timer);
+        struct device *dev = container_of(timer, struct device, power.suspend_timer);
         unsigned long flags;
-        unsigned long expires;
+        u64 expires;
 
         spin_lock_irqsave(&dev->power.lock, flags);
 
         expires = dev->power.timer_expires;
-        /* If 'expire' is after 'jiffies' we've been called too early. */
-        if (expires > 0 && !time_after(expires, jiffies)) {
+        /*
+         * If 'expires' is after the current time, we've been called
+         * too early.
+         */
+        if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
                 dev->power.timer_expires = 0;
                 rpm_suspend(dev, dev->power.timer_autosuspends ?
                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
         }
 
         spin_unlock_irqrestore(&dev->power.lock, flags);
+
+        return HRTIMER_NORESTART;
 }
 
 /**
@@ -952,6 +1017,7 @@
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
         unsigned long flags;
+        u64 expires;
         int retval;
 
         spin_lock_irqsave(&dev->power.lock, flags);
@@ -968,10 +1034,10 @@
         /* Other scheduled or pending requests need to be canceled. */
         pm_runtime_cancel_pending(dev);
 
-        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-        dev->power.timer_expires += !dev->power.timer_expires;
+        expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+        dev->power.timer_expires = expires;
         dev->power.timer_autosuspends = 0;
-        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+        hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 
 out:
         spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,8 +1064,10 @@
         int retval;
 
         if (rpmflags & RPM_GET_PUT) {
-                if (!atomic_dec_and_test(&dev->power.usage_count))
+                if (!atomic_dec_and_test(&dev->power.usage_count)) {
+                        trace_rpm_usage_rcuidle(dev, rpmflags);
                         return 0;
+                }
         }
 
         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1030,8 +1098,10 @@
         int retval;
 
         if (rpmflags & RPM_GET_PUT) {
-                if (!atomic_dec_and_test(&dev->power.usage_count))
+                if (!atomic_dec_and_test(&dev->power.usage_count)) {
+                        trace_rpm_usage_rcuidle(dev, rpmflags);
                         return 0;
+                }
         }
 
         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1075,28 +1145,49 @@
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up device usage counter.
  * @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
  *
- * Return -EINVAL if runtime PM is disabled for the device.
+ * Return -EINVAL if runtime PM is disabled for @dev.
  *
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+ * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+ * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+ * without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+ *
+ * If @ign_usage_count is %false, this function can be used to prevent
+ * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
+ * runtime PM usage counter is not zero.
+ *
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
  */
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
 {
         unsigned long flags;
         int retval;
 
         spin_lock_irqsave(&dev->power.lock, flags);
-        retval = dev->power.disable_depth > 0 ? -EINVAL :
-                dev->power.runtime_status == RPM_ACTIVE
-                        && atomic_inc_not_zero(&dev->power.usage_count);
+        if (dev->power.disable_depth > 0) {
+                retval = -EINVAL;
+        } else if (dev->power.runtime_status != RPM_ACTIVE) {
+                retval = 0;
+        } else if (ign_usage_count) {
+                retval = 1;
+                atomic_inc(&dev->power.usage_count);
+        } else {
+                retval = atomic_inc_not_zero(&dev->power.usage_count);
        }
+        trace_rpm_usage_rcuidle(dev, 0);
         spin_unlock_irqrestore(&dev->power.lock, flags);
+
         return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
 
 /**
  * __pm_runtime_set_status - Set runtime PM status of a device.
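
A hedged driver-side sketch of the renamed helper; the surrounding function and register access are hypothetical, but the call pattern follows the kernel-doc above: a positive return means a usage-count reference was taken and the caller must drop it.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper: touch the hardware only if it is already
 * powered, without waking it up. */
static int example_read_hw_status(struct device *dev)
{
        int ret;

        /*
         * false: also require a nonzero usage count, matching the old
         * pm_runtime_get_if_in_use() semantics; passing true instead
         * would take a reference whenever the status is RPM_ACTIVE.
         */
        ret = pm_runtime_get_if_active(dev, false);
        if (ret <= 0)
                return ret;     /* -EINVAL (disabled) or 0 (not usable now) */

        /* ... read registers here; the device cannot suspend meanwhile ... */

        pm_runtime_put(dev);    /* drop the reference taken above */
        return 1;
}
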
@@ -1114,23 +1205,56 @@
  * and the device parent's counter of unsuspended children is modified to
  * reflect the new status. If the new status is RPM_SUSPENDED, an idle
  * notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
  */
 int __pm_runtime_set_status(struct device *dev, unsigned int status)
 {
         struct device *parent = dev->parent;
-        unsigned long flags;
         bool notify_parent = false;
         int error = 0;
 
         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                 return -EINVAL;
 
-        spin_lock_irqsave(&dev->power.lock, flags);
+        spin_lock_irq(&dev->power.lock);
 
-        if (!dev->power.runtime_error && !dev->power.disable_depth) {
+        /*
+         * Prevent PM-runtime from being enabled for the device or return an
+         * error if it is enabled already and working.
+         */
+        if (dev->power.runtime_error || dev->power.disable_depth)
+                dev->power.disable_depth++;
+        else
                 error = -EAGAIN;
-                goto out;
+
+        spin_unlock_irq(&dev->power.lock);
+
+        if (error)
+                return error;
+
+        /*
+         * If the new status is RPM_ACTIVE, the suppliers can be activated
+         * upfront regardless of the current status, because next time
+         * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+         * involved will be dropped down to one anyway.
+         */
+        if (status == RPM_ACTIVE) {
+                int idx = device_links_read_lock();
+
+                error = rpm_get_suppliers(dev);
+                if (error)
+                        status = RPM_SUSPENDED;
+
+                device_links_read_unlock(idx);
         }
+
+        spin_lock_irq(&dev->power.lock);
 
         if (dev->power.runtime_status == status || !parent)
                 goto out_set;
@@ -1159,18 +1283,32 @@
 
                 spin_unlock(&parent->power.lock);
 
-                if (error)
+                if (error) {
+                        status = RPM_SUSPENDED;
                         goto out;
+                }
         }
 
 out_set:
         __update_runtime_status(dev, status);
-        dev->power.runtime_error = 0;
+        if (!error)
+                dev->power.runtime_error = 0;
+
 out:
-        spin_unlock_irqrestore(&dev->power.lock, flags);
+        spin_unlock_irq(&dev->power.lock);
 
         if (notify_parent)
                 pm_request_idle(parent);
+
+        if (status == RPM_SUSPENDED) {
+                int idx = device_links_read_lock();
+
+                rpm_put_suppliers(dev);
+
+                device_links_read_unlock(idx);
+        }
+
+        pm_runtime_enable(dev);
 
         return error;
 }
@@ -1299,6 +1437,9 @@
                 pm_runtime_put_noidle(dev);
         }
 
+        /* Update time accounting before disabling PM-runtime. */
+        update_pm_runtime_accounting(dev);
+
         if (!dev->power.disable_depth++)
                 __pm_runtime_barrier(dev);
 
@@ -1317,10 +1458,15 @@
 
         spin_lock_irqsave(&dev->power.lock, flags);
 
-        if (dev->power.disable_depth > 0)
+        if (dev->power.disable_depth > 0) {
                 dev->power.disable_depth--;
-        else
+
+                /* About to enable runtime pm, set accounting_timestamp to now */
+                if (!dev->power.disable_depth)
+                        dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+        } else {
                 dev_warn(dev, "Unbalanced %s!\n", __func__);
+        }
 
         WARN(!dev->power.disable_depth &&
              dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1371,6 +1517,8 @@
         dev->power.runtime_auto = true;
         if (atomic_dec_and_test(&dev->power.usage_count))
                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+        else
+                trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
 
 out:
         spin_unlock_irq(&dev->power.lock);
@@ -1438,6 +1586,8 @@
                 if (!old_use || old_delay >= 0) {
                         atomic_inc(&dev->power.usage_count);
                         rpm_resume(dev, 0);
+                } else {
+                        trace_rpm_usage_rcuidle(dev, 0);
                 }
         }
 
@@ -1517,11 +1667,12 @@
         dev->power.request_pending = false;
         dev->power.request = RPM_REQ_NONE;
         dev->power.deferred_resume = false;
-        dev->power.accounting_timestamp = jiffies;
+        dev->power.needs_force_resume = 0;
         INIT_WORK(&dev->power.work, pm_runtime_work);
 
         dev->power.timer_expires = 0;
-        timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+        hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+        dev->power.suspend_timer.function = pm_suspend_timer_fn;
 
         init_waitqueue_head(&dev->power.wait_queue);
 }
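
The pm_runtime_init() change above swaps timer_setup() for the hrtimer idiom of this kernel generation: initialize, then assign the handler field by hand. A standalone sketch of the same pattern (the example_* names are hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

/* One-shot handler, mirroring pm_suspend_timer_fn()'s contract. */
static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
        /* ... work happens here ... */
        return HRTIMER_NORESTART;
}

static void example_timer_arm(u64 expires_ns)
{
        hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        example_timer.function = example_timer_fn;
        hrtimer_start(&example_timer, ns_to_ktime(expires_ns), HRTIMER_MODE_ABS);
}
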
@@ -1556,41 +1707,6 @@
 }
 
 /**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
-        struct device_link *link;
-        int idx;
-
-        idx = device_links_read_lock();
-
-        list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
-                if (!(link->flags & DL_FLAG_MANAGED))
-                        continue;
-
-                while (refcount_dec_not_one(&link->rpm_active))
-                        pm_runtime_put_noidle(dev);
-        }
-
-        device_links_read_unlock(idx);
-}
-
-/**
  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
  * @dev: Consumer device.
  */
@@ -1601,7 +1717,8 @@
 
         idx = device_links_read_lock();
 
-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held())
                 if (link->flags & DL_FLAG_PM_RUNTIME) {
                         link->supplier_preactivated = true;
                         pm_runtime_get_sync(link->supplier);
@@ -1624,7 +1741,8 @@
 
         idx = device_links_read_lock();
 
-        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                                device_links_read_lock_held())
                 if (link->supplier_preactivated) {
                         link->supplier_preactivated = false;
                         spin_lock_irqsave(&dev->power.lock, flags);
@@ -1645,12 +1763,30 @@
         spin_unlock_irq(&dev->power.lock);
 }
 
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
 {
         spin_lock_irq(&dev->power.lock);
         WARN_ON(dev->power.links_count == 0);
         dev->power.links_count--;
         spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM-runtime references to it from the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+        if (!(link->flags & DL_FLAG_PM_RUNTIME))
+                return;
+
+        pm_runtime_drop_link_count(link->consumer);
+        pm_runtime_release_supplier(link);
+        pm_request_idle(link->supplier);
 }
 
 static bool pm_runtime_need_not_resume(struct device *dev)
@@ -1698,10 +1834,12 @@
          * its parent, but set its status to RPM_SUSPENDED anyway in case this
          * function will be called again for it in the meantime.
          */
-        if (pm_runtime_need_not_resume(dev))
+        if (pm_runtime_need_not_resume(dev)) {
                 pm_runtime_set_suspended(dev);
-        else
+        } else {
                 __update_runtime_status(dev, RPM_SUSPENDED);
+                dev->power.needs_force_resume = 1;
+        }
 
         return 0;
 
@@ -1728,7 +1866,7 @@
         int (*callback)(struct device *);
         int ret = 0;
 
-        if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+        if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
                 goto out;
 
         /*
@@ -1747,6 +1885,7 @@
 
         pm_runtime_mark_last_busy(dev);
 out:
+        dev->power.needs_force_resume = 0;
         pm_runtime_enable(dev);
         return ret;
 }
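
The new needs_force_resume flag pairs pm_runtime_force_suspend() with pm_runtime_force_resume() across system sleep, so only a device the former actually suspended gets resumed by the latter. A hypothetical driver typically wires the pair up as its system sleep callbacks (the example_* callbacks are placeholders):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        /* ... put the (hypothetical) hardware into a low-power state ... */
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        /* ... power the hardware back up ... */
        return 0;
}

/* Reuse the runtime PM path for system sleep; force_suspend() sets
 * power.needs_force_resume only when it really suspended the device,
 * and force_resume() checks and clears it, as in the hunks above. */
static const struct dev_pm_ops example_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(example_runtime_suspend,
                           example_runtime_resume, NULL)
};
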