2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/base/power/runtime.c
@@ -1,13 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * drivers/base/power/runtime.c - Helper functions for device runtime PM
  *
  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
  */
-
 #include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
@@ -62,22 +62,32 @@
  * runtime_status field is updated, to account the time in the old state
  * correctly.
  */
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
 {
-	unsigned long now = jiffies;
-	unsigned long delta;
-
-	delta = now - dev->power.accounting_timestamp;
-
-	dev->power.accounting_timestamp = now;
+	u64 now, last, delta;
 
 	if (dev->power.disable_depth > 0)
 		return;
 
+	last = dev->power.accounting_timestamp;
+
+	now = ktime_get_mono_fast_ns();
+	dev->power.accounting_timestamp = now;
+
+	/*
+	 * Because ktime_get_mono_fast_ns() is not monotonic during
+	 * timekeeping updates, ensure that 'now' is after the last saved
+	 * timestamp.
+	 */
+	if (now < last)
+		return;
+
+	delta = now - last;
+
 	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
+		dev->power.suspended_time += delta;
 	else
-		dev->power.active_jiffies += delta;
+		dev->power.active_time += delta;
 }
 
 static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -86,6 +96,32 @@
 	dev->power.runtime_status = status;
 }
 
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+	u64 time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	update_pm_runtime_accounting(dev);
+	time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+	return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+	return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
@@ -93,7 +129,7 @@
 static void pm_runtime_deactivate_timer(struct device *dev)
 {
 	if (dev->power.timer_expires > 0) {
-		del_timer(&dev->power.suspend_timer);
+		hrtimer_try_to_cancel(&dev->power.suspend_timer);
 		dev->power.timer_expires = 0;
 	}
 }
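
[Note] With the accounting hunks above, both counters are u64 nanoseconds and are read back through pm_runtime_active_time() and pm_runtime_suspended_time() (only the latter is exported to modules by this patch). A minimal usage sketch, not part of the patch; foo_update_stats() and last_sample are hypothetical:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper: sample how long @dev has been runtime-suspended. */
static void foo_update_stats(struct device *dev, u64 *last_sample)
{
	/* The counter is u64 nanoseconds now, so no jiffies wraparound. */
	u64 suspended_ns = pm_runtime_suspended_time(dev);

	dev_dbg(dev, "suspended for %llu ns since last sample\n",
		suspended_ns - *last_sample);
	*last_sample = suspended_ns;
}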
@@ -119,43 +155,29 @@
  * Compute the autosuspend-delay expiration time based on the device's
  * power.last_busy time. If the delay has already expired or is disabled
  * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
  *
  * This function may be called either with or without dev->power.lock held.
  * Either way it can be racy, since power.last_busy may be updated at any time.
  */
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
-	long elapsed;
-	unsigned long last_busy;
-	unsigned long expires = 0;
+	u64 expires;
 
 	if (!dev->power.use_autosuspend)
-		goto out;
+		return 0;
 
 	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 	if (autosuspend_delay < 0)
-		goto out;
+		return 0;
 
-	last_busy = READ_ONCE(dev->power.last_busy);
-	elapsed = jiffies - last_busy;
-	if (elapsed < 0)
-		goto out;	/* jiffies has wrapped around. */
+	expires = READ_ONCE(dev->power.last_busy);
+	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+	if (expires > ktime_get_mono_fast_ns())
+		return expires;	/* Expires in the future */
 
-	/*
-	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
-	 * up to the nearest second.
-	 */
-	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
-	if (autosuspend_delay >= 1000)
-		expires = round_jiffies(expires);
-	expires += !expires;
-	if (elapsed >= expires - last_busy)
-		expires = 0;	/* Already expired. */
-
- out:
-	return expires;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
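
[Note] The hunk above replaces the jiffies wraparound check and round_jiffies() alignment with plain 64-bit nanosecond arithmetic. A sketch of the math only (example_expiration() is hypothetical, not kernel API): a 64-bit nanosecond counter wraps after roughly 584 years, so no wraparound handling is needed.

#include <linux/ktime.h>

/*
 * With autosuspend_delay = 5000 ms and last_busy taken from
 * ktime_get_mono_fast_ns(), the deadline is simply last_busy + 5e9 ns.
 */
static u64 example_expiration(u64 last_busy_ns, int autosuspend_delay_ms)
{
	return last_busy_ns + (u64)autosuspend_delay_ms * NSEC_PER_MSEC;
}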
@@ -253,7 +275,7 @@
 	    || (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME))
 		retval = -EAGAIN;
-	else if (__dev_pm_qos_read_value(dev) == 0)
+	else if (__dev_pm_qos_resume_latency(dev) == 0)
 		retval = -EPERM;
 	else if (dev->power.runtime_status == RPM_SUSPENDED)
 		retval = 1;
@@ -265,11 +287,11 @@
 {
 	struct device_link *link;
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held()) {
 		int retval;
 
-		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
-		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+		if (!(link->flags & DL_FLAG_PM_RUNTIME))
 			continue;
 
 		retval = pm_runtime_get_sync(link->supplier);
@@ -283,17 +305,34 @@
 	return 0;
 }
 
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device.
+ */
+void pm_runtime_release_supplier(struct device_link *link)
+{
+	struct device *supplier = link->supplier;
+
+	/*
+	 * The additional power.usage_count check is a safety net in case
+	 * the rpm_active refcount becomes saturated, in which case
+	 * refcount_dec_not_one() would return true forever, but it is not
+	 * strictly necessary.
+	 */
+	while (refcount_dec_not_one(&link->rpm_active) &&
+	       atomic_read(&supplier->power.usage_count) > 0)
+		pm_runtime_put_noidle(supplier);
+}
+
 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
 	struct device_link *link;
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
-		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
-			continue;
-
-		while (refcount_dec_not_one(&link->rpm_active))
-			pm_runtime_put_noidle(link->supplier);
-
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held()) {
+		pm_runtime_release_supplier(link);
 		if (try_to_suspend)
 			pm_request_idle(link->supplier);
 	}
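
[Note] The rpm_active references that pm_runtime_release_supplier() drops above are the ones taken on a consumer's behalf over a DL_FLAG_PM_RUNTIME device link. A minimal sketch of how such a link is created, assuming hypothetical foo_bind_supplier(), consumer and supplier devices:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical: tie a consumer's runtime PM state to its supplier's. */
static int foo_bind_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/*
	 * DL_FLAG_PM_RUNTIME makes rpm_get_suppliers()/__rpm_put_suppliers()
	 * manage link->rpm_active; DL_FLAG_RPM_ACTIVE starts the link with
	 * the supplier counted as active once.
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -ENODEV;
}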
@@ -309,7 +348,8 @@
 	struct device_link *link;
 	int idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held())
 		pm_request_idle(link->supplier);
 
 	device_links_read_unlock(idx);
@@ -520,13 +560,11 @@
 
  repeat:
 	retval = rpm_check_suspend_allowed(dev);
-
 	if (retval < 0)
-		;	/* Conditions are wrong. */
+		goto out;	/* Conditions are wrong. */
 
 	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
-	else if (dev->power.runtime_status == RPM_RESUMING &&
-	    !(rpmflags & RPM_ASYNC))
+	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
 		retval = -EAGAIN;
 	if (retval)
 		goto out;
@@ -534,7 +572,7 @@
 	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
 	if ((rpmflags & RPM_AUTO)
 	    && dev->power.runtime_status != RPM_SUSPENDING) {
-		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+		u64 expires = pm_runtime_autosuspend_expiration(dev);
 
 		if (expires != 0) {
 			/* Pending requests need to be canceled. */
@@ -547,10 +585,20 @@
 			 * expire; pm_suspend_timer_fn() will take care of the
 			 * rest.
 			 */
-			if (!(dev->power.timer_expires && time_before_eq(
-			    dev->power.timer_expires, expires))) {
+			if (!(dev->power.timer_expires &&
+			      dev->power.timer_expires <= expires)) {
+				/*
+				 * We add a slack of 25% to gather wakeups
+				 * without sacrificing the granularity.
+				 */
+				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
+						(NSEC_PER_MSEC >> 2);
+
 				dev->power.timer_expires = expires;
-				mod_timer(&dev->power.suspend_timer, expires);
+				hrtimer_start_range_ns(&dev->power.suspend_timer,
+						       ns_to_ktime(expires),
+						       slack,
+						       HRTIMER_MODE_ABS);
 			}
 			dev->power.timer_autosuspends = 1;
 			goto out;
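
[Note] The 25% slack above falls out of the arithmetic: NSEC_PER_MSEC is 1000000, so (NSEC_PER_MSEC >> 2) is 250000 ns, i.e. a quarter of a millisecond of slack per millisecond of autosuspend delay. A worked sketch (example_slack() is hypothetical, not kernel API):

#include <linux/ktime.h>

/* For a 100 ms delay: slack = 100 * 250000 ns = 25 ms, i.e. 25%. */
static u64 example_slack(int autosuspend_delay_ms)
{
	return (u64)autosuspend_delay_ms * (NSEC_PER_MSEC >> 2);
}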
@@ -925,23 +973,28 @@
  *
  * Check if the time is right and queue a suspend request.
  */
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 {
-	struct device *dev = from_timer(dev, t, power.suspend_timer);
+	struct device *dev = container_of(timer, struct device, power.suspend_timer);
 	unsigned long flags;
-	unsigned long expires;
+	u64 expires;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	expires = dev->power.timer_expires;
-	/* If 'expire' is after 'jiffies' we've been called too early. */
-	if (expires > 0 && !time_after(expires, jiffies)) {
+	/*
+	 * If 'expires' is after the current time, we've been called
+	 * too early.
+	 */
+	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -952,6 +1005,7 @@
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
+	u64 expires;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
@@ -968,10 +1022,10 @@
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
-	dev->power.timer_expires += !dev->power.timer_expires;
+	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+	dev->power.timer_expires = expires;
 	dev->power.timer_autosuspends = 0;
-	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
 
  out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
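
[Note] pm_schedule_suspend() keeps its millisecond interface; only the internal deadline moved to absolute nanoseconds. A minimal caller sketch (foo_done_with_hw() is hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver path: ask for a suspend 2000 ms from now. */
static void foo_done_with_hw(struct device *dev)
{
	int ret = pm_schedule_suspend(dev, 2000);

	if (ret < 0)
		dev_warn(dev, "failed to schedule suspend: %d\n", ret);
}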
@@ -998,8 +1052,10 @@
 	int retval;
 
 	if (rpmflags & RPM_GET_PUT) {
-		if (!atomic_dec_and_test(&dev->power.usage_count))
+		if (!atomic_dec_and_test(&dev->power.usage_count)) {
+			trace_rpm_usage_rcuidle(dev, rpmflags);
 			return 0;
+		}
 	}
 
 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1030,8 +1086,10 @@
 	int retval;
 
 	if (rpmflags & RPM_GET_PUT) {
-		if (!atomic_dec_and_test(&dev->power.usage_count))
+		if (!atomic_dec_and_test(&dev->power.usage_count)) {
+			trace_rpm_usage_rcuidle(dev, rpmflags);
 			return 0;
+		}
 	}
 
 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1075,28 +1133,49 @@
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
 
 /**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up device usage counter.
  * @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
  *
- * Return -EINVAL if runtime PM is disabled for the device.
+ * Return -EINVAL if runtime PM is disabled for @dev.
  *
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+ * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+ * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+ * without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+ *
+ * If @ign_usage_count is %false, this function can be used to prevent
+ * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
+ * runtime PM usage counter is not zero.
+ *
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
  */
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
 {
 	unsigned long flags;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = dev->power.disable_depth > 0 ? -EINVAL :
-		dev->power.runtime_status == RPM_ACTIVE
-			&& atomic_inc_not_zero(&dev->power.usage_count);
+	if (dev->power.disable_depth > 0) {
+		retval = -EINVAL;
+	} else if (dev->power.runtime_status != RPM_ACTIVE) {
+		retval = 0;
+	} else if (ign_usage_count) {
+		retval = 1;
+		atomic_inc(&dev->power.usage_count);
+	} else {
+		retval = atomic_inc_not_zero(&dev->power.usage_count);
+	}
+	trace_rpm_usage_rcuidle(dev, 0);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
+
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
 
 /**
  * __pm_runtime_set_status - Set runtime PM status of a device.
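
[Note] A minimal caller sketch for the new pm_runtime_get_if_active() above (foo_poll_if_active() is hypothetical): useful when a driver only wants to touch the hardware if it is already powered, without resuming it.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical: poke the hardware only if it is already powered up. */
static void foo_poll_if_active(struct device *dev)
{
	/* With ign_usage_count == true, success only requires RPM_ACTIVE. */
	if (pm_runtime_get_if_active(dev, true) <= 0)
		return;	/* suspended (0) or runtime PM disabled (-EINVAL) */

	/* ... safe to access registers here ... */

	/* The caller must drop the reference taken above. */
	pm_runtime_put(dev);
}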
@@ -1114,23 +1193,56 @@
  * and the device parent's counter of unsuspended children is modified to
  * reflect the new status. If the new status is RPM_SUSPENDED, an idle
  * notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
  */
 int __pm_runtime_set_status(struct device *dev, unsigned int status)
 {
 	struct device *parent = dev->parent;
-	unsigned long flags;
 	bool notify_parent = false;
 	int error = 0;
 
 	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
 		return -EINVAL;
 
-	spin_lock_irqsave(&dev->power.lock, flags);
+	spin_lock_irq(&dev->power.lock);
 
-	if (!dev->power.runtime_error && !dev->power.disable_depth) {
+	/*
+	 * Prevent PM-runtime from being enabled for the device or return an
+	 * error if it is enabled already and working.
+	 */
+	if (dev->power.runtime_error || dev->power.disable_depth)
+		dev->power.disable_depth++;
+	else
 		error = -EAGAIN;
-		goto out;
+
+	spin_unlock_irq(&dev->power.lock);
+
+	if (error)
+		return error;
+
+	/*
+	 * If the new status is RPM_ACTIVE, the suppliers can be activated
+	 * upfront regardless of the current status, because next time
+	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+	 * involved will be dropped down to one anyway.
+	 */
+	if (status == RPM_ACTIVE) {
+		int idx = device_links_read_lock();
+
+		error = rpm_get_suppliers(dev);
+		if (error)
+			status = RPM_SUSPENDED;
+
+		device_links_read_unlock(idx);
 	}
+
+	spin_lock_irq(&dev->power.lock);
 
 	if (dev->power.runtime_status == status || !parent)
 		goto out_set;
@@ -1159,18 +1271,32 @@
 
 		spin_unlock(&parent->power.lock);
 
-		if (error)
+		if (error) {
+			status = RPM_SUSPENDED;
 			goto out;
+		}
 	}
 
  out_set:
 	__update_runtime_status(dev, status);
-	dev->power.runtime_error = 0;
+	if (!error)
+		dev->power.runtime_error = 0;
+
  out:
-	spin_unlock_irqrestore(&dev->power.lock, flags);
+	spin_unlock_irq(&dev->power.lock);
 
 	if (notify_parent)
 		pm_request_idle(parent);
+
+	if (status == RPM_SUSPENDED) {
+		int idx = device_links_read_lock();
+
+		rpm_put_suppliers(dev);
+
+		device_links_read_unlock(idx);
+	}
+
+	pm_runtime_enable(dev);
 
 	return error;
 }
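
[Note] __pm_runtime_set_status() is normally reached through the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers. A probe-time sketch (foo_probe() is hypothetical) of the common pattern this hunk affects:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe fragment: hardware left powered on by firmware. */
static int foo_probe(struct device *dev)
{
	int ret;

	/*
	 * pm_runtime_set_active() wraps __pm_runtime_set_status(dev,
	 * RPM_ACTIVE); with the hunks above it briefly blocks PM-runtime
	 * and activates DL_FLAG_PM_RUNTIME suppliers first.
	 */
	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;

	pm_runtime_enable(dev);
	return 0;
}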
@@ -1299,6 +1425,9 @@
 		pm_runtime_put_noidle(dev);
 	}
 
+	/* Update time accounting before disabling PM-runtime. */
+	update_pm_runtime_accounting(dev);
+
 	if (!dev->power.disable_depth++)
 		__pm_runtime_barrier(dev);
 
@@ -1317,10 +1446,15 @@
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.disable_depth > 0)
+	if (dev->power.disable_depth > 0) {
 		dev->power.disable_depth--;
-	else
+
+		/* About to enable runtime PM, set accounting_timestamp to now */
+		if (!dev->power.disable_depth)
+			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+	} else {
 		dev_warn(dev, "Unbalanced %s!\n", __func__);
+	}
 
 	WARN(!dev->power.disable_depth &&
 	     dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1371,6 +1505,8 @@
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
 		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+	else
+		trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
@@ -1438,6 +1574,8 @@
 		if (!old_use || old_delay >= 0) {
 			atomic_inc(&dev->power.usage_count);
 			rpm_resume(dev, 0);
+		} else {
+			trace_rpm_usage_rcuidle(dev, 0);
 		}
 	}
 
@@ -1517,11 +1655,12 @@
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
-	dev->power.accounting_timestamp = jiffies;
+	dev->power.needs_force_resume = 0;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
-	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	dev->power.suspend_timer.function = pm_suspend_timer_fn;
 
 	init_waitqueue_head(&dev->power.wait_queue);
 }
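
[Note] A self-contained sketch of the hrtimer pattern the patch switches to: init on CLOCK_MONOTONIC in absolute mode, recover the containing object with container_of() in the callback, and return HRTIMER_NORESTART for one-shot behavior. struct foo and its functions are hypothetical:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct foo {
	struct hrtimer timer;
};

static enum hrtimer_restart foo_timer_fn(struct hrtimer *timer)
{
	struct foo *f = container_of(timer, struct foo, timer);

	/* ... do work with f ... */
	(void)f;
	return HRTIMER_NORESTART;	/* one-shot, like pm_suspend_timer_fn() */
}

static void foo_arm(struct foo *f, u64 deadline_ns)
{
	hrtimer_init(&f->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	f->timer.function = foo_timer_fn;
	/* Absolute deadline on the same clock as ktime_get_mono_fast_ns(). */
	hrtimer_start(&f->timer, ns_to_ktime(deadline_ns), HRTIMER_MODE_ABS);
}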
@@ -1556,41 +1695,6 @@
 }
 
 /**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
-	struct device_link *link;
-	int idx;
-
-	idx = device_links_read_lock();
-
-	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
-			continue;
-
-		while (refcount_dec_not_one(&link->rpm_active))
-			pm_runtime_put_noidle(dev);
-	}
-
-	device_links_read_unlock(idx);
-}
-
-/**
  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
  * @dev: Consumer device.
  */
@@ -1601,7 +1705,8 @@
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held())
 		if (link->flags & DL_FLAG_PM_RUNTIME) {
 			link->supplier_preactivated = true;
 			pm_runtime_get_sync(link->supplier);
@@ -1624,7 +1729,8 @@
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held())
 		if (link->supplier_preactivated) {
 			link->supplier_preactivated = false;
 			spin_lock_irqsave(&dev->power.lock, flags);
@@ -1645,12 +1751,30 @@
 	spin_unlock_irq(&dev->power.lock);
 }
 
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
 {
 	spin_lock_irq(&dev->power.lock);
 	WARN_ON(dev->power.links_count == 0);
 	dev->power.links_count--;
 	spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM-runtime references to it from the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+	if (!(link->flags & DL_FLAG_PM_RUNTIME))
+		return;
+
+	pm_runtime_drop_link_count(link->consumer);
+	pm_runtime_release_supplier(link);
+	pm_request_idle(link->supplier);
 }
 
 static bool pm_runtime_need_not_resume(struct device *dev)
@@ -1698,10 +1822,12 @@
 	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
 	 * function will be called again for it in the meantime.
 	 */
-	if (pm_runtime_need_not_resume(dev))
+	if (pm_runtime_need_not_resume(dev)) {
 		pm_runtime_set_suspended(dev);
-	else
+	} else {
 		__update_runtime_status(dev, RPM_SUSPENDED);
+		dev->power.needs_force_resume = 1;
+	}
 
 	return 0;
 
@@ -1728,7 +1854,7 @@
 	int (*callback)(struct device *);
 	int ret = 0;
 
-	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
 		goto out;
 
 	/*
@@ -1747,6 +1873,7 @@
 
 	pm_runtime_mark_last_busy(dev);
  out:
+	dev->power.needs_force_resume = 0;
 	pm_runtime_enable(dev);
 	return ret;
 }
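
[Note] The needs_force_resume flag above belongs to the pm_runtime_force_suspend()/pm_runtime_force_resume() pair, which drivers commonly wire up as their system-sleep callbacks. A minimal sketch; foo_* names are hypothetical, the macros and force helpers are real kernel API:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	/*
	 * With the hunks above, pm_runtime_force_resume() reruns the
	 * runtime-resume callback only if power.needs_force_resume was
	 * set by pm_runtime_force_suspend().
	 */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};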