2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
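No changelog accompanies this diff; summarizing from the hunks below: add a
"PM: " pr_fmt() prefix and drop the old "PM: " literals, switch %pF to %pS,
inline the dpm_subsys_*_cb() helpers into their callers, rename
dev_pm_smart_suspend_and_suspended() to dev_pm_skip_suspend(), replace
dev_pm_may_skip_resume() with dev_pm_skip_resume(), rework
DPM_FLAG_LEAVE_SUSPENDED and DPM_FLAG_NEVER_SKIP into
DPM_FLAG_MAY_SKIP_RESUME and DPM_FLAG_NO_DIRECT_COMPLETE, factor the async
scheduling pattern into dpm_async_fn(), fold dpm_noirq_begin() and
dpm_noirq_end() into their callers, and call devfreq_suspend()/devfreq_resume()
alongside the cpufreq hooks.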
--- a/kernel/drivers/base/power/main.c
+++ b/kernel/drivers/base/power/main.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * drivers/base/power/main.c - Where the driver meets power management.
  *
  * Copyright (c) 2003 Patrick Mochel
  * Copyright (c) 2003 Open Source Development Lab
- *
- * This file is released under the GPLv2
- *
  *
  * The driver model core calls device_pm_add() when a device is registered.
  * This will initialize the embedded device_pm_info object in the device
@@ -16,6 +14,8 @@
  * domain dependencies may differ from the ancestral dependencies that the
  * subsystem list maintains.
  */
+
+#define pr_fmt(fmt) "PM: " fmt
 
 #include <linux/device.h>
 #include <linux/export.h>
@@ -32,6 +32,7 @@
 #include <trace/events/power.h>
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
+#include <linux/devfreq.h>
 #include <linux/timer.h>
 #include <linux/wakeup_reason.h>
 
@@ -39,6 +40,10 @@
 #include "power.h"
 
 typedef int (*pm_callback_t)(struct device *);
+
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+	list_for_each_entry_rcu(pos, head, member, \
+			device_links_read_lock_held())
 
 /*
  * The entries in the dpm_list list are in a depth first order, simply
@@ -128,7 +133,7 @@
 	if (device_pm_not_required(dev))
 		return;
 
-	pr_debug("PM: Adding info for %s:%s\n",
+	pr_debug("Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	device_pm_check_callbacks(dev);
 	mutex_lock(&dpm_list_mtx);
@@ -149,7 +154,7 @@
 	if (device_pm_not_required(dev))
 		return;
 
-	pr_debug("PM: Removing info for %s:%s\n",
+	pr_debug("Removing info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
@@ -168,7 +173,7 @@
  */
 void device_pm_move_before(struct device *deva, struct device *devb)
 {
-	pr_debug("PM: Moving %s:%s before %s:%s\n",
+	pr_debug("Moving %s:%s before %s:%s\n",
 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 	/* Delete deva from dpm_list and reinsert before devb. */
@@ -182,7 +187,7 @@
  */
 void device_pm_move_after(struct device *deva, struct device *devb)
 {
-	pr_debug("PM: Moving %s:%s after %s:%s\n",
+	pr_debug("Moving %s:%s after %s:%s\n",
 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 	/* Delete deva from dpm_list and reinsert after devb. */
@@ -195,7 +200,7 @@
  */
 void device_pm_move_last(struct device *dev)
 {
-	pr_debug("PM: Moving %s:%s to end of list\n",
+	pr_debug("Moving %s:%s to end of list\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	list_move_tail(&dev->power.entry, &dpm_list);
 }
@@ -205,7 +210,7 @@
 	if (!pm_print_times_enabled)
 		return 0;
 
-	dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 		 task_pid_nr(current),
 		 dev->parent ? dev_name(dev->parent) : "none");
 	return ktime_get();
@@ -223,7 +228,7 @@
 	rettime = ktime_get();
 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 
-	dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 		 (unsigned long long)nsecs >> 10);
 }
 
@@ -266,7 +271,7 @@
 	 * callbacks freeing the link objects for the links in the list we're
 	 * walking.
 	 */
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);
 
@@ -323,7 +328,7 @@
 	 * continue instead of trying to continue in parallel with its
 	 * unregistration).
 	 */
-	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);
 
@@ -359,7 +364,6 @@
 	case PM_EVENT_THAW:
 	case PM_EVENT_RECOVER:
 		return ops->thaw;
-		break;
 	case PM_EVENT_RESTORE:
 		return ops->restore;
 #endif /* CONFIG_HIBERNATE_CALLBACKS */
@@ -446,8 +450,8 @@
 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 			int error)
 {
-	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
-		dev_name(dev), pm_verb(state.event), info, error);
+	pr_err("Device %s failed to %s%s: error %d\n",
+	       dev_name(dev), pm_verb(state.event), info, error);
 }
 
 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -504,7 +508,7 @@
 
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
@@ -515,7 +519,7 @@
 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
 
 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
-	show_stack(wd->tsk, NULL);
+	show_stack(wd->tsk, NULL, KERN_EMERG);
 	panic("%s %s: unrecoverable failure\n",
 		dev_driver_string(wd->dev), dev_name(wd->dev));
 }
@@ -558,86 +562,25 @@
 /*------------------------- Resume routines -------------------------*/
 
 /**
- * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * dev_pm_skip_resume - System-wide device resume optimization check.
  * @dev: Target device.
  *
- * Make the core skip the "early resume" and "resume" phases for @dev.
- *
- * This function can be called by middle-layer code during the "noirq" phase of
- * system resume if necessary, but not by device drivers.
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ *   transition under way is RESUME).
  */
-void dev_pm_skip_next_resume_phases(struct device *dev)
+bool dev_pm_skip_resume(struct device *dev)
 {
-	dev->power.is_late_suspended = false;
-	dev->power.is_suspended = false;
+	if (pm_transition.event == PM_EVENT_RESTORE)
+		return false;
+
+	if (pm_transition.event == PM_EVENT_THAW)
+		return dev_pm_skip_suspend(dev);
+
+	return !dev->power.must_resume;
 }
-
-/**
- * suspend_event - Return a "suspend" message for given "resume" one.
- * @resume_msg: PM message representing a system-wide resume transition.
- */
-static pm_message_t suspend_event(pm_message_t resume_msg)
-{
-	switch (resume_msg.event) {
-	case PM_EVENT_RESUME:
-		return PMSG_SUSPEND;
-	case PM_EVENT_THAW:
-	case PM_EVENT_RESTORE:
-		return PMSG_FREEZE;
-	case PM_EVENT_RECOVER:
-		return PMSG_HIBERNATE;
-	}
-	return PMSG_ON;
-}
-
-/**
- * dev_pm_may_skip_resume - System-wide device resume optimization check.
- * @dev: Target device.
- *
- * Checks whether or not the device may be left in suspend after a system-wide
- * transition to the working state.
- */
-bool dev_pm_may_skip_resume(struct device *dev)
-{
-	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
-}
-
-static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
-						pm_message_t state,
-						const char **info_p)
-{
-	pm_callback_t callback;
-	const char *info;
-
-	if (dev->pm_domain) {
-		info = "noirq power domain ";
-		callback = pm_noirq_op(&dev->pm_domain->ops, state);
-	} else if (dev->type && dev->type->pm) {
-		info = "noirq type ";
-		callback = pm_noirq_op(dev->type->pm, state);
-	} else if (dev->class && dev->class->pm) {
-		info = "noirq class ";
-		callback = pm_noirq_op(dev->class->pm, state);
-	} else if (dev->bus && dev->bus->pm) {
-		info = "noirq bus ";
-		callback = pm_noirq_op(dev->bus->pm, state);
-	} else {
-		return NULL;
-	}
-
-	if (info_p)
-		*info_p = info;
-
-	return callback;
-}
-
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
-						 pm_message_t state,
-						 const char **info_p);
-
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
-						pm_message_t state,
-						const char **info_p);
 
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
@@ -650,8 +593,8 @@
  */
 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
-	pm_callback_t callback;
-	const char *info;
+	pm_callback_t callback = NULL;
+	const char *info = NULL;
 	bool skip_resume;
 	int error = 0;
 
@@ -667,36 +610,40 @@
 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;
 
-	skip_resume = dev_pm_may_skip_resume(dev);
+	skip_resume = dev_pm_skip_resume(dev);
+	/*
+	 * If the driver callback is skipped below or by the middle layer
+	 * callback and device_resume_early() also skips the driver callback for
+	 * this device later, it needs to appear as "suspended" to PM-runtime,
+	 * so change its status accordingly.
+	 *
+	 * Otherwise, the device is going to be resumed, so set its PM-runtime
+	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
+	 * to avoid confusing drivers that don't use it.
+	 */
+	if (skip_resume)
+		pm_runtime_set_suspended(dev);
+	else if (dev_pm_skip_suspend(dev))
+		pm_runtime_set_active(dev);
 
-	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+	if (dev->pm_domain) {
+		info = "noirq power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "noirq type ";
+		callback = pm_noirq_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "noirq class ";
+		callback = pm_noirq_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "noirq bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
+	}
 	if (callback)
 		goto Run;
 
 	if (skip_resume)
 		goto Skip;
-
-	if (dev_pm_smart_suspend_and_suspended(dev)) {
-		pm_message_t suspend_msg = suspend_event(state);
-
-		/*
-		 * If "freeze" callbacks have been skipped during a transition
-		 * related to hibernation, the subsequent "thaw" callbacks must
-		 * be skipped too or bad things may happen. Otherwise, resume
-		 * callbacks are going to be run for the device, so its runtime
-		 * PM status must be changed to reflect the new state after the
-		 * transition under way.
-		 */
-		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
-		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
-			if (state.event == PM_EVENT_THAW) {
-				skip_resume = true;
-				goto Skip;
-			} else {
-				pm_runtime_set_active(dev);
-			}
-		}
-	}
 
 	if (dev->driver && dev->driver->pm) {
 		info = "noirq driver ";
@@ -709,18 +656,6 @@
 Skip:
 	dev->power.is_noirq_suspended = false;
 
-	if (skip_resume) {
-		/*
-		 * The device is going to be left in suspend, but it might not
-		 * have been in runtime suspend before the system suspended, so
-		 * its runtime PM status needs to be updated to avoid confusing
-		 * the runtime PM framework when runtime PM is enabled for the
-		 * device again.
-		 */
-		pm_runtime_set_suspended(dev);
-		dev_pm_skip_next_resume_phases(dev);
-	}
-
 Out:
 	complete_all(&dev->power.completion);
 	TRACE_RESUME(error);
@@ -731,6 +666,19 @@
 {
 	return dev->power.async_suspend && pm_async_enabled
 		&& !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (is_async(dev)) {
+		get_device(dev);
+		async_schedule_dev(func, dev);
+		return true;
+	}
+
+	return false;
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -745,7 +693,7 @@
 	put_device(dev);
 }
 
-void dpm_noirq_resume_devices(pm_message_t state)
+static void dpm_noirq_resume_devices(pm_message_t state)
 {
 	struct device *dev;
 	ktime_t starttime = ktime_get();
@@ -759,13 +707,8 @@
 	 * in case the starting of async threads is
 	 * delayed by non-async resuming devices.
 	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule(async_resume_noirq, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+		dpm_async_fn(dev, async_resume_noirq);
 
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
@@ -794,13 +737,6 @@
 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 }
 
-void dpm_noirq_end(void)
-{
-	resume_device_irqs();
-	device_wakeup_disarm_wake_irqs();
-	cpuidle_resume();
-}
-
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -811,36 +747,11 @@
 void dpm_resume_noirq(pm_message_t state)
 {
 	dpm_noirq_resume_devices(state);
-	dpm_noirq_end();
-}
 
-static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
-						pm_message_t state,
-						const char **info_p)
-{
-	pm_callback_t callback;
-	const char *info;
+	resume_device_irqs();
+	device_wakeup_disarm_wake_irqs();
 
-	if (dev->pm_domain) {
-		info = "early power domain ";
-		callback = pm_late_early_op(&dev->pm_domain->ops, state);
-	} else if (dev->type && dev->type->pm) {
-		info = "early type ";
-		callback = pm_late_early_op(dev->type->pm, state);
-	} else if (dev->class && dev->class->pm) {
-		info = "early class ";
-		callback = pm_late_early_op(dev->class->pm, state);
-	} else if (dev->bus && dev->bus->pm) {
-		info = "early bus ";
-		callback = pm_late_early_op(dev->bus->pm, state);
-	} else {
-		return NULL;
-	}
-
-	if (info_p)
-		*info_p = info;
-
-	return callback;
+	cpuidle_resume();
 }
 
/**
@@ -853,8 +764,8 @@
  */
 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
-	pm_callback_t callback;
-	const char *info;
+	pm_callback_t callback = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -869,17 +780,37 @@
 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;
 
-	callback = dpm_subsys_resume_early_cb(dev, state, &info);
+	if (dev->pm_domain) {
+		info = "early power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "early type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "early class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "early bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	}
+	if (callback)
+		goto Run;
 
-	if (!callback && dev->driver && dev->driver->pm) {
+	if (dev_pm_skip_resume(dev))
+		goto Skip;
+
+	if (dev->driver && dev->driver->pm) {
 		info = "early driver ";
 		callback = pm_late_early_op(dev->driver->pm, state);
 	}
 
+Run:
 	error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
 	dev->power.is_late_suspended = false;
 
- Out:
+Out:
 	TRACE_RESUME(error);
 
 	pm_runtime_enable(dev);
@@ -917,13 +848,8 @@
 	 * in case the starting of async threads is
 	 * delayed by non-async resuming devices.
 	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule(async_resume_early, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_resume_early);
 
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
@@ -1083,13 +1009,8 @@
 	pm_transition = state;
 	async_error = 0;
 
-	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule(async_resume, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_resume);
 
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
@@ -1118,6 +1039,7 @@
 		dpm_show_time(starttime, state, 0, NULL);
 
 	cpufreq_resume();
+	devfreq_resume();
 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 }
 
@@ -1132,7 +1054,7 @@
 	const char *info = NULL;
 
 	if (dev->power.syscore)
-		return;
+		goto out;
 
 	device_lock(dev);
 
@@ -1162,6 +1084,7 @@
 
 	device_unlock(dev);
 
+out:
 	pm_runtime_put(dev);
 }
 
@@ -1252,65 +1175,10 @@
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		link->supplier->power.must_resume = true;
 
 	device_links_read_unlock(idx);
-}
-
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
-						 pm_message_t state,
-						 const char **info_p)
-{
-	pm_callback_t callback;
-	const char *info;
-
-	if (dev->pm_domain) {
-		info = "noirq power domain ";
-		callback = pm_noirq_op(&dev->pm_domain->ops, state);
-	} else if (dev->type && dev->type->pm) {
-		info = "noirq type ";
-		callback = pm_noirq_op(dev->type->pm, state);
-	} else if (dev->class && dev->class->pm) {
-		info = "noirq class ";
-		callback = pm_noirq_op(dev->class->pm, state);
-	} else if (dev->bus && dev->bus->pm) {
-		info = "noirq bus ";
-		callback = pm_noirq_op(dev->bus->pm, state);
-	} else {
-		return NULL;
-	}
-
-	if (info_p)
-		*info_p = info;
-
-	return callback;
-}
-
-static bool device_must_resume(struct device *dev, pm_message_t state,
-			       bool no_subsys_suspend_noirq)
-{
-	pm_message_t resume_msg = resume_event(state);
-
-	/*
-	 * If all of the device driver's "noirq", "late" and "early" callbacks
-	 * are invoked directly by the core, the decision to allow the device to
-	 * stay in suspend can be based on its current runtime PM status and its
-	 * wakeup settings.
-	 */
-	if (no_subsys_suspend_noirq &&
-	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
-	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
-	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
-		return !pm_runtime_status_suspended(dev) &&
-			(resume_msg.event != PM_EVENT_RESUME ||
-			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
-
-	/*
-	 * The only safe strategy here is to require that if the device may not
-	 * be left in suspend, resume callbacks must be invoked for it.
-	 */
-	return !dev->power.may_skip_resume;
 }
 
 /**
@@ -1324,9 +1192,8 @@
  */
 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
-	pm_callback_t callback;
-	const char *info;
-	bool no_subsys_cb = false;
+	pm_callback_t callback = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -1337,21 +1204,26 @@
 	if (async_error)
 		goto Complete;
 
-	if (pm_wakeup_pending()) {
-		async_error = -EBUSY;
-		goto Complete;
-	}
-
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
-	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+	if (dev->pm_domain) {
+		info = "noirq power domain ";
+		callback = pm_noirq_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "noirq type ";
+		callback = pm_noirq_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "noirq class ";
+		callback = pm_noirq_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "noirq bus ";
+		callback = pm_noirq_op(dev->bus->pm, state);
+	}
 	if (callback)
 		goto Run;
 
-	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
-
-	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+	if (dev_pm_skip_suspend(dev))
 		goto Skip;
 
 	if (dev->driver && dev->driver->pm) {
@@ -1363,21 +1235,24 @@
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
-		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
-					 dev_name(dev), callback, error);
+		log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
+					 dev_name(dev), pm_verb(state.event), error);
 		goto Complete;
 	}
 
 Skip:
 	dev->power.is_noirq_suspended = true;
 
-	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
-		dev->power.must_resume = dev->power.must_resume ||
-			atomic_read(&dev->power.usage_count) > 1 ||
-			device_must_resume(dev, state, no_subsys_cb);
-	} else {
+	/*
+	 * Skipping the resume of devices that were in use right before the
+	 * system suspend (as indicated by their PM-runtime usage counters)
+	 * would be suboptimal. Also resume them if doing that is not allowed
+	 * to be skipped.
+	 */
+	if (atomic_read(&dev->power.usage_count) > 1 ||
+	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+	      dev->power.may_skip_resume))
 		dev->power.must_resume = true;
-	}
 
 	if (dev->power.must_resume)
 		dpm_superior_set_must_resume(dev);
@@ -1404,24 +1279,13 @@
 
 static int device_suspend_noirq(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule(async_suspend_noirq, dev);
+	if (dpm_async_fn(dev, async_suspend_noirq))
 		return 0;
-	}
+
 	return __device_suspend_noirq(dev, pm_transition, false);
 }
 
-void dpm_noirq_begin(void)
-{
-	cpuidle_pause();
-	device_wakeup_arm_wake_irqs();
-	suspend_device_irqs();
-}
-
-int dpm_noirq_suspend_devices(pm_message_t state)
+static int dpm_noirq_suspend_devices(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
 	int error = 0;
@@ -1478,7 +1342,11 @@
 {
 	int ret;
 
-	dpm_noirq_begin();
+	cpuidle_pause();
+
+	device_wakeup_arm_wake_irqs();
+	suspend_device_irqs();
+
 	ret = dpm_noirq_suspend_devices(state);
 	if (ret)
 		dpm_resume_noirq(resume_event(state));
@@ -1501,35 +1369,6 @@
 	spin_unlock_irq(&parent->power.lock);
 }
 
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
-						pm_message_t state,
-						const char **info_p)
-{
-	pm_callback_t callback;
-	const char *info;
-
-	if (dev->pm_domain) {
-		info = "late power domain ";
-		callback = pm_late_early_op(&dev->pm_domain->ops, state);
-	} else if (dev->type && dev->type->pm) {
-		info = "late type ";
-		callback = pm_late_early_op(dev->type->pm, state);
-	} else if (dev->class && dev->class->pm) {
-		info = "late class ";
-		callback = pm_late_early_op(dev->class->pm, state);
-	} else if (dev->bus && dev->bus->pm) {
-		info = "late bus ";
-		callback = pm_late_early_op(dev->bus->pm, state);
-	} else {
-		return NULL;
-	}
-
-	if (info_p)
-		*info_p = info;
-
-	return callback;
-}
-
 /**
  * __device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
@@ -1540,8 +1379,8 @@
  */
 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
-	pm_callback_t callback;
-	const char *info;
+	pm_callback_t callback = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -1562,12 +1401,23 @@
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
-	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+	if (dev->pm_domain) {
+		info = "late power domain ";
+		callback = pm_late_early_op(&dev->pm_domain->ops, state);
+	} else if (dev->type && dev->type->pm) {
+		info = "late type ";
+		callback = pm_late_early_op(dev->type->pm, state);
+	} else if (dev->class && dev->class->pm) {
+		info = "late class ";
+		callback = pm_late_early_op(dev->class->pm, state);
+	} else if (dev->bus && dev->bus->pm) {
+		info = "late bus ";
+		callback = pm_late_early_op(dev->bus->pm, state);
+	}
 	if (callback)
 		goto Run;
 
-	if (dev_pm_smart_suspend_and_suspended(dev) &&
-	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+	if (dev_pm_skip_suspend(dev))
 		goto Skip;
 
 	if (dev->driver && dev->driver->pm) {
@@ -1579,8 +1429,8 @@
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
-		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
-					 dev_name(dev), callback, error);
+		log_suspend_abort_reason("Device %s failed to %s late: error %d",
+					 dev_name(dev), pm_verb(state.event), error);
 		goto Complete;
 	}
 	dpm_propagate_wakeup_to_parent(dev);
@@ -1609,13 +1459,8 @@
 
 static int device_suspend_late(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule(async_suspend_late, dev);
+	if (dpm_async_fn(dev, async_suspend_late))
 		return 0;
-	}
 
 	return __device_suspend_late(dev, pm_transition, false);
 }
@@ -1677,17 +1522,20 @@
  */
 int dpm_suspend_end(pm_message_t state)
 {
-	int error = dpm_suspend_late(state);
+	ktime_t starttime = ktime_get();
+	int error;
+
+	error = dpm_suspend_late(state);
 	if (error)
-		return error;
+		goto out;
 
 	error = dpm_suspend_noirq(state);
-	if (error) {
+	if (error)
 		dpm_resume_early(resume_event(state));
-		return error;
-	}
 
-	return 0;
+out:
+	dpm_show_time(starttime, state, error, "end");
+	return error;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_end);
 
@@ -1730,7 +1578,7 @@
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
@@ -1791,16 +1639,18 @@
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
-			if (pm_runtime_status_suspended(dev))
+			if (pm_runtime_status_suspended(dev)) {
+				pm_dev_dbg(dev, state, "direct-complete ");
 				goto Complete;
+			}
 
 			pm_runtime_enable(dev);
 		}
 		dev->power.direct_complete = false;
 	}
 
-	dev->power.may_skip_resume = false;
-	dev->power.must_resume = false;
+	dev->power.may_skip_resume = true;
+	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
 
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
@@ -1852,8 +1702,8 @@
 		dpm_propagate_wakeup_to_parent(dev);
 		dpm_clear_superiors_direct_complete(dev);
 	} else {
-		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
-					 dev_name(dev), callback, error);
+		log_suspend_abort_reason("Device %s failed to %s: error %d",
+					 dev_name(dev), pm_verb(state.event), error);
 	}
 
 	device_unlock(dev);
@@ -1884,13 +1734,8 @@
 
 static int device_suspend(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule(async_suspend, dev);
+	if (dpm_async_fn(dev, async_suspend))
 		return 0;
-	}
 
 	return __device_suspend(dev, pm_transition, false);
 }
@@ -1907,6 +1752,7 @@
 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
 	might_sleep();
 
+	devfreq_suspend();
 	cpufreq_suspend();
 
 	mutex_lock(&dpm_list_mtx);
@@ -1959,13 +1805,6 @@
 	int (*callback)(struct device *) = NULL;
 	int ret = 0;
 
-	if (dev->power.syscore)
-		return 0;
-
-	WARN_ON(!pm_runtime_enabled(dev) &&
-		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
-					      DPM_FLAG_LEAVE_SUSPENDED));
-
 	/*
 	 * If a device's parent goes into runtime suspend at the wrong time,
 	 * it won't be possible to resume the device. To prevent this we
@@ -1973,6 +1812,9 @@
 	 * it again during the complete phase.
 	 */
 	pm_runtime_get_noresume(dev);
+
+	if (dev->power.syscore)
+		return 0;
 
 	device_lock(dev);
 
@@ -2013,9 +1855,8 @@
 	 */
 	spin_lock_irq(&dev->power.lock);
 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
-		((pm_runtime_suspended(dev) && ret > 0) ||
-		 dev->power.no_pm_callbacks) &&
-		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+		(ret > 0 || dev->power.no_pm_callbacks) &&
+		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
 	spin_unlock_irq(&dev->power.lock);
 	return 0;
 }
@@ -2065,8 +1906,7 @@
 			error = 0;
 			continue;
 		}
-		printk(KERN_INFO "PM: Device %s not prepared "
-			"for power transition: code %d\n",
+		pr_info("Device %s not prepared for power transition: code %d\n",
 			dev_name(dev), error);
 		log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
 			dev_name(dev), error);
@@ -2093,6 +1933,7 @@
  */
 int dpm_suspend_start(pm_message_t state)
 {
+	ktime_t starttime = ktime_get();
 	int error;
 
 	error = dpm_prepare(state);
@@ -2101,6 +1942,7 @@
 		dpm_save_failed_step(SUSPEND_PREPARE);
 	} else
 		error = dpm_suspend(state);
+	dpm_show_time(starttime, state, error, "start");
 	return error;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_start);
@@ -2108,14 +1950,14 @@
 void __suspend_report_result(const char *function, void *fn, int ret)
 {
 	if (ret)
-		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+		pr_err("%s(): %pS returns %d\n", function, fn, ret);
 }
 EXPORT_SYMBOL_GPL(__suspend_report_result);
 
 /**
  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
  */
 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
 {
@@ -2163,7 +2005,9 @@
 
 void device_pm_check_callbacks(struct device *dev)
 {
-	spin_lock_irq(&dev->power.lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
 	dev->power.no_pm_callbacks =
 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
 		 !dev->bus->suspend && !dev->bus->resume)) &&
@@ -2172,10 +2016,10 @@
 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
 		 !dev->driver->suspend && !dev->driver->resume));
-	spin_unlock_irq(&dev->power.lock);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
 
-bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+bool dev_pm_skip_suspend(struct device *dev)
 {
 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
 		pm_runtime_status_suspended(dev);
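
For context (not part of the patch): a minimal driver-side sketch of the two
flags this rework handles. The driver name "foo" is hypothetical;
dev_pm_set_driver_flags() is the setter declared in linux/pm.h, which does not
appear in the hunks above. DPM_FLAG_SMART_SUSPEND lets dev_pm_skip_suspend()
elide the late/noirq suspend callbacks of a runtime-suspended device, and
DPM_FLAG_MAY_SKIP_RESUME allows __device_suspend_noirq() to leave
power.must_resume clear so that dev_pm_skip_resume() can skip the noirq/early
resume callbacks as well.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Opt in to both optimizations wired up above: skip this device's
	 * suspend callbacks while it is runtime-suspended (SMART_SUSPEND),
	 * and allow the core to leave it suspended across system resume
	 * (MAY_SKIP_RESUME, honored via power.may_skip_resume).
	 */
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_SUSPEND |
				DPM_FLAG_MAY_SKIP_RESUME);

	return 0;
}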