2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/drivers/base/power/domain.c
....@@ -1,10 +1,10 @@
1
+// SPDX-License-Identifier: GPL-2.0
12 /*
23 * drivers/base/power/domain.c - Common code related to device power domains.
34 *
45 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5
- *
6
- * This file is released under the GPLv2.
76 */
7
+#define pr_fmt(fmt) "PM: " fmt
88
99 #include <linux/delay.h>
1010 #include <linux/kernel.h>
....@@ -20,6 +20,8 @@
2020 #include <linux/sched.h>
2121 #include <linux/suspend.h>
2222 #include <linux/export.h>
23
+#include <linux/cpu.h>
24
+#include <linux/debugfs.h>
2325
2426 #include "power.h"
2527
....@@ -122,10 +124,12 @@
122124 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
123125 #define genpd_unlock(p) p->lock_ops->unlock(p)
124126
125
-#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
127
+#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
126128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
127129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
128130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
132
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
129133
130134 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
131135 const struct generic_pm_domain *genpd)
....@@ -146,29 +150,24 @@
146150 return ret;
147151 }
148152
153
+static int genpd_runtime_suspend(struct device *dev);
154
+
149155 /*
150156 * Get the generic PM domain for a particular struct device.
151157 * This validates the struct device pointer, the PM domain pointer,
152158 * and checks that the PM domain pointer is a real generic PM domain.
153159 * Any failure results in NULL being returned.
154160 */
155
-static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
161
+static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
156162 {
157
- struct generic_pm_domain *genpd = NULL, *gpd;
158
-
159163 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
160164 return NULL;
161165
162
- mutex_lock(&gpd_list_lock);
163
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
164
- if (&gpd->domain == dev->pm_domain) {
165
- genpd = gpd;
166
- break;
167
- }
168
- }
169
- mutex_unlock(&gpd_list_lock);
166
+ /* A genpd always has its ->runtime_suspend() callback assigned. */
167
+ if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
168
+ return pd_to_genpd(dev->pm_domain);
170169
171
- return genpd;
170
+ return NULL;
172171 }
173172
174173 /*
....@@ -212,6 +211,21 @@
212211 }
213212
214213 #ifdef CONFIG_DEBUG_FS
214
+static struct dentry *genpd_debugfs_dir;
215
+
216
+static void genpd_debug_add(struct generic_pm_domain *genpd);
217
+
218
+static void genpd_debug_remove(struct generic_pm_domain *genpd)
219
+{
220
+ struct dentry *d;
221
+
222
+ if (!genpd_debugfs_dir)
223
+ return;
224
+
225
+ d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
226
+ debugfs_remove(d);
227
+}
228
+
215229 static void genpd_update_accounting(struct generic_pm_domain *genpd)
216230 {
217231 ktime_t delta, now;
....@@ -224,7 +238,7 @@
224238 * out of off and so update the idle time and vice
225239 * versa.
226240 */
227
- if (genpd->status == GPD_STATE_ACTIVE) {
241
+ if (genpd->status == GENPD_STATE_ON) {
228242 int state_idx = genpd->state_idx;
229243
230244 genpd->states[state_idx].idle_time =
....@@ -236,8 +250,131 @@
236250 genpd->accounting_time = now;
237251 }
238252 #else
253
+static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
254
+static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
239255 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
240256 #endif
257
+
258
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
259
+ unsigned int state)
260
+{
261
+ struct generic_pm_domain_data *pd_data;
262
+ struct pm_domain_data *pdd;
263
+ struct gpd_link *link;
264
+
265
+ /* New requested state is the same as the max requested state */
266
+ if (state == genpd->performance_state)
267
+ return state;
268
+
269
+ /* New requested state is higher than the max requested state */
270
+ if (state > genpd->performance_state)
271
+ return state;
272
+
273
+ /* Traverse all devices within the domain */
274
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
275
+ pd_data = to_gpd_data(pdd);
276
+
277
+ if (pd_data->performance_state > state)
278
+ state = pd_data->performance_state;
279
+ }
280
+
281
+ /*
282
+ * Traverse all sub-domains within the domain. This can be
283
+ * done without any additional locking as the link->performance_state
284
+ * field is protected by the parent genpd->lock, which is already taken.
285
+ *
286
+ * Also note that link->performance_state (subdomain's performance state
287
+ * requirement to parent domain) is different from
288
+ * link->child->performance_state (current performance state requirement
289
+ * of the devices/sub-domains of the subdomain) and so can have a
290
+ * different value.
291
+ *
292
+ * Note that we also take votes from powered-off sub-domains into account
293
+ * as the same is done for devices right now.
294
+ */
295
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
296
+ if (link->performance_state > state)
297
+ state = link->performance_state;
298
+ }
299
+
300
+ return state;
301
+}
302
+
303
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
304
+ unsigned int state, int depth)
305
+{
306
+ struct generic_pm_domain *parent;
307
+ struct gpd_link *link;
308
+ int parent_state, ret;
309
+
310
+ if (state == genpd->performance_state)
311
+ return 0;
312
+
313
+ /* Propagate to parents of genpd */
314
+ list_for_each_entry(link, &genpd->child_links, child_node) {
315
+ parent = link->parent;
316
+
317
+ if (!parent->set_performance_state)
318
+ continue;
319
+
320
+ /* Find parent's performance state */
321
+ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
322
+ parent->opp_table,
323
+ state);
324
+ if (unlikely(ret < 0))
325
+ goto err;
326
+
327
+ parent_state = ret;
328
+
329
+ genpd_lock_nested(parent, depth + 1);
330
+
331
+ link->prev_performance_state = link->performance_state;
332
+ link->performance_state = parent_state;
333
+ parent_state = _genpd_reeval_performance_state(parent,
334
+ parent_state);
335
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
336
+ if (ret)
337
+ link->performance_state = link->prev_performance_state;
338
+
339
+ genpd_unlock(parent);
340
+
341
+ if (ret)
342
+ goto err;
343
+ }
344
+
345
+ ret = genpd->set_performance_state(genpd, state);
346
+ if (ret)
347
+ goto err;
348
+
349
+ genpd->performance_state = state;
350
+ return 0;
351
+
352
+err:
353
+ /* Encountered an error, let's roll back */
354
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
355
+ child_node) {
356
+ parent = link->parent;
357
+
358
+ if (!parent->set_performance_state)
359
+ continue;
360
+
361
+ genpd_lock_nested(parent, depth + 1);
362
+
363
+ parent_state = link->prev_performance_state;
364
+ link->performance_state = parent_state;
365
+
366
+ parent_state = _genpd_reeval_performance_state(parent,
367
+ parent_state);
368
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
369
+ pr_err("%s: Failed to roll back to %d performance state\n",
370
+ parent->name, parent_state);
371
+ }
372
+
373
+ genpd_unlock(parent);
374
+ }
375
+
376
+ return ret;
377
+}
241378
242379 /**
243380 * dev_pm_genpd_set_performance_state- Set performance state of device's power
....@@ -257,23 +394,20 @@
257394 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
258395 {
259396 struct generic_pm_domain *genpd;
260
- struct generic_pm_domain_data *gpd_data, *pd_data;
261
- struct pm_domain_data *pdd;
397
+ struct generic_pm_domain_data *gpd_data;
262398 unsigned int prev;
263
- int ret = 0;
399
+ int ret;
264400
265
- genpd = dev_to_genpd(dev);
266
- if (IS_ERR(genpd))
401
+ genpd = dev_to_genpd_safe(dev);
402
+ if (!genpd)
267403 return -ENODEV;
268404
269405 if (unlikely(!genpd->set_performance_state))
270406 return -EINVAL;
271407
272
- if (unlikely(!dev->power.subsys_data ||
273
- !dev->power.subsys_data->domain_data)) {
274
- WARN_ON(1);
408
+ if (WARN_ON(!dev->power.subsys_data ||
409
+ !dev->power.subsys_data->domain_data))
275410 return -EINVAL;
276
- }
277411
278412 genpd_lock(genpd);
279413
....@@ -281,52 +415,45 @@
281415 prev = gpd_data->performance_state;
282416 gpd_data->performance_state = state;
283417
284
- /* New requested state is same as Max requested state */
285
- if (state == genpd->performance_state)
286
- goto unlock;
418
+ state = _genpd_reeval_performance_state(genpd, state);
419
+ ret = _genpd_set_performance_state(genpd, state, 0);
420
+ if (ret)
421
+ gpd_data->performance_state = prev;
287422
288
- /* New requested state is higher than Max requested state */
289
- if (state > genpd->performance_state)
290
- goto update_state;
291
-
292
- /* Traverse all devices within the domain */
293
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
294
- pd_data = to_gpd_data(pdd);
295
-
296
- if (pd_data->performance_state > state)
297
- state = pd_data->performance_state;
298
- }
299
-
300
- if (state == genpd->performance_state)
301
- goto unlock;
302
-
303
- /*
304
- * We aren't propagating performance state changes of a subdomain to its
305
- * masters as we don't have hardware that needs it. Over that, the
306
- * performance states of subdomain and its masters may not have
307
- * one-to-one mapping and would require additional information. We can
308
- * get back to this once we have hardware that needs it. For that
309
- * reason, we don't have to consider performance state of the subdomains
310
- * of genpd here.
311
- */
312
-
313
-update_state:
314
- if (genpd_status_on(genpd)) {
315
- ret = genpd->set_performance_state(genpd, state);
316
- if (ret) {
317
- gpd_data->performance_state = prev;
318
- goto unlock;
319
- }
320
- }
321
-
322
- genpd->performance_state = state;
323
-
324
-unlock:
325423 genpd_unlock(genpd);
326424
327425 return ret;
328426 }
329427 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
428
+
429
+/**
430
+ * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
431
+ *
432
+ * @dev: Device to handle
433
+ * @next: impending interrupt/wakeup for the device
434
+ *
435
+ *
436
+ * Allow devices to inform the PM framework of the next wakeup. It's assumed that the users
437
+ * guarantee that the genpd wouldn't be detached while this routine is getting
438
+ * called. Additionally, it's also assumed that @dev isn't runtime suspended
439
+ * (RPM_SUSPENDED).
440
+ * Although devices are expected to update the next_wakeup after the end of
441
+ * their usecase as well, it is possible the devices themselves may not know
442
+ * about that, so stale @next will be ignored when powering off the domain.
443
+ */
444
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
445
+{
446
+ struct generic_pm_domain_data *gpd_data;
447
+ struct generic_pm_domain *genpd;
448
+
449
+ genpd = dev_to_genpd_safe(dev);
450
+ if (!genpd)
451
+ return;
452
+
453
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
454
+ gpd_data->next_wakeup = next;
455
+}
456
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
330457
331458 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
332459 {
....@@ -335,35 +462,45 @@
335462 s64 elapsed_ns;
336463 int ret;
337464
338
- if (!genpd->power_on)
339
- return 0;
465
+ /* Notify consumers that we are about to power on. */
466
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
467
+ GENPD_NOTIFY_PRE_ON,
468
+ GENPD_NOTIFY_OFF, NULL);
469
+ ret = notifier_to_errno(ret);
470
+ if (ret)
471
+ return ret;
340472
341
- if (!timed)
342
- return genpd->power_on(genpd);
473
+ if (!genpd->power_on)
474
+ goto out;
475
+
476
+ if (!timed) {
477
+ ret = genpd->power_on(genpd);
478
+ if (ret)
479
+ goto err;
480
+
481
+ goto out;
482
+ }
343483
344484 time_start = ktime_get();
345485 ret = genpd->power_on(genpd);
346486 if (ret)
347
- return ret;
487
+ goto err;
348488
349489 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
350
-
351
- if (unlikely(genpd->set_performance_state)) {
352
- ret = genpd->set_performance_state(genpd, genpd->performance_state);
353
- if (ret) {
354
- pr_warn("%s: Failed to set performance state %d (%d)\n",
355
- genpd->name, genpd->performance_state, ret);
356
- }
357
- }
358
-
359490 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
360
- return ret;
491
+ goto out;
361492
362493 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
363494 genpd->max_off_time_changed = true;
364495 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
365496 genpd->name, "on", elapsed_ns);
366497
498
+out:
499
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
500
+ return 0;
501
+err:
502
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
503
+ NULL);
367504 return ret;
368505 }
369506
....@@ -374,26 +511,45 @@
374511 s64 elapsed_ns;
375512 int ret;
376513
377
- if (!genpd->power_off)
378
- return 0;
514
+ /* Notify consumers that we are about to power off. */
515
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
516
+ GENPD_NOTIFY_PRE_OFF,
517
+ GENPD_NOTIFY_ON, NULL);
518
+ ret = notifier_to_errno(ret);
519
+ if (ret)
520
+ return ret;
379521
380
- if (!timed)
381
- return genpd->power_off(genpd);
522
+ if (!genpd->power_off)
523
+ goto out;
524
+
525
+ if (!timed) {
526
+ ret = genpd->power_off(genpd);
527
+ if (ret)
528
+ goto busy;
529
+
530
+ goto out;
531
+ }
382532
383533 time_start = ktime_get();
384534 ret = genpd->power_off(genpd);
385
- if (ret == -EBUSY)
386
- return ret;
535
+ if (ret)
536
+ goto busy;
387537
388538 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
389539 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
390
- return ret;
540
+ goto out;
391541
392542 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
393543 genpd->max_off_time_changed = true;
394544 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
395545 genpd->name, "off", elapsed_ns);
396546
547
+out:
548
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
549
+ NULL);
550
+ return 0;
551
+busy:
552
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
397553 return ret;
398554 }
399555
....@@ -426,6 +582,7 @@
426582 struct pm_domain_data *pdd;
427583 struct gpd_link *link;
428584 unsigned int not_suspended = 0;
585
+ int ret;
429586
430587 /*
431588 * Do not try to power off the domain in the following situations:
....@@ -440,7 +597,9 @@
440597 * (1) The domain is configured as always on.
441598 * (2) When the domain has a subdomain being powered on.
442599 */
443
- if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
600
+ if (genpd_is_always_on(genpd) ||
601
+ genpd_is_rpm_always_on(genpd) ||
602
+ atomic_read(&genpd->sd_count) > 0)
444603 return -EBUSY;
445604
446605 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
....@@ -471,44 +630,36 @@
471630 if (!genpd->gov)
472631 genpd->state_idx = 0;
473632
474
- if (genpd->power_off) {
475
- int ret;
633
+ /* Don't power off if a child domain is waiting to power on. */
634
+ if (atomic_read(&genpd->sd_count) > 0)
635
+ return -EBUSY;
476636
477
- if (atomic_read(&genpd->sd_count) > 0)
478
- return -EBUSY;
479
-
480
- /*
481
- * If sd_count > 0 at this point, one of the subdomains hasn't
482
- * managed to call genpd_power_on() for the master yet after
483
- * incrementing it. In that case genpd_power_on() will wait
484
- * for us to drop the lock, so we can call .power_off() and let
485
- * the genpd_power_on() restore power for us (this shouldn't
486
- * happen very often).
487
- */
488
- ret = _genpd_power_off(genpd, true);
489
- if (ret)
490
- return ret;
637
+ ret = _genpd_power_off(genpd, true);
638
+ if (ret) {
639
+ genpd->states[genpd->state_idx].rejected++;
640
+ return ret;
491641 }
492642
493
- genpd->status = GPD_STATE_POWER_OFF;
643
+ genpd->status = GENPD_STATE_OFF;
494644 genpd_update_accounting(genpd);
645
+ genpd->states[genpd->state_idx].usage++;
495646
496
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
497
- genpd_sd_counter_dec(link->master);
498
- genpd_lock_nested(link->master, depth + 1);
499
- genpd_power_off(link->master, false, depth + 1);
500
- genpd_unlock(link->master);
647
+ list_for_each_entry(link, &genpd->child_links, child_node) {
648
+ genpd_sd_counter_dec(link->parent);
649
+ genpd_lock_nested(link->parent, depth + 1);
650
+ genpd_power_off(link->parent, false, depth + 1);
651
+ genpd_unlock(link->parent);
501652 }
502653
503654 return 0;
504655 }
505656
506657 /**
507
- * genpd_power_on - Restore power to a given PM domain and its masters.
658
+ * genpd_power_on - Restore power to a given PM domain and its parents.
508659 * @genpd: PM domain to power up.
509660 * @depth: nesting count for lockdep.
510661 *
511
- * Restore power to @genpd and all of its masters so that it is possible to
662
+ * Restore power to @genpd and all of its parents so that it is possible to
512663 * resume a device belonging to it.
513664 */
514665 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
....@@ -521,20 +672,20 @@
521672
522673 /*
523674 * The list is guaranteed not to change while the loop below is being
524
- * executed, unless one of the masters' .power_on() callbacks fiddles
675
+ * executed, unless one of the parents' .power_on() callbacks fiddles
525676 * with it.
526677 */
527
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
528
- struct generic_pm_domain *master = link->master;
678
+ list_for_each_entry(link, &genpd->child_links, child_node) {
679
+ struct generic_pm_domain *parent = link->parent;
529680
530
- genpd_sd_counter_inc(master);
681
+ genpd_sd_counter_inc(parent);
531682
532
- genpd_lock_nested(master, depth + 1);
533
- ret = genpd_power_on(master, depth + 1);
534
- genpd_unlock(master);
683
+ genpd_lock_nested(parent, depth + 1);
684
+ ret = genpd_power_on(parent, depth + 1);
685
+ genpd_unlock(parent);
535686
536687 if (ret) {
537
- genpd_sd_counter_dec(master);
688
+ genpd_sd_counter_dec(parent);
538689 goto err;
539690 }
540691 }
....@@ -543,22 +694,29 @@
543694 if (ret)
544695 goto err;
545696
546
- genpd->status = GPD_STATE_ACTIVE;
697
+ genpd->status = GENPD_STATE_ON;
547698 genpd_update_accounting(genpd);
548699
549700 return 0;
550701
551702 err:
552703 list_for_each_entry_continue_reverse(link,
553
- &genpd->slave_links,
554
- slave_node) {
555
- genpd_sd_counter_dec(link->master);
556
- genpd_lock_nested(link->master, depth + 1);
557
- genpd_power_off(link->master, false, depth + 1);
558
- genpd_unlock(link->master);
704
+ &genpd->child_links,
705
+ child_node) {
706
+ genpd_sd_counter_dec(link->parent);
707
+ genpd_lock_nested(link->parent, depth + 1);
708
+ genpd_power_off(link->parent, false, depth + 1);
709
+ genpd_unlock(link->parent);
559710 }
560711
561712 return ret;
713
+}
714
+
715
+static int genpd_dev_pm_start(struct device *dev)
716
+{
717
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
718
+
719
+ return genpd_start_dev(genpd, dev);
562720 }
563721
564722 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
....@@ -849,34 +1007,16 @@
8491007 }
8501008 late_initcall(genpd_power_off_unused);
8511009
852
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
853
-
854
-static bool genpd_present(const struct generic_pm_domain *genpd)
855
-{
856
- const struct generic_pm_domain *gpd;
857
-
858
- if (IS_ERR_OR_NULL(genpd))
859
- return false;
860
-
861
- list_for_each_entry(gpd, &gpd_list, gpd_list_node)
862
- if (gpd == genpd)
863
- return true;
864
-
865
- return false;
866
-}
867
-
868
-#endif
869
-
8701010 #ifdef CONFIG_PM_SLEEP
8711011
8721012 /**
873
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
1013
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
8741014 * @genpd: PM domain to power off, if possible.
8751015 * @use_lock: use the lock.
8761016 * @depth: nesting count for lockdep.
8771017 *
8781018 * Check if the given PM domain can be powered off (during system suspend or
879
- * hibernation) and do that if so. Also, in that case propagate to its masters.
1019
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
8801020 *
8811021 * This function is only called in "noirq" and "syscore" stages of system power
8821022 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
....@@ -899,23 +1039,23 @@
8991039 if (_genpd_power_off(genpd, false))
9001040 return;
9011041
902
- genpd->status = GPD_STATE_POWER_OFF;
1042
+ genpd->status = GENPD_STATE_OFF;
9031043
904
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
905
- genpd_sd_counter_dec(link->master);
1044
+ list_for_each_entry(link, &genpd->child_links, child_node) {
1045
+ genpd_sd_counter_dec(link->parent);
9061046
9071047 if (use_lock)
908
- genpd_lock_nested(link->master, depth + 1);
1048
+ genpd_lock_nested(link->parent, depth + 1);
9091049
910
- genpd_sync_power_off(link->master, use_lock, depth + 1);
1050
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
9111051
9121052 if (use_lock)
913
- genpd_unlock(link->master);
1053
+ genpd_unlock(link->parent);
9141054 }
9151055 }
9161056
9171057 /**
918
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
1058
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
9191059 * @genpd: PM domain to power on.
9201060 * @use_lock: use the lock.
9211061 * @depth: nesting count for lockdep.
....@@ -932,21 +1072,20 @@
9321072 if (genpd_status_on(genpd))
9331073 return;
9341074
935
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
936
- genpd_sd_counter_inc(link->master);
1075
+ list_for_each_entry(link, &genpd->child_links, child_node) {
1076
+ genpd_sd_counter_inc(link->parent);
9371077
9381078 if (use_lock)
939
- genpd_lock_nested(link->master, depth + 1);
1079
+ genpd_lock_nested(link->parent, depth + 1);
9401080
941
- genpd_sync_power_on(link->master, use_lock, depth + 1);
1081
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
9421082
9431083 if (use_lock)
944
- genpd_unlock(link->master);
1084
+ genpd_unlock(link->parent);
9451085 }
9461086
9471087 _genpd_power_on(genpd, false);
948
-
949
- genpd->status = GPD_STATE_ACTIVE;
1088
+ genpd->status = GENPD_STATE_ON;
9501089 }
9511090
9521091 /**
....@@ -1007,10 +1146,8 @@
10071146
10081147 genpd_lock(genpd);
10091148
1010
- if (genpd->prepared_count++ == 0) {
1149
+ if (genpd->prepared_count++ == 0)
10111150 genpd->suspended_count = 0;
1012
- genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
1013
- }
10141151
10151152 genpd_unlock(genpd);
10161153
....@@ -1052,8 +1189,7 @@
10521189 if (ret)
10531190 return ret;
10541191
1055
- if (genpd->suspend_power_off ||
1056
- (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)))
1192
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
10571193 return 0;
10581194
10591195 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
....@@ -1107,8 +1243,7 @@
11071243 if (IS_ERR(genpd))
11081244 return -EINVAL;
11091245
1110
- if (genpd->suspend_power_off ||
1111
- (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)))
1246
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
11121247 return pm_generic_resume_noirq(dev);
11131248
11141249 genpd_lock(genpd);
....@@ -1223,13 +1358,14 @@
12231358 * first time for the given domain in the present cycle.
12241359 */
12251360 genpd_lock(genpd);
1226
- if (genpd->suspended_count++ == 0)
1361
+ if (genpd->suspended_count++ == 0) {
12271362 /*
12281363 * The boot kernel might put the domain into arbitrary state,
12291364 * so make it appear as powered off to genpd_sync_power_on(),
12301365 * so that it tries to power it on in case it was really off.
12311366 */
1232
- genpd->status = GPD_STATE_POWER_OFF;
1367
+ genpd->status = GENPD_STATE_OFF;
1368
+ }
12331369
12341370 genpd_sync_power_on(genpd, true, 0);
12351371 genpd_unlock(genpd);
....@@ -1274,41 +1410,60 @@
12741410 genpd_unlock(genpd);
12751411 }
12761412
1277
-/**
1278
- * genpd_syscore_switch - Switch power during system core suspend or resume.
1279
- * @dev: Device that normally is marked as "always on" to switch power for.
1280
- *
1281
- * This routine may only be called during the system core (syscore) suspend or
1282
- * resume phase for devices whose "always on" flags are set.
1283
- */
1284
-static void genpd_syscore_switch(struct device *dev, bool suspend)
1413
+static void genpd_switch_state(struct device *dev, bool suspend)
12851414 {
12861415 struct generic_pm_domain *genpd;
1416
+ bool use_lock;
12871417
1288
- genpd = dev_to_genpd(dev);
1289
- if (!genpd_present(genpd))
1418
+ genpd = dev_to_genpd_safe(dev);
1419
+ if (!genpd)
12901420 return;
1421
+
1422
+ use_lock = genpd_is_irq_safe(genpd);
1423
+
1424
+ if (use_lock)
1425
+ genpd_lock(genpd);
12911426
12921427 if (suspend) {
12931428 genpd->suspended_count++;
1294
- genpd_sync_power_off(genpd, false, 0);
1429
+ genpd_sync_power_off(genpd, use_lock, 0);
12951430 } else {
1296
- genpd_sync_power_on(genpd, false, 0);
1431
+ genpd_sync_power_on(genpd, use_lock, 0);
12971432 genpd->suspended_count--;
12981433 }
1434
+
1435
+ if (use_lock)
1436
+ genpd_unlock(genpd);
12991437 }
13001438
1301
-void pm_genpd_syscore_poweroff(struct device *dev)
1439
+/**
1440
+ * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1441
+ * @dev: The device that is attached to the genpd, that can be suspended.
1442
+ *
1443
+ * This routine should typically be called for a device that needs to be
1444
+ * suspended during the syscore suspend phase. It may also be called during
1445
+ * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1446
+ * genpd.
1447
+ */
1448
+void dev_pm_genpd_suspend(struct device *dev)
13021449 {
1303
- genpd_syscore_switch(dev, true);
1450
+ genpd_switch_state(dev, true);
13041451 }
1305
-EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1452
+EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
13061453
1307
-void pm_genpd_syscore_poweron(struct device *dev)
1454
+/**
1455
+ * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1456
+ * @dev: The device that is attached to the genpd, which needs to be resumed.
1457
+ *
1458
+ * This routine should typically be called for a device that needs to be resumed
1459
+ * during the syscore resume phase. It may also be called during suspend-to-idle
1460
+ * to resume a corresponding CPU device that is attached to a genpd.
1461
+ */
1462
+void dev_pm_genpd_resume(struct device *dev)
13081463 {
1309
- genpd_syscore_switch(dev, false);
1464
+ genpd_switch_state(dev, false);
13101465 }
1311
-EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1466
+EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
13121467
13131468 #else /* !CONFIG_PM_SLEEP */
13141469
....@@ -1323,8 +1478,7 @@
13231478
13241479 #endif /* CONFIG_PM_SLEEP */
13251480
1326
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1327
- struct gpd_timing_data *td)
1481
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
13281482 {
13291483 struct generic_pm_domain_data *gpd_data;
13301484 int ret;
....@@ -1339,13 +1493,11 @@
13391493 goto err_put;
13401494 }
13411495
1342
- if (td)
1343
- gpd_data->td = *td;
1344
-
13451496 gpd_data->base.dev = dev;
13461497 gpd_data->td.constraint_changed = true;
13471498 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
13481499 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1500
+ gpd_data->next_wakeup = KTIME_MAX;
13491501
13501502 spin_lock_irq(&dev->power.lock);
13511503
....@@ -1381,8 +1533,57 @@
13811533 dev_pm_put_subsys_data(dev);
13821534 }
13831535
1536
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1537
+ int cpu, bool set, unsigned int depth)
1538
+{
1539
+ struct gpd_link *link;
1540
+
1541
+ if (!genpd_is_cpu_domain(genpd))
1542
+ return;
1543
+
1544
+ list_for_each_entry(link, &genpd->child_links, child_node) {
1545
+ struct generic_pm_domain *parent = link->parent;
1546
+
1547
+ genpd_lock_nested(parent, depth + 1);
1548
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
1549
+ genpd_unlock(parent);
1550
+ }
1551
+
1552
+ if (set)
1553
+ cpumask_set_cpu(cpu, genpd->cpus);
1554
+ else
1555
+ cpumask_clear_cpu(cpu, genpd->cpus);
1556
+}
1557
+
1558
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1559
+{
1560
+ if (cpu >= 0)
1561
+ genpd_update_cpumask(genpd, cpu, true, 0);
1562
+}
1563
+
1564
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1565
+{
1566
+ if (cpu >= 0)
1567
+ genpd_update_cpumask(genpd, cpu, false, 0);
1568
+}
1569
+
1570
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1571
+{
1572
+ int cpu;
1573
+
1574
+ if (!genpd_is_cpu_domain(genpd))
1575
+ return -1;
1576
+
1577
+ for_each_possible_cpu(cpu) {
1578
+ if (get_cpu_device(cpu) == dev)
1579
+ return cpu;
1580
+ }
1581
+
1582
+ return -1;
1583
+}
1584
+
13841585 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1385
- struct gpd_timing_data *td)
1586
+ struct device *base_dev)
13861587 {
13871588 struct generic_pm_domain_data *gpd_data;
13881589 int ret;
....@@ -1392,9 +1593,11 @@
13921593 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
13931594 return -EINVAL;
13941595
1395
- gpd_data = genpd_alloc_dev_data(dev, td);
1596
+ gpd_data = genpd_alloc_dev_data(dev);
13961597 if (IS_ERR(gpd_data))
13971598 return PTR_ERR(gpd_data);
1599
+
1600
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
13981601
13991602 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
14001603 if (ret)
....@@ -1402,6 +1605,7 @@
14021605
14031606 genpd_lock(genpd);
14041607
1608
+ genpd_set_cpumask(genpd, gpd_data->cpu);
14051609 dev_pm_domain_set(dev, &genpd->domain);
14061610
14071611 genpd->device_count++;
....@@ -1414,7 +1618,8 @@
14141618 if (ret)
14151619 genpd_free_dev_data(dev, gpd_data);
14161620 else
1417
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1621
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1622
+ DEV_PM_QOS_RESUME_LATENCY);
14181623
14191624 return ret;
14201625 }
....@@ -1429,7 +1634,7 @@
14291634 int ret;
14301635
14311636 mutex_lock(&gpd_list_lock);
1432
- ret = genpd_add_device(genpd, dev, NULL);
1637
+ ret = genpd_add_device(genpd, dev, dev);
14331638 mutex_unlock(&gpd_list_lock);
14341639
14351640 return ret;
....@@ -1447,7 +1652,8 @@
14471652
14481653 pdd = dev->power.subsys_data->domain_data;
14491654 gpd_data = to_gpd_data(pdd);
1450
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1655
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1656
+ DEV_PM_QOS_RESUME_LATENCY);
14511657
14521658 genpd_lock(genpd);
14531659
....@@ -1459,6 +1665,7 @@
14591665 genpd->device_count--;
14601666 genpd->max_off_time_changed = true;
14611667
1668
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
14621669 dev_pm_domain_set(dev, NULL);
14631670
14641671 list_del_init(&pdd->list_node);
....@@ -1474,7 +1681,7 @@
14741681
14751682 out:
14761683 genpd_unlock(genpd);
1477
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1684
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
14781685
14791686 return ret;
14801687 }
....@@ -1485,7 +1692,7 @@
14851692 */
14861693 int pm_genpd_remove_device(struct device *dev)
14871694 {
1488
- struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
1695
+ struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
14891696
14901697 if (!genpd)
14911698 return -EINVAL;
....@@ -1493,6 +1700,101 @@
14931700 return genpd_remove_device(genpd, dev);
14941701 }
14951702 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1703
+
1704
+/**
1705
+ * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1706
+ *
1707
+ * @dev: Device that should be associated with the notifier
1708
+ * @nb: The notifier block to register
1709
+ *
1710
+ * Users may call this function to add a genpd power on/off notifier for an
1711
+ * attached @dev. Only one notifier per device is allowed. The notifier is
1712
+ * sent when genpd is powering on/off the PM domain.
1713
+ *
1714
+ * It is assumed that the user guarantee that the genpd wouldn't be detached
1715
+ * while this routine is getting called.
1716
+ *
1717
+ * Returns 0 on success and negative error values on failures.
1718
+ */
1719
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1720
+{
1721
+ struct generic_pm_domain *genpd;
1722
+ struct generic_pm_domain_data *gpd_data;
1723
+ int ret;
1724
+
1725
+ genpd = dev_to_genpd_safe(dev);
1726
+ if (!genpd)
1727
+ return -ENODEV;
1728
+
1729
+ if (WARN_ON(!dev->power.subsys_data ||
1730
+ !dev->power.subsys_data->domain_data))
1731
+ return -EINVAL;
1732
+
1733
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1734
+ if (gpd_data->power_nb)
1735
+ return -EEXIST;
1736
+
1737
+ genpd_lock(genpd);
1738
+ ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1739
+ genpd_unlock(genpd);
1740
+
1741
+ if (ret) {
1742
+ dev_warn(dev, "failed to add notifier for PM domain %s\n",
1743
+ genpd->name);
1744
+ return ret;
1745
+ }
1746
+
1747
+ gpd_data->power_nb = nb;
1748
+ return 0;
1749
+}
1750
+EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
1751
+
1752
+/**
1753
+ * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1754
+ *
1755
+ * @dev: Device that is associated with the notifier
1756
+ *
1757
+ * Users may call this function to remove a genpd power on/off notifier for an
1758
+ * attached @dev.
1759
+ *
1760
+ * It is assumed that the user guarantee that the genpd wouldn't be detached
1761
+ * while this routine is getting called.
1762
+ *
1763
+ * Returns 0 on success and negative error values on failures.
1764
+ */
1765
+int dev_pm_genpd_remove_notifier(struct device *dev)
1766
+{
1767
+ struct generic_pm_domain *genpd;
1768
+ struct generic_pm_domain_data *gpd_data;
1769
+ int ret;
1770
+
1771
+ genpd = dev_to_genpd_safe(dev);
1772
+ if (!genpd)
1773
+ return -ENODEV;
1774
+
1775
+ if (WARN_ON(!dev->power.subsys_data ||
1776
+ !dev->power.subsys_data->domain_data))
1777
+ return -EINVAL;
1778
+
1779
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1780
+ if (!gpd_data->power_nb)
1781
+ return -ENODEV;
1782
+
1783
+ genpd_lock(genpd);
1784
+ ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1785
+ gpd_data->power_nb);
1786
+ genpd_unlock(genpd);
1787
+
1788
+ if (ret) {
1789
+ dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1790
+ genpd->name);
1791
+ return ret;
1792
+ }
1793
+
1794
+ gpd_data->power_nb = NULL;
1795
+ return 0;
1796
+}
1797
+EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
14961798
14971799 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
14981800 struct generic_pm_domain *subdomain)
....@@ -1527,17 +1829,17 @@
15271829 goto out;
15281830 }
15291831
1530
- list_for_each_entry(itr, &genpd->master_links, master_node) {
1531
- if (itr->slave == subdomain && itr->master == genpd) {
1832
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1833
+ if (itr->child == subdomain && itr->parent == genpd) {
15321834 ret = -EINVAL;
15331835 goto out;
15341836 }
15351837 }
15361838
1537
- link->master = genpd;
1538
- list_add_tail(&link->master_node, &genpd->master_links);
1539
- link->slave = subdomain;
1540
- list_add_tail(&link->slave_node, &subdomain->slave_links);
1839
+ link->parent = genpd;
1840
+ list_add_tail(&link->parent_node, &genpd->parent_links);
1841
+ link->child = subdomain;
1842
+ list_add_tail(&link->child_node, &subdomain->child_links);
15411843 if (genpd_status_on(subdomain))
15421844 genpd_sd_counter_inc(genpd);
15431845
....@@ -1551,7 +1853,7 @@
15511853
15521854 /**
15531855 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1554
- * @genpd: Master PM domain to add the subdomain to.
1856
+ * @genpd: Leader PM domain to add the subdomain to.
15551857 * @subdomain: Subdomain to be added.
15561858 */
15571859 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
....@@ -1569,7 +1871,7 @@
15691871
15701872 /**
15711873 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1572
- * @genpd: Master PM domain to remove the subdomain from.
1874
+ * @genpd: Leader PM domain to remove the subdomain from.
15731875 * @subdomain: Subdomain to be removed.
15741876 */
15751877 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
....@@ -1584,19 +1886,19 @@
15841886 genpd_lock(subdomain);
15851887 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
15861888
1587
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
1588
- pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1589
- subdomain->name);
1889
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1890
+ pr_warn("%s: unable to remove subdomain %s\n",
1891
+ genpd->name, subdomain->name);
15901892 ret = -EBUSY;
15911893 goto out;
15921894 }
15931895
1594
- list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
1595
- if (link->slave != subdomain)
1896
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1897
+ if (link->child != subdomain)
15961898 continue;
15971899
1598
- list_del(&link->master_node);
1599
- list_del(&link->slave_node);
1900
+ list_del(&link->parent_node);
1901
+ list_del(&link->child_node);
16001902 kfree(link);
16011903 if (genpd_status_on(subdomain))
16021904 genpd_sd_counter_dec(genpd);
....@@ -1613,6 +1915,12 @@
16131915 }
16141916 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
16151917
1918
+static void genpd_free_default_power_state(struct genpd_power_state *states,
1919
+ unsigned int state_count)
1920
+{
1921
+ kfree(states);
1922
+}
1923
+
16161924 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
16171925 {
16181926 struct genpd_power_state *state;
....@@ -1623,7 +1931,7 @@
16231931
16241932 genpd->states = state;
16251933 genpd->state_count = 1;
1626
- genpd->free = state;
1934
+ genpd->free_states = genpd_free_default_power_state;
16271935
16281936 return 0;
16291937 }
....@@ -1655,17 +1963,19 @@
16551963 if (IS_ERR_OR_NULL(genpd))
16561964 return -EINVAL;
16571965
1658
- INIT_LIST_HEAD(&genpd->master_links);
1659
- INIT_LIST_HEAD(&genpd->slave_links);
1966
+ INIT_LIST_HEAD(&genpd->parent_links);
1967
+ INIT_LIST_HEAD(&genpd->child_links);
16601968 INIT_LIST_HEAD(&genpd->dev_list);
1969
+ RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
16611970 genpd_lock_init(genpd);
16621971 genpd->gov = gov;
16631972 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
16641973 atomic_set(&genpd->sd_count, 0);
1665
- genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1974
+ genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
16661975 genpd->device_count = 0;
16671976 genpd->max_off_time_ns = -1;
16681977 genpd->max_off_time_changed = true;
1978
+ genpd->next_wakeup = KTIME_MAX;
16691979 genpd->provider = NULL;
16701980 genpd->has_provider = false;
16711981 genpd->accounting_time = ktime_get();
....@@ -1679,6 +1989,7 @@
16791989 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
16801990 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
16811991 genpd->domain.ops.complete = genpd_complete;
1992
+ genpd->domain.start = genpd_dev_pm_start;
16821993
16831994 if (genpd->flags & GENPD_FLAG_PM_CLK) {
16841995 genpd->dev_ops.stop = pm_clk_suspend;
....@@ -1686,16 +1997,24 @@
16861997 }
16871998
16881999 /* Always-on domains must be powered on at initialization. */
1689
- if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
2000
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2001
+ !genpd_status_on(genpd))
16902002 return -EINVAL;
2003
+
2004
+ if (genpd_is_cpu_domain(genpd) &&
2005
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
2006
+ return -ENOMEM;
16912007
16922008 /* Use only one "off" state if there were no states declared */
16932009 if (genpd->state_count == 0) {
16942010 ret = genpd_set_default_power_state(genpd);
1695
- if (ret)
2011
+ if (ret) {
2012
+ if (genpd_is_cpu_domain(genpd))
2013
+ free_cpumask_var(genpd->cpus);
16962014 return ret;
1697
- } else if (!gov) {
1698
- pr_warn("%s : no governor for states\n", genpd->name);
2015
+ }
2016
+ } else if (!gov && genpd->state_count > 1) {
2017
+ pr_warn("%s: no governor for states\n", genpd->name);
16992018 }
17002019
17012020 device_initialize(&genpd->dev);
....@@ -1703,6 +2022,7 @@
17032022
17042023 mutex_lock(&gpd_list_lock);
17052024 list_add(&genpd->gpd_list_node, &gpd_list);
2025
+ genpd_debug_add(genpd);
17062026 mutex_unlock(&gpd_list_lock);
17072027
17082028 return 0;
....@@ -1724,22 +2044,27 @@
17242044 return -EBUSY;
17252045 }
17262046
1727
- if (!list_empty(&genpd->master_links) || genpd->device_count) {
2047
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
17282048 genpd_unlock(genpd);
17292049 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
17302050 return -EBUSY;
17312051 }
17322052
1733
- list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
1734
- list_del(&link->master_node);
1735
- list_del(&link->slave_node);
2053
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2054
+ list_del(&link->parent_node);
2055
+ list_del(&link->child_node);
17362056 kfree(link);
17372057 }
17382058
17392059 list_del(&genpd->gpd_list_node);
17402060 genpd_unlock(genpd);
2061
+ genpd_debug_remove(genpd);
17412062 cancel_work_sync(&genpd->power_off_work);
1742
- kfree(genpd->free);
2063
+ if (genpd_is_cpu_domain(genpd))
2064
+ free_cpumask_var(genpd->cpus);
2065
+ if (genpd->free_states)
2066
+ genpd->free_states(genpd->states, genpd->state_count);
2067
+
17432068 pr_debug("%s: removed %s\n", __func__, genpd->name);
17442069
17452070 return 0;
....@@ -1873,6 +2198,7 @@
18732198 cp->node = of_node_get(np);
18742199 cp->data = data;
18752200 cp->xlate = xlate;
2201
+ fwnode_dev_initialized(&np->fwnode, true);
18762202
18772203 mutex_lock(&of_genpd_mutex);
18782204 list_add(&cp->link, &of_genpd_providers);
....@@ -1880,6 +2206,16 @@
18802206 pr_debug("Added domain provider from %pOF\n", np);
18812207
18822208 return 0;
2209
+}
2210
+
2211
+static bool genpd_present(const struct generic_pm_domain *genpd)
2212
+{
2213
+ const struct generic_pm_domain *gpd;
2214
+
2215
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
2216
+ if (gpd == genpd)
2217
+ return true;
2218
+ return false;
18832219 }
18842220
18852221 /**
....@@ -1906,16 +2242,26 @@
19062242 if (genpd->set_performance_state) {
19072243 ret = dev_pm_opp_of_add_table(&genpd->dev);
19082244 if (ret) {
1909
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
1910
- ret);
2245
+ if (ret != -EPROBE_DEFER)
2246
+ dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2247
+ ret);
19112248 goto unlock;
19122249 }
2250
+
2251
+ /*
2252
+ * Save table for faster processing while setting performance
2253
+ * state.
2254
+ */
2255
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2256
+ WARN_ON(IS_ERR(genpd->opp_table));
19132257 }
19142258
19152259 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
19162260 if (ret) {
1917
- if (genpd->set_performance_state)
2261
+ if (genpd->set_performance_state) {
2262
+ dev_pm_opp_put_opp_table(genpd->opp_table);
19182263 dev_pm_opp_of_remove_table(&genpd->dev);
2264
+ }
19192265
19202266 goto unlock;
19212267 }
....@@ -1964,10 +2310,18 @@
19642310 if (genpd->set_performance_state) {
19652311 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
19662312 if (ret) {
1967
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
1968
- i, ret);
2313
+ if (ret != -EPROBE_DEFER)
2314
+ dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2315
+ i, ret);
19692316 goto error;
19702317 }
2318
+
2319
+ /*
2320
+ * Save table for faster processing while setting
2321
+ * performance state.
2322
+ */
2323
+ genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2324
+ WARN_ON(IS_ERR(genpd->opp_table));
19712325 }
19722326
19732327 genpd->provider = &np->fwnode;
....@@ -1992,8 +2346,10 @@
19922346 genpd->provider = NULL;
19932347 genpd->has_provider = false;
19942348
1995
- if (genpd->set_performance_state)
2349
+ if (genpd->set_performance_state) {
2350
+ dev_pm_opp_put_opp_table(genpd->opp_table);
19962351 dev_pm_opp_of_remove_table(&genpd->dev);
2352
+ }
19972353 }
19982354
19992355 mutex_unlock(&gpd_list_lock);
....@@ -2027,10 +2383,12 @@
20272383 if (!gpd->set_performance_state)
20282384 continue;
20292385
2386
+ dev_pm_opp_put_opp_table(gpd->opp_table);
20302387 dev_pm_opp_of_remove_table(&gpd->dev);
20312388 }
20322389 }
20332390
2391
+ fwnode_dev_initialized(&cp->node->fwnode, false);
20342392 list_del(&cp->link);
20352393 of_node_put(cp->node);
20362394 kfree(cp);
....@@ -2098,7 +2456,7 @@
20982456 goto out;
20992457 }
21002458
2101
- ret = genpd_add_device(genpd, dev, NULL);
2459
+ ret = genpd_add_device(genpd, dev, dev);
21022460
21032461 out:
21042462 mutex_unlock(&gpd_list_lock);
....@@ -2146,6 +2504,44 @@
21462504 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
21472505
21482506 /**
2507
+ * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2508
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
2509
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
2510
+ *
2511
+ * Looks-up a parent PM domain and subdomain based upon phandle args
2512
+ * provided and removes the subdomain from the parent PM domain. Returns a
2513
+ * negative error code on failure.
2514
+ */
2515
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2516
+ struct of_phandle_args *subdomain_spec)
2517
+{
2518
+ struct generic_pm_domain *parent, *subdomain;
2519
+ int ret;
2520
+
2521
+ mutex_lock(&gpd_list_lock);
2522
+
2523
+ parent = genpd_get_from_provider(parent_spec);
2524
+ if (IS_ERR(parent)) {
2525
+ ret = PTR_ERR(parent);
2526
+ goto out;
2527
+ }
2528
+
2529
+ subdomain = genpd_get_from_provider(subdomain_spec);
2530
+ if (IS_ERR(subdomain)) {
2531
+ ret = PTR_ERR(subdomain);
2532
+ goto out;
2533
+ }
2534
+
2535
+ ret = pm_genpd_remove_subdomain(parent, subdomain);
2536
+
2537
+out:
2538
+ mutex_unlock(&gpd_list_lock);
2539
+
2540
+ return ret;
2541
+}
2542
+EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2543
+
2544
+/**
21492545 * of_genpd_remove_last - Remove the last PM domain registered for a provider
21502546 * @provider: Pointer to device structure associated with provider
21512547 *
....@@ -2182,6 +2578,7 @@
21822578
21832579 static void genpd_release_dev(struct device *dev)
21842580 {
2581
+ of_node_put(dev->of_node);
21852582 kfree(dev);
21862583 }
21872584
....@@ -2243,14 +2640,14 @@
22432640 genpd_queue_power_off_work(pd);
22442641 }
22452642
2246
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
2643
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
22472644 unsigned int index, bool power_on)
22482645 {
22492646 struct of_phandle_args pd_args;
22502647 struct generic_pm_domain *pd;
22512648 int ret;
22522649
2253
- ret = of_parse_phandle_with_args(np, "power-domains",
2650
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
22542651 "#power-domain-cells", index, &pd_args);
22552652 if (ret < 0)
22562653 return ret;
....@@ -2262,12 +2659,12 @@
22622659 mutex_unlock(&gpd_list_lock);
22632660 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
22642661 __func__, PTR_ERR(pd));
2265
- return driver_deferred_probe_check_state(dev);
2662
+ return driver_deferred_probe_check_state(base_dev);
22662663 }
22672664
22682665 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
22692666
2270
- ret = genpd_add_device(pd, dev, NULL);
2667
+ ret = genpd_add_device(pd, dev, base_dev);
22712668 mutex_unlock(&gpd_list_lock);
22722669
22732670 if (ret < 0) {
....@@ -2318,7 +2715,7 @@
23182715 "#power-domain-cells") != 1)
23192716 return 0;
23202717
2321
- return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
2718
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
23222719 }
23232720 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
23242721
....@@ -2341,45 +2738,46 @@
23412738 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
23422739 unsigned int index)
23432740 {
2344
- struct device *genpd_dev;
2741
+ struct device *virt_dev;
23452742 int num_domains;
23462743 int ret;
23472744
23482745 if (!dev->of_node)
23492746 return NULL;
23502747
2351
- /* Deal only with devices using multiple PM domains. */
2748
+ /* Verify that the index is within a valid range. */
23522749 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
23532750 "#power-domain-cells");
2354
- if (num_domains < 2 || index >= num_domains)
2751
+ if (index >= num_domains)
23552752 return NULL;
23562753
23572754 /* Allocate and register device on the genpd bus. */
2358
- genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
2359
- if (!genpd_dev)
2755
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2756
+ if (!virt_dev)
23602757 return ERR_PTR(-ENOMEM);
23612758
2362
- dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
2363
- genpd_dev->bus = &genpd_bus_type;
2364
- genpd_dev->release = genpd_release_dev;
2759
+ dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2760
+ virt_dev->bus = &genpd_bus_type;
2761
+ virt_dev->release = genpd_release_dev;
2762
+ virt_dev->of_node = of_node_get(dev->of_node);
23652763
2366
- ret = device_register(genpd_dev);
2764
+ ret = device_register(virt_dev);
23672765 if (ret) {
2368
- kfree(genpd_dev);
2766
+ put_device(virt_dev);
23692767 return ERR_PTR(ret);
23702768 }
23712769
23722770 /* Try to attach the device to the PM domain at the specified index. */
2373
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
2771
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
23742772 if (ret < 1) {
2375
- device_unregister(genpd_dev);
2773
+ device_unregister(virt_dev);
23762774 return ret ? ERR_PTR(ret) : NULL;
23772775 }
23782776
2379
- pm_runtime_enable(genpd_dev);
2380
- genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
2777
+ pm_runtime_enable(virt_dev);
2778
+ genpd_queue_power_off_work(dev_to_genpd(virt_dev));
23812779
2382
- return genpd_dev;
2780
+ return virt_dev;
23832781 }
23842782 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
23852783
....@@ -2392,7 +2790,7 @@
23922790 * power-domain-names DT property. For further description see
23932791 * genpd_dev_pm_attach_by_id().
23942792 */
2395
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
2793
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
23962794 {
23972795 int index;
23982796
....@@ -2423,7 +2821,7 @@
24232821 &entry_latency);
24242822 if (err) {
24252823 pr_debug(" * %pOF missing entry-latency-us property\n",
2426
- state_node);
2824
+ state_node);
24272825 return -EINVAL;
24282826 }
24292827
....@@ -2431,16 +2829,16 @@
24312829 &exit_latency);
24322830 if (err) {
24332831 pr_debug(" * %pOF missing exit-latency-us property\n",
2434
- state_node);
2832
+ state_node);
24352833 return -EINVAL;
24362834 }
24372835
24382836 err = of_property_read_u32(state_node, "min-residency-us", &residency);
24392837 if (!err)
2440
- genpd_state->residency_ns = 1000 * residency;
2838
+ genpd_state->residency_ns = 1000LL * residency;
24412839
2442
- genpd_state->power_on_latency_ns = 1000 * exit_latency;
2443
- genpd_state->power_off_latency_ns = 1000 * entry_latency;
2840
+ genpd_state->power_on_latency_ns = 1000LL * exit_latency;
2841
+ genpd_state->power_off_latency_ns = 1000LL * entry_latency;
24442842 genpd_state->fwnode = &state_node->fwnode;
24452843
24462844 return 0;
....@@ -2456,13 +2854,17 @@
24562854
24572855 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
24582856 if (ret <= 0)
2459
- return ret;
2857
+ return ret == -ENOENT ? 0 : ret;
24602858
24612859 /* Loop over the phandles until all the requested entry is found */
24622860 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
24632861 np = it.node;
24642862 if (!of_match_node(idle_state_match, np))
24652863 continue;
2864
+
2865
+ if (!of_device_is_available(np))
2866
+ continue;
2867
+
24662868 if (states) {
24672869 ret = genpd_parse_state(&states[i], np);
24682870 if (ret) {
....@@ -2487,8 +2889,8 @@
24872889 *
24882890 * Returns the device states parsed from the OF node. The memory for the states
24892891 * is allocated by this function and is the responsibility of the caller to
2490
- * free the memory after use. If no domain idle states is found it returns
2491
- * -EINVAL and in case of errors, a negative error code.
2892
+ * free the memory after use. If zero or more compatible domain idle states are
2893
+ * found, it returns 0; in case of errors, a negative error code is returned.
24922894 */
24932895 int of_genpd_parse_idle_states(struct device_node *dn,
24942896 struct genpd_power_state **states, int *n)
....@@ -2497,8 +2899,14 @@
24972899 int ret;
24982900
24992901 ret = genpd_iterate_idle_states(dn, NULL);
2500
- if (ret <= 0)
2501
- return ret < 0 ? ret : -EINVAL;
2902
+ if (ret < 0)
2903
+ return ret;
2904
+
2905
+ if (!ret) {
2906
+ *states = NULL;
2907
+ *n = 0;
2908
+ return 0;
2909
+ }
25022910
25032911 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
25042912 if (!st)
....@@ -2518,52 +2926,36 @@
25182926 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
25192927
25202928 /**
2521
- * of_genpd_opp_to_performance_state- Gets performance state of device's
2522
- * power domain corresponding to a DT node's "required-opps" property.
2929
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
25232930 *
2524
- * @dev: Device for which the performance-state needs to be found.
2525
- * @np: DT node where the "required-opps" property is present. This can be
2526
- * the device node itself (if it doesn't have an OPP table) or a node
2527
- * within the OPP table of a device (if device has an OPP table).
2931
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2932
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2933
+ * state.
25282934 *
2529
- * Returns performance state corresponding to the "required-opps" property of
2530
- * a DT node. This calls platform specific genpd->opp_to_performance_state()
2531
- * callback to translate power domain OPP to performance state.
2935
+ * Returns the performance state encoded in the OPP of the genpd. This calls the
2936
+ * platform-specific genpd->opp_to_performance_state() callback to translate
2937
+ * power domain OPP to performance state.
25322938 *
25332939 * Returns performance state on success and 0 on failure.
25342940 */
2535
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
2536
- struct device_node *np)
2941
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2942
+ struct dev_pm_opp *opp)
25372943 {
2538
- struct generic_pm_domain *genpd;
2539
- struct dev_pm_opp *opp;
2540
- int state = 0;
2944
+ struct generic_pm_domain *genpd = NULL;
2945
+ int state;
25412946
2542
- genpd = dev_to_genpd(dev);
2543
- if (IS_ERR(genpd))
2544
- return 0;
2947
+ genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
25452948
2546
- if (unlikely(!genpd->set_performance_state))
2949
+ if (unlikely(!genpd->opp_to_performance_state))
25472950 return 0;
25482951
25492952 genpd_lock(genpd);
2550
-
2551
- opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
2552
- if (IS_ERR(opp)) {
2553
- dev_err(dev, "Failed to find required OPP: %ld\n",
2554
- PTR_ERR(opp));
2555
- goto unlock;
2556
- }
2557
-
25582953 state = genpd->opp_to_performance_state(genpd, opp);
2559
- dev_pm_opp_put(opp);
2560
-
2561
-unlock:
25622954 genpd_unlock(genpd);
25632955
25642956 return state;
25652957 }
2566
-EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
2958
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
25672959
25682960 static int __init genpd_bus_init(void)
25692961 {
....@@ -2577,14 +2969,6 @@
25772969 /*** debugfs support ***/
25782970
25792971 #ifdef CONFIG_DEBUG_FS
2580
-#include <linux/pm.h>
2581
-#include <linux/device.h>
2582
-#include <linux/debugfs.h>
2583
-#include <linux/seq_file.h>
2584
-#include <linux/init.h>
2585
-#include <linux/kobject.h>
2586
-static struct dentry *genpd_debugfs_dir;
2587
-
25882972 /*
25892973 * TODO: This function is a slightly modified version of rtpm_status_show
25902974 * from sysfs.c, so generalize it.
....@@ -2615,8 +2999,8 @@
26152999 struct generic_pm_domain *genpd)
26163000 {
26173001 static const char * const status_lookup[] = {
2618
- [GPD_STATE_ACTIVE] = "on",
2619
- [GPD_STATE_POWER_OFF] = "off"
3002
+ [GENPD_STATE_ON] = "on",
3003
+ [GENPD_STATE_OFF] = "off"
26203004 };
26213005 struct pm_domain_data *pm_data;
26223006 const char *kobj_path;
....@@ -2640,12 +3024,12 @@
26403024
26413025 /*
26423026 * Modifications on the list require holding locks on both
2643
- * master and slave, so we are safe.
3027
+ * parent and child, so we are safe.
26443028 * Also genpd->name is immutable.
26453029 */
2646
- list_for_each_entry(link, &genpd->master_links, master_node) {
2647
- seq_printf(s, "%s", link->slave->name);
2648
- if (!list_is_last(&link->master_node, &genpd->master_links))
3030
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
3031
+ seq_printf(s, "%s", link->child->name);
3032
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
26493033 seq_puts(s, ", ");
26503034 }
26513035
....@@ -2668,12 +3052,12 @@
26683052 return 0;
26693053 }
26703054
2671
-static int genpd_summary_show(struct seq_file *s, void *data)
3055
+static int summary_show(struct seq_file *s, void *data)
26723056 {
26733057 struct generic_pm_domain *genpd;
26743058 int ret = 0;
26753059
2676
- seq_puts(s, "domain status slaves\n");
3060
+ seq_puts(s, "domain status children\n");
26773061 seq_puts(s, " /device runtime status\n");
26783062 seq_puts(s, "----------------------------------------------------------------------\n");
26793063
....@@ -2691,11 +3075,11 @@
26913075 return ret;
26923076 }
26933077
2694
-static int genpd_status_show(struct seq_file *s, void *data)
3078
+static int status_show(struct seq_file *s, void *data)
26953079 {
26963080 static const char * const status_lookup[] = {
2697
- [GPD_STATE_ACTIVE] = "on",
2698
- [GPD_STATE_POWER_OFF] = "off"
3081
+ [GENPD_STATE_ON] = "on",
3082
+ [GENPD_STATE_OFF] = "off"
26993083 };
27003084
27013085 struct generic_pm_domain *genpd = s->private;
....@@ -2708,7 +3092,7 @@
27083092 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
27093093 goto exit;
27103094
2711
- if (genpd->status == GPD_STATE_POWER_OFF)
3095
+ if (genpd->status == GENPD_STATE_OFF)
27123096 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
27133097 genpd->state_idx);
27143098 else
....@@ -2718,7 +3102,7 @@
27183102 return ret;
27193103 }
27203104
2721
-static int genpd_sub_domains_show(struct seq_file *s, void *data)
3105
+static int sub_domains_show(struct seq_file *s, void *data)
27223106 {
27233107 struct generic_pm_domain *genpd = s->private;
27243108 struct gpd_link *link;
....@@ -2728,14 +3112,14 @@
27283112 if (ret)
27293113 return -ERESTARTSYS;
27303114
2731
- list_for_each_entry(link, &genpd->master_links, master_node)
2732
- seq_printf(s, "%s\n", link->slave->name);
3115
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
3116
+ seq_printf(s, "%s\n", link->child->name);
27333117
27343118 genpd_unlock(genpd);
27353119 return ret;
27363120 }
27373121
2738
-static int genpd_idle_states_show(struct seq_file *s, void *data)
3122
+static int idle_states_show(struct seq_file *s, void *data)
27393123 {
27403124 struct generic_pm_domain *genpd = s->private;
27413125 unsigned int i;
....@@ -2745,26 +3129,27 @@
27453129 if (ret)
27463130 return -ERESTARTSYS;
27473131
2748
- seq_puts(s, "State Time Spent(ms)\n");
3132
+ seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
27493133
27503134 for (i = 0; i < genpd->state_count; i++) {
27513135 ktime_t delta = 0;
27523136 s64 msecs;
27533137
2754
- if ((genpd->status == GPD_STATE_POWER_OFF) &&
3138
+ if ((genpd->status == GENPD_STATE_OFF) &&
27553139 (genpd->state_idx == i))
27563140 delta = ktime_sub(ktime_get(), genpd->accounting_time);
27573141
27583142 msecs = ktime_to_ms(
27593143 ktime_add(genpd->states[i].idle_time, delta));
2760
- seq_printf(s, "S%-13i %lld\n", i, msecs);
3144
+ seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
3145
+ genpd->states[i].usage, genpd->states[i].rejected);
27613146 }
27623147
27633148 genpd_unlock(genpd);
27643149 return ret;
27653150 }
27663151
2767
-static int genpd_active_time_show(struct seq_file *s, void *data)
3152
+static int active_time_show(struct seq_file *s, void *data)
27683153 {
27693154 struct generic_pm_domain *genpd = s->private;
27703155 ktime_t delta = 0;
....@@ -2774,7 +3159,7 @@
27743159 if (ret)
27753160 return -ERESTARTSYS;
27763161
2777
- if (genpd->status == GPD_STATE_ACTIVE)
3162
+ if (genpd->status == GENPD_STATE_ON)
27783163 delta = ktime_sub(ktime_get(), genpd->accounting_time);
27793164
27803165 seq_printf(s, "%lld ms\n", ktime_to_ms(
....@@ -2784,7 +3169,7 @@
27843169 return ret;
27853170 }
27863171
2787
-static int genpd_total_idle_time_show(struct seq_file *s, void *data)
3172
+static int total_idle_time_show(struct seq_file *s, void *data)
27883173 {
27893174 struct generic_pm_domain *genpd = s->private;
27903175 ktime_t delta = 0, total = 0;
....@@ -2797,7 +3182,7 @@
27973182
27983183 for (i = 0; i < genpd->state_count; i++) {
27993184
2800
- if ((genpd->status == GPD_STATE_POWER_OFF) &&
3185
+ if ((genpd->status == GENPD_STATE_OFF) &&
28013186 (genpd->state_idx == i))
28023187 delta = ktime_sub(ktime_get(), genpd->accounting_time);
28033188
....@@ -2812,7 +3197,7 @@
28123197 }
28133198
28143199
2815
-static int genpd_devices_show(struct seq_file *s, void *data)
3200
+static int devices_show(struct seq_file *s, void *data)
28163201 {
28173202 struct generic_pm_domain *genpd = s->private;
28183203 struct pm_domain_data *pm_data;
....@@ -2838,7 +3223,7 @@
28383223 return ret;
28393224 }
28403225
2841
-static int genpd_perf_state_show(struct seq_file *s, void *data)
3226
+static int perf_state_show(struct seq_file *s, void *data)
28423227 {
28433228 struct generic_pm_domain *genpd = s->private;
28443229
....@@ -2851,74 +3236,52 @@
28513236 return 0;
28523237 }
28533238
2854
-#define define_genpd_open_function(name) \
2855
-static int genpd_##name##_open(struct inode *inode, struct file *file) \
2856
-{ \
2857
- return single_open(file, genpd_##name##_show, inode->i_private); \
3239
+DEFINE_SHOW_ATTRIBUTE(summary);
3240
+DEFINE_SHOW_ATTRIBUTE(status);
3241
+DEFINE_SHOW_ATTRIBUTE(sub_domains);
3242
+DEFINE_SHOW_ATTRIBUTE(idle_states);
3243
+DEFINE_SHOW_ATTRIBUTE(active_time);
3244
+DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3245
+DEFINE_SHOW_ATTRIBUTE(devices);
3246
+DEFINE_SHOW_ATTRIBUTE(perf_state);
3247
+
3248
+static void genpd_debug_add(struct generic_pm_domain *genpd)
3249
+{
3250
+ struct dentry *d;
3251
+
3252
+ if (!genpd_debugfs_dir)
3253
+ return;
3254
+
3255
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3256
+
3257
+ debugfs_create_file("current_state", 0444,
3258
+ d, genpd, &status_fops);
3259
+ debugfs_create_file("sub_domains", 0444,
3260
+ d, genpd, &sub_domains_fops);
3261
+ debugfs_create_file("idle_states", 0444,
3262
+ d, genpd, &idle_states_fops);
3263
+ debugfs_create_file("active_time", 0444,
3264
+ d, genpd, &active_time_fops);
3265
+ debugfs_create_file("total_idle_time", 0444,
3266
+ d, genpd, &total_idle_time_fops);
3267
+ debugfs_create_file("devices", 0444,
3268
+ d, genpd, &devices_fops);
3269
+ if (genpd->set_performance_state)
3270
+ debugfs_create_file("perf_state", 0444,
3271
+ d, genpd, &perf_state_fops);
28583272 }
2859
-
2860
-define_genpd_open_function(summary);
2861
-define_genpd_open_function(status);
2862
-define_genpd_open_function(sub_domains);
2863
-define_genpd_open_function(idle_states);
2864
-define_genpd_open_function(active_time);
2865
-define_genpd_open_function(total_idle_time);
2866
-define_genpd_open_function(devices);
2867
-define_genpd_open_function(perf_state);
2868
-
2869
-#define define_genpd_debugfs_fops(name) \
2870
-static const struct file_operations genpd_##name##_fops = { \
2871
- .open = genpd_##name##_open, \
2872
- .read = seq_read, \
2873
- .llseek = seq_lseek, \
2874
- .release = single_release, \
2875
-}
2876
-
2877
-define_genpd_debugfs_fops(summary);
2878
-define_genpd_debugfs_fops(status);
2879
-define_genpd_debugfs_fops(sub_domains);
2880
-define_genpd_debugfs_fops(idle_states);
2881
-define_genpd_debugfs_fops(active_time);
2882
-define_genpd_debugfs_fops(total_idle_time);
2883
-define_genpd_debugfs_fops(devices);
2884
-define_genpd_debugfs_fops(perf_state);
28853273
28863274 static int __init genpd_debug_init(void)
28873275 {
2888
- struct dentry *d;
28893276 struct generic_pm_domain *genpd;
28903277
28913278 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
28923279
2893
- if (!genpd_debugfs_dir)
2894
- return -ENOMEM;
3280
+ debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3281
+ NULL, &summary_fops);
28953282
2896
- d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2897
- genpd_debugfs_dir, NULL, &genpd_summary_fops);
2898
- if (!d)
2899
- return -ENOMEM;
2900
-
2901
- list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
2902
- d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
2903
- if (!d)
2904
- return -ENOMEM;
2905
-
2906
- debugfs_create_file("current_state", 0444,
2907
- d, genpd, &genpd_status_fops);
2908
- debugfs_create_file("sub_domains", 0444,
2909
- d, genpd, &genpd_sub_domains_fops);
2910
- debugfs_create_file("idle_states", 0444,
2911
- d, genpd, &genpd_idle_states_fops);
2912
- debugfs_create_file("active_time", 0444,
2913
- d, genpd, &genpd_active_time_fops);
2914
- debugfs_create_file("total_idle_time", 0444,
2915
- d, genpd, &genpd_total_idle_time_fops);
2916
- debugfs_create_file("devices", 0444,
2917
- d, genpd, &genpd_devices_fops);
2918
- if (genpd->set_performance_state)
2919
- debugfs_create_file("perf_state", 0444,
2920
- d, genpd, &genpd_perf_state_fops);
2921
- }
3283
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3284
+ genpd_debug_add(genpd);
29223285
29233286 return 0;
29243287 }
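A note on the debugfs conversion above: DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially the boilerplate that the removed define_genpd_open_function()/define_genpd_debugfs_fops() macros open-coded, which is why summary_fops, status_fops and friends remain available to genpd_debug_add() and genpd_debug_init(). Roughly, DEFINE_SHOW_ATTRIBUTE(summary) expands to something like the following (paraphrased from the upstream macro, not text from this patch):

static int summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, summary_show, inode->i_private);
}

static const struct file_operations summary_fops = {
	.owner		= THIS_MODULE,
	.open		= summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

The dropped NULL checks on the debugfs_create_*() return values follow the kernel-wide convention that debugfs creation failures are ignored rather than treated as fatal. With debugfs mounted in the usual place, the per-domain files created by genpd_debug_add() end up under /sys/kernel/debug/pm_genpd/<domain name>/, next to the global pm_genpd_summary file.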