2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/devfreq/devfreq.c
....@@ -1,18 +1,16 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
34 * for Non-CPU Devices.
45 *
56 * Copyright (C) 2011 Samsung Electronics
67 * MyungJoo Ham <myungjoo.ham@samsung.com>
7
- *
8
- * This program is free software; you can redistribute it and/or modify
9
- * it under the terms of the GNU General Public License version 2 as
10
- * published by the Free Software Foundation.
118 */
129
1310 #include <linux/kernel.h>
1411 #include <linux/kmod.h>
1512 #include <linux/sched.h>
13
+#include <linux/debugfs.h>
1614 #include <linux/errno.h>
1715 #include <linux/err.h>
1816 #include <linux/init.h>
....@@ -27,9 +25,16 @@
2725 #include <linux/printk.h>
2826 #include <linux/hrtimer.h>
2927 #include <linux/of.h>
28
+#include <linux/pm_qos.h>
3029 #include "governor.h"
3130
31
+#define CREATE_TRACE_POINTS
32
+#include <trace/events/devfreq.h>
33
+
34
+#define HZ_PER_KHZ 1000
35
+
3236 static struct class *devfreq_class;
37
+static struct dentry *devfreq_debugfs;
3338
3439 /*
3540 * devfreq core provides delayed work based load monitoring helper
....@@ -44,6 +49,11 @@
4449 static LIST_HEAD(devfreq_list);
4550 static DEFINE_MUTEX(devfreq_list_lock);
4651
52
+static const char timer_name[][DEVFREQ_NAME_LEN] = {
53
+ [DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" },
54
+ [DEVFREQ_TIMER_DELAYED] = { "delayed" },
55
+};
56
+
4757 /**
4858 * find_device_devfreq() - find devfreq struct using device pointer
4959 * @dev: device pointer used to lookup device devfreq.
....@@ -55,12 +65,12 @@
5565 {
5666 struct devfreq *tmp_devfreq;
5767
68
+ lockdep_assert_held(&devfreq_list_lock);
69
+
5870 if (IS_ERR_OR_NULL(dev)) {
5971 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
6072 return ERR_PTR(-EINVAL);
6173 }
62
- WARN(!mutex_is_locked(&devfreq_list_lock),
63
- "devfreq_list_lock must be locked.");
6474
6575 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
6676 if (tmp_devfreq->dev.parent == dev)
....@@ -96,6 +106,54 @@
96106 dev_pm_opp_put(opp);
97107
98108 return max_freq;
109
+}
110
+
111
+/**
112
+ * get_freq_range() - Get the current freq range
113
+ * @devfreq: the devfreq instance
114
+ * @min_freq: the min frequency
115
+ * @max_freq: the max frequency
116
+ *
117
+ * This takes into consideration all constraints.
118
+ */
119
+static void get_freq_range(struct devfreq *devfreq,
120
+ unsigned long *min_freq,
121
+ unsigned long *max_freq)
122
+{
123
+ unsigned long *freq_table = devfreq->profile->freq_table;
124
+ s32 qos_min_freq, qos_max_freq;
125
+
126
+ lockdep_assert_held(&devfreq->lock);
127
+
128
+ /*
129
+ * Initialize minimum/maximum frequency from freq table.
130
+ * The devfreq drivers can initialize this in either ascending or
131
+ * descending order and devfreq core supports both.
132
+ */
133
+ if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
134
+ *min_freq = freq_table[0];
135
+ *max_freq = freq_table[devfreq->profile->max_state - 1];
136
+ } else {
137
+ *min_freq = freq_table[devfreq->profile->max_state - 1];
138
+ *max_freq = freq_table[0];
139
+ }
140
+
141
+ /* Apply constraints from PM QoS */
142
+ qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
143
+ DEV_PM_QOS_MIN_FREQUENCY);
144
+ qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
145
+ DEV_PM_QOS_MAX_FREQUENCY);
146
+ *min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
147
+ if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
148
+ *max_freq = min(*max_freq,
149
+ (unsigned long)HZ_PER_KHZ * qos_max_freq);
150
+
151
+ /* Apply constraints from OPP interface */
152
+ *min_freq = max(*min_freq, devfreq->scaling_min_freq);
153
+ *max_freq = min(*max_freq, devfreq->scaling_max_freq);
154
+
155
+ if (*min_freq > *max_freq)
156
+ *min_freq = *max_freq;
99157 }
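Not from the patch itself: a minimal sketch of how code elsewhere in this file could consume the new helper, assuming devfreq->lock is already held. The function name is hypothetical.

static void example_clamp_to_range(struct devfreq *devfreq, unsigned long *freq)
{
	unsigned long min_freq, max_freq;

	lockdep_assert_held(&devfreq->lock);

	/* combines freq_table limits, PM QoS (kHz) requests and OPP bounds */
	get_freq_range(devfreq, &min_freq, &max_freq);

	if (*freq < min_freq)
		*freq = min_freq;
	if (*freq > max_freq)
		*freq = max_freq;
}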
100158
101159 /**
....@@ -158,10 +216,10 @@
158216 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
159217 {
160218 int lev, prev_lev, ret = 0;
161
- unsigned long cur_time;
219
+ u64 cur_time;
162220
163221 lockdep_assert_held(&devfreq->lock);
164
- cur_time = jiffies;
222
+ cur_time = get_jiffies_64();
165223
166224 /* Immediately exit if previous_freq is not initialized yet. */
167225 if (!devfreq->previous_freq)
....@@ -173,8 +231,8 @@
173231 goto out;
174232 }
175233
176
- devfreq->time_in_state[prev_lev] +=
177
- cur_time - devfreq->last_stat_updated;
234
+ devfreq->stats.time_in_state[prev_lev] +=
235
+ cur_time - devfreq->stats.last_update;
178236
179237 lev = devfreq_get_freq_level(devfreq, freq);
180238 if (lev < 0) {
....@@ -183,13 +241,13 @@
183241 }
184242
185243 if (lev != prev_lev) {
186
- devfreq->trans_table[(prev_lev *
187
- devfreq->profile->max_state) + lev]++;
188
- devfreq->total_trans++;
244
+ devfreq->stats.trans_table[
245
+ (prev_lev * devfreq->profile->max_state) + lev]++;
246
+ devfreq->stats.total_trans++;
189247 }
190248
191249 out:
192
- devfreq->last_stat_updated = cur_time;
250
+ devfreq->stats.last_update = cur_time;
193251 return ret;
194252 }
195253 EXPORT_SYMBOL(devfreq_update_status);
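One consequence of moving the counters to u64 jiffies that the hunk does not spell out: millisecond conversion needs the 64-bit helper, as the trans_stat code later in this patch does. Illustrative line only; "lev" is a hypothetical level index.

	u64 ms = jiffies64_to_msecs(devfreq->stats.time_in_state[lev]);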
....@@ -205,12 +263,12 @@
205263 {
206264 struct devfreq_governor *tmp_governor;
207265
266
+ lockdep_assert_held(&devfreq_list_lock);
267
+
208268 if (IS_ERR_OR_NULL(name)) {
209269 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
210270 return ERR_PTR(-EINVAL);
211271 }
212
- WARN(!mutex_is_locked(&devfreq_list_lock),
213
- "devfreq_list_lock must be locked.");
214272
215273 list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
216274 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
....@@ -236,12 +294,12 @@
236294 struct devfreq_governor *governor;
237295 int err = 0;
238296
297
+ lockdep_assert_held(&devfreq_list_lock);
298
+
239299 if (IS_ERR_OR_NULL(name)) {
240300 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
241301 return ERR_PTR(-EINVAL);
242302 }
243
- WARN(!mutex_is_locked(&devfreq_list_lock),
244
- "devfreq_list_lock must be locked.");
245303
246304 governor = find_devfreq_governor(name);
247305 if (IS_ERR(governor)) {
....@@ -286,6 +344,44 @@
286344 return 0;
287345 }
288346
347
+static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
348
+ u32 flags)
349
+{
350
+ struct devfreq_freqs freqs;
351
+ unsigned long cur_freq;
352
+ int err = 0;
353
+
354
+ if (devfreq->profile->get_cur_freq)
355
+ devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
356
+ else
357
+ cur_freq = devfreq->previous_freq;
358
+
359
+ freqs.old = cur_freq;
360
+ freqs.new = new_freq;
361
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
362
+
363
+ err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
364
+ if (err) {
365
+ freqs.new = cur_freq;
366
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
367
+ return err;
368
+ }
369
+
370
+ freqs.new = new_freq;
371
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
372
+
373
+ if (devfreq_update_status(devfreq, new_freq))
374
+ dev_err(&devfreq->dev,
375
+ "Couldn't update frequency transition information.\n");
376
+
377
+ devfreq->previous_freq = new_freq;
378
+
379
+ if (devfreq->suspend_freq)
380
+ devfreq->resume_freq = new_freq;
381
+
382
+ return err;
383
+}
384
+
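devfreq_set_target() keeps the existing PRECHANGE/POSTCHANGE notification contract while factoring it out of update_devfreq(). A hedged sketch of a consumer registered through devfreq_register_notifier(devfreq, &nb, DEVFREQ_TRANSITION_NOTIFIER); the callback name is made up.

static int example_trans_notifier(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct devfreq_freqs *freqs = data;

	switch (event) {
	case DEVFREQ_PRECHANGE:
		/* freqs->old is still in effect, freqs->new is the request */
		break;
	case DEVFREQ_POSTCHANGE:
		/* freqs->new is in effect; after a failed target() it equals freqs->old */
		break;
	}

	return NOTIFY_DONE;
}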
289385 /* Load monitoring helper functions for governors use */
290386
291387 /**
....@@ -297,39 +393,20 @@
297393 */
298394 int update_devfreq(struct devfreq *devfreq)
299395 {
300
- struct devfreq_policy *policy = &devfreq->policy;
301
- struct devfreq_freqs freqs;
302
- unsigned long freq, cur_freq, min_freq, max_freq;
396
+ unsigned long freq, min_freq, max_freq;
303397 int err = 0;
304398 u32 flags = 0;
305399
306
- if (!mutex_is_locked(&devfreq->lock)) {
307
- WARN(true, "devfreq->lock must be locked by the caller.\n");
308
- return -EINVAL;
309
- }
400
+ lockdep_assert_held(&devfreq->lock);
310401
311402 if (!devfreq->governor)
312403 return -EINVAL;
313
-
314
- policy->max = devfreq->scaling_max_freq;
315
- policy->min = devfreq->scaling_min_freq;
316
- srcu_notifier_call_chain(&devfreq->policy_notifier_list, DEVFREQ_ADJUST,
317
- policy);
318404
319405 /* Reevaluate the proper frequency */
320406 err = devfreq->governor->get_target_freq(devfreq, &freq);
321407 if (err)
322408 return err;
323
-
324
- /*
325
- * Adjust the frequency with user freq, QoS and available freq.
326
- *
327
- * List from the highest priority
328
- * max_freq
329
- * min_freq
330
- */
331
- max_freq = min(policy->max, devfreq->max_freq);
332
- min_freq = max(policy->min, devfreq->min_freq);
409
+ get_freq_range(devfreq, &min_freq, &max_freq);
333410
334411 if (freq < min_freq) {
335412 freq = min_freq;
....@@ -340,31 +417,8 @@
340417 flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
341418 }
342419
343
- if (devfreq->profile->get_cur_freq)
344
- devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
345
- else
346
- cur_freq = devfreq->previous_freq;
420
+ return devfreq_set_target(devfreq, freq, flags);
347421
348
- freqs.old = cur_freq;
349
- freqs.new = freq;
350
- devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
351
-
352
- err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
353
- if (err) {
354
- freqs.new = cur_freq;
355
- devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
356
- return err;
357
- }
358
-
359
- freqs.new = freq;
360
- devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
361
-
362
- if (devfreq_update_status(devfreq, freq))
363
- dev_err(&devfreq->dev,
364
- "Couldn't update frequency transition information.\n");
365
-
366
- devfreq->previous_freq = freq;
367
- return err;
368422 }
369423 EXPORT_SYMBOL(update_devfreq);
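With the WARN gone, the locking contract is enforced only through lockdep, so out-of-band callers must still take the lock themselves. Minimal sketch, mirroring what qos_notifier_call() further down does; the wrapper name is hypothetical.

static int example_reevaluate(struct devfreq *devfreq)
{
	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);	/* satisfies lockdep_assert_held() */
	mutex_unlock(&devfreq->lock);

	return err;
}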
370424
....@@ -387,20 +441,35 @@
387441 queue_delayed_work(devfreq_wq, &devfreq->work,
388442 msecs_to_jiffies(devfreq->profile->polling_ms));
389443 mutex_unlock(&devfreq->lock);
444
+
445
+ trace_devfreq_monitor(devfreq);
390446 }
391447
392448 /**
393449 * devfreq_monitor_start() - Start load monitoring of devfreq instance
394450 * @devfreq: the devfreq instance.
395451 *
396
- * Helper function for starting devfreq device load monitoing. By
452
+ * Helper function for starting devfreq device load monitoring. By
397453 * default delayed work based monitoring is supported. Function
398454 * to be called from governor in response to DEVFREQ_GOV_START
399455 * event when device is added to devfreq framework.
400456 */
401457 void devfreq_monitor_start(struct devfreq *devfreq)
402458 {
403
- INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
459
+ if (devfreq->governor->interrupt_driven)
460
+ return;
461
+
462
+ switch (devfreq->profile->timer) {
463
+ case DEVFREQ_TIMER_DEFERRABLE:
464
+ INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
465
+ break;
466
+ case DEVFREQ_TIMER_DELAYED:
467
+ INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
468
+ break;
469
+ default:
470
+ return;
471
+ }
472
+
404473 if (devfreq->profile->polling_ms)
405474 queue_delayed_work(devfreq_wq, &devfreq->work,
406475 msecs_to_jiffies(devfreq->profile->polling_ms));
....@@ -411,12 +480,15 @@
411480 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
412481 * @devfreq: the devfreq instance.
413482 *
414
- * Helper function to stop devfreq device load monitoing. Function
483
+ * Helper function to stop devfreq device load monitoring. Function
415484 * to be called from governor in response to DEVFREQ_GOV_STOP
416485 * event when device is removed from devfreq framework.
417486 */
418487 void devfreq_monitor_stop(struct devfreq *devfreq)
419488 {
489
+ if (devfreq->governor->interrupt_driven)
490
+ return;
491
+
420492 cancel_delayed_work_sync(&devfreq->work);
421493 }
422494 EXPORT_SYMBOL(devfreq_monitor_stop);
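The timer selection and the interrupt_driven early return are driver/governor facing. A hedged sketch of a driver profile opting into the new delayed (non-deferrable) timer; every example_* identifier and the 400 MHz value are made up.

static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* program the device clock to *freq here */
	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 400000000,		/* Hz */
	.polling_ms	= 50,
	.timer		= DEVFREQ_TIMER_DELAYED,	/* default is DEVFREQ_TIMER_DEFERRABLE */
	.target		= example_target,
};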
....@@ -425,7 +497,7 @@
425497 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
426498 * @devfreq: the devfreq instance.
427499 *
428
- * Helper function to suspend devfreq device load monitoing. Function
500
+ * Helper function to suspend devfreq device load monitoring. Function
429501 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
430502 * event or when polling interval is set to zero.
431503 *
....@@ -444,6 +516,10 @@
444516 devfreq_update_status(devfreq, devfreq->previous_freq);
445517 devfreq->stop_polling = true;
446518 mutex_unlock(&devfreq->lock);
519
+
520
+ if (devfreq->governor->interrupt_driven)
521
+ return;
522
+
447523 cancel_delayed_work_sync(&devfreq->work);
448524 }
449525 EXPORT_SYMBOL(devfreq_monitor_suspend);
....@@ -452,7 +528,7 @@
452528 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
453529 * @devfreq: the devfreq instance.
454530 *
455
- * Helper function to resume devfreq device load monitoing. Function
531
+ * Helper function to resume devfreq device load monitoring. Function
456532 * to be called from governor in response to DEVFREQ_GOV_RESUME
457533 * event or when polling interval is set to non-zero.
458534 */
....@@ -464,12 +540,16 @@
464540 if (!devfreq->stop_polling)
465541 goto out;
466542
543
+ if (devfreq->governor->interrupt_driven)
544
+ goto out_update;
545
+
467546 if (!delayed_work_pending(&devfreq->work) &&
468547 devfreq->profile->polling_ms)
469548 queue_delayed_work(devfreq_wq, &devfreq->work,
470549 msecs_to_jiffies(devfreq->profile->polling_ms));
471550
472
- devfreq->last_stat_updated = jiffies;
551
+out_update:
552
+ devfreq->stats.last_update = get_jiffies_64();
473553 devfreq->stop_polling = false;
474554
475555 if (devfreq->profile->get_cur_freq &&
....@@ -482,14 +562,14 @@
482562 EXPORT_SYMBOL(devfreq_monitor_resume);
483563
484564 /**
485
- * devfreq_interval_update() - Update device devfreq monitoring interval
565
+ * devfreq_update_interval() - Update device devfreq monitoring interval
486566 * @devfreq: the devfreq instance.
487567 * @delay: new polling interval to be set.
488568 *
489569 * Helper function to set new load monitoring polling interval. Function
490
- * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
570
+ * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
491571 */
492
-void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
572
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
493573 {
494574 unsigned int cur_delay = devfreq->profile->polling_ms;
495575 unsigned int new_delay = *delay;
....@@ -498,6 +578,9 @@
498578 devfreq->profile->polling_ms = new_delay;
499579
500580 if (devfreq->stop_polling)
581
+ goto out;
582
+
583
+ if (devfreq->governor->interrupt_driven)
501584 goto out;
502585
503586 /* if new delay is zero, stop polling */
....@@ -521,16 +604,16 @@
521604 mutex_lock(&devfreq->lock);
522605 if (!devfreq->stop_polling)
523606 queue_delayed_work(devfreq_wq, &devfreq->work,
524
- msecs_to_jiffies(devfreq->profile->polling_ms));
607
+ msecs_to_jiffies(devfreq->profile->polling_ms));
525608 }
526609 out:
527610 mutex_unlock(&devfreq->lock);
528611 }
529
-EXPORT_SYMBOL(devfreq_interval_update);
612
+EXPORT_SYMBOL(devfreq_update_interval);
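Governors see the rename through their event handlers. A minimal dispatch sketch modeled on the in-tree simple governors; only the DEVFREQ_GOV_UPDATE_INTERVAL case reflects the new naming, and the handler itself is hypothetical.

static int example_governor_event_handler(struct devfreq *devfreq,
					  unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_UPDATE_INTERVAL:	/* was DEVFREQ_GOV_INTERVAL */
		devfreq_update_interval(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	}

	return 0;
}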
530613
531614 /**
532615 * devfreq_notifier_call() - Notify that the device frequency requirements
533
- * has been changed out of devfreq framework.
616
+ * has been changed out of devfreq framework.
534617 * @nb: the notifier_block (supposed to be devfreq->nb)
535618 * @type: not used
536619 * @devp: not used
....@@ -546,6 +629,8 @@
546629 mutex_lock(&devfreq->lock);
547630
548631 devfreq->scaling_min_freq = find_available_min_freq(devfreq);
632
+ if (!devfreq->scaling_min_freq)
633
+ goto out;
549634
550635 devfreq->scaling_max_freq = find_available_max_freq(devfreq);
551636 if (!devfreq->scaling_max_freq) {
....@@ -566,6 +651,45 @@
566651 }
567652
568653 /**
654
+ * qos_notifier_call() - Common handler for QoS constraints.
655
+ * @devfreq: the devfreq instance.
656
+ */
657
+static int qos_notifier_call(struct devfreq *devfreq)
658
+{
659
+ int err;
660
+
661
+ mutex_lock(&devfreq->lock);
662
+ err = update_devfreq(devfreq);
663
+ mutex_unlock(&devfreq->lock);
664
+ if (err)
665
+ dev_err(devfreq->dev.parent,
666
+ "failed to update frequency from PM QoS (%d)\n",
667
+ err);
668
+
669
+ return NOTIFY_OK;
670
+}
671
+
672
+/**
673
+ * qos_min_notifier_call() - Callback for QoS min_freq changes.
674
+ * @nb: Should be devfreq->nb_min
675
+ */
676
+static int qos_min_notifier_call(struct notifier_block *nb,
677
+ unsigned long val, void *ptr)
678
+{
679
+ return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
680
+}
681
+
682
+/**
683
+ * qos_max_notifier_call() - Callback for QoS max_freq changes.
684
+ * @nb: Should be devfreq->nb_max
685
+ */
686
+static int qos_max_notifier_call(struct notifier_block *nb,
687
+ unsigned long val, void *ptr)
688
+{
689
+ return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
690
+}
691
+
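These notifiers fire whenever a frequency request on the parent device is added, updated or removed. A hedged sketch of such a requester; note that DEV_PM_QOS_MIN_FREQUENCY/DEV_PM_QOS_MAX_FREQUENCY values are in kHz while devfreq itself works in Hz. The example_* identifiers are invented.

static struct dev_pm_qos_request example_min_req;

static int example_request_floor(struct device *parent_dev)
{
	/* ask for at least 200 MHz from the devfreq parent device (value in kHz) */
	return dev_pm_qos_add_request(parent_dev, &example_min_req,
				      DEV_PM_QOS_MIN_FREQUENCY, 200000);
}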
692
+/**
569693 * devfreq_dev_release() - Callback for struct device to release the device.
570694 * @dev: the devfreq device
571695 *
....@@ -574,16 +698,40 @@
574698 static void devfreq_dev_release(struct device *dev)
575699 {
576700 struct devfreq *devfreq = to_devfreq(dev);
701
+ int err;
577702
578703 mutex_lock(&devfreq_list_lock);
579704 list_del(&devfreq->node);
580705 mutex_unlock(&devfreq_list_lock);
581706
707
+ err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
708
+ DEV_PM_QOS_MAX_FREQUENCY);
709
+ if (err && err != -ENOENT)
710
+ dev_warn(dev->parent,
711
+ "Failed to remove max_freq notifier: %d\n", err);
712
+ err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
713
+ DEV_PM_QOS_MIN_FREQUENCY);
714
+ if (err && err != -ENOENT)
715
+ dev_warn(dev->parent,
716
+ "Failed to remove min_freq notifier: %d\n", err);
717
+
718
+ if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
719
+ err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
720
+ if (err < 0)
721
+ dev_warn(dev->parent,
722
+ "Failed to remove max_freq request: %d\n", err);
723
+ }
724
+ if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
725
+ err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
726
+ if (err < 0)
727
+ dev_warn(dev->parent,
728
+ "Failed to remove min_freq request: %d\n", err);
729
+ }
730
+
582731 if (devfreq->profile->exit)
583732 devfreq->profile->exit(devfreq->dev.parent);
584733
585734 mutex_destroy(&devfreq->lock);
586
- mutex_destroy(&devfreq->event_lock);
587735 kfree(devfreq);
588736 }
589737
....@@ -613,7 +761,7 @@
613761 devfreq = find_device_devfreq(dev);
614762 mutex_unlock(&devfreq_list_lock);
615763 if (!IS_ERR(devfreq)) {
616
- dev_err(dev, "%s: Unable to create devfreq for the device.\n",
764
+ dev_err(dev, "%s: devfreq device already exists!\n",
617765 __func__);
618766 err = -EINVAL;
619767 goto err_out;
....@@ -626,30 +774,39 @@
626774 }
627775
628776 mutex_init(&devfreq->lock);
629
- mutex_init(&devfreq->event_lock);
630777 mutex_lock(&devfreq->lock);
631778 devfreq->dev.parent = dev;
632779 devfreq->dev.class = devfreq_class;
633780 devfreq->dev.release = devfreq_dev_release;
634781 INIT_LIST_HEAD(&devfreq->node);
635782 devfreq->profile = profile;
636
- strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
783
+ strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
637784 devfreq->previous_freq = profile->initial_freq;
638785 devfreq->last_status.current_frequency = profile->initial_freq;
639786 devfreq->data = data;
640787 devfreq->nb.notifier_call = devfreq_notifier_call;
641788
789
+ if (devfreq->profile->timer < 0
790
+ || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
791
+ mutex_unlock(&devfreq->lock);
792
+ err = -EINVAL;
793
+ goto err_dev;
794
+ }
795
+
642796 if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
643797 mutex_unlock(&devfreq->lock);
644798 err = set_freq_table(devfreq);
645799 if (err < 0)
646
- goto err_out;
800
+ goto err_dev;
647801 mutex_lock(&devfreq->lock);
648802 }
649803
650804 devfreq->scaling_min_freq = find_available_min_freq(devfreq);
651
- devfreq->min_freq = devfreq->scaling_min_freq;
652
- devfreq->policy.min = devfreq->min_freq;
805
+ if (!devfreq->scaling_min_freq) {
806
+ mutex_unlock(&devfreq->lock);
807
+ err = -EINVAL;
808
+ goto err_dev;
809
+ }
653810
654811 devfreq->scaling_max_freq = find_available_max_freq(devfreq);
655812 if (!devfreq->scaling_max_freq) {
....@@ -657,8 +814,9 @@
657814 err = -EINVAL;
658815 goto err_dev;
659816 }
660
- devfreq->max_freq = devfreq->scaling_max_freq;
661
- devfreq->policy.max = devfreq->max_freq;
817
+
818
+ devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
819
+ atomic_set(&devfreq->suspend_count, 0);
662820
663821 dev_set_name(&devfreq->dev, "%s", dev_name(dev));
664822 err = device_register(&devfreq->dev);
....@@ -668,22 +826,55 @@
668826 goto err_out;
669827 }
670828
671
- devfreq->trans_table =
672
- devm_kzalloc(&devfreq->dev,
673
- array3_size(sizeof(unsigned int),
674
- devfreq->profile->max_state,
675
- devfreq->profile->max_state),
676
- GFP_KERNEL);
677
- devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
678
- devfreq->profile->max_state,
679
- sizeof(unsigned long),
680
- GFP_KERNEL);
681
- devfreq->last_stat_updated = jiffies;
829
+ devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
830
+ array3_size(sizeof(unsigned int),
831
+ devfreq->profile->max_state,
832
+ devfreq->profile->max_state),
833
+ GFP_KERNEL);
834
+ if (!devfreq->stats.trans_table) {
835
+ mutex_unlock(&devfreq->lock);
836
+ err = -ENOMEM;
837
+ goto err_devfreq;
838
+ }
839
+
840
+ devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
841
+ devfreq->profile->max_state,
842
+ sizeof(*devfreq->stats.time_in_state),
843
+ GFP_KERNEL);
844
+ if (!devfreq->stats.time_in_state) {
845
+ mutex_unlock(&devfreq->lock);
846
+ err = -ENOMEM;
847
+ goto err_devfreq;
848
+ }
849
+
850
+ devfreq->stats.total_trans = 0;
851
+ devfreq->stats.last_update = get_jiffies_64();
682852
683853 srcu_init_notifier_head(&devfreq->transition_notifier_list);
684
- srcu_init_notifier_head(&devfreq->policy_notifier_list);
685854
686855 mutex_unlock(&devfreq->lock);
856
+
857
+ err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
858
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
859
+ if (err < 0)
860
+ goto err_devfreq;
861
+ err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
862
+ DEV_PM_QOS_MAX_FREQUENCY,
863
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
864
+ if (err < 0)
865
+ goto err_devfreq;
866
+
867
+ devfreq->nb_min.notifier_call = qos_min_notifier_call;
868
+ err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
869
+ DEV_PM_QOS_MIN_FREQUENCY);
870
+ if (err)
871
+ goto err_devfreq;
872
+
873
+ devfreq->nb_max.notifier_call = qos_max_notifier_call;
874
+ err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
875
+ DEV_PM_QOS_MAX_FREQUENCY);
876
+ if (err)
877
+ goto err_devfreq;
687878
688879 mutex_lock(&devfreq_list_lock);
689880
....@@ -712,12 +903,11 @@
712903
713904 err_init:
714905 mutex_unlock(&devfreq_list_lock);
715
-
906
+err_devfreq:
716907 devfreq_remove_device(devfreq);
717908 devfreq = NULL;
718909 err_dev:
719
- if (devfreq)
720
- kfree(devfreq);
910
+ kfree(devfreq);
721911 err_out:
722912 return ERR_PTR(err);
723913 }
....@@ -796,52 +986,79 @@
796986
797987 #ifdef CONFIG_OF
798988 /*
799
- * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
800
- * @dev - instance to the given device
801
- * @index - index into list of devfreq
989
+ * devfreq_get_devfreq_by_node - Get the devfreq device from devicetree
990
+ * @node - pointer to device_node
802991 *
803992 * return the instance of devfreq device
804993 */
805
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
994
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
806995 {
807
- struct device_node *node;
808996 struct devfreq *devfreq;
809997
810
- if (!dev)
811
- return ERR_PTR(-EINVAL);
812
-
813
- if (!dev->of_node)
814
- return ERR_PTR(-EINVAL);
815
-
816
- node = of_parse_phandle(dev->of_node, "devfreq", index);
817998 if (!node)
818
- return ERR_PTR(-ENODEV);
999
+ return ERR_PTR(-EINVAL);
8191000
8201001 mutex_lock(&devfreq_list_lock);
8211002 list_for_each_entry(devfreq, &devfreq_list, node) {
8221003 if (devfreq->dev.parent
8231004 && devfreq->dev.parent->of_node == node) {
8241005 mutex_unlock(&devfreq_list_lock);
825
- of_node_put(node);
8261006 return devfreq;
8271007 }
8281008 }
8291009 mutex_unlock(&devfreq_list_lock);
1010
+
1011
+ return ERR_PTR(-ENODEV);
1012
+}
1013
+
1014
+/*
1015
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
1016
+ * @dev - instance to the given device
1017
+ * @phandle_name - name of property holding a phandle value
1018
+ * @index - index into list of devfreq
1019
+ *
1020
+ * return the instance of devfreq device
1021
+ */
1022
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
1023
+ const char *phandle_name, int index)
1024
+{
1025
+ struct device_node *node;
1026
+ struct devfreq *devfreq;
1027
+
1028
+ if (!dev || !phandle_name)
1029
+ return ERR_PTR(-EINVAL);
1030
+
1031
+ if (!dev->of_node)
1032
+ return ERR_PTR(-EINVAL);
1033
+
1034
+ node = of_parse_phandle(dev->of_node, phandle_name, index);
1035
+ if (!node)
1036
+ return ERR_PTR(-ENODEV);
1037
+
1038
+ devfreq = devfreq_get_devfreq_by_node(node);
8301039 of_node_put(node);
8311040
832
- return ERR_PTR(-EPROBE_DEFER);
1041
+ return devfreq;
8331042 }
1043
+
8341044 #else
835
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
1045
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
1046
+{
1047
+ return ERR_PTR(-ENODEV);
1048
+}
1049
+
1050
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
1051
+ const char *phandle_name, int index)
8361052 {
8371053 return ERR_PTR(-ENODEV);
8381054 }
8391055 #endif /* CONFIG_OF */
1056
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node);
8401057 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
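Callers now pass the phandle property name explicitly. A hedged sketch of the common case where the consumer's DT node carries a "devfreq" phandle property, matching the name that used to be hard-coded.

	struct devfreq *parent_devfreq;

	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, "devfreq", 0);
	if (IS_ERR(parent_devfreq))
		return PTR_ERR(parent_devfreq);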
8411058
8421059 /**
8431060 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
844
- * @dev: the device to add devfreq feature.
1061
+ * @dev: the device from which to remove devfreq feature.
8451062 * @devfreq: the devfreq instance to be removed
8461063 */
8471064 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
....@@ -866,14 +1083,25 @@
8661083 if (!devfreq)
8671084 return -EINVAL;
8681085
869
- if (!devfreq->governor)
1086
+ if (atomic_inc_return(&devfreq->suspend_count) > 1)
8701087 return 0;
8711088
872
- mutex_lock(&devfreq->event_lock);
873
- ret = devfreq->governor->event_handler(devfreq,
874
- DEVFREQ_GOV_SUSPEND, NULL);
875
- mutex_unlock(&devfreq->event_lock);
876
- return ret;
1089
+ if (devfreq->governor) {
1090
+ ret = devfreq->governor->event_handler(devfreq,
1091
+ DEVFREQ_GOV_SUSPEND, NULL);
1092
+ if (ret)
1093
+ return ret;
1094
+ }
1095
+
1096
+ if (devfreq->suspend_freq) {
1097
+ mutex_lock(&devfreq->lock);
1098
+ ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
1099
+ mutex_unlock(&devfreq->lock);
1100
+ if (ret)
1101
+ return ret;
1102
+ }
1103
+
1104
+ return 0;
8771105 }
8781106 EXPORT_SYMBOL(devfreq_suspend_device);
8791107
....@@ -888,19 +1116,75 @@
8881116 int devfreq_resume_device(struct devfreq *devfreq)
8891117 {
8901118 int ret;
1119
+
8911120 if (!devfreq)
8921121 return -EINVAL;
8931122
894
- if (!devfreq->governor)
1123
+ if (atomic_dec_return(&devfreq->suspend_count) >= 1)
8951124 return 0;
8961125
897
- mutex_lock(&devfreq->event_lock);
898
- ret = devfreq->governor->event_handler(devfreq,
899
- DEVFREQ_GOV_RESUME, NULL);
900
- mutex_unlock(&devfreq->event_lock);
901
- return ret;
1126
+ if (devfreq->resume_freq) {
1127
+ mutex_lock(&devfreq->lock);
1128
+ ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
1129
+ mutex_unlock(&devfreq->lock);
1130
+ if (ret)
1131
+ return ret;
1132
+ }
1133
+
1134
+ if (devfreq->governor) {
1135
+ ret = devfreq->governor->event_handler(devfreq,
1136
+ DEVFREQ_GOV_RESUME, NULL);
1137
+ if (ret)
1138
+ return ret;
1139
+ }
1140
+
1141
+ return 0;
9021142 }
9031143 EXPORT_SYMBOL(devfreq_resume_device);
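The atomic suspend_count makes nested suspend/resume calls safe: only the first suspend and the last resume act on the device. An illustrative sequence, assuming the device starts resumed; "df" is hypothetical.

	devfreq_suspend_device(df);	/* 0 -> 1: governor stopped, suspend_freq applied */
	devfreq_suspend_device(df);	/* 1 -> 2: no further action */
	devfreq_resume_device(df);	/* 2 -> 1: no further action */
	devfreq_resume_device(df);	/* 1 -> 0: resume_freq restored, governor restarted */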
1144
+
1145
+/**
1146
+ * devfreq_suspend() - Suspend devfreq governors and devices
1147
+ *
1148
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
1149
+ * and devices preserving the state for resume. On some platforms the devfreq
1150
+ * device must have precise state (frequency) after resume in order to provide
1151
+ * fully operating setup.
1152
+ */
1153
+void devfreq_suspend(void)
1154
+{
1155
+ struct devfreq *devfreq;
1156
+ int ret;
1157
+
1158
+ mutex_lock(&devfreq_list_lock);
1159
+ list_for_each_entry(devfreq, &devfreq_list, node) {
1160
+ ret = devfreq_suspend_device(devfreq);
1161
+ if (ret)
1162
+ dev_err(&devfreq->dev,
1163
+ "failed to suspend devfreq device\n");
1164
+ }
1165
+ mutex_unlock(&devfreq_list_lock);
1166
+}
1167
+
1168
+/**
1169
+ * devfreq_resume() - Resume devfreq governors and devices
1170
+ *
1171
+ * Called during system wide Suspend/Hibernate cycle for resuming governors and
1172
+ * devices that are suspended with devfreq_suspend().
1173
+ */
1174
+void devfreq_resume(void)
1175
+{
1176
+ struct devfreq *devfreq;
1177
+ int ret;
1178
+
1179
+ mutex_lock(&devfreq_list_lock);
1180
+ list_for_each_entry(devfreq, &devfreq_list, node) {
1181
+ ret = devfreq_resume_device(devfreq);
1182
+ if (ret)
1183
+ dev_warn(&devfreq->dev,
1184
+ "failed to resume devfreq device\n");
1185
+ }
1186
+ mutex_unlock(&devfreq_list_lock);
1187
+}
9041188
9051189 /**
9061190 * devfreq_add_governor() - Add devfreq governor
....@@ -1025,18 +1309,20 @@
10251309 static ssize_t name_show(struct device *dev,
10261310 struct device_attribute *attr, char *buf)
10271311 {
1028
- struct devfreq *devfreq = to_devfreq(dev);
1029
- return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
1312
+ struct devfreq *df = to_devfreq(dev);
1313
+ return sprintf(buf, "%s\n", dev_name(df->dev.parent));
10301314 }
10311315 static DEVICE_ATTR_RO(name);
10321316
10331317 static ssize_t governor_show(struct device *dev,
10341318 struct device_attribute *attr, char *buf)
10351319 {
1036
- if (!to_devfreq(dev)->governor)
1320
+ struct devfreq *df = to_devfreq(dev);
1321
+
1322
+ if (!df->governor)
10371323 return -EINVAL;
10381324
1039
- return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1325
+ return sprintf(buf, "%s\n", df->governor->name);
10401326 }
10411327
10421328 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
....@@ -1045,7 +1331,10 @@
10451331 struct devfreq *df = to_devfreq(dev);
10461332 int ret;
10471333 char str_governor[DEVFREQ_NAME_LEN + 1];
1048
- const struct devfreq_governor *governor, *prev_gov;
1334
+ const struct devfreq_governor *governor, *prev_governor;
1335
+
1336
+ if (!df->governor)
1337
+ return -EINVAL;
10491338
10501339 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
10511340 if (ret != 1)
....@@ -1060,39 +1349,36 @@
10601349 if (df->governor == governor) {
10611350 ret = 0;
10621351 goto out;
1063
- } else if ((df->governor && df->governor->immutable) ||
1064
- governor->immutable) {
1352
+ } else if (df->governor->immutable || governor->immutable) {
10651353 ret = -EINVAL;
10661354 goto out;
10671355 }
10681356
1069
- mutex_lock(&df->event_lock);
1070
- if (df->governor) {
1071
- ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1072
- if (ret) {
1073
- dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1074
- __func__, df->governor->name, ret);
1075
- goto gov_stop_out;
1076
- }
1357
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1358
+ if (ret) {
1359
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1360
+ __func__, df->governor->name, ret);
1361
+ goto out;
10771362 }
1078
- prev_gov = df->governor;
1363
+
1364
+ prev_governor = df->governor;
10791365 df->governor = governor;
10801366 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
10811367 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
10821368 if (ret) {
10831369 dev_warn(dev, "%s: Governor %s not started(%d)\n",
10841370 __func__, df->governor->name, ret);
1085
- if (prev_gov) {
1086
- df->governor = prev_gov;
1087
- strlcpy(df->governor_name, prev_gov->name,
1088
- DEVFREQ_NAME_LEN);
1089
- df->governor->event_handler(df, DEVFREQ_GOV_START,
1090
- NULL);
1371
+ df->governor = prev_governor;
1372
+ strncpy(df->governor_name, prev_governor->name,
1373
+ DEVFREQ_NAME_LEN);
1374
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1375
+ if (ret) {
1376
+ dev_err(dev,
1377
+ "%s: reverting to Governor %s failed (%d)\n",
1378
+ __func__, df->governor_name, ret);
1379
+ df->governor = NULL;
10911380 }
10921381 }
1093
-
1094
-gov_stop_out:
1095
- mutex_unlock(&df->event_lock);
10961382 out:
10971383 mutex_unlock(&devfreq_list_lock);
10981384
....@@ -1109,15 +1395,18 @@
11091395 struct devfreq *df = to_devfreq(d);
11101396 ssize_t count = 0;
11111397
1398
+ if (!df->governor)
1399
+ return -EINVAL;
1400
+
11121401 mutex_lock(&devfreq_list_lock);
11131402
11141403 /*
11151404 * The devfreq with immutable governor (e.g., passive) shows
11161405 * only own governor.
11171406 */
1118
- if (df->governor && df->governor->immutable) {
1407
+ if (df->governor->immutable) {
11191408 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1120
- "%s ", df->governor_name);
1409
+ "%s ", df->governor_name);
11211410 /*
11221411 * The devfreq device shows the registered governor except for
11231412 * immutable governors such as passive governor .
....@@ -1149,27 +1438,37 @@
11491438 char *buf)
11501439 {
11511440 unsigned long freq;
1152
- struct devfreq *devfreq = to_devfreq(dev);
1441
+ struct devfreq *df = to_devfreq(dev);
11531442
1154
- if (devfreq->profile->get_cur_freq &&
1155
- !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1443
+ if (!df->profile)
1444
+ return -EINVAL;
1445
+
1446
+ if (df->profile->get_cur_freq &&
1447
+ !df->profile->get_cur_freq(df->dev.parent, &freq))
11561448 return sprintf(buf, "%lu\n", freq);
11571449
1158
- return sprintf(buf, "%lu\n", devfreq->previous_freq);
1450
+ return sprintf(buf, "%lu\n", df->previous_freq);
11591451 }
11601452 static DEVICE_ATTR_RO(cur_freq);
11611453
11621454 static ssize_t target_freq_show(struct device *dev,
11631455 struct device_attribute *attr, char *buf)
11641456 {
1165
- return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1457
+ struct devfreq *df = to_devfreq(dev);
1458
+
1459
+ return sprintf(buf, "%lu\n", df->previous_freq);
11661460 }
11671461 static DEVICE_ATTR_RO(target_freq);
11681462
11691463 static ssize_t polling_interval_show(struct device *dev,
11701464 struct device_attribute *attr, char *buf)
11711465 {
1172
- return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1466
+ struct devfreq *df = to_devfreq(dev);
1467
+
1468
+ if (!df->profile)
1469
+ return -EINVAL;
1470
+
1471
+ return sprintf(buf, "%d\n", df->profile->polling_ms);
11731472 }
11741473
11751474 static ssize_t polling_interval_store(struct device *dev,
....@@ -1187,10 +1486,8 @@
11871486 if (ret != 1)
11881487 return -EINVAL;
11891488
1190
- mutex_lock(&df->event_lock);
1191
- df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1489
+ df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
11921490 ret = count;
1193
- mutex_unlock(&df->event_lock);
11941491
11951492 return ret;
11961493 }
....@@ -1203,44 +1500,39 @@
12031500 unsigned long value;
12041501 int ret;
12051502
1503
+ /*
1504
+ * Protect against theoretical sysfs writes between
1505
+ * device_add and dev_pm_qos_add_request
1506
+ */
1507
+ if (!dev_pm_qos_request_active(&df->user_min_freq_req))
1508
+ return -EAGAIN;
1509
+
12061510 ret = sscanf(buf, "%lu", &value);
12071511 if (ret != 1)
12081512 return -EINVAL;
12091513
1210
- mutex_lock(&df->event_lock);
1211
- mutex_lock(&df->lock);
1514
+ /* Round down to kHz for PM QoS */
1515
+ ret = dev_pm_qos_update_request(&df->user_min_freq_req,
1516
+ value / HZ_PER_KHZ);
1517
+ if (ret < 0)
1518
+ return ret;
12121519
1213
- if (value) {
1214
- if (value > df->max_freq) {
1215
- ret = -EINVAL;
1216
- goto unlock;
1217
- }
1218
- } else {
1219
- unsigned long *freq_table = df->profile->freq_table;
1220
-
1221
- /* Get minimum frequency according to sorting order */
1222
- if (freq_table[0] < freq_table[df->profile->max_state - 1])
1223
- value = freq_table[0];
1224
- else
1225
- value = freq_table[df->profile->max_state - 1];
1226
- }
1227
-
1228
- df->min_freq = value;
1229
- update_devfreq(df);
1230
- ret = count;
1231
-unlock:
1232
- mutex_unlock(&df->lock);
1233
- mutex_unlock(&df->event_lock);
1234
- return ret;
1520
+ return count;
12351521 }
12361522
12371523 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
12381524 char *buf)
12391525 {
12401526 struct devfreq *df = to_devfreq(dev);
1527
+ unsigned long min_freq, max_freq;
12411528
1242
- return sprintf(buf, "%lu\n", max(df->policy.min, df->min_freq));
1529
+ mutex_lock(&df->lock);
1530
+ get_freq_range(df, &min_freq, &max_freq);
1531
+ mutex_unlock(&df->lock);
1532
+
1533
+ return sprintf(buf, "%lu\n", min_freq);
12431534 }
1535
+static DEVICE_ATTR_RW(min_freq);
12441536
12451537 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
12461538 const char *buf, size_t count)
....@@ -1249,44 +1541,50 @@
12491541 unsigned long value;
12501542 int ret;
12511543
1544
+ /*
1545
+ * Protect against theoretical sysfs writes between
1546
+ * device_add and dev_pm_qos_add_request
1547
+ */
1548
+ if (!dev_pm_qos_request_active(&df->user_max_freq_req))
1549
+ return -EINVAL;
1550
+
12521551 ret = sscanf(buf, "%lu", &value);
12531552 if (ret != 1)
12541553 return -EINVAL;
12551554
1256
- mutex_lock(&df->event_lock);
1257
- mutex_lock(&df->lock);
1555
+ /*
1556
+ * PM QoS frequencies are in kHz so we need to convert. Convert by
1557
+ * rounding upwards so that the acceptable interval never shrinks.
1558
+ *
1559
+ * For example if the user writes "666666666" to sysfs this value will
1560
+ * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1561
+ * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1562
+ *
1563
+ * A value of zero means "no limit".
1564
+ */
1565
+ if (value)
1566
+ value = DIV_ROUND_UP(value, HZ_PER_KHZ);
1567
+ else
1568
+ value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
12581569
1259
- if (value) {
1260
- if (value < df->min_freq) {
1261
- ret = -EINVAL;
1262
- goto unlock;
1263
- }
1264
- } else {
1265
- unsigned long *freq_table = df->profile->freq_table;
1570
+ ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
1571
+ if (ret < 0)
1572
+ return ret;
12661573
1267
- /* Get maximum frequency according to sorting order */
1268
- if (freq_table[0] < freq_table[df->profile->max_state - 1])
1269
- value = freq_table[df->profile->max_state - 1];
1270
- else
1271
- value = freq_table[0];
1272
- }
1273
-
1274
- df->max_freq = value;
1275
- update_devfreq(df);
1276
- ret = count;
1277
-unlock:
1278
- mutex_unlock(&df->lock);
1279
- mutex_unlock(&df->event_lock);
1280
- return ret;
1574
+ return count;
12811575 }
1282
-static DEVICE_ATTR_RW(min_freq);
12831576
12841577 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
12851578 char *buf)
12861579 {
12871580 struct devfreq *df = to_devfreq(dev);
1581
+ unsigned long min_freq, max_freq;
12881582
1289
- return sprintf(buf, "%lu\n", min(df->policy.max, df->max_freq));
1583
+ mutex_lock(&df->lock);
1584
+ get_freq_range(df, &min_freq, &max_freq);
1585
+ mutex_unlock(&df->lock);
1586
+
1587
+ return sprintf(buf, "%lu\n", max_freq);
12901588 }
12911589 static DEVICE_ATTR_RW(max_freq);
12921590
....@@ -1297,6 +1595,9 @@
12971595 struct devfreq *df = to_devfreq(d);
12981596 ssize_t count = 0;
12991597 int i;
1598
+
1599
+ if (!df->profile)
1600
+ return -EINVAL;
13001601
13011602 mutex_lock(&df->lock);
13021603
....@@ -1318,51 +1619,149 @@
13181619 static ssize_t trans_stat_show(struct device *dev,
13191620 struct device_attribute *attr, char *buf)
13201621 {
1321
- struct devfreq *devfreq = to_devfreq(dev);
1622
+ struct devfreq *df = to_devfreq(dev);
13221623 ssize_t len;
13231624 int i, j;
1324
- unsigned int max_state = devfreq->profile->max_state;
1625
+ unsigned int max_state;
1626
+
1627
+ if (!df->profile)
1628
+ return -EINVAL;
1629
+ max_state = df->profile->max_state;
13251630
13261631 if (max_state == 0)
13271632 return sprintf(buf, "Not Supported.\n");
13281633
1329
- mutex_lock(&devfreq->lock);
1330
- if (!devfreq->stop_polling &&
1331
- devfreq_update_status(devfreq, devfreq->previous_freq)) {
1332
- mutex_unlock(&devfreq->lock);
1634
+ mutex_lock(&df->lock);
1635
+ if (!df->stop_polling &&
1636
+ devfreq_update_status(df, df->previous_freq)) {
1637
+ mutex_unlock(&df->lock);
13331638 return 0;
13341639 }
1335
- mutex_unlock(&devfreq->lock);
1640
+ mutex_unlock(&df->lock);
13361641
13371642 len = sprintf(buf, " From : To\n");
13381643 len += sprintf(buf + len, " :");
13391644 for (i = 0; i < max_state; i++)
13401645 len += sprintf(buf + len, "%10lu",
1341
- devfreq->profile->freq_table[i]);
1646
+ df->profile->freq_table[i]);
13421647
13431648 len += sprintf(buf + len, " time(ms)\n");
13441649
13451650 for (i = 0; i < max_state; i++) {
1346
- if (devfreq->profile->freq_table[i]
1347
- == devfreq->previous_freq) {
1651
+ if (df->profile->freq_table[i]
1652
+ == df->previous_freq) {
13481653 len += sprintf(buf + len, "*");
13491654 } else {
13501655 len += sprintf(buf + len, " ");
13511656 }
13521657 len += sprintf(buf + len, "%10lu:",
1353
- devfreq->profile->freq_table[i]);
1658
+ df->profile->freq_table[i]);
13541659 for (j = 0; j < max_state; j++)
13551660 len += sprintf(buf + len, "%10u",
1356
- devfreq->trans_table[(i * max_state) + j]);
1357
- len += sprintf(buf + len, "%10u\n",
1358
- jiffies_to_msecs(devfreq->time_in_state[i]));
1661
+ df->stats.trans_table[(i * max_state) + j]);
1662
+
1663
+ len += sprintf(buf + len, "%10llu\n", (u64)
1664
+ jiffies64_to_msecs(df->stats.time_in_state[i]));
13591665 }
13601666
13611667 len += sprintf(buf + len, "Total transition : %u\n",
1362
- devfreq->total_trans);
1668
+ df->stats.total_trans);
13631669 return len;
13641670 }
1365
-static DEVICE_ATTR_RO(trans_stat);
1671
+
1672
+static ssize_t trans_stat_store(struct device *dev,
1673
+ struct device_attribute *attr,
1674
+ const char *buf, size_t count)
1675
+{
1676
+ struct devfreq *df = to_devfreq(dev);
1677
+ int err, value;
1678
+
1679
+ if (!df->profile)
1680
+ return -EINVAL;
1681
+
1682
+ if (df->profile->max_state == 0)
1683
+ return count;
1684
+
1685
+ err = kstrtoint(buf, 10, &value);
1686
+ if (err || value != 0)
1687
+ return -EINVAL;
1688
+
1689
+ mutex_lock(&df->lock);
1690
+ memset(df->stats.time_in_state, 0, (df->profile->max_state *
1691
+ sizeof(*df->stats.time_in_state)));
1692
+ memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
1693
+ df->profile->max_state,
1694
+ df->profile->max_state));
1695
+ df->stats.total_trans = 0;
1696
+ df->stats.last_update = get_jiffies_64();
1697
+ mutex_unlock(&df->lock);
1698
+
1699
+ return count;
1700
+}
1701
+static DEVICE_ATTR_RW(trans_stat);
1702
+
1703
+static ssize_t timer_show(struct device *dev,
1704
+ struct device_attribute *attr, char *buf)
1705
+{
1706
+ struct devfreq *df = to_devfreq(dev);
1707
+
1708
+ if (!df->profile)
1709
+ return -EINVAL;
1710
+
1711
+ return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
1712
+}
1713
+
1714
+static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
1715
+ const char *buf, size_t count)
1716
+{
1717
+ struct devfreq *df = to_devfreq(dev);
1718
+ char str_timer[DEVFREQ_NAME_LEN + 1];
1719
+ int timer = -1;
1720
+ int ret = 0, i;
1721
+
1722
+ if (!df->governor || !df->profile)
1723
+ return -EINVAL;
1724
+
1725
+ ret = sscanf(buf, "%16s", str_timer);
1726
+ if (ret != 1)
1727
+ return -EINVAL;
1728
+
1729
+ for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
1730
+ if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
1731
+ timer = i;
1732
+ break;
1733
+ }
1734
+ }
1735
+
1736
+ if (timer < 0) {
1737
+ ret = -EINVAL;
1738
+ goto out;
1739
+ }
1740
+
1741
+ if (df->profile->timer == timer) {
1742
+ ret = 0;
1743
+ goto out;
1744
+ }
1745
+
1746
+ mutex_lock(&df->lock);
1747
+ df->profile->timer = timer;
1748
+ mutex_unlock(&df->lock);
1749
+
1750
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1751
+ if (ret) {
1752
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1753
+ __func__, df->governor->name, ret);
1754
+ goto out;
1755
+ }
1756
+
1757
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1758
+ if (ret)
1759
+ dev_warn(dev, "%s: Governor %s not started(%d)\n",
1760
+ __func__, df->governor->name, ret);
1761
+out:
1762
+ return ret ? ret : count;
1763
+}
1764
+static DEVICE_ATTR_RW(timer);
13661765
13671766 static ssize_t load_show(struct device *dev, struct device_attribute *attr,
13681767 char *buf)
....@@ -1389,8 +1788,8 @@
13891788 len = sprintf(buf, "%lu", stat.busy_time * 100 / stat.total_time);
13901789
13911790 if (devfreq->profile->get_cur_freq &&
1392
- !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1393
- len += sprintf(buf + len, "@%luHz\n", freq);
1791
+ !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1792
+ len += sprintf(buf + len, "@%luHz\n", freq);
13941793 else
13951794 len += sprintf(buf + len, "@%luHz\n", devfreq->previous_freq);
13961795
....@@ -1409,10 +1808,88 @@
14091808 &dev_attr_min_freq.attr,
14101809 &dev_attr_max_freq.attr,
14111810 &dev_attr_trans_stat.attr,
1811
+ &dev_attr_timer.attr,
14121812 &dev_attr_load.attr,
14131813 NULL,
14141814 };
14151815 ATTRIBUTE_GROUPS(devfreq);
1816
+
1817
+/**
1818
+ * devfreq_summary_show() - Show the summary of the devfreq devices
1819
+ * @s: seq_file instance to show the summary of devfreq devices
1820
+ * @data: not used
1821
+ *
1822
+ * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
1823
+ * It helps that user can know the detailed information of the devfreq devices.
1824
+ *
1825
+ * Return 0 always because it shows the information without any data change.
1826
+ */
1827
+static int devfreq_summary_show(struct seq_file *s, void *data)
1828
+{
1829
+ struct devfreq *devfreq;
1830
+ struct devfreq *p_devfreq = NULL;
1831
+ unsigned long cur_freq, min_freq, max_freq;
1832
+ unsigned int polling_ms;
1833
+ unsigned int timer;
1834
+
1835
+ seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n",
1836
+ "dev",
1837
+ "parent_dev",
1838
+ "governor",
1839
+ "timer",
1840
+ "polling_ms",
1841
+ "cur_freq_Hz",
1842
+ "min_freq_Hz",
1843
+ "max_freq_Hz");
1844
+ seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n",
1845
+ "------------------------------",
1846
+ "------------------------------",
1847
+ "---------------",
1848
+ "----------",
1849
+ "----------",
1850
+ "------------",
1851
+ "------------",
1852
+ "------------");
1853
+
1854
+ mutex_lock(&devfreq_list_lock);
1855
+
1856
+ list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
1857
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
1858
+ if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
1859
+ DEVFREQ_NAME_LEN)) {
1860
+ struct devfreq_passive_data *data = devfreq->data;
1861
+
1862
+ if (data)
1863
+ p_devfreq = data->parent;
1864
+ } else {
1865
+ p_devfreq = NULL;
1866
+ }
1867
+#endif
1868
+
1869
+ mutex_lock(&devfreq->lock);
1870
+ cur_freq = devfreq->previous_freq;
1871
+ get_freq_range(devfreq, &min_freq, &max_freq);
1872
+ polling_ms = devfreq->profile->polling_ms;
1873
+ timer = devfreq->profile->timer;
1874
+ mutex_unlock(&devfreq->lock);
1875
+
1876
+ seq_printf(s,
1877
+ "%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n",
1878
+ dev_name(&devfreq->dev),
1879
+ p_devfreq ? dev_name(&p_devfreq->dev) : "null",
1880
+ devfreq->governor_name,
1881
+ polling_ms ? timer_name[timer] : "null",
1882
+ polling_ms,
1883
+ cur_freq,
1884
+ min_freq,
1885
+ max_freq);
1886
+ }
1887
+
1888
+ mutex_unlock(&devfreq_list_lock);
1889
+
1890
+ return 0;
1891
+}
1892
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
14161893
14171894 static int __init devfreq_init(void)
14181895 {
....@@ -1429,6 +1906,11 @@
14291906 return -ENOMEM;
14301907 }
14311908 devfreq_class->dev_groups = devfreq_groups;
1909
+
1910
+ devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
1911
+ debugfs_create_file("devfreq_summary", 0444,
1912
+ devfreq_debugfs, NULL,
1913
+ &devfreq_summary_fops);
14321914
14331915 return 0;
14341916 }
....@@ -1477,8 +1959,8 @@
14771959
14781960 /**
14791961 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1480
- * for any changes in the OPP availability
1481
- * changes
1962
+ * for any changes in the OPP availability
1963
+ * changes
14821964 * @dev: The devfreq user device. (parent of devfreq)
14831965 * @devfreq: The devfreq object.
14841966 */
....@@ -1490,8 +1972,8 @@
14901972
14911973 /**
14921974 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1493
- * notified for any changes in the OPP
1494
- * availability changes anymore.
1975
+ * notified for any changes in the OPP
1976
+ * availability changes anymore.
14951977 * @dev: The devfreq user device. (parent of devfreq)
14961978 * @devfreq: The devfreq object.
14971979 *
....@@ -1510,8 +1992,8 @@
15101992 }
15111993
15121994 /**
1513
- * devm_ devfreq_register_opp_notifier()
1514
- * - Resource-managed devfreq_register_opp_notifier()
1995
+ * devm_devfreq_register_opp_notifier() - Resource-managed
1996
+ * devfreq_register_opp_notifier()
15151997 * @dev: The devfreq user device. (parent of devfreq)
15161998 * @devfreq: The devfreq object.
15171999 */
....@@ -1539,8 +2021,8 @@
15392021 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
15402022
15412023 /**
1542
- * devm_devfreq_unregister_opp_notifier()
1543
- * - Resource-managed devfreq_unregister_opp_notifier()
2024
+ * devm_devfreq_unregister_opp_notifier() - Resource-managed
2025
+ * devfreq_unregister_opp_notifier()
15442026 * @dev: The devfreq user device. (parent of devfreq)
15452027 * @devfreq: The devfreq object.
15462028 */
....@@ -1556,11 +2038,11 @@
15562038 * devfreq_register_notifier() - Register a driver with devfreq
15572039 * @devfreq: The devfreq object.
15582040 * @nb: The notifier block to register.
1559
- * @list: DEVFREQ_TRANSITION_NOTIFIER or DEVFREQ_POLICY_NOTIFIER.
2041
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
15602042 */
15612043 int devfreq_register_notifier(struct devfreq *devfreq,
1562
- struct notifier_block *nb,
1563
- unsigned int list)
2044
+ struct notifier_block *nb,
2045
+ unsigned int list)
15642046 {
15652047 int ret = 0;
15662048
....@@ -1571,10 +2053,6 @@
15712053 case DEVFREQ_TRANSITION_NOTIFIER:
15722054 ret = srcu_notifier_chain_register(
15732055 &devfreq->transition_notifier_list, nb);
1574
- break;
1575
- case DEVFREQ_POLICY_NOTIFIER:
1576
- ret = srcu_notifier_chain_register(
1577
- &devfreq->policy_notifier_list, nb);
15782056 break;
15792057 default:
15802058 ret = -EINVAL;
....@@ -1588,7 +2066,7 @@
15882066 * devfreq_unregister_notifier() - Unregister a driver with devfreq
15892067 * @devfreq: The devfreq object.
15902068 * @nb: The notifier block to be unregistered.
1591
- * @list: DEVFREQ_TRANSITION_NOTIFIER or DEVFREQ_POLICY_NOTIFIER.
2069
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
15922070 */
15932071 int devfreq_unregister_notifier(struct devfreq *devfreq,
15942072 struct notifier_block *nb,
....@@ -1603,10 +2081,6 @@
16032081 case DEVFREQ_TRANSITION_NOTIFIER:
16042082 ret = srcu_notifier_chain_unregister(
16052083 &devfreq->transition_notifier_list, nb);
1606
- break;
1607
- case DEVFREQ_POLICY_NOTIFIER:
1608
- ret = srcu_notifier_chain_unregister(
1609
- &devfreq->policy_notifier_list, nb);
16102084 break;
16112085 default:
16122086 ret = -EINVAL;
....@@ -1631,7 +2105,7 @@
16312105
16322106 /**
16332107 * devm_devfreq_register_notifier()
1634
- - Resource-managed devfreq_register_notifier()
2108
+ * - Resource-managed devfreq_register_notifier()
16352109 * @dev: The devfreq user device. (parent of devfreq)
16362110 * @devfreq: The devfreq object.
16372111 * @nb: The notifier block to be unregistered.
....@@ -1667,16 +2141,16 @@
16672141
16682142 /**
16692143 * devm_devfreq_unregister_notifier()
1670
- - Resource-managed devfreq_unregister_notifier()
2144
+ * - Resource-managed devfreq_unregister_notifier()
16712145 * @dev: The devfreq user device. (parent of devfreq)
16722146 * @devfreq: The devfreq object.
16732147 * @nb: The notifier block to be unregistered.
16742148 * @list: DEVFREQ_TRANSITION_NOTIFIER.
16752149 */
16762150 void devm_devfreq_unregister_notifier(struct device *dev,
1677
- struct devfreq *devfreq,
1678
- struct notifier_block *nb,
1679
- unsigned int list)
2151
+ struct devfreq *devfreq,
2152
+ struct notifier_block *nb,
2153
+ unsigned int list)
16802154 {
16812155 WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
16822156 devm_devfreq_dev_match, devfreq));