2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/devfreq/devfreq.c
@@ -1,18 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
  *	    for Non-CPU Devices.
  *
  * Copyright (C) 2011 Samsung Electronics
  *	MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <linux/sched.h>
+#include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -27,9 +25,16 @@
 #include <linux/printk.h>
 #include <linux/hrtimer.h>
 #include <linux/of.h>
+#include <linux/pm_qos.h>
 #include "governor.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/devfreq.h>
+
+#define HZ_PER_KHZ	1000
+
 static struct class *devfreq_class;
+static struct dentry *devfreq_debugfs;
 
 /*
  * devfreq core provides delayed work based load monitoring helper
@@ -44,6 +49,11 @@
 static LIST_HEAD(devfreq_list);
 static DEFINE_MUTEX(devfreq_list_lock);
 
+static const char timer_name[][DEVFREQ_NAME_LEN] = {
+	[DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" },
+	[DEVFREQ_TIMER_DELAYED] = { "delayed" },
+};
+
 /**
  * find_device_devfreq() - find devfreq struct using device pointer
  * @dev: device pointer used to lookup device devfreq.
@@ -55,12 +65,12 @@
 {
 	struct devfreq *tmp_devfreq;
 
+	lockdep_assert_held(&devfreq_list_lock);
+
 	if (IS_ERR_OR_NULL(dev)) {
 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
-	WARN(!mutex_is_locked(&devfreq_list_lock),
-	     "devfreq_list_lock must be locked.");
 
 	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
 		if (tmp_devfreq->dev.parent == dev)
@@ -96,6 +106,54 @@
 	dev_pm_opp_put(opp);
 
 	return max_freq;
+}
+
+/**
+ * get_freq_range() - Get the current freq range
+ * @devfreq:	the devfreq instance
+ * @min_freq:	the min frequency
+ * @max_freq:	the max frequency
+ *
+ * This takes into consideration all constraints.
+ */
+static void get_freq_range(struct devfreq *devfreq,
+			   unsigned long *min_freq,
+			   unsigned long *max_freq)
+{
+	unsigned long *freq_table = devfreq->profile->freq_table;
+	s32 qos_min_freq, qos_max_freq;
+
+	lockdep_assert_held(&devfreq->lock);
+
+	/*
+	 * Initialize minimum/maximum frequency from freq table.
+	 * The devfreq drivers can initialize this in either ascending or
+	 * descending order and devfreq core supports both.
+	 */
+	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
+		*min_freq = freq_table[0];
+		*max_freq = freq_table[devfreq->profile->max_state - 1];
+	} else {
+		*min_freq = freq_table[devfreq->profile->max_state - 1];
+		*max_freq = freq_table[0];
+	}
+
+	/* Apply constraints from PM QoS */
+	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
+					     DEV_PM_QOS_MIN_FREQUENCY);
+	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
+					     DEV_PM_QOS_MAX_FREQUENCY);
+	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
+	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
+		*max_freq = min(*max_freq,
+				(unsigned long)HZ_PER_KHZ * qos_max_freq);
+
+	/* Apply constraints from OPP interface */
+	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
+	*max_freq = min(*max_freq, devfreq->scaling_max_freq);
+
+	if (*min_freq > *max_freq)
+		*min_freq = *max_freq;
 }
 
 /**
@@ -158,10 +216,10 @@
 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 {
 	int lev, prev_lev, ret = 0;
-	unsigned long cur_time;
+	u64 cur_time;
 
 	lockdep_assert_held(&devfreq->lock);
-	cur_time = jiffies;
+	cur_time = get_jiffies_64();
 
 	/* Immediately exit if previous_freq is not initialized yet. */
 	if (!devfreq->previous_freq)
@@ -173,8 +231,8 @@
 		goto out;
 	}
 
-	devfreq->time_in_state[prev_lev] +=
-			cur_time - devfreq->last_stat_updated;
+	devfreq->stats.time_in_state[prev_lev] +=
+			cur_time - devfreq->stats.last_update;
 
 	lev = devfreq_get_freq_level(devfreq, freq);
 	if (lev < 0) {
@@ -183,13 +241,13 @@
 	}
 
 	if (lev != prev_lev) {
-		devfreq->trans_table[(prev_lev *
-				devfreq->profile->max_state) + lev]++;
-		devfreq->total_trans++;
+		devfreq->stats.trans_table[
+			(prev_lev * devfreq->profile->max_state) + lev]++;
+		devfreq->stats.total_trans++;
 	}
 
 out:
-	devfreq->last_stat_updated = cur_time;
+	devfreq->stats.last_update = cur_time;
 	return ret;
 }
 EXPORT_SYMBOL(devfreq_update_status);
@@ -205,12 +263,12 @@
 {
 	struct devfreq_governor *tmp_governor;
 
+	lockdep_assert_held(&devfreq_list_lock);
+
 	if (IS_ERR_OR_NULL(name)) {
 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
-	WARN(!mutex_is_locked(&devfreq_list_lock),
-	     "devfreq_list_lock must be locked.");
 
 	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
 		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
@@ -236,12 +294,12 @@
 	struct devfreq_governor *governor;
 	int err = 0;
 
+	lockdep_assert_held(&devfreq_list_lock);
+
 	if (IS_ERR_OR_NULL(name)) {
 		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
-	WARN(!mutex_is_locked(&devfreq_list_lock),
-	     "devfreq_list_lock must be locked.");
 
 	governor = find_devfreq_governor(name);
 	if (IS_ERR(governor)) {
@@ -286,6 +344,44 @@
 	return 0;
 }
 
+static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
+			      u32 flags)
+{
+	struct devfreq_freqs freqs;
+	unsigned long cur_freq;
+	int err = 0;
+
+	if (devfreq->profile->get_cur_freq)
+		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+	else
+		cur_freq = devfreq->previous_freq;
+
+	freqs.old = cur_freq;
+	freqs.new = new_freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
+	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
+	if (err) {
+		freqs.new = cur_freq;
+		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+		return err;
+	}
+
+	freqs.new = new_freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+
+	if (devfreq_update_status(devfreq, new_freq))
+		dev_err(&devfreq->dev,
+			"Couldn't update frequency transition information.\n");
+
+	devfreq->previous_freq = new_freq;
+
+	if (devfreq->suspend_freq)
+		devfreq->resume_freq = new_freq;
+
+	return err;
+}
+
 /* Load monitoring helper functions for governors use */
 
 /**
@@ -297,39 +393,20 @@
 */
 int update_devfreq(struct devfreq *devfreq)
 {
-	struct devfreq_policy *policy = &devfreq->policy;
-	struct devfreq_freqs freqs;
-	unsigned long freq, cur_freq, min_freq, max_freq;
+	unsigned long freq, min_freq, max_freq;
 	int err = 0;
 	u32 flags = 0;
 
-	if (!mutex_is_locked(&devfreq->lock)) {
-		WARN(true, "devfreq->lock must be locked by the caller.\n");
-		return -EINVAL;
-	}
+	lockdep_assert_held(&devfreq->lock);
 
 	if (!devfreq->governor)
 		return -EINVAL;
-
-	policy->max = devfreq->scaling_max_freq;
-	policy->min = devfreq->scaling_min_freq;
-	srcu_notifier_call_chain(&devfreq->policy_notifier_list, DEVFREQ_ADJUST,
-				 policy);
 
 	/* Reevaluate the proper frequency */
 	err = devfreq->governor->get_target_freq(devfreq, &freq);
 	if (err)
 		return err;
-
-	/*
-	 * Adjust the frequency with user freq, QoS and available freq.
-	 *
-	 * List from the highest priority
-	 * max_freq
-	 * min_freq
-	 */
-	max_freq = min(policy->max, devfreq->max_freq);
-	min_freq = max(policy->min, devfreq->min_freq);
+	get_freq_range(devfreq, &min_freq, &max_freq);
 
 	if (freq < min_freq) {
 		freq = min_freq;
@@ -340,31 +417,8 @@
 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
 	}
 
-	if (devfreq->profile->get_cur_freq)
-		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
-	else
-		cur_freq = devfreq->previous_freq;
+	return devfreq_set_target(devfreq, freq, flags);
 
-
-	freqs.old = cur_freq;
-	freqs.new = freq;
-	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
-
-	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
-	if (err) {
-		freqs.new = cur_freq;
-		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
-		return err;
-	}
-
-	freqs.new = freq;
-	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
-
-	if (devfreq_update_status(devfreq, freq))
-		dev_err(&devfreq->dev,
-			"Couldn't update frequency transition information.\n");
-
-	devfreq->previous_freq = freq;
-	return err;
 }
@@ -387,20 +441,35 @@
 		queue_delayed_work(devfreq_wq, &devfreq->work,
 				msecs_to_jiffies(devfreq->profile->polling_ms));
 	mutex_unlock(&devfreq->lock);
+
+	trace_devfreq_monitor(devfreq);
 }
 
 /**
  * devfreq_monitor_start() - Start load monitoring of devfreq instance
  * @devfreq: the devfreq instance.
  *
- * Helper function for starting devfreq device load monitoing. By
+ * Helper function for starting devfreq device load monitoring. By
  * default delayed work based monitoring is supported. Function
  * to be called from governor in response to DEVFREQ_GOV_START
  * event when device is added to devfreq framework.
  */
 void devfreq_monitor_start(struct devfreq *devfreq)
 {
-	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+	if (devfreq->governor->interrupt_driven)
+		return;
+
+	switch (devfreq->profile->timer) {
+	case DEVFREQ_TIMER_DEFERRABLE:
+		INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+		break;
+	case DEVFREQ_TIMER_DELAYED:
+		INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
+		break;
+	default:
+		return;
+	}
+
 	if (devfreq->profile->polling_ms)
 		queue_delayed_work(devfreq_wq, &devfreq->work,
 			msecs_to_jiffies(devfreq->profile->polling_ms));
@@ -411,12 +480,15 @@
  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
  * @devfreq: the devfreq instance.
  *
- * Helper function to stop devfreq device load monitoing. Function
+ * Helper function to stop devfreq device load monitoring. Function
  * to be called from governor in response to DEVFREQ_GOV_STOP
  * event when device is removed from devfreq framework.
  */
 void devfreq_monitor_stop(struct devfreq *devfreq)
 {
+	if (devfreq->governor->interrupt_driven)
+		return;
+
 	cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -425,7 +497,7 @@
  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
  * @devfreq: the devfreq instance.
  *
- * Helper function to suspend devfreq device load monitoing. Function
+ * Helper function to suspend devfreq device load monitoring. Function
  * to be called from governor in response to DEVFREQ_GOV_SUSPEND
  * event or when polling interval is set to zero.
  *
@@ -444,6 +516,10 @@
 		devfreq_update_status(devfreq, devfreq->previous_freq);
 	devfreq->stop_polling = true;
 	mutex_unlock(&devfreq->lock);
+
+	if (devfreq->governor->interrupt_driven)
+		return;
+
 	cancel_delayed_work_sync(&devfreq->work);
 }
 EXPORT_SYMBOL(devfreq_monitor_suspend);
@@ -452,7 +528,7 @@
  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
  * @devfreq: the devfreq instance.
  *
- * Helper function to resume devfreq device load monitoing. Function
+ * Helper function to resume devfreq device load monitoring. Function
  * to be called from governor in response to DEVFREQ_GOV_RESUME
  * event or when polling interval is set to non-zero.
  */
@@ -464,12 +540,16 @@
 	if (!devfreq->stop_polling)
 		goto out;
 
+	if (devfreq->governor->interrupt_driven)
+		goto out_update;
+
 	if (!delayed_work_pending(&devfreq->work) &&
 			devfreq->profile->polling_ms)
 		queue_delayed_work(devfreq_wq, &devfreq->work,
 			msecs_to_jiffies(devfreq->profile->polling_ms));
 
-	devfreq->last_stat_updated = jiffies;
+out_update:
+	devfreq->stats.last_update = get_jiffies_64();
 	devfreq->stop_polling = false;
 
 	if (devfreq->profile->get_cur_freq &&
@@ -482,14 +562,14 @@
 EXPORT_SYMBOL(devfreq_monitor_resume);
 
 /**
- * devfreq_interval_update() - Update device devfreq monitoring interval
+ * devfreq_update_interval() - Update device devfreq monitoring interval
  * @devfreq: the devfreq instance.
  * @delay: new polling interval to be set.
  *
  * Helper function to set new load monitoring polling interval. Function
- * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
  */
-void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
 {
 	unsigned int cur_delay = devfreq->profile->polling_ms;
 	unsigned int new_delay = *delay;
@@ -498,6 +578,9 @@
 	devfreq->profile->polling_ms = new_delay;
 
 	if (devfreq->stop_polling)
+		goto out;
+
+	if (devfreq->governor->interrupt_driven)
 		goto out;
 
 	/* if new delay is zero, stop polling */
@@ -521,16 +604,16 @@
 	mutex_lock(&devfreq->lock);
 	if (!devfreq->stop_polling)
 		queue_delayed_work(devfreq_wq, &devfreq->work,
-			msecs_to_jiffies(devfreq->profile->polling_ms));
+				   msecs_to_jiffies(devfreq->profile->polling_ms));
 	}
 out:
 	mutex_unlock(&devfreq->lock);
 }
-EXPORT_SYMBOL(devfreq_interval_update);
+EXPORT_SYMBOL(devfreq_update_interval);
 
 /**
  * devfreq_notifier_call() - Notify that the device frequency requirements
- * has been changed out of devfreq framework.
+ *			     has been changed out of devfreq framework.
  * @nb: the notifier_block (supposed to be devfreq->nb)
  * @type: not used
  * @devp: not used
@@ -546,6 +629,8 @@
 	mutex_lock(&devfreq->lock);
 
 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+	if (!devfreq->scaling_min_freq)
+		goto out;
 
 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
 	if (!devfreq->scaling_max_freq) {
@@ -566,6 +651,45 @@
 }
 
 /**
+ * qos_notifier_call() - Common handler for QoS constraints.
+ * @devfreq:	the devfreq instance.
+ */
+static int qos_notifier_call(struct devfreq *devfreq)
+{
+	int err;
+
+	mutex_lock(&devfreq->lock);
+	err = update_devfreq(devfreq);
+	mutex_unlock(&devfreq->lock);
+	if (err)
+		dev_err(devfreq->dev.parent,
+			"failed to update frequency from PM QoS (%d)\n",
+			err);
+
+	return NOTIFY_OK;
+}
+
+/**
+ * qos_min_notifier_call() - Callback for QoS min_freq changes.
+ * @nb:		Should be devfreq->nb_min
+ */
+static int qos_min_notifier_call(struct notifier_block *nb,
+					 unsigned long val, void *ptr)
+{
+	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
+}
+
+/**
+ * qos_max_notifier_call() - Callback for QoS max_freq changes.
+ * @nb:		Should be devfreq->nb_max
+ */
+static int qos_max_notifier_call(struct notifier_block *nb,
+					 unsigned long val, void *ptr)
+{
+	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
+}
+
+/**
  * devfreq_dev_release() - Callback for struct device to release the device.
  * @dev: the devfreq device
  *
@@ -574,16 +698,41 @@
 static void devfreq_dev_release(struct device *dev)
 {
 	struct devfreq *devfreq = to_devfreq(dev);
+	int err;
 
 	mutex_lock(&devfreq_list_lock);
 	list_del(&devfreq->node);
 	mutex_unlock(&devfreq_list_lock);
 
+	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
+					 DEV_PM_QOS_MAX_FREQUENCY);
+	if (err && err != -ENOENT)
+		dev_warn(dev->parent,
+			"Failed to remove max_freq notifier: %d\n", err);
+	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
+					 DEV_PM_QOS_MIN_FREQUENCY);
+	if (err && err != -ENOENT)
+		dev_warn(dev->parent,
+			"Failed to remove min_freq notifier: %d\n", err);
+
+	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
+		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
+		if (err < 0)
+			dev_warn(dev->parent,
+				"Failed to remove max_freq request: %d\n", err);
+	}
+	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
+		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
+		if (err < 0)
+			dev_warn(dev->parent,
+				"Failed to remove min_freq request: %d\n", err);
+	}
+
 	if (devfreq->profile->exit)
 		devfreq->profile->exit(devfreq->dev.parent);
 
 	mutex_destroy(&devfreq->lock);
-	mutex_destroy(&devfreq->event_lock);
+	srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
 	kfree(devfreq);
 }
 
@@ -613,7 +762,7 @@
 	devfreq = find_device_devfreq(dev);
 	mutex_unlock(&devfreq_list_lock);
 	if (!IS_ERR(devfreq)) {
-		dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+		dev_err(dev, "%s: devfreq device already exists!\n",
 			__func__);
 		err = -EINVAL;
 		goto err_out;
@@ -626,30 +775,39 @@
 	}
 
 	mutex_init(&devfreq->lock);
-	mutex_init(&devfreq->event_lock);
 	mutex_lock(&devfreq->lock);
 	devfreq->dev.parent = dev;
 	devfreq->dev.class = devfreq_class;
 	devfreq->dev.release = devfreq_dev_release;
 	INIT_LIST_HEAD(&devfreq->node);
 	devfreq->profile = profile;
-	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
+	strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 	devfreq->previous_freq = profile->initial_freq;
 	devfreq->last_status.current_frequency = profile->initial_freq;
 	devfreq->data = data;
 	devfreq->nb.notifier_call = devfreq_notifier_call;
 
+	if (devfreq->profile->timer < 0
+		|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
+		mutex_unlock(&devfreq->lock);
+		err = -EINVAL;
+		goto err_dev;
+	}
+
 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
 		mutex_unlock(&devfreq->lock);
 		err = set_freq_table(devfreq);
 		if (err < 0)
-			goto err_out;
+			goto err_dev;
 		mutex_lock(&devfreq->lock);
 	}
 
 	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
-	devfreq->min_freq = devfreq->scaling_min_freq;
-	devfreq->policy.min = devfreq->min_freq;
+	if (!devfreq->scaling_min_freq) {
+		mutex_unlock(&devfreq->lock);
+		err = -EINVAL;
+		goto err_dev;
+	}
 
 	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
 	if (!devfreq->scaling_max_freq) {
@@ -657,8 +815,9 @@
 		err = -EINVAL;
 		goto err_dev;
 	}
-	devfreq->max_freq = devfreq->scaling_max_freq;
-	devfreq->policy.max = devfreq->max_freq;
+
+	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
+	atomic_set(&devfreq->suspend_count, 0);
 
 	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
 	err = device_register(&devfreq->dev);
@@ -668,22 +827,55 @@
 		goto err_out;
 	}
 
-	devfreq->trans_table =
-		devm_kzalloc(&devfreq->dev,
-			     array3_size(sizeof(unsigned int),
-					 devfreq->profile->max_state,
-					 devfreq->profile->max_state),
-			     GFP_KERNEL);
-	devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
-						devfreq->profile->max_state,
-						sizeof(unsigned long),
-						GFP_KERNEL);
-	devfreq->last_stat_updated = jiffies;
+	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
+			array3_size(sizeof(unsigned int),
+				    devfreq->profile->max_state,
+				    devfreq->profile->max_state),
+			GFP_KERNEL);
+	if (!devfreq->stats.trans_table) {
+		mutex_unlock(&devfreq->lock);
+		err = -ENOMEM;
+		goto err_devfreq;
+	}
+
+	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
+			devfreq->profile->max_state,
+			sizeof(*devfreq->stats.time_in_state),
+			GFP_KERNEL);
+	if (!devfreq->stats.time_in_state) {
+		mutex_unlock(&devfreq->lock);
+		err = -ENOMEM;
+		goto err_devfreq;
+	}
+
+	devfreq->stats.total_trans = 0;
+	devfreq->stats.last_update = get_jiffies_64();
 
 	srcu_init_notifier_head(&devfreq->transition_notifier_list);
-	srcu_init_notifier_head(&devfreq->policy_notifier_list);
 
 	mutex_unlock(&devfreq->lock);
+
+	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
+				     DEV_PM_QOS_MIN_FREQUENCY, 0);
+	if (err < 0)
+		goto err_devfreq;
+	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
+				     DEV_PM_QOS_MAX_FREQUENCY,
+				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+	if (err < 0)
+		goto err_devfreq;
+
+	devfreq->nb_min.notifier_call = qos_min_notifier_call;
+	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
+				      DEV_PM_QOS_MIN_FREQUENCY);
+	if (err)
+		goto err_devfreq;
+
+	devfreq->nb_max.notifier_call = qos_max_notifier_call;
+	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
+				      DEV_PM_QOS_MAX_FREQUENCY);
+	if (err)
+		goto err_devfreq;
 
 	mutex_lock(&devfreq_list_lock);
 
@@ -712,12 +904,11 @@
 
 err_init:
 	mutex_unlock(&devfreq_list_lock);
-
+err_devfreq:
 	devfreq_remove_device(devfreq);
 	devfreq = NULL;
 err_dev:
-	if (devfreq)
-		kfree(devfreq);
+	kfree(devfreq);
 err_out:
 	return ERR_PTR(err);
 }
@@ -796,52 +987,79 @@
 
 #ifdef CONFIG_OF
 /*
- * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
- * @dev - instance to the given device
- * @index - index into list of devfreq
+ * devfreq_get_devfreq_by_node - Get the devfreq device from devicetree
+ * @node - pointer to device_node
 *
 * return the instance of devfreq device
 */
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
 {
-	struct device_node *node;
	struct devfreq *devfreq;
 
-	if (!dev)
-		return ERR_PTR(-EINVAL);
-
-	if (!dev->of_node)
-		return ERR_PTR(-EINVAL);
-
-	node = of_parse_phandle(dev->of_node, "devfreq", index);
 	if (!node)
-		return ERR_PTR(-ENODEV);
+		return ERR_PTR(-EINVAL);
 
 	mutex_lock(&devfreq_list_lock);
 	list_for_each_entry(devfreq, &devfreq_list, node) {
 		if (devfreq->dev.parent
		    && devfreq->dev.parent->of_node == node) {
 			mutex_unlock(&devfreq_list_lock);
-			of_node_put(node);
 			return devfreq;
 		}
 	}
 	mutex_unlock(&devfreq_list_lock);
+
+	return ERR_PTR(-ENODEV);
+}
+
+/*
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
+ * @dev - instance to the given device
+ * @phandle_name - name of property holding a phandle value
+ * @index - index into list of devfreq
+ *
+ * return the instance of devfreq device
+ */
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+					const char *phandle_name, int index)
+{
+	struct device_node *node;
+	struct devfreq *devfreq;
+
+	if (!dev || !phandle_name)
+		return ERR_PTR(-EINVAL);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	node = of_parse_phandle(dev->of_node, phandle_name, index);
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	devfreq = devfreq_get_devfreq_by_node(node);
 	of_node_put(node);
 
-	return ERR_PTR(-EPROBE_DEFER);
+	return devfreq;
 }
+
 #else
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+					const char *phandle_name, int index)
 {
 	return ERR_PTR(-ENODEV);
 }
 #endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node);
 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
 
 /**
 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
- * @dev: the device to add devfreq feature.
+ * @dev: the device from which to remove devfreq feature.
 * @devfreq: the devfreq instance to be removed
 */
 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
@@ -866,14 +1084,25 @@
 	if (!devfreq)
 		return -EINVAL;
 
-	if (!devfreq->governor)
+	if (atomic_inc_return(&devfreq->suspend_count) > 1)
 		return 0;
 
-	mutex_lock(&devfreq->event_lock);
-	ret = devfreq->governor->event_handler(devfreq,
-				DEVFREQ_GOV_SUSPEND, NULL);
-	mutex_unlock(&devfreq->event_lock);
-	return ret;
+	if (devfreq->governor) {
+		ret = devfreq->governor->event_handler(devfreq,
+					DEVFREQ_GOV_SUSPEND, NULL);
+		if (ret)
+			return ret;
+	}
+
+	if (devfreq->suspend_freq) {
+		mutex_lock(&devfreq->lock);
+		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
+		mutex_unlock(&devfreq->lock);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(devfreq_suspend_device);
 
@@ -888,19 +1117,75 @@
 int devfreq_resume_device(struct devfreq *devfreq)
 {
 	int ret;
+
 	if (!devfreq)
 		return -EINVAL;
 
-	if (!devfreq->governor)
+	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
 		return 0;
 
-	mutex_lock(&devfreq->event_lock);
-	ret = devfreq->governor->event_handler(devfreq,
-				DEVFREQ_GOV_RESUME, NULL);
-	mutex_unlock(&devfreq->event_lock);
-	return ret;
+	if (devfreq->resume_freq) {
+		mutex_lock(&devfreq->lock);
+		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
+		mutex_unlock(&devfreq->lock);
+		if (ret)
+			return ret;
+	}
+
+	if (devfreq->governor) {
+		ret = devfreq->governor->event_handler(devfreq,
+					DEVFREQ_GOV_RESUME, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_suspend() - Suspend devfreq governors and devices
+ *
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
+ * and devices preserving the state for resume. On some platforms the devfreq
+ * device must have precise state (frequency) after resume in order to provide
+ * fully operating setup.
+ */
+void devfreq_suspend(void)
+{
+	struct devfreq *devfreq;
+	int ret;
+
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		ret = devfreq_suspend_device(devfreq);
+		if (ret)
+			dev_err(&devfreq->dev,
+				"failed to suspend devfreq device\n");
+	}
+	mutex_unlock(&devfreq_list_lock);
+}
+
+/**
+ * devfreq_resume() - Resume devfreq governors and devices
+ *
+ * Called during system wide Suspend/Hibernate cycle for resuming governors and
+ * devices that are suspended with devfreq_suspend().
+ */
+void devfreq_resume(void)
+{
+	struct devfreq *devfreq;
+	int ret;
+
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		ret = devfreq_resume_device(devfreq);
+		if (ret)
+			dev_warn(&devfreq->dev,
+				"failed to resume devfreq device\n");
+	}
+	mutex_unlock(&devfreq_list_lock);
+}
 
 /**
 * devfreq_add_governor() - Add devfreq governor
@@ -1025,18 +1310,20 @@
 static ssize_t name_show(struct device *dev,
			struct device_attribute *attr, char *buf)
 {
-	struct devfreq *devfreq = to_devfreq(dev);
-	return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
+	struct devfreq *df = to_devfreq(dev);
+	return sprintf(buf, "%s\n", dev_name(df->dev.parent));
 }
 static DEVICE_ATTR_RO(name);
 
 static ssize_t governor_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
 {
-	if (!to_devfreq(dev)->governor)
+	struct devfreq *df = to_devfreq(dev);
+
+	if (!df->governor)
 		return -EINVAL;
 
-	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+	return sprintf(buf, "%s\n", df->governor->name);
 }
 
 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
@@ -1045,7 +1332,10 @@
 	struct devfreq *df = to_devfreq(dev);
 	int ret;
 	char str_governor[DEVFREQ_NAME_LEN + 1];
-	const struct devfreq_governor *governor, *prev_gov;
+	const struct devfreq_governor *governor, *prev_governor;
+
+	if (!df->governor)
+		return -EINVAL;
 
 	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
 	if (ret != 1)
@@ -1060,39 +1350,36 @@
 	if (df->governor == governor) {
 		ret = 0;
 		goto out;
-	} else if ((df->governor && df->governor->immutable) ||
-					governor->immutable) {
+	} else if (df->governor->immutable || governor->immutable) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	mutex_lock(&df->event_lock);
-	if (df->governor) {
-		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
-		if (ret) {
-			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
-				__func__, df->governor->name, ret);
-			goto gov_stop_out;
-		}
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+	if (ret) {
+		dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+			 __func__, df->governor->name, ret);
+		goto out;
 	}
-	prev_gov = df->governor;
+
+	prev_governor = df->governor;
 	df->governor = governor;
 	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
 	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
 	if (ret) {
 		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
-		if (prev_gov) {
-			df->governor = prev_gov;
-			strlcpy(df->governor_name, prev_gov->name,
-				DEVFREQ_NAME_LEN);
-			df->governor->event_handler(df, DEVFREQ_GOV_START,
-						NULL);
+		df->governor = prev_governor;
+		strncpy(df->governor_name, prev_governor->name,
+			DEVFREQ_NAME_LEN);
+		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+		if (ret) {
+			dev_err(dev,
+				"%s: reverting to Governor %s failed (%d)\n",
+				__func__, df->governor_name, ret);
+			df->governor = NULL;
 		}
 	}
-
-gov_stop_out:
-	mutex_unlock(&df->event_lock);
 out:
 	mutex_unlock(&devfreq_list_lock);
 
@@ -1109,15 +1396,18 @@
 	struct devfreq *df = to_devfreq(d);
 	ssize_t count = 0;
 
+	if (!df->governor)
+		return -EINVAL;
+
 	mutex_lock(&devfreq_list_lock);
 
 	/*
	 * The devfreq with immutable governor (e.g., passive) shows
	 * only own governor.
	 */
-	if (df->governor && df->governor->immutable) {
+	if (df->governor->immutable) {
 		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
-				   "%s ", df->governor_name);
+				  "%s ", df->governor_name);
 	/*
	 * The devfreq device shows the registered governor except for
	 * immutable governors such as passive governor .
@@ -1149,27 +1439,37 @@
			char *buf)
 {
 	unsigned long freq;
-	struct devfreq *devfreq = to_devfreq(dev);
+	struct devfreq *df = to_devfreq(dev);
 
-	if (devfreq->profile->get_cur_freq &&
-		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+	if (!df->profile)
+		return -EINVAL;
+
+	if (df->profile->get_cur_freq &&
+		!df->profile->get_cur_freq(df->dev.parent, &freq))
 		return sprintf(buf, "%lu\n", freq);
 
-	return sprintf(buf, "%lu\n", devfreq->previous_freq);
+	return sprintf(buf, "%lu\n", df->previous_freq);
 }
 static DEVICE_ATTR_RO(cur_freq);
 
 static ssize_t target_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
+	struct devfreq *df = to_devfreq(dev);
+
+	return sprintf(buf, "%lu\n", df->previous_freq);
 }
 static DEVICE_ATTR_RO(target_freq);
 
 static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
+	struct devfreq *df = to_devfreq(dev);
+
+	if (!df->profile)
+		return -EINVAL;
+
+	return sprintf(buf, "%d\n", df->profile->polling_ms);
 }
 
 static ssize_t polling_interval_store(struct device *dev,
@@ -1187,10 +1487,8 @@
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&df->event_lock);
-	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
+	df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
 	ret = count;
-	mutex_unlock(&df->event_lock);
 
 	return ret;
 }
@@ -1203,44 +1501,39 @@
 	unsigned long value;
 	int ret;
 
+	/*
+	 * Protect against theoretical sysfs writes between
+	 * device_add and dev_pm_qos_add_request
+	 */
+	if (!dev_pm_qos_request_active(&df->user_min_freq_req))
+		return -EAGAIN;
+
 	ret = sscanf(buf, "%lu", &value);
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&df->event_lock);
-	mutex_lock(&df->lock);
+	/* Round down to kHz for PM QoS */
+	ret = dev_pm_qos_update_request(&df->user_min_freq_req,
+					value / HZ_PER_KHZ);
+	if (ret < 0)
+		return ret;
 
-	if (value) {
-		if (value > df->max_freq) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-	} else {
-		unsigned long *freq_table = df->profile->freq_table;
-
-		/* Get minimum frequency according to sorting order */
-		if (freq_table[0] < freq_table[df->profile->max_state - 1])
-			value = freq_table[0];
-		else
-			value = freq_table[df->profile->max_state - 1];
-	}
-
-	df->min_freq = value;
-	update_devfreq(df);
-	ret = count;
-unlock:
-	mutex_unlock(&df->lock);
-	mutex_unlock(&df->event_lock);
-	return ret;
+	return count;
 }
 
 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
 {
 	struct devfreq *df = to_devfreq(dev);
+	unsigned long min_freq, max_freq;
 
-	return sprintf(buf, "%lu\n", max(df->policy.min, df->min_freq));
+	mutex_lock(&df->lock);
+	get_freq_range(df, &min_freq, &max_freq);
+	mutex_unlock(&df->lock);
+
+	return sprintf(buf, "%lu\n", min_freq);
 }
+static DEVICE_ATTR_RW(min_freq);
 
 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
@@ -1249,44 +1542,50 @@
 	unsigned long value;
 	int ret;
 
+	/*
+	 * Protect against theoretical sysfs writes between
+	 * device_add and dev_pm_qos_add_request
+	 */
+	if (!dev_pm_qos_request_active(&df->user_max_freq_req))
+		return -EINVAL;
+
 	ret = sscanf(buf, "%lu", &value);
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&df->event_lock);
-	mutex_lock(&df->lock);
+	/*
+	 * PM QoS frequencies are in kHz so we need to convert. Convert by
+	 * rounding upwards so that the acceptable interval never shrinks.
+	 *
+	 * For example if the user writes "666666666" to sysfs this value will
+	 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
+	 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
+	 *
+	 * A value of zero means "no limit".
+	 */
+	if (value)
+		value = DIV_ROUND_UP(value, HZ_PER_KHZ);
+	else
+		value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
 
-	if (value) {
-		if (value < df->min_freq) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-	} else {
-		unsigned long *freq_table = df->profile->freq_table;
+	ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
+	if (ret < 0)
+		return ret;
 
-		/* Get maximum frequency according to sorting order */
-		if (freq_table[0] < freq_table[df->profile->max_state - 1])
-			value = freq_table[df->profile->max_state - 1];
-		else
-			value = freq_table[0];
-	}
-
-	df->max_freq = value;
-	update_devfreq(df);
-	ret = count;
-unlock:
-	mutex_unlock(&df->lock);
-	mutex_unlock(&df->event_lock);
-	return ret;
+	return count;
 }
-static DEVICE_ATTR_RW(min_freq);
 
 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
 {
 	struct devfreq *df = to_devfreq(dev);
+	unsigned long min_freq, max_freq;
 
-	return sprintf(buf, "%lu\n", min(df->policy.max, df->max_freq));
+	mutex_lock(&df->lock);
+	get_freq_range(df, &min_freq, &max_freq);
+	mutex_unlock(&df->lock);
+
+	return sprintf(buf, "%lu\n", max_freq);
 }
 static DEVICE_ATTR_RW(max_freq);
 
@@ -1297,6 +1596,9 @@
 	struct devfreq *df = to_devfreq(d);
 	ssize_t count = 0;
 	int i;
+
+	if (!df->profile)
+		return -EINVAL;
 
 	mutex_lock(&df->lock);
 
@@ -1318,51 +1620,149 @@
 static ssize_t trans_stat_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
 {
-	struct devfreq *devfreq = to_devfreq(dev);
+	struct devfreq *df = to_devfreq(dev);
 	ssize_t len;
 	int i, j;
-	unsigned int max_state = devfreq->profile->max_state;
+	unsigned int max_state;
+
+	if (!df->profile)
+		return -EINVAL;
+	max_state = df->profile->max_state;
 
 	if (max_state == 0)
 		return sprintf(buf, "Not Supported.\n");
 
-	mutex_lock(&devfreq->lock);
-	if (!devfreq->stop_polling &&
-			devfreq_update_status(devfreq, devfreq->previous_freq)) {
-		mutex_unlock(&devfreq->lock);
+	mutex_lock(&df->lock);
+	if (!df->stop_polling &&
+			devfreq_update_status(df, df->previous_freq)) {
+		mutex_unlock(&df->lock);
 		return 0;
 	}
-	mutex_unlock(&devfreq->lock);
+	mutex_unlock(&df->lock);
 
 	len = sprintf(buf, " From : To\n");
 	len += sprintf(buf + len, " :");
 	for (i = 0; i < max_state; i++)
 		len += sprintf(buf + len, "%10lu",
-				devfreq->profile->freq_table[i]);
+				df->profile->freq_table[i]);
 
 	len += sprintf(buf + len, " time(ms)\n");
 
 	for (i = 0; i < max_state; i++) {
-		if (devfreq->profile->freq_table[i]
-					== devfreq->previous_freq) {
+		if (df->profile->freq_table[i]
+					== df->previous_freq) {
 			len += sprintf(buf + len, "*");
 		} else {
 			len += sprintf(buf + len, " ");
 		}
 		len += sprintf(buf + len, "%10lu:",
-				devfreq->profile->freq_table[i]);
+				df->profile->freq_table[i]);
 		for (j = 0; j < max_state; j++)
 			len += sprintf(buf + len, "%10u",
-				devfreq->trans_table[(i * max_state) + j]);
-		len += sprintf(buf + len, "%10u\n",
-			jiffies_to_msecs(devfreq->time_in_state[i]));
+				df->stats.trans_table[(i * max_state) + j]);
+
+		len += sprintf(buf + len, "%10llu\n", (u64)
+			jiffies64_to_msecs(df->stats.time_in_state[i]));
 	}
 
 	len += sprintf(buf + len, "Total transition : %u\n",
-					devfreq->total_trans);
+					df->stats.total_trans);
 	return len;
 }
-static DEVICE_ATTR_RO(trans_stat);
+
+static ssize_t trans_stat_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	int err, value;
+
+	if (!df->profile)
+		return -EINVAL;
+
+	if (df->profile->max_state == 0)
+		return count;
+
+	err = kstrtoint(buf, 10, &value);
+	if (err || value != 0)
+		return -EINVAL;
+
+	mutex_lock(&df->lock);
+	memset(df->stats.time_in_state, 0, (df->profile->max_state *
+					sizeof(*df->stats.time_in_state)));
+	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
+					df->profile->max_state,
+					df->profile->max_state));
+	df->stats.total_trans = 0;
+	df->stats.last_update = get_jiffies_64();
+	mutex_unlock(&df->lock);
+
+	return count;
+}
+static DEVICE_ATTR_RW(trans_stat);
+
+static ssize_t timer_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	if (!df->profile)
+		return -EINVAL;
+
+	return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
+}
+
+static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	char str_timer[DEVFREQ_NAME_LEN + 1];
+	int timer = -1;
+	int ret = 0, i;
+
+	if (!df->governor || !df->profile)
+		return -EINVAL;
+
+	ret = sscanf(buf, "%16s", str_timer);
+	if (ret != 1)
+		return -EINVAL;
+
+	for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
+		if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
+			timer = i;
+			break;
+		}
+	}
+
+	if (timer < 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (df->profile->timer == timer) {
+		ret = 0;
+		goto out;
+	}
+
+	mutex_lock(&df->lock);
+	df->profile->timer = timer;
+	mutex_unlock(&df->lock);
+
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+	if (ret) {
+		dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
			 __func__, df->governor->name, ret);
+		goto out;
+	}
+
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+	if (ret)
+		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
+out:
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(timer);
 
 static ssize_t load_show(struct device *dev, struct device_attribute *attr,
			char *buf)
@@ -1389,8 +1789,8 @@
 	len = sprintf(buf, "%lu", stat.busy_time * 100 / stat.total_time);
 
 	if (devfreq->profile->get_cur_freq &&
-		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
-		len += sprintf(buf + len, "@%luHz\n", freq);
+	    !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+		len += sprintf(buf + len, "@%luHz\n", freq);
 	else
 		len += sprintf(buf + len, "@%luHz\n", devfreq->previous_freq);
 
@@ -1409,10 +1809,88 @@
 	&dev_attr_min_freq.attr,
 	&dev_attr_max_freq.attr,
 	&dev_attr_trans_stat.attr,
+	&dev_attr_timer.attr,
 	&dev_attr_load.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(devfreq);
+
+/**
+ * devfreq_summary_show() - Show the summary of the devfreq devices
+ * @s:		seq_file instance to show the summary of devfreq devices
+ * @data:	not used
+ *
+ * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
+ * It helps that user can know the detailed information of the devfreq devices.
+ *
+ * Return 0 always because it shows the information without any data change.
+ */
+static int devfreq_summary_show(struct seq_file *s, void *data)
+{
+	struct devfreq *devfreq;
+	struct devfreq *p_devfreq = NULL;
+	unsigned long cur_freq, min_freq, max_freq;
+	unsigned int polling_ms;
+	unsigned int timer;
+
+	seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n",
+			"dev",
+			"parent_dev",
+			"governor",
+			"timer",
+			"polling_ms",
+			"cur_freq_Hz",
+			"min_freq_Hz",
+			"max_freq_Hz");
+	seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n",
+			"------------------------------",
+			"------------------------------",
+			"---------------",
+			"----------",
+			"----------",
+			"------------",
+			"------------",
+			"------------");
+
+	mutex_lock(&devfreq_list_lock);
+
+	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
+							DEVFREQ_NAME_LEN)) {
+			struct devfreq_passive_data *data = devfreq->data;
+
+			if (data)
+				p_devfreq = data->parent;
+		} else {
+			p_devfreq = NULL;
+		}
+#endif
+
+		mutex_lock(&devfreq->lock);
+		cur_freq = devfreq->previous_freq;
+		get_freq_range(devfreq, &min_freq, &max_freq);
+		polling_ms = devfreq->profile->polling_ms;
+		timer = devfreq->profile->timer;
+		mutex_unlock(&devfreq->lock);
+
+		seq_printf(s,
+			"%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n",
+			dev_name(&devfreq->dev),
+			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
+			devfreq->governor_name,
+			polling_ms ? timer_name[timer] : "null",
+			polling_ms,
+			cur_freq,
+			min_freq,
+			max_freq);
+	}
+
+	mutex_unlock(&devfreq_list_lock);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
 
 static int __init devfreq_init(void)
 {
@@ -1429,6 +1907,11 @@
 		return -ENOMEM;
 	}
 	devfreq_class->dev_groups = devfreq_groups;
+
+	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
+	debugfs_create_file("devfreq_summary", 0444,
+				devfreq_debugfs, NULL,
+				&devfreq_summary_fops);
 
 	return 0;
 }
@@ -1477,8 +1960,8 @@
 
 /**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
- * for any changes in the OPP availability
- * changes
+ *				     for any changes in the OPP availability
+ *				     changes
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
@@ -1490,8 +1973,8 @@
 
 /**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
- * notified for any changes in the OPP
- * availability changes anymore.
+ *				       notified for any changes in the OPP
+ *				       availability changes anymore.
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 *
@@ -1510,8 +1993,8 @@
 }
 
 /**
- * devm_ devfreq_register_opp_notifier()
- *		- Resource-managed devfreq_register_opp_notifier()
+ * devm_devfreq_register_opp_notifier() - Resource-managed
+ *					  devfreq_register_opp_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
@@ -1539,8 +2022,8 @@
 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
 
 /**
- * devm_devfreq_unregister_opp_notifier()
- *		- Resource-managed devfreq_unregister_opp_notifier()
+ * devm_devfreq_unregister_opp_notifier() - Resource-managed
+ *					    devfreq_unregister_opp_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
@@ -1556,11 +2039,11 @@
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to register.
- * @list: DEVFREQ_TRANSITION_NOTIFIER or DEVFREQ_POLICY_NOTIFIER.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
 int devfreq_register_notifier(struct devfreq *devfreq,
-				struct notifier_block *nb,
-				unsigned int list)
+			      struct notifier_block *nb,
+			      unsigned int list)
 {
 	int ret = 0;
 
@@ -1571,10 +2054,6 @@
 	case DEVFREQ_TRANSITION_NOTIFIER:
 		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
-		break;
-	case DEVFREQ_POLICY_NOTIFIER:
-		ret = srcu_notifier_chain_register(
-				&devfreq->policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1588,7 +2067,7 @@
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
- * @list: DEVFREQ_TRANSITION_NOTIFIER or DEVFREQ_POLICY_NOTIFIER.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
 int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
@@ -1603,10 +2082,6 @@
 	case DEVFREQ_TRANSITION_NOTIFIER:
 		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
-		break;
-	case DEVFREQ_POLICY_NOTIFIER:
-		ret = srcu_notifier_chain_unregister(
-				&devfreq->policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1631,7 +2106,7 @@
 
 /**
 * devm_devfreq_register_notifier()
- - Resource-managed devfreq_register_notifier()
+ * - Resource-managed devfreq_register_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
@@ -1667,16 +2142,16 @@
 
 /**
 * devm_devfreq_unregister_notifier()
- - Resource-managed devfreq_unregister_notifier()
+ * - Resource-managed devfreq_unregister_notifier()
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
 void devm_devfreq_unregister_notifier(struct device *dev,
-					struct devfreq *devfreq,
-					struct notifier_block *nb,
-					unsigned int list)
+				      struct devfreq *devfreq,
+				      struct notifier_block *nb,
+				      unsigned int list)
 {
 	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));