+// SPDX-License-Identifier: GPL-2.0
 /*
  * CPUFreq governor based on scheduler-provided CPU utilization data.
  *
  * Copyright (C) 2016, Intel Corporation
  * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
...

 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
+#include <trace/hooks/sched.h>
+
+#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

 struct sugov_tunables {
         struct gov_attr_set attr_set;
-        unsigned int up_rate_limit_us;
-        unsigned int down_rate_limit_us;
+        unsigned int rate_limit_us;
+#ifdef CONFIG_ARCH_ROCKCHIP
+        unsigned int target_load;
+#endif
 };
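The two additions above anchor later hunks: IOWAIT_BOOST_MIN pins the iowait-boost floor at one eighth of the capacity scale (1024 / 8 = 128), replacing the per-policy sg_cpu->min that used to be derived from the policy's frequency range (its removal appears further down), and the Rockchip-only target_load tunable feeds the frequency formula in get_next_freq(). A minimal comparison of the two floors, assuming a made-up 408 MHz..1.8 GHz policy:

    /* Illustrative only, not kernel code; the 408 MHz / 1.8 GHz range is
     * an assumed example policy. */
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024

    int main(void)
    {
            unsigned int old_floor = SCHED_CAPACITY_SCALE * 408000 / 1800000;
            unsigned int new_floor = SCHED_CAPACITY_SCALE / 8;

            /* old: ~232, tracks the policy's min/max frequency ratio;
             * new: always 128, independent of the frequency table. */
            printf("%u -> %u\n", old_floor, new_floor);
            return 0;
    }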

 struct sugov_policy {
...

         raw_spinlock_t update_lock; /* For shared policies */
         u64 last_freq_update_time;
-        s64 min_rate_limit_ns;
-        s64 up_rate_delay_ns;
-        s64 down_rate_delay_ns;
+        s64 freq_update_delay_ns;
         unsigned int next_freq;
         unsigned int cached_raw_freq;
-        unsigned int prev_cached_raw_freq;

         /* The next fields are only needed if fast switch cannot be used: */
         struct irq_work irq_work;
...
         u64 last_update;

         unsigned long bw_dl;
-        unsigned long min;
         unsigned long max;

         /* The field below is for single-CPU policies only: */
...
                 return true;
         }

-        /* No need to recalculate next freq for min_rate_limit_us
-         * at least. However we might still decide to further rate
-         * limit once frequency change direction is decided, according
-         * to the separate rate limits.
-         */
-
-        delta_ns = time - sg_policy->last_freq_update_time;
-        return delta_ns >= sg_policy->min_rate_limit_ns;
-}
-
-static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
-                                     unsigned int next_freq)
-{
-        s64 delta_ns;
-
         delta_ns = time - sg_policy->last_freq_update_time;

-        if (next_freq > sg_policy->next_freq &&
-            delta_ns < sg_policy->up_rate_delay_ns)
-                return true;
-
-        if (next_freq < sg_policy->next_freq &&
-            delta_ns < sg_policy->down_rate_delay_ns)
-                return true;
-
-        return false;
+        return delta_ns >= sg_policy->freq_update_delay_ns;
 }

 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                    unsigned int next_freq)
 {
-        if (sg_policy->next_freq == next_freq)
-                return false;
-
-        if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
-                /* Restore cached freq as next_freq is not changed */
-                sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
-                return false;
+        if (!sg_policy->need_freq_update) {
+                if (sg_policy->next_freq == next_freq)
+                        return false;
+        } else {
+                sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
         }

         sg_policy->next_freq = next_freq;
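This hunk folds the separate up/down rate limits into a single freq_update_delay_ns that is checked before any frequency recalculation, and reworks need_freq_update: once limits change, the flag forces one update through, and it only stays set for drivers flagged CPUFREQ_NEED_UPDATE_LIMITS, which must see a request even when the target frequency is unchanged. A minimal user-space sketch of the unified check, assuming rate_limit_us = 2000:

    /* Sketch only: one delay now gates both raises and drops. */
    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_USEC 1000ULL

    static uint64_t last_freq_update_time;
    static const int64_t freq_update_delay_ns = 2000 * NSEC_PER_USEC;

    static bool should_update_freq(uint64_t now)
    {
            /* The direction of the pending change no longer matters. */
            return (int64_t)(now - last_freq_update_time) >= freq_update_delay_ns;
    }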
...
 static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                               unsigned int next_freq)
 {
-        struct cpufreq_policy *policy = sg_policy->policy;
-        int cpu;
-
-        if (!sugov_update_next_freq(sg_policy, time, next_freq))
-                return;
-
-        next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-        if (!next_freq)
-                return;
-
-        policy->cur = next_freq;
-
-        if (trace_cpu_frequency_enabled()) {
-                for_each_cpu(cpu, policy->cpus)
-                        trace_cpu_frequency(next_freq, cpu);
-        }
+        if (sugov_update_next_freq(sg_policy, time, next_freq))
+                cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
 }
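The fast-switch path can shed its local bookkeeping because, in cpufreq cores of this vintage, cpufreq_driver_fast_switch() itself updates policy->cur and emits the cpu_frequency trace events on success. Roughly, paraphrased from drivers/cpufreq/cpufreq.c and stated here as an assumption about the matching core-side change:

    /* Inside cpufreq_driver_fast_switch(), approximately: */
    freq = cpufreq_driver->fast_switch(policy, target_freq);
    if (freq) {
            policy->cur = freq;
            if (trace_cpu_frequency_enabled()) {
                    for_each_cpu(cpu, policy->cpus)
                            trace_cpu_frequency(freq, cpu);
            }
    }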

 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
...
         struct cpufreq_policy *policy = sg_policy->policy;
         unsigned int freq = arch_scale_freq_invariant() ?
                                 policy->cpuinfo.max_freq : policy->cur;
+        unsigned long next_freq = 0;

-        freq = map_util_freq(util, freq, max);
+        trace_android_vh_map_util_freq(util, freq, max, &next_freq, policy,
+                                       &sg_policy->need_freq_update);
+        if (next_freq)
+                freq = next_freq;
+        else
+#ifdef CONFIG_ARCH_ROCKCHIP
+                freq = div64_ul((u64)(100 * freq / sg_policy->tunables->target_load) * util, max);
+#else
+                freq = map_util_freq(util, freq, max);
+#endif

         if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                 return sg_policy->next_freq;

-        sg_policy->need_freq_update = false;
-        sg_policy->prev_cached_raw_freq = sg_policy->cached_raw_freq;
         sg_policy->cached_raw_freq = freq;
         return cpufreq_driver_resolve_freq(policy, freq);
 }
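For the Rockchip branch it helps to note that the stock map_util_freq() is (freq + freq/4) * util / max, i.e. a fixed 25% headroom. The 100 * freq / target_load form generalizes that headroom to 100/target_load, so the default target_load = 80 (set in sugov_init() below) reproduces stock behaviour exactly, while lower values pick higher frequencies at the same utilization. A standalone sketch, with 1.8 GHz as an assumed max frequency:

    /* Illustrative only; the 1.8 GHz max_freq is a made-up example. */
    #include <stdio.h>

    static unsigned long next_freq(unsigned long util, unsigned long max,
                                   unsigned long max_freq, unsigned int target_load)
    {
            return (unsigned long)(100ULL * max_freq / target_load * util / max);
    }

    int main(void)
    {
            printf("%lu\n", next_freq(512, 1024, 1800000, 80)); /* 1125000: same as stock 1.25x */
            printf("%lu\n", next_freq(512, 1024, 1800000, 50)); /* 1800000: 2x headroom */
            return 0;
    }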
-
-extern long
-schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p);

 /*
  * This function computes an effective utilization for the given CPU, to be
...
  *
  * Where the cfs,rt and dl util numbers are tracked with the same metric and
  * synchronized windows and are thus directly comparable.
- *
- * The @util parameter passed to this function is assumed to be the aggregation
- * of RT and CFS util numbers. The cases of DL and IRQ are managed here.
  *
  * The cfs,rt,dl utilization are the running times measured with rq->clock_task
  * which excludes things like IRQ and steal-time. These latter are then accrued
...
         unsigned long dl_util, util, irq;
         struct rq *rq = cpu_rq(cpu);

-        if (sched_feat(SUGOV_RT_MAX_FREQ) && !IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
+        if (!uclamp_is_used() &&
             type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
                 return max;
         }
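The compile-time IS_BUILTIN(CONFIG_UCLAMP_TASK) test becomes a runtime one: uclamp_is_used() is a static-key check that stays false until some task or cgroup actually sets a utilization clamp, so runnable RT still drives the CPU to max until uclamp is genuinely in use. A sketch of the assumed helper:

    /* Assumed definition, per kernel/sched/sched.h of similar kernels: */
    static inline bool uclamp_is_used(void)
    {
            return static_branch_likely(&sched_uclamp_used);
    }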
...
          */
         util = util_cfs + cpu_util_rt(rq);
         if (type == FREQUENCY_UTIL)
-#ifdef CONFIG_SCHED_TUNE
-                util += schedtune_cpu_margin_with(util, cpu, p);
-#else
                 util = uclamp_rq_util_with(rq, util, p);
-#endif

         dl_util = cpu_util_dl(rq);
...
          * irq metric. Because IRQ/steal time is hidden from the task clock we
          * need to scale the task numbers:
          *
-         *              1 - irq
-         *   U' = irq + ------- * U
-         *                max
+         *              max - irq
+         *   U' = irq + --------- * U
+         *                 max
          */
         util = scale_irq_capacity(util, irq, max);
         util += irq;
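This is a comment-only fix: the code always computed the max-relative scaling, the old ASCII formula simply wrote the numerator wrong. Worked numbers for the corrected form, assuming SCHED_CAPACITY_SCALE = 1024:

    /* U' = irq + (max - irq) / max * U
     * e.g. irq = 256, U = 512, max = 1024:
     * U' = 256 + 768 * 512 / 1024 = 256 + 384 = 640 */
    static unsigned long scaled_util(unsigned long util, unsigned long irq,
                                     unsigned long max)
    {
            return irq + util * (max - irq) / max; /* scaled_util(512, 256, 1024) == 640 */
    }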
...

         return min(max, util);
 }
+EXPORT_SYMBOL_GPL(schedutil_cpu_util);

 static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
         struct rq *rq = cpu_rq(sg_cpu->cpu);
-        unsigned long util_cfs = cpu_util_cfs(rq);
-        unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+        unsigned long util = cpu_util_cfs(rq);
+        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

         sg_cpu->max = max;
         sg_cpu->bw_dl = cpu_bw_dl(rq);

-        return schedutil_cpu_util(sg_cpu->cpu, util_cfs, max,
-                                  FREQUENCY_UTIL, NULL);
+        return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
 }
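Two independent things land here: schedutil_cpu_util() is exported for GPL modules (vendor modules hanging off the trace hooks are the likely consumer, though that is an inference), and the caller tracks the upstream signature change that dropped arch_scale_cpu_capacity()'s unused sched_domain argument:

    /* Prototype change being tracked (upstream; stated as an assumption):
     * old: unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
     * new: unsigned long arch_scale_cpu_capacity(int cpu);
     */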

 /**
...
  *
  * The IO wait boost of a task is disabled after a tick since the last update
  * of a CPU. If a new IO wait boost is requested after more than a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
  */
 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                                bool set_iowait_boost)
...
         if (delta_ns <= TICK_NSEC)
                 return false;

-        sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
         sg_cpu->iowait_boost_pending = set_iowait_boost;

         return true;
...
  *
  * Each time a task wakes up after an IO operation, the CPU utilization can be
  * boosted to a certain utilization which doubles at each "frequent and
- * successive" wakeup from IO, ranging from the utilization of the minimum
- * OPP to the utilization of the maximum OPP.
+ * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
+ * of the maximum OPP.
+ *
  * To keep doubling, an IO boost has to be requested at least once per tick,
  * otherwise we restart from the utilization of the minimum OPP.
  */
...
         }

         /* First wakeup after IO: start with minimum boost */
-        sg_cpu->iowait_boost = sg_cpu->min;
+        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
 }

 /**
...
          * No boost pending; reduce the boost value.
          */
         sg_cpu->iowait_boost >>= 1;
-        if (sg_cpu->iowait_boost < sg_cpu->min) {
+        if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                 sg_cpu->iowait_boost = 0;
                 return util;
         }
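With the fixed floor, the boost ladder is a clean geometric series: each "frequent and successive" iowait wakeup doubles the boost up to SCHED_CAPACITY_SCALE, and each update without a pending boost halves it until it falls below IOWAIT_BOOST_MIN and is zeroed. A small demo of the decay path:

    /* Sketch of the decay sequence: prints 1024 512 256 128, then 0. */
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024
    #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)

    int main(void)
    {
            unsigned int boost = SCHED_CAPACITY_SCALE;

            while (boost >= IOWAIT_BOOST_MIN) {
                    printf("%u\n", boost);
                    boost >>= 1; /* mirrors sg_cpu->iowait_boost >>= 1 */
            }
            printf("0\n"); /* dropped once below IOWAIT_BOOST_MIN */
            return 0;
    }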
...
         struct sugov_policy *sg_policy = sg_cpu->sg_policy;
         unsigned long util, max;
         unsigned int next_f;
-        bool busy;
+        unsigned int cached_freq = sg_policy->cached_raw_freq;

         sugov_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
...
         if (!sugov_should_update_freq(sg_policy, time))
                 return;

-        /* Limits may have changed, don't skip frequency update */
-        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
-
         util = sugov_get_util(sg_cpu);
         max = sg_cpu->max;
         util = sugov_iowait_apply(sg_cpu, time, util, max);
...
          * Do not reduce the frequency if the CPU has not been idle
          * recently, as the reduction is likely to be premature then.
          */
-        if (busy && next_f < sg_policy->next_freq) {
+        if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
                 next_f = sg_policy->next_freq;

                 /* Restore cached freq as next_freq has changed */
-                sg_policy->cached_raw_freq = sg_policy->prev_cached_raw_freq;
+                sg_policy->cached_raw_freq = cached_freq;
         }

         /*
...
         return container_of(attr_set, struct sugov_tunables, attr_set);
 }

-static DEFINE_MUTEX(min_rate_lock);
-
-static void update_min_rate_limit_ns(struct sugov_policy *sg_policy)
-{
-        mutex_lock(&min_rate_lock);
-        sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
-                                           sg_policy->down_rate_delay_ns);
-        mutex_unlock(&min_rate_lock);
-}
-
-static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
 {
         struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

-        return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
+        return sprintf(buf, "%u\n", tunables->rate_limit_us);
 }

-static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
-{
-        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
-
-        return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
-}
-
-static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
-                                      const char *buf, size_t count)
+static ssize_t
+rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
 {
         struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
         struct sugov_policy *sg_policy;
...
         if (kstrtouint(buf, 10, &rate_limit_us))
                 return -EINVAL;

-        tunables->up_rate_limit_us = rate_limit_us;
+        tunables->rate_limit_us = rate_limit_us;

-        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
-                sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
-                update_min_rate_limit_ns(sg_policy);
-        }
+        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
+                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

         return count;
 }

-static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
-                                        const char *buf, size_t count)
+static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+
+#ifdef CONFIG_ARCH_ROCKCHIP
+static ssize_t target_load_show(struct gov_attr_set *attr_set, char *buf)
 {
         struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
-        struct sugov_policy *sg_policy;
-        unsigned int rate_limit_us;

-        if (kstrtouint(buf, 10, &rate_limit_us))
+        return sprintf(buf, "%u\n", tunables->target_load);
+}
+
+static ssize_t
+target_load_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
+{
+        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+        unsigned int target_load;
+
+        if (kstrtouint(buf, 10, &target_load))
                 return -EINVAL;

-        tunables->down_rate_limit_us = rate_limit_us;
+        if (!target_load || (target_load > 100))
+                return -EINVAL;

-        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
-                sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
-                update_min_rate_limit_ns(sg_policy);
-        }
+        tunables->target_load = target_load;

         return count;
 }

-static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
-static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
+static struct governor_attr target_load = __ATTR_RW(target_load);
+#endif

-static struct attribute *sugov_attributes[] = {
-        &up_rate_limit_us.attr,
-        &down_rate_limit_us.attr,
+static struct attribute *sugov_attrs[] = {
+        &rate_limit_us.attr,
+#ifdef CONFIG_ARCH_ROCKCHIP
+        &target_load.attr,
+#endif
         NULL
 };
+ATTRIBUTE_GROUPS(sugov);
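ATTRIBUTE_GROUPS(sugov) generates the sugov_groups array consumed below via .default_groups (replacing the removed .default_attrs). Roughly, per include/linux/sysfs.h:

    /* Approximate expansion of ATTRIBUTE_GROUPS(sugov): */
    static const struct attribute_group sugov_group = {
            .attrs = sugov_attrs,
    };
    static const struct attribute_group *sugov_groups[] = {
            &sugov_group,
            NULL,
    };

Net sysfs effect of the whole block: each policy now exposes a single rate_limit_us file plus, on Rockchip, target_load, whose store rejects values outside 1..100.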

 static void sugov_tunables_free(struct kobject *kobj)
 {
...
 }

 static struct kobj_type sugov_tunables_ktype = {
-        .default_attrs = sugov_attributes,
+        .default_groups = sugov_groups,
         .sysfs_ops = &governor_sysfs_ops,
         .release = &sugov_tunables_free,
 };

 /********************** cpufreq governor interface *********************/

-static struct cpufreq_governor schedutil_gov;
+struct cpufreq_governor schedutil_gov;

 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 {
...
                 goto stop_kthread;
         }

-        tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
-        tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
+        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
+#ifdef CONFIG_ARCH_ROCKCHIP
+        tunables->target_load = 80;
+#endif

         policy->governor_data = sg_policy;
         sg_policy->tunables = tunables;
...
         struct sugov_policy *sg_policy = policy->governor_data;
         unsigned int cpu;

-        sg_policy->up_rate_delay_ns =
-                sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
-        sg_policy->down_rate_delay_ns =
-                sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
-        update_min_rate_limit_ns(sg_policy);
+        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
         sg_policy->last_freq_update_time = 0;
         sg_policy->next_freq = 0;
         sg_policy->work_in_progress = false;
         sg_policy->limits_changed = false;
-        sg_policy->need_freq_update = false;
         sg_policy->cached_raw_freq = 0;
-        sg_policy->prev_cached_raw_freq = 0;
+
+        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

         for_each_cpu(cpu, policy->cpus) {
                 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
...
                 memset(sg_cpu, 0, sizeof(*sg_cpu));
                 sg_cpu->cpu = cpu;
                 sg_cpu->sg_policy = sg_policy;
-                sg_cpu->min =
-                        (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
-                        policy->cpuinfo.max_freq;
         }

         for_each_cpu(cpu, policy->cpus) {
...
         for_each_cpu(cpu, policy->cpus)
                 cpufreq_remove_update_util_hook(cpu);

-        synchronize_sched();
+        synchronize_rcu();

         if (!policy->fast_switch_enabled) {
                 irq_work_sync(&sg_policy->irq_work);
...
         sg_policy->limits_changed = true;
 }

-static struct cpufreq_governor schedutil_gov = {
+struct cpufreq_governor schedutil_gov = {
         .name = "schedutil",
         .owner = THIS_MODULE,
-        .dynamic_switching = true,
+        .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
         .init = sugov_init,
         .exit = sugov_exit,
         .start = sugov_start,
...
 }
 #endif

-static int __init sugov_register(void)
-{
-        return cpufreq_register_governor(&schedutil_gov);
-}
-fs_initcall(sugov_register);
+cpufreq_governor_init(schedutil_gov);
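The hand-rolled initcall gives way to the cpufreq_governor_init() helper, and schedutil_gov drops its static qualifier so other kernel code can reference it (the scheduler's energy-model setup compares policy->governor against &schedutil_gov). Assuming the include/linux/cpufreq.h definition of similar kernels, the macro expands to essentially the same registration, just at core_initcall level rather than fs_initcall:

    /* Approximate expansion of cpufreq_governor_init(schedutil_gov): */
    static int __init schedutil_gov_init(void)
    {
            return cpufreq_register_governor(&schedutil_gov);
    }
    core_initcall(schedutil_gov_init);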