2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/cpufreq/cpufreq_interactive.c
@@ -37,7 +37,6 @@
 #include <linux/slab.h>
 #include <uapi/linux/sched/types.h>
 #include <linux/sched/clock.h>
-#include <soc/rockchip/rockchip_system_monitor.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
@@ -101,7 +100,6 @@
 	int touchboostpulse_duration_val;
 	/* End time of touchboost pulse in ktime converted to usecs */
 	u64 touchboostpulse_endtime;
-	bool touchboost, is_touchboosted;
 #endif
 	bool boosted;
 
@@ -128,7 +126,6 @@
 	struct interactive_policy *ipolicy;
 
 	struct irq_work irq_work;
-	struct irq_work boost_irq_work;
 	u64 last_sample_time;
 	unsigned long next_sample_jiffies;
 	bool work_in_progress;
@@ -151,9 +148,6 @@
 	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
 	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
 	int cpu;
-	unsigned int task_boost_freq;
-	unsigned long task_boost_util;
-	u64 task_boos_endtime;
 };
 
 static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);
@@ -429,9 +423,6 @@
 	    new_freq < tunables->touchboost_freq) {
 		new_freq = tunables->touchboost_freq;
 	}
-	if ((now < icpu->task_boos_endtime) && (new_freq < icpu->task_boost_freq)) {
-		new_freq = icpu->task_boost_freq;
-	}
 #endif
 	if (policy->cur >= tunables->hispeed_freq &&
 	    new_freq > policy->cur &&
@@ -609,44 +600,7 @@
 		for_each_cpu(cpu, &tmp_mask) {
 			struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
 			struct cpufreq_policy *policy;
-#ifdef CONFIG_ARCH_ROCKCHIP
-			struct interactive_tunables *tunables;
-			bool update_policy = false;
-			u64 now;
 
-			now = ktime_to_us(ktime_get());
-			if (!down_read_trylock(&icpu->enable_sem))
-				continue;
-
-			if (!icpu->ipolicy) {
-				up_read(&icpu->enable_sem);
-				continue;
-			}
-
-			tunables = icpu->ipolicy->tunables;
-			if (!tunables) {
-				up_read(&icpu->enable_sem);
-				continue;
-			}
-
-			if (tunables->touchboost &&
-			    now > tunables->touchboostpulse_endtime) {
-				tunables->touchboost = false;
-				rockchip_monitor_clear_boosted();
-				update_policy = true;
-			}
-
-			if (!tunables->is_touchboosted && tunables->touchboost) {
-				rockchip_monitor_set_boosted();
-				update_policy = true;
-			}
-
-			tunables->is_touchboosted = tunables->touchboost;
-
-			up_read(&icpu->enable_sem);
-			if (update_policy)
-				cpufreq_update_policy(cpu);
-#endif
 			policy = cpufreq_cpu_get(cpu);
 			if (!policy)
 				continue;
@@ -716,25 +670,31 @@
 					unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
+	struct cpufreq_policy *policy = freq->policy;
+	struct interactive_cpu *icpu;
 	unsigned long flags;
+	int cpu;
 
 	if (val != CPUFREQ_POSTCHANGE)
 		return 0;
 
-	if (!down_read_trylock(&icpu->enable_sem))
-		return 0;
+	for_each_cpu(cpu, policy->cpus) {
+		icpu = &per_cpu(interactive_cpu, cpu);
 
-	if (!icpu->ipolicy) {
+		if (!down_read_trylock(&icpu->enable_sem))
+			continue;
+
+		if (!icpu->ipolicy) {
+			up_read(&icpu->enable_sem);
+			continue;
+		}
+
+		spin_lock_irqsave(&icpu->load_lock, flags);
+		update_load(icpu, cpu);
+		spin_unlock_irqrestore(&icpu->load_lock, flags);
+
 		up_read(&icpu->enable_sem);
-		return 0;
 	}
-
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	update_load(icpu, freq->cpu);
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-
-	up_read(&icpu->enable_sem);
 
 	return 0;
 }
@@ -1052,44 +1012,6 @@
 	return count;
 }
 
-static ssize_t store_touchboost_freq(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
-{
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul(buf, 0, &val);
-	if (ret < 0)
-		return ret;
-
-	tunables->touchboost_freq = val;
-
-	return count;
-}
-
-static ssize_t show_touchboost_duration(struct gov_attr_set *attr_set, char *buf)
-{
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-
-	return sprintf(buf, "%d\n", tunables->touchboostpulse_duration_val);
-}
-
-static ssize_t store_touchboost_duration(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
-{
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	int val, ret;
-
-	ret = kstrtoint(buf, 0, &val);
-	if (ret < 0)
-		return ret;
-
-	tunables->touchboostpulse_duration_val = val;
-
-	return count;
-}
-
 show_one(hispeed_freq, "%u");
 show_one(go_hispeed_load, "%lu");
 show_one(min_sample_time, "%lu");
@@ -1097,7 +1019,6 @@
 show_one(boost, "%u");
 show_one(boostpulse_duration, "%u");
 show_one(io_is_busy, "%u");
-show_one(touchboost_freq, "%lu");
 
 gov_attr_rw(target_loads);
 gov_attr_rw(above_hispeed_delay);
@@ -1110,8 +1031,6 @@
 gov_attr_wo(boostpulse);
 gov_attr_rw(boostpulse_duration);
 gov_attr_rw(io_is_busy);
-gov_attr_rw(touchboost_freq);
-gov_attr_rw(touchboost_duration);
 
 static struct attribute *interactive_attributes[] = {
 	&target_loads.attr,
@@ -1125,8 +1044,6 @@
 	&boostpulse.attr,
 	&boostpulse_duration.attr,
 	&io_is_busy.attr,
-	&touchboost_freq.attr,
-	&touchboost_duration.attr,
 	NULL
 };
 
@@ -1220,15 +1137,12 @@
 	for_each_cpu(i, policy->cpus)
 		cpufreq_remove_update_util_hook(i);
 
-	synchronize_sched();
+	synchronize_rcu();
 }
 
 static void icpu_cancel_work(struct interactive_cpu *icpu)
 {
 	irq_work_sync(&icpu->irq_work);
-#ifdef CONFIG_ARCH_ROCKCHIP
-	irq_work_sync(&icpu->boost_irq_work);
-#endif
 	icpu->work_in_progress = false;
 	del_timer_sync(&icpu->slack_timer);
 }
@@ -1328,7 +1242,6 @@
 			cpumask_set_cpu(i, &speedchange_cpumask);
 			pcpu->loc_hispeed_val_time =
 					ktime_to_us(ktime_get());
-			tunables->touchboost = true;
 			anyboost = 1;
 		}
 
@@ -1454,83 +1367,6 @@
 		tunables->attr_set = attr_set;
 	}
 }
-
-static unsigned int get_freq_for_util(struct cpufreq_policy *policy, unsigned long util)
-{
-	struct cpufreq_frequency_table *pos;
-	unsigned long max_cap, cur_cap;
-	unsigned int freq = 0;
-
-	max_cap = arch_scale_cpu_capacity(NULL, policy->cpu);
-	cpufreq_for_each_valid_entry(pos, policy->freq_table) {
-		freq = pos->frequency;
-
-		cur_cap = max_cap * freq / policy->max;
-		if (cur_cap > util)
-			break;
-	}
-
-	return freq;
-}
-
-static void task_boost_irq_work(struct irq_work *irq_work)
-{
-	struct interactive_cpu *pcpu;
-	struct interactive_policy *ipolicy;
-	unsigned long flags[2];
-	u64 now, prev_boos_endtime;
-	unsigned int boost_freq;
-
-	pcpu = container_of(irq_work, struct interactive_cpu, boost_irq_work);
-	if (!down_read_trylock(&pcpu->enable_sem))
-		return;
-
-	ipolicy = pcpu->ipolicy;
-	if (!ipolicy)
-		goto out;
-
-	if (ipolicy->policy->cur == ipolicy->policy->max)
-		goto out;
-
-	now = ktime_to_us(ktime_get());
-	prev_boos_endtime = pcpu->task_boos_endtime;;
-	pcpu->task_boos_endtime = now + ipolicy->tunables->sampling_rate;
-	boost_freq = get_freq_for_util(ipolicy->policy, pcpu->task_boost_util);
-	if ((now < prev_boos_endtime) && (boost_freq <= pcpu->task_boost_freq))
-		goto out;
-	pcpu->task_boost_freq = boost_freq;
-
-	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
-	spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
-	if (pcpu->target_freq < pcpu->task_boost_freq) {
-		pcpu->target_freq = pcpu->task_boost_freq;
-		cpumask_set_cpu(pcpu->cpu, &speedchange_cpumask);
-		wake_up_process(speedchange_task);
-	}
-	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
-	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
-
-out:
-	up_read(&pcpu->enable_sem);
-}
-
-extern unsigned long capacity_curr_of(int cpu);
-
-void cpufreq_task_boost(int cpu, unsigned long util)
-{
-	struct interactive_cpu *pcpu = &per_cpu(interactive_cpu, cpu);
-	unsigned long cap, min_util;
-
-	if (!speedchange_task)
-		return;
-
-	min_util = util + (util >> 2);
-	cap = capacity_curr_of(cpu);
-	if (min_util > cap) {
-		pcpu->task_boost_util = min_util;
-		irq_work_queue(&pcpu->boost_irq_work);
-	}
-}
 #endif
 
 int cpufreq_interactive_init(struct cpufreq_policy *policy)
@@ -1639,10 +1475,6 @@
 	idle_notifier_unregister(&cpufreq_interactive_idle_nb);
 #ifdef CONFIG_ARCH_ROCKCHIP
 	input_unregister_handler(&cpufreq_interactive_input_handler);
-	if (tunables->touchboost) {
-		tunables->touchboost = false;
-		rockchip_monitor_clear_boosted();
-	}
 #endif
 }
 
@@ -1763,9 +1595,6 @@
 		icpu = &per_cpu(interactive_cpu, cpu);
 
 		init_irq_work(&icpu->irq_work, irq_work);
-#ifdef CONFIG_ARCH_ROCKCHIP
-		init_irq_work(&icpu->boost_irq_work, task_boost_irq_work);
-#endif
 		spin_lock_init(&icpu->load_lock);
 		spin_lock_init(&icpu->target_freq_lock);
 		init_rwsem(&icpu->enable_sem);