2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/cpufreq/acpi-cpufreq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * acpi-cpufreq.c - ACPI Processor P-States Driver
  *
@@ -5,24 +6,6 @@
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -43,10 +26,12 @@
 #include <linux/uaccess.h>
 
 #include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
 
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -61,6 +46,7 @@
 
 #define INTEL_MSR_RANGE	(0xffff)
 #define AMD_MSR_RANGE	(0x7)
+#define HYGON_MSR_RANGE	(0x7)
 
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
@@ -95,6 +81,7 @@
 		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
 		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
@@ -113,6 +100,7 @@
 		msr_addr = MSR_IA32_MISC_ENABLE;
 		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
 		break;
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		msr_addr = MSR_K7_HWCR;
 		msr_mask = MSR_K7_HWCR_CPB_DIS;
@@ -139,12 +127,12 @@
 	boost_set_msr(enable);
 }
 
-static int set_boost(int val)
+static int set_boost(struct cpufreq_policy *policy, int val)
 {
-	get_online_cpus();
-	on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
-	put_online_cpus();
-	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
+			 (void *)(long)val, 1);
+	pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
+		 cpumask_pr_args(policy->cpus), val ? "en" : "dis");
 
 	return 0;
 }
@@ -175,7 +163,9 @@
 	if (ret || val > 1)
 		return -EINVAL;
 
-	set_boost(val);
+	get_online_cpus();
+	set_boost(policy, val);
+	put_online_cpus();
 
 	return count;
 }
@@ -225,6 +215,8 @@
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		msr &= AMD_MSR_RANGE;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		msr &= HYGON_MSR_RANGE;
 	else
 		msr &= INTEL_MSR_RANGE;
 
@@ -253,7 +245,7 @@
 
 static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
 {
-	u32 val, dummy;
+	u32 val, dummy __always_unused;
 
 	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
 	return val;
@@ -270,7 +262,7 @@
 
 static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
 {
-	u32 val, dummy;
+	u32 val, dummy __always_unused;
 
 	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
 	return val;
@@ -361,7 +353,7 @@
 
 	val = drv_read(data, mask);
 
-	pr_debug("get_cur_val = %u\n", val);
+	pr_debug("%s = %u\n", __func__, val);
 
 	return val;
 }
@@ -373,7 +365,7 @@
 	unsigned int freq;
 	unsigned int cached_freq;
 
-	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+	pr_debug("%s (%d)\n", __func__, cpu);
 
 	policy = cpufreq_cpu_get_raw(cpu);
 	if (unlikely(!policy))
@@ -453,8 +445,7 @@
 	if (acpi_pstate_strict) {
 		if (!check_freqs(policy, mask,
 				 policy->freq_table[index].frequency)) {
-			pr_debug("acpi_cpufreq_target failed (%d)\n",
-				policy->cpu);
+			pr_debug("%s (%d)\n", __func__, policy->cpu);
 			result = -EAGAIN;
 		}
 	}
@@ -568,7 +559,7 @@
 static int __init acpi_cpufreq_early_init(void)
 {
 	unsigned int i;
-	pr_debug("acpi_cpufreq_early_init\n");
+	pr_debug("%s\n", __func__);
 
 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
 	if (!acpi_perf_data) {
@@ -622,7 +613,7 @@
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
 	/* Intel Xeon Processor 7100 Series Specification Update
-	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
 	 * AL30: A Machine Check Exception (MCE) Occurring during an
 	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
 	 * Both Processor Cores to Lock Up. */
@@ -638,21 +629,58 @@
 }
 #endif
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+	struct cppc_perf_caps perf_caps;
+	u64 highest_perf, nominal_perf;
+	int ret;
+
+	if (acpi_pstate_strict)
+		return 0;
+
+	ret = cppc_get_perf_caps(cpu, &perf_caps);
+	if (ret) {
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+			 cpu, ret);
+		return 0;
+	}
+
+	highest_perf = perf_caps.highest_perf;
+	nominal_perf = perf_caps.nominal_perf;
+
+	if (!highest_perf || !nominal_perf) {
+		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+		return 0;
+	}
+
+	if (highest_perf < nominal_perf) {
+		pr_debug("CPU%d: nominal performance above highest\n", cpu);
+		return 0;
+	}
+
+	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	unsigned int i;
-	unsigned int valid_states = 0;
-	unsigned int cpu = policy->cpu;
-	struct acpi_cpufreq_data *data;
-	unsigned int result = 0;
-	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
-	struct acpi_processor_performance *perf;
 	struct cpufreq_frequency_table *freq_table;
+	struct acpi_processor_performance *perf;
+	struct acpi_cpufreq_data *data;
+	unsigned int cpu = policy->cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	unsigned int valid_states = 0;
+	unsigned int result = 0;
+	u64 max_boost_ratio;
+	unsigned int i;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif
 
-	pr_debug("acpi_cpufreq_cpu_init\n");
+	pr_debug("%s\n", __func__);
 
 #ifdef CONFIG_SMP
 	if (blacklisted)
@@ -795,6 +823,28 @@
 		valid_states++;
 	}
 	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+	max_boost_ratio = get_max_boost_ratio(cpu);
+	if (max_boost_ratio) {
+		unsigned int freq = freq_table[0].frequency;
+
+		/*
+		 * Because the loop above sorts the freq_table entries in the
+		 * descending order, freq is the maximum frequency in the table.
+		 * Assume that it corresponds to the CPPC nominal frequency and
+		 * use it to set cpuinfo.max_freq.
+		 */
+		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+	} else {
+		/*
+		 * If the maximum "boost" frequency is unknown, ask the arch
+		 * scale-invariance code to use the "nominal" performance for
+		 * CPU utilization scaling so as to prevent the schedutil
+		 * governor from selecting inadequate CPU frequencies.
+		 */
+		arch_set_max_freq_ratio(true);
+	}
+
 	policy->freq_table = freq_table;
 	perf->state = 0;
 
@@ -852,7 +902,7 @@
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 
-	pr_debug("acpi_cpufreq_cpu_exit\n");
+	pr_debug("%s\n", __func__);
 
 	policy->fast_switch_possible = false;
 	policy->driver_data = NULL;
@@ -868,8 +918,9 @@
 {
 	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
 							      policy->cpu);
+	unsigned int freq = policy->freq_table[0].frequency;
 
-	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+	if (perf->states[0].core_frequency * 1000 != freq)
 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
 }
 
@@ -877,7 +928,7 @@
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 
-	pr_debug("acpi_cpufreq_resume\n");
+	pr_debug("%s\n", __func__);
 
 	data->resume = 1;
 
@@ -950,7 +1001,7 @@
 	if (cpufreq_get_current_driver())
 		return -EEXIST;
 
-	pr_debug("acpi_cpufreq_init\n");
+	pr_debug("%s\n", __func__);
 
 	ret = acpi_cpufreq_early_init();
 	if (ret)
@@ -987,7 +1038,7 @@
 
 static void __exit acpi_cpufreq_exit(void)
 {
-	pr_debug("acpi_cpufreq_exit\n");
+	pr_debug("%s\n", __func__);
 
 	acpi_cpufreq_boost_exit();
 
@@ -1004,14 +1055,14 @@
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
 
-static const struct x86_cpu_id acpi_cpufreq_ids[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
-	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
+	X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
 
-static const struct acpi_device_id processor_device_ids[] = {
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
 	{ACPI_PROCESSOR_OBJECT_HID, },
 	{ACPI_PROCESSOR_DEVICE_HID, },
 	{},
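
Aside (not part of the patch): a minimal standalone sketch of the fixed-point arithmetic the get_max_boost_ratio() and cpuinfo.max_freq hunks above perform. The CPPC perf values and nominal frequency below are made-up illustrative numbers; SCHED_CAPACITY_SHIFT matches the kernel's value of 10.

// Build with: cc -o boost_ratio boost_ratio.c
#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10	/* same fixed-point scale the kernel uses */

int main(void)
{
	uint64_t highest_perf = 280;		/* hypothetical CPPC highest_perf */
	uint64_t nominal_perf = 200;		/* hypothetical CPPC nominal_perf */
	unsigned int nominal_khz = 2000000;	/* stand-in for freq_table[0].frequency */

	/* get_max_boost_ratio(): highest/nominal as a 2^10 fixed-point ratio */
	uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

	/* acpi_cpufreq_cpu_init(): scale the nominal frequency up to the boost limit */
	unsigned int max_khz = nominal_khz * ratio >> SCHED_CAPACITY_SHIFT;

	printf("boost ratio = %llu/1024, cpuinfo.max_freq = %u kHz\n",
	       (unsigned long long)ratio, max_khz);
	return 0;
}

With these inputs the ratio is 1433/1024 and max_freq comes out just under 2.8 GHz (integer truncation loses a little precision, which is acceptable since the value only seeds cpuinfo.max_freq).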