From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] cpufreq: acpi-cpufreq: add Hygon support, per-policy boost
 control and CPPC-based boost ratio handling

Replace the GPL license boilerplate with an SPDX identifier, treat the
Hygon vendor like AMD for boost control, make boost control per-policy,
use __func__ in debug messages, derive cpuinfo.max_freq from the CPPC
maximum boost ratio, and switch to the X86_MATCH_FEATURE() macros.
---
 kernel/drivers/cpufreq/acpi-cpufreq.c | 149 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 100 insertions(+), 49 deletions(-)

diff --git a/kernel/drivers/cpufreq/acpi-cpufreq.c b/kernel/drivers/cpufreq/acpi-cpufreq.c
index 9e86404..d1bbc16 100644
--- a/kernel/drivers/cpufreq/acpi-cpufreq.c
+++ b/kernel/drivers/cpufreq/acpi-cpufreq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * acpi-cpufreq.c - ACPI Processor P-States Driver
  *
@@ -5,24 +6,6 @@
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -43,10 +26,12 @@
 #include <linux/uaccess.h>
 
 #include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
 
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -61,6 +46,7 @@
 
 #define INTEL_MSR_RANGE		(0xffff)
 #define AMD_MSR_RANGE		(0x7)
+#define HYGON_MSR_RANGE		(0x7)
 
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
@@ -95,6 +81,7 @@
 		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
 		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
@@ -113,6 +100,7 @@
 		msr_addr = MSR_IA32_MISC_ENABLE;
 		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
 		break;
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		msr_addr = MSR_K7_HWCR;
 		msr_mask = MSR_K7_HWCR_CPB_DIS;
@@ -139,12 +127,12 @@
 	boost_set_msr(enable);
 }
 
-static int set_boost(int val)
+static int set_boost(struct cpufreq_policy *policy, int val)
 {
-	get_online_cpus();
-	on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
-	put_online_cpus();
-	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
+			 (void *)(long)val, 1);
+	pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
+		 cpumask_pr_args(policy->cpus), val ? "en" : "dis");
"en" : "dis"); return 0; } @@ -175,7 +163,9 @@ if (ret || val > 1) return -EINVAL; - set_boost(val); + get_online_cpus(); + set_boost(policy, val); + put_online_cpus(); return count; } @@ -225,6 +215,8 @@ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) msr &= AMD_MSR_RANGE; + else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + msr &= HYGON_MSR_RANGE; else msr &= INTEL_MSR_RANGE; @@ -253,7 +245,7 @@ static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used) { - u32 val, dummy; + u32 val, dummy __always_unused; rdmsr(MSR_IA32_PERF_CTL, val, dummy); return val; @@ -270,7 +262,7 @@ static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used) { - u32 val, dummy; + u32 val, dummy __always_unused; rdmsr(MSR_AMD_PERF_CTL, val, dummy); return val; @@ -361,7 +353,7 @@ val = drv_read(data, mask); - pr_debug("get_cur_val = %u\n", val); + pr_debug("%s = %u\n", __func__, val); return val; } @@ -373,7 +365,7 @@ unsigned int freq; unsigned int cached_freq; - pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); + pr_debug("%s (%d)\n", __func__, cpu); policy = cpufreq_cpu_get_raw(cpu); if (unlikely(!policy)) @@ -453,8 +445,7 @@ if (acpi_pstate_strict) { if (!check_freqs(policy, mask, policy->freq_table[index].frequency)) { - pr_debug("acpi_cpufreq_target failed (%d)\n", - policy->cpu); + pr_debug("%s (%d)\n", __func__, policy->cpu); result = -EAGAIN; } } @@ -568,7 +559,7 @@ static int __init acpi_cpufreq_early_init(void) { unsigned int i; - pr_debug("acpi_cpufreq_early_init\n"); + pr_debug("%s\n", __func__); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); if (!acpi_perf_data) { @@ -622,7 +613,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) { /* Intel Xeon Processor 7100 Series Specification Update - * http://www.intel.com/Assets/PDF/specupdate/314554.pdf + * https://www.intel.com/Assets/PDF/specupdate/314554.pdf * AL30: A Machine Check Exception (MCE) Occurring during an * Enhanced Intel SpeedStep Technology Ratio Change May Cause * Both Processor Cores to Lock Up. 
@@ -638,21 +629,58 @@
 }
 #endif
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+	struct cppc_perf_caps perf_caps;
+	u64 highest_perf, nominal_perf;
+	int ret;
+
+	if (acpi_pstate_strict)
+		return 0;
+
+	ret = cppc_get_perf_caps(cpu, &perf_caps);
+	if (ret) {
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+			 cpu, ret);
+		return 0;
+	}
+
+	highest_perf = perf_caps.highest_perf;
+	nominal_perf = perf_caps.nominal_perf;
+
+	if (!highest_perf || !nominal_perf) {
+		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+		return 0;
+	}
+
+	if (highest_perf < nominal_perf) {
+		pr_debug("CPU%d: nominal performance above highest\n", cpu);
+		return 0;
+	}
+
+	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	unsigned int i;
-	unsigned int valid_states = 0;
-	unsigned int cpu = policy->cpu;
-	struct acpi_cpufreq_data *data;
-	unsigned int result = 0;
-	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
-	struct acpi_processor_performance *perf;
 	struct cpufreq_frequency_table *freq_table;
+	struct acpi_processor_performance *perf;
+	struct acpi_cpufreq_data *data;
+	unsigned int cpu = policy->cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	unsigned int valid_states = 0;
+	unsigned int result = 0;
+	u64 max_boost_ratio;
+	unsigned int i;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif
 
-	pr_debug("acpi_cpufreq_cpu_init\n");
+	pr_debug("%s\n", __func__);
 
 #ifdef CONFIG_SMP
 	if (blacklisted)
@@ -795,6 +823,28 @@
 		valid_states++;
 	}
 	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+	max_boost_ratio = get_max_boost_ratio(cpu);
+	if (max_boost_ratio) {
+		unsigned int freq = freq_table[0].frequency;
+
+		/*
+		 * Because the loop above sorts the freq_table entries in the
+		 * descending order, freq is the maximum frequency in the table.
+		 * Assume that it corresponds to the CPPC nominal frequency and
+		 * use it to set cpuinfo.max_freq.
+		 */
+		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+	} else {
+		/*
+		 * If the maximum "boost" frequency is unknown, ask the arch
+		 * scale-invariance code to use the "nominal" performance for
+		 * CPU utilization scaling so as to prevent the schedutil
+		 * governor from selecting inadequate CPU frequencies.
+		 */
+		arch_set_max_freq_ratio(true);
+	}
+
 	policy->freq_table = freq_table;
 	perf->state = 0;
 
@@ -852,7 +902,7 @@
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 
-	pr_debug("acpi_cpufreq_cpu_exit\n");
+	pr_debug("%s\n", __func__);
 
 	policy->fast_switch_possible = false;
 	policy->driver_data = NULL;
@@ -868,8 +918,9 @@
 {
 	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
 							      policy->cpu);
+	unsigned int freq = policy->freq_table[0].frequency;
 
-	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+	if (perf->states[0].core_frequency * 1000 != freq)
 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
 }
 
@@ -877,7 +928,7 @@
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 
-	pr_debug("acpi_cpufreq_resume\n");
+	pr_debug("%s\n", __func__);
 
 	data->resume = 1;
 
@@ -950,7 +1001,7 @@
 	if (cpufreq_get_current_driver())
 		return -EEXIST;
 
-	pr_debug("acpi_cpufreq_init\n");
+	pr_debug("%s\n", __func__);
 
 	ret = acpi_cpufreq_early_init();
 	if (ret)
@@ -987,7 +1038,7 @@
 
 static void __exit acpi_cpufreq_exit(void)
 {
-	pr_debug("acpi_cpufreq_exit\n");
+	pr_debug("%s\n", __func__);
 
 	acpi_cpufreq_boost_exit();
 
@@ -1004,14 +1055,14 @@
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
 
-static const struct x86_cpu_id acpi_cpufreq_ids[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
-	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
+	X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
 
-static const struct acpi_device_id processor_device_ids[] = {
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
 	{ACPI_PROCESSOR_OBJECT_HID, },
 	{ACPI_PROCESSOR_DEVICE_HID, },
 	{},
-- 
Gitblit v1.6.2
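For reference, the boost-ratio arithmetic this patch introduces can be exercised on its own. The sketch below only mirrors the fixed-point math of get_max_boost_ratio() and the cpuinfo.max_freq computation in acpi_cpufreq_cpu_init(); the highest_perf, nominal_perf and frequency values are made-up examples, not data from any real CPPC table.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT 10	/* same shift the kernel code uses */

int main(void)
{
	/* Hypothetical CPPC capabilities (illustrative only). */
	uint64_t highest_perf = 228;
	uint64_t nominal_perf = 180;
	/* First freq_table entry, i.e. the nominal frequency, in kHz. */
	unsigned int nominal_freq = 2800000;

	/* highest_perf / nominal_perf as a shifted fixed-point ratio */
	uint64_t max_boost_ratio =
		(highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

	/* cpuinfo.max_freq = nominal frequency scaled by the boost ratio */
	unsigned int max_freq =
		nominal_freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;

	printf("max_boost_ratio = %llu\n", (unsigned long long)max_boost_ratio);
	printf("cpuinfo.max_freq = %u kHz\n", max_freq);
	return 0;
}

With these example numbers the ratio comes out to 1297 and cpuinfo.max_freq to 3546484 kHz, i.e. roughly a 1.27x boost over the nominal 2.8 GHz, which is the value the driver would then report as the maximum frequency.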