| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * intel_pstate.c: Native P state management for Intel processors |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * (C) Copyright 2012 Intel Corporation |
|---|
| 5 | 6 | * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> |
|---|
| 6 | | - * |
|---|
| 7 | | - * This program is free software; you can redistribute it and/or |
|---|
| 8 | | - * modify it under the terms of the GNU General Public License |
|---|
| 9 | | - * as published by the Free Software Foundation; version 2 |
|---|
| 10 | | - * of the License. |
|---|
| 11 | 7 | */ |
|---|
| 12 | 8 | |
|---|
| 13 | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
|---|
| .. | .. |
|---|
| 28 | 24 | #include <linux/fs.h> |
|---|
| 29 | 25 | #include <linux/acpi.h> |
|---|
| 30 | 26 | #include <linux/vmalloc.h> |
|---|
| 27 | +#include <linux/pm_qos.h> |
|---|
| 31 | 28 | #include <trace/events/power.h> |
|---|
| 32 | 29 | |
|---|
| 33 | 30 | #include <asm/div64.h> |
|---|
| .. | .. |
|---|
| 39 | 36 | #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) |
|---|
| 40 | 37 | |
|---|
| 41 | 38 | #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 |
|---|
| 39 | +#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000 |
|---|
| 42 | 40 | #define INTEL_CPUFREQ_TRANSITION_DELAY 500 |
|---|
| 43 | 41 | |
|---|
| 44 | 42 | #ifdef CONFIG_ACPI |
|---|
| .. | .. |
|---|
| 49 | 47 | #define FRAC_BITS 8 |
|---|
| 50 | 48 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
|---|
| 51 | 49 | #define fp_toint(X) ((X) >> FRAC_BITS) |
|---|
| 50 | + |
|---|
| 51 | +#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3)) |
|---|
| 52 | 52 | |
|---|
| 53 | 53 | #define EXT_BITS 6 |
|---|
| 54 | 54 | #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) |
|---|
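The new ONE_EIGHTH_FP constant above becomes the starting value for the reworked iowait boost later in this diff. A minimal userspace sketch of the 8-bit fixed-point scheme (the macros are copied from the hunk; the demo itself is hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))

int main(void)
{
	/* 1.0 is stored as 256, so the new 1/8 boost floor is 32. */
	printf("1.0 -> %lld, 1/8 -> %lld, round trip -> %lld\n",
	       (long long)int_tofp(1), (long long)ONE_EIGHTH_FP,
	       (long long)fp_toint(int_tofp(1)));
	return 0;
}
```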
| .. | .. |
|---|
| 173 | 173 | /** |
|---|
| 174 | 174 | * struct global_params - Global parameters, mostly tunable via sysfs. |
|---|
| 175 | 175 | * @no_turbo: Whether or not to use turbo P-states. |
|---|
| 176 | | - * @turbo_disabled: Whethet or not turbo P-states are available at all, |
|---|
| 176 | + * @turbo_disabled: Whether or not turbo P-states are available at all, |
|---|
| 177 | 177 | * based on the MSR_IA32_MISC_ENABLE value and whether or |
|---|
| 178 | 178 | * not the maximum reported turbo P-state is different from |
|---|
| 179 | 179 | * the maximum reported non-turbo one. |
|---|
| 180 | + * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. |
|---|
| 180 | 181 | * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo |
|---|
| 181 | 182 | * P-state capacity. |
|---|
| 182 | 183 | * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo |
|---|
| .. | .. |
|---|
| 185 | 186 | struct global_params { |
|---|
| 186 | 187 | bool no_turbo; |
|---|
| 187 | 188 | bool turbo_disabled; |
|---|
| 189 | + bool turbo_disabled_mf; |
|---|
| 188 | 190 | int max_perf_pct; |
|---|
| 189 | 191 | int min_perf_pct; |
|---|
| 190 | 192 | }; |
|---|
| .. | .. |
|---|
| 200 | 202 | * @pstate: Stores P state limits for this CPU |
|---|
| 201 | 203 | * @vid: Stores VID limits for this CPU |
|---|
| 202 | 204 | * @last_sample_time: Last Sample time |
|---|
| 203 | | - * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented |
|---|
| 204 | | - * This shift is a multiplier to mperf delta to |
|---|
| 205 | | - * calculate CPU busy. |
|---|
| 205 | + * @aperf_mperf_shift: APERF vs MPERF counting frequency difference |
|---|
| 206 | 206 | * @prev_aperf: Last APERF value read from APERF MSR |
|---|
| 207 | 207 | * @prev_mperf: Last MPERF value read from MPERF MSR |
|---|
| 208 | 208 | * @prev_tsc: Last timestamp counter (TSC) value |
|---|
| .. | .. |
|---|
| 219 | 219 | * @epp_policy: Last saved policy used to set EPP/EPB |
|---|
| 220 | 220 | * @epp_default: Power on default HWP energy performance |
|---|
| 221 | 221 | * preference/bias |
|---|
| 222 | | - * @epp_saved: Saved EPP/EPB during system suspend or CPU offline |
|---|
| 223 | | - * operation |
|---|
| 222 | + * @epp_cached: Cached HWP energy-performance preference value |
|---|
| 224 | 223 | * @hwp_req_cached: Cached value of the last HWP Request MSR |
|---|
| 225 | 224 | * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR |
|---|
| 226 | 225 | * @last_io_update: Last time when IO wake flag was set |
|---|
| 227 | 226 | * @sched_flags: Store scheduler flags for possible cross CPU update |
|---|
| 228 | 227 | * @hwp_boost_min: Last HWP boosted min performance |
|---|
| 228 | + * @suspended: Whether or not the driver has been suspended. |
|---|
| 229 | 229 | * |
|---|
| 230 | 230 | * This structure stores per CPU instance data for all CPUs. |
|---|
| 231 | 231 | */ |
|---|
| .. | .. |
|---|
| 257 | 257 | s16 epp_powersave; |
|---|
| 258 | 258 | s16 epp_policy; |
|---|
| 259 | 259 | s16 epp_default; |
|---|
| 260 | | - s16 epp_saved; |
|---|
| 260 | + s16 epp_cached; |
|---|
| 261 | 261 | u64 hwp_req_cached; |
|---|
| 262 | 262 | u64 hwp_cap_cached; |
|---|
| 263 | 263 | u64 last_io_update; |
|---|
| 264 | 264 | unsigned int sched_flags; |
|---|
| 265 | 265 | u32 hwp_boost_min; |
|---|
| 266 | + bool suspended; |
|---|
| 266 | 267 | }; |
|---|
| 267 | 268 | |
|---|
| 268 | 269 | static struct cpudata **all_cpu_data; |
|---|
| .. | .. |
|---|
| 274 | 275 | * @get_min: Callback to get minimum P state |
|---|
| 275 | 276 | * @get_turbo: Callback to get turbo P state |
|---|
| 276 | 277 | * @get_scaling: Callback to get frequency scaling factor |
|---|
| 278 | + * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference |
|---|
| 277 | 279 | * @get_val: Callback to convert P state to actual MSR write value |
|---|
| 278 | 280 | * @get_vid: Callback to get VID data for Atom platforms |
|---|
| 279 | 281 | * |
|---|
| .. | .. |
|---|
| 373 | 375 | } |
|---|
| 374 | 376 | } |
|---|
| 375 | 377 | } |
|---|
| 376 | | -#else |
|---|
| 378 | + |
|---|
| 379 | +static int intel_pstate_get_cppc_guranteed(int cpu) |
|---|
| 380 | +{ |
|---|
| 381 | + struct cppc_perf_caps cppc_perf; |
|---|
| 382 | + int ret; |
|---|
| 383 | + |
|---|
| 384 | + ret = cppc_get_perf_caps(cpu, &cppc_perf); |
|---|
| 385 | + if (ret) |
|---|
| 386 | + return ret; |
|---|
| 387 | + |
|---|
| 388 | + if (cppc_perf.guaranteed_perf) |
|---|
| 389 | + return cppc_perf.guaranteed_perf; |
|---|
| 390 | + |
|---|
| 391 | + return cppc_perf.nominal_perf; |
|---|
| 392 | +} |
|---|
| 393 | + |
|---|
| 394 | +#else /* CONFIG_ACPI_CPPC_LIB */ |
|---|
| 377 | 395 | static void intel_pstate_set_itmt_prio(int cpu) |
|---|
| 378 | 396 | { |
|---|
| 379 | 397 | } |
|---|
| 380 | | -#endif |
|---|
| 398 | +#endif /* CONFIG_ACPI_CPPC_LIB */ |
|---|
| 381 | 399 | |
|---|
| 382 | 400 | static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) |
|---|
| 383 | 401 | { |
|---|
| .. | .. |
|---|
| 459 | 477 | |
|---|
| 460 | 478 | acpi_processor_unregister_performance(policy->cpu); |
|---|
| 461 | 479 | } |
|---|
| 462 | | -#else |
|---|
| 480 | +#else /* CONFIG_ACPI */ |
|---|
| 463 | 481 | static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) |
|---|
| 464 | 482 | { |
|---|
| 465 | 483 | } |
|---|
| .. | .. |
|---|
| 472 | 490 | { |
|---|
| 473 | 491 | return false; |
|---|
| 474 | 492 | } |
|---|
| 475 | | -#endif |
|---|
| 493 | +#endif /* CONFIG_ACPI */ |
|---|
| 494 | + |
|---|
| 495 | +#ifndef CONFIG_ACPI_CPPC_LIB |
|---|
| 496 | +static int intel_pstate_get_cppc_guranteed(int cpu) |
|---|
| 497 | +{ |
|---|
| 498 | + return -ENOTSUPP; |
|---|
| 499 | +} |
|---|
| 500 | +#endif /* CONFIG_ACPI_CPPC_LIB */ |
|---|
| 476 | 501 | |
|---|
| 477 | 502 | static inline void update_turbo_state(void) |
|---|
| 478 | 503 | { |
|---|
| .. | .. |
|---|
| 500 | 525 | u64 epb; |
|---|
| 501 | 526 | int ret; |
|---|
| 502 | 527 | |
|---|
| 503 | | - if (!static_cpu_has(X86_FEATURE_EPB)) |
|---|
| 528 | + if (!boot_cpu_has(X86_FEATURE_EPB)) |
|---|
| 504 | 529 | return -ENXIO; |
|---|
| 505 | 530 | |
|---|
| 506 | 531 | ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); |
|---|
| .. | .. |
|---|
| 514 | 539 | { |
|---|
| 515 | 540 | s16 epp; |
|---|
| 516 | 541 | |
|---|
| 517 | | - if (static_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 542 | + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 518 | 543 | /* |
|---|
| 519 | 544 | * When hwp_req_data is 0, it means that the caller didn't read |
|---|
| 520 | 545 | * MSR_HWP_REQUEST, so read it here to get the EPP. |
|---|
| .. | .. |
|---|
| 539 | 564 | u64 epb; |
|---|
| 540 | 565 | int ret; |
|---|
| 541 | 566 | |
|---|
| 542 | | - if (!static_cpu_has(X86_FEATURE_EPB)) |
|---|
| 567 | + if (!boot_cpu_has(X86_FEATURE_EPB)) |
|---|
| 543 | 568 | return -ENXIO; |
|---|
| 544 | 569 | |
|---|
| 545 | 570 | ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb); |
|---|
| .. | .. |
|---|
| 578 | 603 | HWP_EPP_POWERSAVE |
|---|
| 579 | 604 | }; |
|---|
| 580 | 605 | |
|---|
| 581 | | -static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data) |
|---|
| 606 | +static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp) |
|---|
| 582 | 607 | { |
|---|
| 583 | 608 | s16 epp; |
|---|
| 584 | 609 | int index = -EINVAL; |
|---|
| 585 | 610 | |
|---|
| 611 | + *raw_epp = 0; |
|---|
| 586 | 612 | epp = intel_pstate_get_epp(cpu_data, 0); |
|---|
| 587 | 613 | if (epp < 0) |
|---|
| 588 | 614 | return epp; |
|---|
| 589 | 615 | |
|---|
| 590 | | - if (static_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 616 | + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 591 | 617 | if (epp == HWP_EPP_PERFORMANCE) |
|---|
| 592 | 618 | return 1; |
|---|
| 593 | | - if (epp <= HWP_EPP_BALANCE_PERFORMANCE) |
|---|
| 619 | + if (epp == HWP_EPP_BALANCE_PERFORMANCE) |
|---|
| 594 | 620 | return 2; |
|---|
| 595 | | - if (epp <= HWP_EPP_BALANCE_POWERSAVE) |
|---|
| 621 | + if (epp == HWP_EPP_BALANCE_POWERSAVE) |
|---|
| 596 | 622 | return 3; |
|---|
| 597 | | - else |
|---|
| 623 | + if (epp == HWP_EPP_POWERSAVE) |
|---|
| 598 | 624 | return 4; |
|---|
| 599 | | - } else if (static_cpu_has(X86_FEATURE_EPB)) { |
|---|
| 625 | + *raw_epp = epp; |
|---|
| 626 | + return 0; |
|---|
| 627 | + } else if (boot_cpu_has(X86_FEATURE_EPB)) { |
|---|
| 600 | 628 | /* |
|---|
| 601 | 629 | * Range: |
|---|
| 602 | 630 | * 0x00-0x03 : Performance |
|---|
| .. | .. |
|---|
| 613 | 641 | return index; |
|---|
| 614 | 642 | } |
|---|
| 615 | 643 | |
|---|
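The hunk above tightens the EPP-to-index mapping from range checks (`<=`) to exact matches, so a non-standard EPP value is now reported verbatim through *raw_epp instead of being rounded to the nearest named preference. A sketch of the resulting mapping (the numeric cases mirror the HWP_EPP_* constants and are shown here for illustration):

```c
/* Sketch only: 0, 128, 192 and 255 stand in for the HWP_EPP_* constants. */
static int epp_to_index(int epp, int *raw_epp)
{
	*raw_epp = 0;
	switch (epp) {
	case 0:   return 1;	/* HWP_EPP_PERFORMANCE */
	case 128: return 2;	/* HWP_EPP_BALANCE_PERFORMANCE */
	case 192: return 3;	/* HWP_EPP_BALANCE_POWERSAVE */
	case 255: return 4;	/* HWP_EPP_POWERSAVE */
	default:
		*raw_epp = epp;	/* anything else is reported as a number */
		return 0;
	}
}
```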
| 644 | +static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) |
|---|
| 645 | +{ |
|---|
| 646 | + int ret; |
|---|
| 647 | + |
|---|
| 648 | + /* |
|---|
| 649 | + * Use the cached HWP Request MSR value, because in the active mode the |
|---|
| 650 | + * register itself may be updated by intel_pstate_hwp_boost_up() or |
|---|
| 651 | + * intel_pstate_hwp_boost_down() at any time. |
|---|
| 652 | + */ |
|---|
| 653 | + u64 value = READ_ONCE(cpu->hwp_req_cached); |
|---|
| 654 | + |
|---|
| 655 | + value &= ~GENMASK_ULL(31, 24); |
|---|
| 656 | + value |= (u64)epp << 24; |
|---|
| 657 | + /* |
|---|
| 658 | + * The only other updater of hwp_req_cached in the active mode, |
|---|
| 659 | + * intel_pstate_hwp_set(), is called under the same lock as this |
|---|
| 660 | + * function, so it cannot run in parallel with the update below. |
|---|
| 661 | + */ |
|---|
| 662 | + WRITE_ONCE(cpu->hwp_req_cached, value); |
|---|
| 663 | + ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); |
|---|
| 664 | + if (!ret) |
|---|
| 665 | + cpu->epp_cached = epp; |
|---|
| 666 | + |
|---|
| 667 | + return ret; |
|---|
| 668 | +} |
|---|
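intel_pstate_set_epp() above does a read-modify-write of the cached MSR_HWP_REQUEST image; the EPP byte occupies bits 31:24. A standalone sketch of that bit manipulation (GENMASK_ULL(31, 24) replaced by an explicit mask for illustration):

```c
#include <stdint.h>

#define HWP_EPP_MASK (((uint64_t)0xff) << 24)	/* bits 31:24 of HWP_REQUEST */

static uint64_t hwp_req_with_epp(uint64_t hwp_req, uint32_t epp)
{
	hwp_req &= ~HWP_EPP_MASK;	/* clear the old EPP byte */
	hwp_req |= (uint64_t)epp << 24;	/* install the new one */
	return hwp_req;
}
```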
| 669 | + |
|---|
| 616 | 670 | static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, |
|---|
| 617 | | - int pref_index) |
|---|
| 671 | + int pref_index, bool use_raw, |
|---|
| 672 | + u32 raw_epp) |
|---|
| 618 | 673 | { |
|---|
| 619 | 674 | int epp = -EINVAL; |
|---|
| 620 | 675 | int ret; |
|---|
| .. | .. |
|---|
| 622 | 677 | if (!pref_index) |
|---|
| 623 | 678 | epp = cpu_data->epp_default; |
|---|
| 624 | 679 | |
|---|
| 625 | | - mutex_lock(&intel_pstate_limits_lock); |
|---|
| 626 | | - |
|---|
| 627 | | - if (static_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 628 | | - u64 value; |
|---|
| 629 | | - |
|---|
| 630 | | - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); |
|---|
| 631 | | - if (ret) |
|---|
| 632 | | - goto return_pref; |
|---|
| 633 | | - |
|---|
| 634 | | - value &= ~GENMASK_ULL(31, 24); |
|---|
| 635 | | - |
|---|
| 636 | | - if (epp == -EINVAL) |
|---|
| 680 | + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 681 | + if (use_raw) |
|---|
| 682 | + epp = raw_epp; |
|---|
| 683 | + else if (epp == -EINVAL) |
|---|
| 637 | 684 | epp = epp_values[pref_index - 1]; |
|---|
| 638 | 685 | |
|---|
| 639 | | - value |= (u64)epp << 24; |
|---|
| 640 | | - ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); |
|---|
| 686 | + /* |
|---|
| 687 | + * To avoid confusion, refuse to set EPP to any values different |
|---|
| 688 | + * from 0 (performance) if the current policy is "performance", |
|---|
| 689 | + * because those values would be overridden. |
|---|
| 690 | + */ |
|---|
| 691 | + if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) |
|---|
| 692 | + return -EBUSY; |
|---|
| 693 | + |
|---|
| 694 | + ret = intel_pstate_set_epp(cpu_data, epp); |
|---|
| 641 | 695 | } else { |
|---|
| 642 | 696 | if (epp == -EINVAL) |
|---|
| 643 | 697 | epp = (pref_index - 1) << 2; |
|---|
| 644 | 698 | ret = intel_pstate_set_epb(cpu_data->cpu, epp); |
|---|
| 645 | 699 | } |
|---|
| 646 | | -return_pref: |
|---|
| 647 | | - mutex_unlock(&intel_pstate_limits_lock); |
|---|
| 648 | 700 | |
|---|
| 649 | 701 | return ret; |
|---|
| 650 | 702 | } |
|---|
| .. | .. |
|---|
| 665 | 717 | |
|---|
| 666 | 718 | cpufreq_freq_attr_ro(energy_performance_available_preferences); |
|---|
| 667 | 719 | |
|---|
| 720 | +static struct cpufreq_driver intel_pstate; |
|---|
| 721 | + |
|---|
| 668 | 722 | static ssize_t store_energy_performance_preference( |
|---|
| 669 | 723 | struct cpufreq_policy *policy, const char *buf, size_t count) |
|---|
| 670 | 724 | { |
|---|
| 671 | | - struct cpudata *cpu_data = all_cpu_data[policy->cpu]; |
|---|
| 725 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 672 | 726 | char str_preference[21]; |
|---|
| 673 | | - int ret; |
|---|
| 727 | + bool raw = false; |
|---|
| 728 | + ssize_t ret; |
|---|
| 729 | + u32 epp = 0; |
|---|
| 674 | 730 | |
|---|
| 675 | 731 | ret = sscanf(buf, "%20s", str_preference); |
|---|
| 676 | 732 | if (ret != 1) |
|---|
| 677 | 733 | return -EINVAL; |
|---|
| 678 | 734 | |
|---|
| 679 | 735 | ret = match_string(energy_perf_strings, -1, str_preference); |
|---|
| 680 | | - if (ret < 0) |
|---|
| 681 | | - return ret; |
|---|
| 736 | + if (ret < 0) { |
|---|
| 737 | + if (!boot_cpu_has(X86_FEATURE_HWP_EPP)) |
|---|
| 738 | + return ret; |
|---|
| 682 | 739 | |
|---|
| 683 | | - intel_pstate_set_energy_pref_index(cpu_data, ret); |
|---|
| 684 | | - return count; |
|---|
| 740 | + ret = kstrtouint(buf, 10, &epp); |
|---|
| 741 | + if (ret) |
|---|
| 742 | + return ret; |
|---|
| 743 | + |
|---|
| 744 | + if (epp > 255) |
|---|
| 745 | + return -EINVAL; |
|---|
| 746 | + |
|---|
| 747 | + raw = true; |
|---|
| 748 | + } |
|---|
| 749 | + |
|---|
| 750 | + /* |
|---|
| 751 | + * This function runs with the policy R/W semaphore held, which |
|---|
| 752 | + * guarantees that the driver pointer will not change while it is |
|---|
| 753 | + * running. |
|---|
| 754 | + */ |
|---|
| 755 | + if (!intel_pstate_driver) |
|---|
| 756 | + return -EAGAIN; |
|---|
| 757 | + |
|---|
| 758 | + mutex_lock(&intel_pstate_limits_lock); |
|---|
| 759 | + |
|---|
| 760 | + if (intel_pstate_driver == &intel_pstate) { |
|---|
| 761 | + ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); |
|---|
| 762 | + } else { |
|---|
| 763 | + /* |
|---|
| 764 | + * In the passive mode the governor needs to be stopped on the |
|---|
| 765 | + * target CPU before the EPP update and restarted after it, |
|---|
| 766 | + * which is super-heavy-weight, so make sure it is worth doing |
|---|
| 767 | + * upfront. |
|---|
| 768 | + */ |
|---|
| 769 | + if (!raw) |
|---|
| 770 | + epp = ret ? epp_values[ret - 1] : cpu->epp_default; |
|---|
| 771 | + |
|---|
| 772 | + if (cpu->epp_cached != epp) { |
|---|
| 773 | + int err; |
|---|
| 774 | + |
|---|
| 775 | + cpufreq_stop_governor(policy); |
|---|
| 776 | + ret = intel_pstate_set_epp(cpu, epp); |
|---|
| 777 | + err = cpufreq_start_governor(policy); |
|---|
| 778 | + if (!ret) |
|---|
| 779 | + ret = err; |
|---|
| 780 | + } |
|---|
| 781 | + } |
|---|
| 782 | + |
|---|
| 783 | + mutex_unlock(&intel_pstate_limits_lock); |
|---|
| 784 | + |
|---|
| 785 | + return ret ?: count; |
|---|
| 685 | 786 | } |
|---|
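With the change above, the sysfs attribute accepts either one of the named preferences or, on HWP-EPP systems, a raw number in [0, 255]. A hypothetical userspace sketch of the raw interface (the policy0 path is illustrative):

```c
#include <fcntl.h>
#include <unistd.h>

/* Write a raw EPP of 64; values above 255 are rejected with -EINVAL. */
static int set_raw_epp(void)
{
	int fd = open("/sys/devices/system/cpu/cpufreq/policy0/"
		      "energy_performance_preference", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "64", 2);
	close(fd);
	return n == 2 ? 0 : -1;
}
```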
| 686 | 787 | |
|---|
| 687 | 788 | static ssize_t show_energy_performance_preference( |
|---|
| 688 | 789 | struct cpufreq_policy *policy, char *buf) |
|---|
| 689 | 790 | { |
|---|
| 690 | 791 | struct cpudata *cpu_data = all_cpu_data[policy->cpu]; |
|---|
| 691 | | - int preference; |
|---|
| 792 | + int preference, raw_epp; |
|---|
| 692 | 793 | |
|---|
| 693 | | - preference = intel_pstate_get_energy_pref_index(cpu_data); |
|---|
| 794 | + preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp); |
|---|
| 694 | 795 | if (preference < 0) |
|---|
| 695 | 796 | return preference; |
|---|
| 696 | 797 | |
|---|
| 697 | | - return sprintf(buf, "%s\n", energy_perf_strings[preference]); |
|---|
| 798 | + if (raw_epp) |
|---|
| 799 | + return sprintf(buf, "%d\n", raw_epp); |
|---|
| 800 | + else |
|---|
| 801 | + return sprintf(buf, "%s\n", energy_perf_strings[preference]); |
|---|
| 698 | 802 | } |
|---|
| 699 | 803 | |
|---|
| 700 | 804 | cpufreq_freq_attr_rw(energy_performance_preference); |
|---|
| 701 | 805 | |
|---|
| 806 | +static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) |
|---|
| 807 | +{ |
|---|
| 808 | + struct cpudata *cpu; |
|---|
| 809 | + u64 cap; |
|---|
| 810 | + int ratio; |
|---|
| 811 | + |
|---|
| 812 | + ratio = intel_pstate_get_cppc_guranteed(policy->cpu); |
|---|
| 813 | + if (ratio <= 0) { |
|---|
| 814 | + rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); |
|---|
| 815 | + ratio = HWP_GUARANTEED_PERF(cap); |
|---|
| 816 | + } |
|---|
| 817 | + |
|---|
| 818 | + cpu = all_cpu_data[policy->cpu]; |
|---|
| 819 | + |
|---|
| 820 | + return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling); |
|---|
| 821 | +} |
|---|
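The new base_frequency attribute above prefers the CPPC guaranteed ratio and falls back to HWP_CAPABILITIES; the ratio is then scaled to kHz. A sketch of that fallback, with illustrative numbers (scaling is typically 100000 on core platforms):

```c
/* e.g. ratio 24 with scaling 100000 reads back as 2400000 kHz. */
static int base_frequency_khz(int cppc_ratio, int hwp_guaranteed, int scaling)
{
	int ratio = cppc_ratio > 0 ? cppc_ratio : hwp_guaranteed;

	return ratio * scaling;
}
```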
| 822 | + |
|---|
| 823 | +cpufreq_freq_attr_ro(base_frequency); |
|---|
| 824 | + |
|---|
| 702 | 825 | static struct freq_attr *hwp_cpufreq_attrs[] = { |
|---|
| 703 | 826 | &energy_performance_preference, |
|---|
| 704 | 827 | &energy_performance_available_preferences, |
|---|
| 828 | + &base_frequency, |
|---|
| 705 | 829 | NULL, |
|---|
| 706 | 830 | }; |
|---|
| 707 | 831 | |
|---|
| 708 | | -static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, |
|---|
| 832 | +static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max, |
|---|
| 709 | 833 | int *current_max) |
|---|
| 710 | 834 | { |
|---|
| 711 | 835 | u64 cap; |
|---|
| 712 | 836 | |
|---|
| 713 | | - rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); |
|---|
| 714 | | - WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); |
|---|
| 837 | + rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); |
|---|
| 838 | + WRITE_ONCE(cpu->hwp_cap_cached, cap); |
|---|
| 715 | 839 | if (global.no_turbo || global.turbo_disabled) |
|---|
| 716 | 840 | *current_max = HWP_GUARANTEED_PERF(cap); |
|---|
| 717 | 841 | else |
|---|
| .. | .. |
|---|
| 746 | 870 | |
|---|
| 747 | 871 | cpu_data->epp_policy = cpu_data->policy; |
|---|
| 748 | 872 | |
|---|
| 749 | | - if (cpu_data->epp_saved >= 0) { |
|---|
| 750 | | - epp = cpu_data->epp_saved; |
|---|
| 751 | | - cpu_data->epp_saved = -EINVAL; |
|---|
| 752 | | - goto update_epp; |
|---|
| 753 | | - } |
|---|
| 754 | | - |
|---|
| 755 | 873 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { |
|---|
| 756 | 874 | epp = intel_pstate_get_epp(cpu_data, value); |
|---|
| 757 | 875 | cpu_data->epp_powersave = epp; |
|---|
| .. | .. |
|---|
| 778 | 896 | |
|---|
| 779 | 897 | epp = cpu_data->epp_powersave; |
|---|
| 780 | 898 | } |
|---|
| 781 | | -update_epp: |
|---|
| 782 | | - if (static_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 899 | + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 783 | 900 | value &= ~GENMASK_ULL(31, 24); |
|---|
| 784 | 901 | value |= (u64)epp << 24; |
|---|
| 785 | 902 | } else { |
|---|
| .. | .. |
|---|
| 790 | 907 | wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); |
|---|
| 791 | 908 | } |
|---|
| 792 | 909 | |
|---|
| 793 | | -static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) |
|---|
| 910 | +static void intel_pstate_hwp_offline(struct cpudata *cpu) |
|---|
| 794 | 911 | { |
|---|
| 795 | | - struct cpudata *cpu_data = all_cpu_data[policy->cpu]; |
|---|
| 912 | + u64 value = READ_ONCE(cpu->hwp_req_cached); |
|---|
| 913 | + int min_perf; |
|---|
| 796 | 914 | |
|---|
| 797 | | - if (!hwp_active) |
|---|
| 798 | | - return 0; |
|---|
| 915 | + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
|---|
| 916 | + /* |
|---|
| 917 | + * In case the EPP has been set to "performance" by the |
|---|
| 918 | + * active mode "performance" scaling algorithm, replace that |
|---|
| 919 | + * temporary value with the cached EPP one. |
|---|
| 920 | + */ |
|---|
| 921 | + value &= ~GENMASK_ULL(31, 24); |
|---|
| 922 | + value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); |
|---|
| 923 | + WRITE_ONCE(cpu->hwp_req_cached, value); |
|---|
| 924 | + } |
|---|
| 799 | 925 | |
|---|
| 800 | | - cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); |
|---|
| 926 | + value &= ~GENMASK_ULL(31, 0); |
|---|
| 927 | + min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached); |
|---|
| 801 | 928 | |
|---|
| 802 | | - return 0; |
|---|
| 929 | + /* Set hwp_max = hwp_min */ |
|---|
| 930 | + value |= HWP_MAX_PERF(min_perf); |
|---|
| 931 | + value |= HWP_MIN_PERF(min_perf); |
|---|
| 932 | + |
|---|
| 933 | + /* Set EPP to min */ |
|---|
| 934 | + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) |
|---|
| 935 | + value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); |
|---|
| 936 | + |
|---|
| 937 | + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); |
|---|
| 938 | +} |
|---|
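intel_pstate_hwp_offline() above pins both performance bounds to the lowest level so an offline SMT thread cannot hold its sibling's floor up. A sketch of the request value it builds (field layout per the HWP spec: min in bits 7:0, max in 15:8, EPP in 31:24; the helper is illustrative, not the kernel's HWP_*() macros):

```c
#include <stdint.h>

static uint64_t hwp_offline_request(uint8_t lowest_perf)
{
	uint64_t value = 0;

	value |= (uint64_t)lowest_perf;		/* HWP_MIN_PERF, bits 7:0 */
	value |= (uint64_t)lowest_perf << 8;	/* HWP_MAX_PERF, bits 15:8 */
	value |= (uint64_t)0xff << 24;		/* EPP = powersave (0xff) */
	return value;
}
```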
| 939 | + |
|---|
| 940 | +#define POWER_CTL_EE_ENABLE 1 |
|---|
| 941 | +#define POWER_CTL_EE_DISABLE 2 |
|---|
| 942 | + |
|---|
| 943 | +static int power_ctl_ee_state; |
|---|
| 944 | + |
|---|
| 945 | +static void set_power_ctl_ee_state(bool input) |
|---|
| 946 | +{ |
|---|
| 947 | + u64 power_ctl; |
|---|
| 948 | + |
|---|
| 949 | + mutex_lock(&intel_pstate_driver_lock); |
|---|
| 950 | + rdmsrl(MSR_IA32_POWER_CTL, power_ctl); |
|---|
| 951 | + if (input) { |
|---|
| 952 | + power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); |
|---|
| 953 | + power_ctl_ee_state = POWER_CTL_EE_ENABLE; |
|---|
| 954 | + } else { |
|---|
| 955 | + power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); |
|---|
| 956 | + power_ctl_ee_state = POWER_CTL_EE_DISABLE; |
|---|
| 957 | + } |
|---|
| 958 | + wrmsrl(MSR_IA32_POWER_CTL, power_ctl); |
|---|
| 959 | + mutex_unlock(&intel_pstate_driver_lock); |
|---|
| 803 | 960 | } |
|---|
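Note the polarity in set_power_ctl_ee_state() above: bit 19 of MSR_IA32_POWER_CTL set means the energy-efficiency optimization is disabled, so enabling it clears the bit. A plain-C sketch of the toggle (a stand-in for the rdmsrl()/wrmsrl() pair):

```c
#include <stdbool.h>
#include <stdint.h>

#define POWER_CTL_EE_BIT ((uint64_t)1 << 19)	/* MSR_IA32_POWER_CTL_BIT_EE */

static uint64_t apply_ee_state(uint64_t power_ctl, bool enable_ee)
{
	if (enable_ee)
		power_ctl &= ~POWER_CTL_EE_BIT;	/* clear: optimization on */
	else
		power_ctl |= POWER_CTL_EE_BIT;	/* set: optimization off */
	return power_ctl;
}
```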
| 804 | 961 | |
|---|
| 805 | 962 | static void intel_pstate_hwp_enable(struct cpudata *cpudata); |
|---|
| 806 | 963 | |
|---|
| 964 | +static void intel_pstate_hwp_reenable(struct cpudata *cpu) |
|---|
| 965 | +{ |
|---|
| 966 | + intel_pstate_hwp_enable(cpu); |
|---|
| 967 | + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); |
|---|
| 968 | +} |
|---|
| 969 | + |
|---|
| 970 | +static int intel_pstate_suspend(struct cpufreq_policy *policy) |
|---|
| 971 | +{ |
|---|
| 972 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 973 | + |
|---|
| 974 | + pr_debug("CPU %d suspending\n", cpu->cpu); |
|---|
| 975 | + |
|---|
| 976 | + cpu->suspended = true; |
|---|
| 977 | + |
|---|
| 978 | + return 0; |
|---|
| 979 | +} |
|---|
| 980 | + |
|---|
| 807 | 981 | static int intel_pstate_resume(struct cpufreq_policy *policy) |
|---|
| 808 | 982 | { |
|---|
| 809 | | - if (!hwp_active) |
|---|
| 810 | | - return 0; |
|---|
| 983 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 811 | 984 | |
|---|
| 812 | | - mutex_lock(&intel_pstate_limits_lock); |
|---|
| 985 | + pr_debug("CPU %d resuming\n", cpu->cpu); |
|---|
| 813 | 986 | |
|---|
| 814 | | - if (policy->cpu == 0) |
|---|
| 815 | | - intel_pstate_hwp_enable(all_cpu_data[policy->cpu]); |
|---|
| 987 | + /* Only restore if the system default has been changed */ |
|---|
| 988 | + if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) |
|---|
| 989 | + set_power_ctl_ee_state(true); |
|---|
| 990 | + else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) |
|---|
| 991 | + set_power_ctl_ee_state(false); |
|---|
| 816 | 992 | |
|---|
| 817 | | - all_cpu_data[policy->cpu]->epp_policy = 0; |
|---|
| 818 | | - intel_pstate_hwp_set(policy->cpu); |
|---|
| 993 | + if (cpu->suspended && hwp_active) { |
|---|
| 994 | + mutex_lock(&intel_pstate_limits_lock); |
|---|
| 819 | 995 | |
|---|
| 820 | | - mutex_unlock(&intel_pstate_limits_lock); |
|---|
| 996 | + /* Re-enable HWP, because "online" has not done that. */ |
|---|
| 997 | + intel_pstate_hwp_reenable(cpu); |
|---|
| 998 | + |
|---|
| 999 | + mutex_unlock(&intel_pstate_limits_lock); |
|---|
| 1000 | + } |
|---|
| 1001 | + |
|---|
| 1002 | + cpu->suspended = false; |
|---|
| 821 | 1003 | |
|---|
| 822 | 1004 | return 0; |
|---|
| 823 | 1005 | } |
|---|
| .. | .. |
|---|
| 828 | 1010 | |
|---|
| 829 | 1011 | for_each_possible_cpu(cpu) |
|---|
| 830 | 1012 | cpufreq_update_policy(cpu); |
|---|
| 1013 | +} |
|---|
| 1014 | + |
|---|
| 1015 | +static void intel_pstate_update_max_freq(unsigned int cpu) |
|---|
| 1016 | +{ |
|---|
| 1017 | + struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); |
|---|
| 1018 | + struct cpudata *cpudata; |
|---|
| 1019 | + |
|---|
| 1020 | + if (!policy) |
|---|
| 1021 | + return; |
|---|
| 1022 | + |
|---|
| 1023 | + cpudata = all_cpu_data[cpu]; |
|---|
| 1024 | + policy->cpuinfo.max_freq = global.turbo_disabled_mf ? |
|---|
| 1025 | + cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; |
|---|
| 1026 | + |
|---|
| 1027 | + refresh_frequency_limits(policy); |
|---|
| 1028 | + |
|---|
| 1029 | + cpufreq_cpu_release(policy); |
|---|
| 1030 | +} |
|---|
| 1031 | + |
|---|
| 1032 | +static void intel_pstate_update_limits(unsigned int cpu) |
|---|
| 1033 | +{ |
|---|
| 1034 | + mutex_lock(&intel_pstate_driver_lock); |
|---|
| 1035 | + |
|---|
| 1036 | + update_turbo_state(); |
|---|
| 1037 | + /* |
|---|
| 1038 | + * If turbo has been turned on or off globally, policy limits for |
|---|
| 1039 | + * all CPUs need to be updated to reflect that. |
|---|
| 1040 | + */ |
|---|
| 1041 | + if (global.turbo_disabled_mf != global.turbo_disabled) { |
|---|
| 1042 | + global.turbo_disabled_mf = global.turbo_disabled; |
|---|
| 1043 | + arch_set_max_freq_ratio(global.turbo_disabled); |
|---|
| 1044 | + for_each_possible_cpu(cpu) |
|---|
| 1045 | + intel_pstate_update_max_freq(cpu); |
|---|
| 1046 | + } else { |
|---|
| 1047 | + cpufreq_update_policy(cpu); |
|---|
| 1048 | + } |
|---|
| 1049 | + |
|---|
| 1050 | + mutex_unlock(&intel_pstate_driver_lock); |
|---|
| 831 | 1051 | } |
|---|
| 832 | 1052 | |
|---|
| 833 | 1053 | /************************** sysfs begin ************************/ |
|---|
| .. | .. |
|---|
| 983 | 1203 | return count; |
|---|
| 984 | 1204 | } |
|---|
| 985 | 1205 | |
|---|
| 1206 | +static void update_qos_request(enum freq_qos_req_type type) |
|---|
| 1207 | +{ |
|---|
| 1208 | + int max_state, turbo_max, freq, i, perf_pct; |
|---|
| 1209 | + struct freq_qos_request *req; |
|---|
| 1210 | + struct cpufreq_policy *policy; |
|---|
| 1211 | + |
|---|
| 1212 | + for_each_possible_cpu(i) { |
|---|
| 1213 | + struct cpudata *cpu = all_cpu_data[i]; |
|---|
| 1214 | + |
|---|
| 1215 | + policy = cpufreq_cpu_get(i); |
|---|
| 1216 | + if (!policy) |
|---|
| 1217 | + continue; |
|---|
| 1218 | + |
|---|
| 1219 | + req = policy->driver_data; |
|---|
| 1220 | + cpufreq_cpu_put(policy); |
|---|
| 1221 | + |
|---|
| 1222 | + if (!req) |
|---|
| 1223 | + continue; |
|---|
| 1224 | + |
|---|
| 1225 | + if (hwp_active) |
|---|
| 1226 | + intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); |
|---|
| 1227 | + else |
|---|
| 1228 | + turbo_max = cpu->pstate.turbo_pstate; |
|---|
| 1229 | + |
|---|
| 1230 | + if (type == FREQ_QOS_MIN) { |
|---|
| 1231 | + perf_pct = global.min_perf_pct; |
|---|
| 1232 | + } else { |
|---|
| 1233 | + req++; |
|---|
| 1234 | + perf_pct = global.max_perf_pct; |
|---|
| 1235 | + } |
|---|
| 1236 | + |
|---|
| 1237 | + freq = DIV_ROUND_UP(turbo_max * perf_pct, 100); |
|---|
| 1238 | + freq *= cpu->pstate.scaling; |
|---|
| 1239 | + |
|---|
| 1240 | + if (freq_qos_update_request(req, freq) < 0) |
|---|
| 1241 | + pr_warn("Failed to update freq constraint: CPU%d\n", i); |
|---|
| 1242 | + } |
|---|
| 1243 | +} |
|---|
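In the passive mode the min/max percentages are now applied through freq-QoS requests rather than policy updates. The constraint frequency follows the arithmetic below; for example, turbo_max = 30, perf_pct = 50 and scaling = 100000 yield 1500000 kHz (DIV_ROUND_UP mirrors the kernel macro):

```c
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int qos_freq_khz(int turbo_max, int perf_pct, int scaling)
{
	return DIV_ROUND_UP(turbo_max * perf_pct, 100) * scaling;
}
```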
| 1244 | + |
|---|
| 986 | 1245 | static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, |
|---|
| 987 | 1246 | const char *buf, size_t count) |
|---|
| 988 | 1247 | { |
|---|
| .. | .. |
|---|
| 1006 | 1265 | |
|---|
| 1007 | 1266 | mutex_unlock(&intel_pstate_limits_lock); |
|---|
| 1008 | 1267 | |
|---|
| 1009 | | - intel_pstate_update_policies(); |
|---|
| 1268 | + if (intel_pstate_driver == &intel_pstate) |
|---|
| 1269 | + intel_pstate_update_policies(); |
|---|
| 1270 | + else |
|---|
| 1271 | + update_qos_request(FREQ_QOS_MAX); |
|---|
| 1010 | 1272 | |
|---|
| 1011 | 1273 | mutex_unlock(&intel_pstate_driver_lock); |
|---|
| 1012 | 1274 | |
|---|
| .. | .. |
|---|
| 1037 | 1299 | |
|---|
| 1038 | 1300 | mutex_unlock(&intel_pstate_limits_lock); |
|---|
| 1039 | 1301 | |
|---|
| 1040 | | - intel_pstate_update_policies(); |
|---|
| 1302 | + if (intel_pstate_driver == &intel_pstate) |
|---|
| 1303 | + intel_pstate_update_policies(); |
|---|
| 1304 | + else |
|---|
| 1305 | + update_qos_request(FREQ_QOS_MIN); |
|---|
| 1041 | 1306 | |
|---|
| 1042 | 1307 | mutex_unlock(&intel_pstate_driver_lock); |
|---|
| 1043 | 1308 | |
|---|
| .. | .. |
|---|
| 1069 | 1334 | return count; |
|---|
| 1070 | 1335 | } |
|---|
| 1071 | 1336 | |
|---|
| 1337 | +static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, |
|---|
| 1338 | + char *buf) |
|---|
| 1339 | +{ |
|---|
| 1340 | + u64 power_ctl; |
|---|
| 1341 | + int enable; |
|---|
| 1342 | + |
|---|
| 1343 | + rdmsrl(MSR_IA32_POWER_CTL, power_ctl); |
|---|
| 1344 | + enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); |
|---|
| 1345 | + return sprintf(buf, "%d\n", !enable); |
|---|
| 1346 | +} |
|---|
| 1347 | + |
|---|
| 1348 | +static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, |
|---|
| 1349 | + const char *buf, size_t count) |
|---|
| 1350 | +{ |
|---|
| 1351 | + bool input; |
|---|
| 1352 | + int ret; |
|---|
| 1353 | + |
|---|
| 1354 | + ret = kstrtobool(buf, &input); |
|---|
| 1355 | + if (ret) |
|---|
| 1356 | + return ret; |
|---|
| 1357 | + |
|---|
| 1358 | + set_power_ctl_ee_state(input); |
|---|
| 1359 | + |
|---|
| 1360 | + return count; |
|---|
| 1361 | +} |
|---|
| 1362 | + |
|---|
| 1072 | 1363 | show_one(max_perf_pct, max_perf_pct); |
|---|
| 1073 | 1364 | show_one(min_perf_pct, min_perf_pct); |
|---|
| 1074 | 1365 | |
|---|
| .. | .. |
|---|
| 1079 | 1370 | define_one_global_ro(turbo_pct); |
|---|
| 1080 | 1371 | define_one_global_ro(num_pstates); |
|---|
| 1081 | 1372 | define_one_global_rw(hwp_dynamic_boost); |
|---|
| 1373 | +define_one_global_rw(energy_efficiency); |
|---|
| 1082 | 1374 | |
|---|
| 1083 | 1375 | static struct attribute *intel_pstate_attributes[] = { |
|---|
| 1084 | 1376 | &status.attr, |
|---|
| .. | .. |
|---|
| 1092 | 1384 | .attrs = intel_pstate_attributes, |
|---|
| 1093 | 1385 | }; |
|---|
| 1094 | 1386 | |
|---|
| 1387 | +static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; |
|---|
| 1388 | + |
|---|
| 1389 | +static struct kobject *intel_pstate_kobject; |
|---|
| 1390 | + |
|---|
| 1095 | 1391 | static void __init intel_pstate_sysfs_expose_params(void) |
|---|
| 1096 | 1392 | { |
|---|
| 1097 | | - struct kobject *intel_pstate_kobject; |
|---|
| 1098 | 1393 | int rc; |
|---|
| 1099 | 1394 | |
|---|
| 1100 | 1395 | intel_pstate_kobject = kobject_create_and_add("intel_pstate", |
|---|
| .. | .. |
|---|
| 1119 | 1414 | rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); |
|---|
| 1120 | 1415 | WARN_ON(rc); |
|---|
| 1121 | 1416 | |
|---|
| 1122 | | - if (hwp_active) { |
|---|
| 1123 | | - rc = sysfs_create_file(intel_pstate_kobject, |
|---|
| 1124 | | - &hwp_dynamic_boost.attr); |
|---|
| 1417 | + if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { |
|---|
| 1418 | + rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); |
|---|
| 1125 | 1419 | WARN_ON(rc); |
|---|
| 1126 | 1420 | } |
|---|
| 1127 | 1421 | } |
|---|
| 1422 | + |
|---|
| 1423 | +static void __init intel_pstate_sysfs_remove(void) |
|---|
| 1424 | +{ |
|---|
| 1425 | + if (!intel_pstate_kobject) |
|---|
| 1426 | + return; |
|---|
| 1427 | + |
|---|
| 1428 | + sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group); |
|---|
| 1429 | + |
|---|
| 1430 | + if (!per_cpu_limits) { |
|---|
| 1431 | + sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr); |
|---|
| 1432 | + sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr); |
|---|
| 1433 | + |
|---|
| 1434 | + if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) |
|---|
| 1435 | + sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr); |
|---|
| 1436 | + } |
|---|
| 1437 | + |
|---|
| 1438 | + kobject_put(intel_pstate_kobject); |
|---|
| 1439 | +} |
|---|
| 1440 | + |
|---|
| 1441 | +static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) |
|---|
| 1442 | +{ |
|---|
| 1443 | + int rc; |
|---|
| 1444 | + |
|---|
| 1445 | + if (!hwp_active) |
|---|
| 1446 | + return; |
|---|
| 1447 | + |
|---|
| 1448 | + rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); |
|---|
| 1449 | + WARN_ON_ONCE(rc); |
|---|
| 1450 | +} |
|---|
| 1451 | + |
|---|
| 1452 | +static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) |
|---|
| 1453 | +{ |
|---|
| 1454 | + if (!hwp_active) |
|---|
| 1455 | + return; |
|---|
| 1456 | + |
|---|
| 1457 | + sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); |
|---|
| 1458 | +} |
|---|
| 1459 | + |
|---|
| 1128 | 1460 | /************************** sysfs end ************************/ |
|---|
| 1129 | 1461 | |
|---|
| 1130 | 1462 | static void intel_pstate_hwp_enable(struct cpudata *cpudata) |
|---|
| 1131 | 1463 | { |
|---|
| 1132 | 1464 | /* First disable HWP notification interrupts, as we don't process them */ |
|---|
| 1133 | | - if (static_cpu_has(X86_FEATURE_HWP_NOTIFY)) |
|---|
| 1465 | + if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) |
|---|
| 1134 | 1466 | wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); |
|---|
| 1135 | 1467 | |
|---|
| 1136 | 1468 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); |
|---|
| 1137 | | - cpudata->epp_policy = 0; |
|---|
| 1138 | 1469 | if (cpudata->epp_default == -EINVAL) |
|---|
| 1139 | 1470 | cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); |
|---|
| 1140 | | -} |
|---|
| 1141 | | - |
|---|
| 1142 | | -#define MSR_IA32_POWER_CTL_BIT_EE 19 |
|---|
| 1143 | | - |
|---|
| 1144 | | -/* Disable energy efficiency optimization */ |
|---|
| 1145 | | -static void intel_pstate_disable_ee(int cpu) |
|---|
| 1146 | | -{ |
|---|
| 1147 | | - u64 power_ctl; |
|---|
| 1148 | | - int ret; |
|---|
| 1149 | | - |
|---|
| 1150 | | - ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl); |
|---|
| 1151 | | - if (ret) |
|---|
| 1152 | | - return; |
|---|
| 1153 | | - |
|---|
| 1154 | | - if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) { |
|---|
| 1155 | | - pr_info("Disabling energy efficiency optimization\n"); |
|---|
| 1156 | | - power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); |
|---|
| 1157 | | - wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl); |
|---|
| 1158 | | - } |
|---|
| 1159 | 1471 | } |
|---|
| 1160 | 1472 | |
|---|
| 1161 | 1473 | static int atom_get_min_pstate(void) |
|---|
| .. | .. |
|---|
| 1383 | 1695 | return ret; |
|---|
| 1384 | 1696 | } |
|---|
| 1385 | 1697 | |
|---|
| 1386 | | -static int intel_pstate_get_base_pstate(struct cpudata *cpu) |
|---|
| 1387 | | -{ |
|---|
| 1388 | | - return global.no_turbo || global.turbo_disabled ? |
|---|
| 1389 | | - cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
|---|
| 1390 | | -} |
|---|
| 1391 | | - |
|---|
| 1392 | 1698 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
|---|
| 1393 | 1699 | { |
|---|
| 1394 | 1700 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); |
|---|
| .. | .. |
|---|
| 1409 | 1715 | |
|---|
| 1410 | 1716 | static void intel_pstate_max_within_limits(struct cpudata *cpu) |
|---|
| 1411 | 1717 | { |
|---|
| 1412 | | - int pstate; |
|---|
| 1718 | + int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); |
|---|
| 1413 | 1719 | |
|---|
| 1414 | 1720 | update_turbo_state(); |
|---|
| 1415 | | - pstate = intel_pstate_get_base_pstate(cpu); |
|---|
| 1416 | | - pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); |
|---|
| 1417 | 1721 | intel_pstate_set_pstate(cpu, pstate); |
|---|
| 1418 | 1722 | } |
|---|
| 1419 | 1723 | |
|---|
| .. | .. |
|---|
| 1427 | 1731 | if (hwp_active && !hwp_mode_bdw) { |
|---|
| 1428 | 1732 | unsigned int phy_max, current_max; |
|---|
| 1429 | 1733 | |
|---|
| 1430 | | - intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); |
|---|
| 1734 | + intel_pstate_get_hwp_max(cpu, &phy_max, ¤t_max); |
|---|
| 1431 | 1735 | cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; |
|---|
| 1432 | 1736 | cpu->pstate.turbo_pstate = phy_max; |
|---|
| 1433 | 1737 | cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached)); |
|---|
| .. | .. |
|---|
| 1619 | 1923 | static inline int32_t get_target_pstate(struct cpudata *cpu) |
|---|
| 1620 | 1924 | { |
|---|
| 1621 | 1925 | struct sample *sample = &cpu->sample; |
|---|
| 1622 | | - int32_t busy_frac, boost; |
|---|
| 1926 | + int32_t busy_frac; |
|---|
| 1623 | 1927 | int target, avg_pstate; |
|---|
| 1624 | 1928 | |
|---|
| 1625 | 1929 | busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, |
|---|
| 1626 | 1930 | sample->tsc); |
|---|
| 1627 | 1931 | |
|---|
| 1628 | | - boost = cpu->iowait_boost; |
|---|
| 1629 | | - cpu->iowait_boost >>= 1; |
|---|
| 1630 | | - |
|---|
| 1631 | | - if (busy_frac < boost) |
|---|
| 1632 | | - busy_frac = boost; |
|---|
| 1932 | + if (busy_frac < cpu->iowait_boost) |
|---|
| 1933 | + busy_frac = cpu->iowait_boost; |
|---|
| 1633 | 1934 | |
|---|
| 1634 | 1935 | sample->busy_scaled = busy_frac * 100; |
|---|
| 1635 | 1936 | |
|---|
| .. | .. |
|---|
| 1656 | 1957 | |
|---|
| 1657 | 1958 | static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) |
|---|
| 1658 | 1959 | { |
|---|
| 1659 | | - int max_pstate = intel_pstate_get_base_pstate(cpu); |
|---|
| 1660 | | - int min_pstate; |
|---|
| 1960 | + int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); |
|---|
| 1961 | + int max_pstate = max(min_pstate, cpu->max_perf_ratio); |
|---|
| 1661 | 1962 | |
|---|
| 1662 | | - min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); |
|---|
| 1663 | | - max_pstate = max(min_pstate, cpu->max_perf_ratio); |
|---|
| 1664 | 1963 | return clamp_t(int, pstate, min_pstate, max_pstate); |
|---|
| 1665 | 1964 | } |
|---|
| 1666 | 1965 | |
|---|
| .. | .. |
|---|
| 1708 | 2007 | if (smp_processor_id() != cpu->cpu) |
|---|
| 1709 | 2008 | return; |
|---|
| 1710 | 2009 | |
|---|
| 2010 | + delta_ns = time - cpu->last_update; |
|---|
| 1711 | 2011 | if (flags & SCHED_CPUFREQ_IOWAIT) { |
|---|
| 1712 | | - cpu->iowait_boost = int_tofp(1); |
|---|
| 1713 | | - cpu->last_update = time; |
|---|
| 1714 | | - /* |
|---|
| 1715 | | - * The last time the busy was 100% so P-state was max anyway |
|---|
| 1716 | | - * so avoid overhead of computation. |
|---|
| 1717 | | - */ |
|---|
| 1718 | | - if (fp_toint(cpu->sample.busy_scaled) == 100) |
|---|
| 1719 | | - return; |
|---|
| 1720 | | - |
|---|
| 1721 | | - goto set_pstate; |
|---|
| 2012 | + /* Start over if the CPU may have been idle. */ |
|---|
| 2013 | + if (delta_ns > TICK_NSEC) { |
|---|
| 2014 | + cpu->iowait_boost = ONE_EIGHTH_FP; |
|---|
| 2015 | + } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { |
|---|
| 2016 | + cpu->iowait_boost <<= 1; |
|---|
| 2017 | + if (cpu->iowait_boost > int_tofp(1)) |
|---|
| 2018 | + cpu->iowait_boost = int_tofp(1); |
|---|
| 2019 | + } else { |
|---|
| 2020 | + cpu->iowait_boost = ONE_EIGHTH_FP; |
|---|
| 2021 | + } |
|---|
| 1722 | 2022 | } else if (cpu->iowait_boost) { |
|---|
| 1723 | 2023 | /* Clear iowait_boost if the CPU may have been idle. */ |
|---|
| 1724 | | - delta_ns = time - cpu->last_update; |
|---|
| 1725 | 2024 | if (delta_ns > TICK_NSEC) |
|---|
| 1726 | 2025 | cpu->iowait_boost = 0; |
|---|
| 2026 | + else |
|---|
| 2027 | + cpu->iowait_boost >>= 1; |
|---|
| 1727 | 2028 | } |
|---|
| 1728 | 2029 | cpu->last_update = time; |
|---|
| 1729 | 2030 | delta_ns = time - cpu->sample.time; |
|---|
| 1730 | 2031 | if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) |
|---|
| 1731 | 2032 | return; |
|---|
| 1732 | 2033 | |
|---|
| 1733 | | -set_pstate: |
|---|
| 1734 | 2034 | if (intel_pstate_sample(cpu, time)) |
|---|
| 1735 | 2035 | intel_pstate_adjust_pstate(cpu); |
|---|
| 1736 | 2036 | } |
|---|
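The rework above replaces the old all-or-nothing iowait boost with a geometric ramp: each back-to-back IO-wait wakeup doubles the boost from 1/8 toward 1.0, and every non-IO update halves it. A sketch of the ramp in isolation (fixed point with FRAC_BITS = 8; the tick/idle reset is omitted):

```c
#include <stdint.h>

#define ONE_EIGHTH_FP ((int64_t)1 << (8 - 3))	/* 1/8 in 8-bit fixed point */
#define ONE_FP        ((int64_t)1 << 8)		/* 1.0 */

static int64_t boost_on_iowait(int64_t boost)
{
	if (boost < ONE_EIGHTH_FP)
		return ONE_EIGHTH_FP;		/* (re)start at 1/8 */
	boost <<= 1;				/* 1/8 -> 1/4 -> 1/2 -> 1 */
	return boost > ONE_FP ? ONE_FP : boost;
}

static int64_t boost_decay(int64_t boost)
{
	return boost >> 1;			/* halve on non-IO updates */
}
```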
| .. | .. |
|---|
| 1774 | 2074 | .get_val = core_get_val, |
|---|
| 1775 | 2075 | }; |
|---|
| 1776 | 2076 | |
|---|
| 1777 | | -#define ICPU(model, policy) \ |
|---|
| 1778 | | - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ |
|---|
| 1779 | | - (unsigned long)&policy } |
|---|
| 2077 | +#define X86_MATCH(model, policy) \ |
|---|
| 2078 | + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ |
|---|
| 2079 | + X86_FEATURE_APERFMPERF, &policy) |
|---|
| 1780 | 2080 | |
|---|
| 1781 | 2081 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
|---|
| 1782 | | - ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), |
|---|
| 1783 | | - ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), |
|---|
| 1784 | | - ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs), |
|---|
| 1785 | | - ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), |
|---|
| 1786 | | - ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), |
|---|
| 1787 | | - ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), |
|---|
| 1788 | | - ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs), |
|---|
| 1789 | | - ICPU(INTEL_FAM6_HASWELL_X, core_funcs), |
|---|
| 1790 | | - ICPU(INTEL_FAM6_HASWELL_ULT, core_funcs), |
|---|
| 1791 | | - ICPU(INTEL_FAM6_HASWELL_GT3E, core_funcs), |
|---|
| 1792 | | - ICPU(INTEL_FAM6_BROADWELL_GT3E, core_funcs), |
|---|
| 1793 | | - ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs), |
|---|
| 1794 | | - ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_funcs), |
|---|
| 1795 | | - ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), |
|---|
| 1796 | | - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), |
|---|
| 1797 | | - ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), |
|---|
| 1798 | | - ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), |
|---|
| 1799 | | - ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), |
|---|
| 1800 | | - ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), |
|---|
| 1801 | | - ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs), |
|---|
| 1802 | | - ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), |
|---|
| 2082 | + X86_MATCH(SANDYBRIDGE, core_funcs), |
|---|
| 2083 | + X86_MATCH(SANDYBRIDGE_X, core_funcs), |
|---|
| 2084 | + X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), |
|---|
| 2085 | + X86_MATCH(IVYBRIDGE, core_funcs), |
|---|
| 2086 | + X86_MATCH(HASWELL, core_funcs), |
|---|
| 2087 | + X86_MATCH(BROADWELL, core_funcs), |
|---|
| 2088 | + X86_MATCH(IVYBRIDGE_X, core_funcs), |
|---|
| 2089 | + X86_MATCH(HASWELL_X, core_funcs), |
|---|
| 2090 | + X86_MATCH(HASWELL_L, core_funcs), |
|---|
| 2091 | + X86_MATCH(HASWELL_G, core_funcs), |
|---|
| 2092 | + X86_MATCH(BROADWELL_G, core_funcs), |
|---|
| 2093 | + X86_MATCH(ATOM_AIRMONT, airmont_funcs), |
|---|
| 2094 | + X86_MATCH(SKYLAKE_L, core_funcs), |
|---|
| 2095 | + X86_MATCH(BROADWELL_X, core_funcs), |
|---|
| 2096 | + X86_MATCH(SKYLAKE, core_funcs), |
|---|
| 2097 | + X86_MATCH(BROADWELL_D, core_funcs), |
|---|
| 2098 | + X86_MATCH(XEON_PHI_KNL, knl_funcs), |
|---|
| 2099 | + X86_MATCH(XEON_PHI_KNM, knl_funcs), |
|---|
| 2100 | + X86_MATCH(ATOM_GOLDMONT, core_funcs), |
|---|
| 2101 | + X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), |
|---|
| 2102 | + X86_MATCH(SKYLAKE_X, core_funcs), |
|---|
| 1803 | 2103 | {} |
|---|
| 1804 | 2104 | }; |
|---|
| 1805 | 2105 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); |
|---|
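The local ICPU() wrapper is gone in favor of the generic X86_MATCH_VENDOR_FAM_MODEL_FEATURE() initializer. Roughly what one X86_MATCH() entry above denotes (simplified; the real initializer carries additional fields such as steppings):

```c
static const struct x86_cpu_id example_entry = {
	.vendor      = X86_VENDOR_INTEL,
	.family      = 6,
	.model       = INTEL_FAM6_SANDYBRIDGE,
	.feature     = X86_FEATURE_APERFMPERF,
	.driver_data = (kernel_ulong_t)&core_funcs,
};
```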
| 1806 | 2106 | |
|---|
| 1807 | 2107 | static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { |
|---|
| 1808 | | - ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), |
|---|
| 1809 | | - ICPU(INTEL_FAM6_BROADWELL_X, core_funcs), |
|---|
| 1810 | | - ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), |
|---|
| 2108 | + X86_MATCH(BROADWELL_D, core_funcs), |
|---|
| 2109 | + X86_MATCH(BROADWELL_X, core_funcs), |
|---|
| 2110 | + X86_MATCH(SKYLAKE_X, core_funcs), |
|---|
| 1811 | 2111 | {} |
|---|
| 1812 | 2112 | }; |
|---|
| 1813 | 2113 | |
|---|
| 1814 | 2114 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { |
|---|
| 1815 | | - ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_funcs), |
|---|
| 2115 | + X86_MATCH(KABYLAKE, core_funcs), |
|---|
| 1816 | 2116 | {} |
|---|
| 1817 | 2117 | }; |
|---|
| 1818 | 2118 | |
|---|
| 1819 | 2119 | static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { |
|---|
| 1820 | | - ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), |
|---|
| 1821 | | - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), |
|---|
| 2120 | + X86_MATCH(SKYLAKE_X, core_funcs), |
|---|
| 2121 | + X86_MATCH(SKYLAKE, core_funcs), |
|---|
| 1822 | 2122 | {} |
|---|
| 1823 | 2123 | }; |
|---|
| 1824 | 2124 | |
|---|
| .. | .. |
|---|
| 1835 | 2135 | |
|---|
| 1836 | 2136 | all_cpu_data[cpunum] = cpu; |
|---|
| 1837 | 2137 | |
|---|
| 2138 | + cpu->cpu = cpunum; |
|---|
| 2139 | + |
|---|
| 1838 | 2140 | cpu->epp_default = -EINVAL; |
|---|
| 1839 | | - cpu->epp_powersave = -EINVAL; |
|---|
| 1840 | | - cpu->epp_saved = -EINVAL; |
|---|
| 2141 | + |
|---|
| 2142 | + if (hwp_active) { |
|---|
| 2143 | + const struct x86_cpu_id *id; |
|---|
| 2144 | + |
|---|
| 2145 | + intel_pstate_hwp_enable(cpu); |
|---|
| 2146 | + |
|---|
| 2147 | + id = x86_match_cpu(intel_pstate_hwp_boost_ids); |
|---|
| 2148 | + if (id && intel_pstate_acpi_pm_profile_server()) |
|---|
| 2149 | + hwp_boost = true; |
|---|
| 2150 | + } |
|---|
| 2151 | + } else if (hwp_active) { |
|---|
| 2152 | + /* |
|---|
| 2153 | + * Re-enable HWP in case this happens after a resume from ACPI |
|---|
| 2154 | + * S3 if the CPU was offline during the whole suspend/resume |
|---|
| 2155 | + * cycle. |
|---|
| 2156 | + */ |
|---|
| 2157 | + intel_pstate_hwp_reenable(cpu); |
|---|
| 1841 | 2158 | } |
|---|
| 1842 | 2159 | |
|---|
| 1843 | | - cpu = all_cpu_data[cpunum]; |
|---|
| 1844 | | - |
|---|
| 1845 | | - cpu->cpu = cpunum; |
|---|
| 1846 | | - |
|---|
| 1847 | | - if (hwp_active) { |
|---|
| 1848 | | - const struct x86_cpu_id *id; |
|---|
| 1849 | | - |
|---|
| 1850 | | - id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); |
|---|
| 1851 | | - if (id) |
|---|
| 1852 | | - intel_pstate_disable_ee(cpunum); |
|---|
| 1853 | | - |
|---|
| 1854 | | - intel_pstate_hwp_enable(cpu); |
|---|
| 1855 | | - |
|---|
| 1856 | | - id = x86_match_cpu(intel_pstate_hwp_boost_ids); |
|---|
| 1857 | | - if (id && intel_pstate_acpi_pm_profile_server()) |
|---|
| 1858 | | - hwp_boost = true; |
|---|
| 1859 | | - } |
|---|
| 2160 | + cpu->epp_powersave = -EINVAL; |
|---|
| 2161 | + cpu->epp_policy = 0; |
|---|
| 1860 | 2162 | |
|---|
| 1861 | 2163 | intel_pstate_get_cpu_pstates(cpu); |
|---|
| 1862 | 2164 | |
|---|
| .. | .. |
|---|
| 1893 | 2195 | |
|---|
| 1894 | 2196 | cpufreq_remove_update_util_hook(cpu); |
|---|
| 1895 | 2197 | cpu_data->update_util_set = false; |
|---|
| 1896 | | - synchronize_sched(); |
|---|
| 2198 | + synchronize_rcu(); |
|---|
| 1897 | 2199 | } |
|---|
| 1898 | 2200 | |
|---|
| 1899 | 2201 | static int intel_pstate_get_max_freq(struct cpudata *cpu) |
|---|
| .. | .. |
|---|
| 1902 | 2204 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
|---|
| 1903 | 2205 | } |
|---|
| 1904 | 2206 | |
|---|
| 1905 | | -static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, |
|---|
| 1906 | | - struct cpudata *cpu) |
|---|
| 2207 | +static void intel_pstate_update_perf_limits(struct cpudata *cpu, |
|---|
| 2208 | + unsigned int policy_min, |
|---|
| 2209 | + unsigned int policy_max) |
|---|
| 1907 | 2210 | { |
|---|
| 1908 | | - int max_freq = intel_pstate_get_max_freq(cpu); |
|---|
| 1909 | 2211 | int32_t max_policy_perf, min_policy_perf; |
|---|
| 1910 | 2212 | int max_state, turbo_max; |
|---|
| 2213 | + int max_freq; |
|---|
| 1911 | 2214 | |
|---|
| 1912 | 2215 | /* |
|---|
| 1913 | 2216 | * HWP needs some special consideration, because on BDX the |
|---|
| .. | .. |
|---|
| 1915 | 2218 | * rather than pure ratios. |
|---|
| 1916 | 2219 | */ |
|---|
| 1917 | 2220 | if (hwp_active) { |
|---|
| 1918 | | - intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); |
|---|
| 2221 | + intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); |
|---|
| 1919 | 2222 | } else { |
|---|
| 1920 | | - max_state = intel_pstate_get_base_pstate(cpu); |
|---|
| 2223 | + max_state = global.no_turbo || global.turbo_disabled ? |
|---|
| 2224 | + cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
|---|
| 1921 | 2225 | turbo_max = cpu->pstate.turbo_pstate; |
|---|
| 1922 | 2226 | } |
|---|
| 2227 | + max_freq = max_state * cpu->pstate.scaling; |
|---|
| 1923 | 2228 | |
|---|
| 1924 | | - max_policy_perf = max_state * policy->max / max_freq; |
|---|
| 1925 | | - if (policy->max == policy->min) { |
|---|
| 2229 | + max_policy_perf = max_state * policy_max / max_freq; |
|---|
| 2230 | + if (policy_max == policy_min) { |
|---|
| 1926 | 2231 | min_policy_perf = max_policy_perf; |
|---|
| 1927 | 2232 | } else { |
|---|
| 1928 | | - min_policy_perf = max_state * policy->min / max_freq; |
|---|
| 2233 | + min_policy_perf = max_state * policy_min / max_freq; |
|---|
| 1929 | 2234 | min_policy_perf = clamp_t(int32_t, min_policy_perf, |
|---|
| 1930 | 2235 | 0, max_policy_perf); |
|---|
| 1931 | 2236 | } |
|---|
| 1932 | 2237 | |
|---|
| 1933 | 2238 | pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n", |
|---|
| 1934 | | - policy->cpu, max_state, |
|---|
| 1935 | | - min_policy_perf, max_policy_perf); |
|---|
| 2239 | + cpu->cpu, max_state, min_policy_perf, max_policy_perf); |
|---|
| 1936 | 2240 | |
|---|
| 1937 | 2241 | /* Normalize user input to [min_perf, max_perf] */ |
|---|
| 1938 | 2242 | if (per_cpu_limits) { |
|---|
| .. | .. |
|---|
| 1946 | 2250 | global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); |
|---|
| 1947 | 2251 | global_min = clamp_t(int32_t, global_min, 0, global_max); |
|---|
| 1948 | 2252 | |
|---|
| 1949 | | - pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu, |
|---|
| 2253 | + pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, |
|---|
| 1950 | 2254 | global_min, global_max); |
|---|
| 1951 | 2255 | |
|---|
| 1952 | 2256 | cpu->min_perf_ratio = max(min_policy_perf, global_min); |
|---|
| .. | .. |
|---|
| 1959 | 2263 | cpu->max_perf_ratio); |
|---|
| 1960 | 2264 | |
|---|
| 1961 | 2265 | } |
|---|
| 1962 | | - pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu, |
|---|
| 2266 | + pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, |
|---|
| 1963 | 2267 | cpu->max_perf_ratio, |
|---|
| 1964 | 2268 | cpu->min_perf_ratio); |
|---|
| 1965 | 2269 | } |
|---|
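intel_pstate_update_perf_limits() now takes the min/max frequencies directly and converts them to P-state ratios. A worked sketch of the conversion with hypothetical numbers: max_state = 28 and scaling = 100000 give max_freq = 2800000 kHz, so a 2100000 kHz policy cap maps to ratio 21:

```c
static int policy_freq_to_perf(int max_state, int max_freq_khz,
			       int policy_freq_khz)
{
	return max_state * policy_freq_khz / max_freq_khz;
}
```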
| .. | .. |
|---|
| 1979 | 2283 | |
|---|
| 1980 | 2284 | mutex_lock(&intel_pstate_limits_lock); |
|---|
| 1981 | 2285 | |
|---|
| 1982 | | - intel_pstate_update_perf_limits(policy, cpu); |
|---|
| 2286 | + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); |
|---|
| 1983 | 2287 | |
|---|
| 1984 | 2288 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
|---|
| 1985 | 2289 | /* |
|---|
| .. | .. |
|---|
| 2008 | 2312 | return 0; |
|---|
| 2009 | 2313 | } |
|---|
| 2010 | 2314 | |
|---|
| 2011 | | -static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy, |
|---|
| 2012 | | - struct cpudata *cpu) |
|---|
| 2315 | +static void intel_pstate_adjust_policy_max(struct cpudata *cpu, |
|---|
| 2316 | + struct cpufreq_policy_data *policy) |
|---|
| 2013 | 2317 | { |
|---|
| 2014 | 2318 | if (!hwp_active && |
|---|
| 2015 | 2319 | cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && |
|---|
| .. | .. |
|---|
| 2020 | 2324 | } |
|---|
| 2021 | 2325 | } |
|---|
| 2022 | 2326 | |
|---|
| 2023 | | -static int intel_pstate_verify_policy(struct cpufreq_policy *policy) |
|---|
| 2327 | +static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, |
|---|
| 2328 | + struct cpufreq_policy_data *policy) |
|---|
| 2024 | 2329 | { |
|---|
| 2025 | | - struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2330 | + int max_freq; |
|---|
| 2026 | 2331 | |
|---|
| 2027 | 2332 | update_turbo_state(); |
|---|
| 2028 | | - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
|---|
| 2029 | | - intel_pstate_get_max_freq(cpu)); |
|---|
| 2333 | + if (hwp_active) { |
|---|
| 2334 | + int max_state, turbo_max; |
|---|
| 2030 | 2335 | |
|---|
| 2031 | | - if (policy->policy != CPUFREQ_POLICY_POWERSAVE && |
|---|
| 2032 | | - policy->policy != CPUFREQ_POLICY_PERFORMANCE) |
|---|
| 2033 | | - return -EINVAL; |
|---|
| 2336 | + intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); |
|---|
| 2337 | + max_freq = max_state * cpu->pstate.scaling; |
|---|
| 2338 | + } else { |
|---|
| 2339 | + max_freq = intel_pstate_get_max_freq(cpu); |
|---|
| 2340 | + } |
|---|
| 2341 | + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq); |
|---|
| 2034 | 2342 | |
|---|
| 2035 | | - intel_pstate_adjust_policy_max(policy, cpu); |
|---|
| 2343 | + intel_pstate_adjust_policy_max(cpu, policy); |
|---|
| 2344 | +} |
|---|
| 2345 | + |
|---|
| 2346 | +static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) |
|---|
| 2347 | +{ |
|---|
| 2348 | + intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); |
|---|
| 2036 | 2349 | |
|---|
| 2037 | 2350 | return 0; |
|---|
| 2038 | 2351 | } |
|---|
| 2039 | 2352 | |
|---|
| 2040 | | -static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) |
|---|
| 2353 | +static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) |
|---|
| 2041 | 2354 | { |
|---|
| 2042 | | - intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); |
|---|
| 2355 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2356 | + |
|---|
| 2357 | + pr_debug("CPU %d going offline\n", cpu->cpu); |
|---|
| 2358 | + |
|---|
| 2359 | + if (cpu->suspended) |
|---|
| 2360 | + return 0; |
|---|
| 2361 | + |
|---|
| 2362 | + /* |
|---|
| 2363 | + * If the CPU is an SMT thread and it goes offline with the performance |
|---|
| 2364 | + * settings different from the minimum, it will prevent its sibling |
|---|
| 2365 | + * from getting to lower performance levels, so force the minimum |
|---|
| 2366 | + * performance on CPU offline to prevent that from happening. |
|---|
| 2367 | + */ |
|---|
| 2368 | + if (hwp_active) |
|---|
| 2369 | + intel_pstate_hwp_offline(cpu); |
|---|
| 2370 | + else |
|---|
| 2371 | + intel_pstate_set_min_pstate(cpu); |
|---|
| 2372 | + |
|---|
| 2373 | + intel_pstate_exit_perf_limits(policy); |
|---|
| 2374 | + |
|---|
| 2375 | + return 0; |
|---|
| 2376 | +} |
|---|
| 2377 | + |
|---|
| 2378 | +static int intel_pstate_cpu_online(struct cpufreq_policy *policy) |
|---|
| 2379 | +{ |
|---|
| 2380 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2381 | + |
|---|
| 2382 | + pr_debug("CPU %d going online\n", cpu->cpu); |
|---|
| 2383 | + |
|---|
| 2384 | + intel_pstate_init_acpi_perf_limits(policy); |
|---|
| 2385 | + |
|---|
| 2386 | + if (hwp_active) { |
|---|
| 2387 | + /* |
|---|
| 2388 | + * Re-enable HWP and clear the "suspended" flag to let "resume" |
|---|
| 2389 | + * know that it need not do that. |
|---|
| 2390 | + */ |
|---|
| 2391 | + intel_pstate_hwp_reenable(cpu); |
|---|
| 2392 | + cpu->suspended = false; |
|---|
| 2393 | + } |
|---|
| 2394 | + |
|---|
| 2395 | + return 0; |
|---|
| 2043 | 2396 | } |
|---|
| 2044 | 2397 | |
|---|
| 2045 | 2398 | static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) |
|---|
| 2046 | 2399 | { |
|---|
| 2047 | | - pr_debug("CPU %d exiting\n", policy->cpu); |
|---|
| 2400 | + pr_debug("CPU %d stopping\n", policy->cpu); |
|---|
| 2048 | 2401 | |
|---|
| 2049 | 2402 | intel_pstate_clear_update_util_hook(policy->cpu); |
|---|
| 2050 | | - if (hwp_active) |
|---|
| 2051 | | - intel_pstate_hwp_save_state(policy); |
|---|
| 2052 | | - else |
|---|
| 2053 | | - intel_cpufreq_stop_cpu(policy); |
|---|
| 2054 | 2403 | } |
|---|
| 2055 | 2404 | |
|---|
| 2056 | 2405 | static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) |
|---|
| 2057 | 2406 | { |
|---|
| 2058 | | - intel_pstate_exit_perf_limits(policy); |
|---|
| 2407 | + pr_debug("CPU %d exiting\n", policy->cpu); |
|---|
| 2059 | 2408 | |
|---|
| 2060 | 2409 | policy->fast_switch_possible = false; |
|---|
| 2061 | 2410 | |
|---|
| .. | .. |
|---|
| 2082 | 2431 | /* cpuinfo and default policy values */ |
|---|
| 2083 | 2432 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
|---|
| 2084 | 2433 | update_turbo_state(); |
|---|
| 2434 | + global.turbo_disabled_mf = global.turbo_disabled; |
|---|
| 2085 | 2435 | policy->cpuinfo.max_freq = global.turbo_disabled ? |
|---|
| 2086 | 2436 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
|---|
| 2087 | 2437 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; |
|---|
| .. | .. |
|---|
| 2109 | 2459 | if (ret) |
|---|
| 2110 | 2460 | return ret; |
|---|
| 2111 | 2461 | |
|---|
| 2112 | | - if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) |
|---|
| 2113 | | - policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
|---|
| 2114 | | - else |
|---|
| 2115 | | - policy->policy = CPUFREQ_POLICY_POWERSAVE; |
|---|
| 2462 | + /* |
|---|
| 2463 | + * Set the policy to powersave to provide a valid fallback value in case |
|---|
| 2464 | + * the default cpufreq governor is neither powersave nor performance. |
|---|
| 2465 | + */ |
|---|
| 2466 | + policy->policy = CPUFREQ_POLICY_POWERSAVE; |
|---|
| 2467 | + |
|---|
| 2468 | + if (hwp_active) { |
|---|
| 2469 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2470 | + |
|---|
| 2471 | + cpu->epp_cached = intel_pstate_get_epp(cpu, 0); |
|---|
| 2472 | + } |
|---|
| 2116 | 2473 | |
|---|
| 2117 | 2474 | return 0; |
|---|
| 2118 | 2475 | } |
|---|
| .. | .. |
|---|
| 2121 | 2478 | .flags = CPUFREQ_CONST_LOOPS, |
|---|
| 2122 | 2479 | .verify = intel_pstate_verify_policy, |
|---|
| 2123 | 2480 | .setpolicy = intel_pstate_set_policy, |
|---|
| 2124 | | - .suspend = intel_pstate_hwp_save_state, |
|---|
| 2481 | + .suspend = intel_pstate_suspend, |
|---|
| 2125 | 2482 | .resume = intel_pstate_resume, |
|---|
| 2126 | 2483 | .init = intel_pstate_cpu_init, |
|---|
| 2127 | 2484 | .exit = intel_pstate_cpu_exit, |
|---|
| 2128 | 2485 | .stop_cpu = intel_pstate_stop_cpu, |
|---|
| 2486 | + .offline = intel_pstate_cpu_offline, |
|---|
| 2487 | + .online = intel_pstate_cpu_online, |
|---|
| 2488 | + .update_limits = intel_pstate_update_limits, |
|---|
| 2129 | 2489 | .name = "intel_pstate", |
|---|
| 2130 | 2490 | }; |
|---|
| 2131 | 2491 | |
|---|
| 2132 | | -static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) |
|---|
| 2492 | +static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) |
|---|
| 2133 | 2493 | { |
|---|
| 2134 | 2494 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2135 | 2495 | |
|---|
| 2136 | | - update_turbo_state(); |
|---|
| 2137 | | - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
|---|
| 2138 | | - intel_pstate_get_max_freq(cpu)); |
|---|
| 2139 | | - |
|---|
| 2140 | | - intel_pstate_adjust_policy_max(policy, cpu); |
|---|
| 2141 | | - |
|---|
| 2142 | | - intel_pstate_update_perf_limits(policy, cpu); |
|---|
| 2496 | + intel_pstate_verify_cpu_policy(cpu, policy); |
|---|
| 2497 | + intel_pstate_update_perf_limits(cpu, policy->min, policy->max); |
|---|
| 2143 | 2498 | |
|---|
| 2144 | 2499 | return 0; |
|---|
| 2145 | 2500 | } |
|---|
| .. | .. |
|---|
| 2182 | 2537 | fp_toint(cpu->iowait_boost * 100)); |
|---|
| 2183 | 2538 | } |
|---|
| 2184 | 2539 | |
|---|
| 2540 | +static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate, |
|---|
| 2541 | + bool strict, bool fast_switch) |
|---|
| 2542 | +{ |
|---|
| 2543 | + u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; |
|---|
| 2544 | + |
|---|
| 2545 | + value &= ~HWP_MIN_PERF(~0L); |
|---|
| 2546 | + value |= HWP_MIN_PERF(target_pstate); |
|---|
| 2547 | + |
|---|
| 2548 | + /* |
|---|
| 2549 | + * The entire MSR needs to be updated in order to update the HWP min |
|---|
| 2550 | + * field in it, so opportunistically update the max too if needed. |
|---|
| 2551 | + */ |
|---|
| 2552 | + value &= ~HWP_MAX_PERF(~0L); |
|---|
| 2553 | + value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio); |
|---|
| 2554 | + |
|---|
| 2555 | + if (value == prev) |
|---|
| 2556 | + return; |
|---|
| 2557 | + |
|---|
| 2558 | + WRITE_ONCE(cpu->hwp_req_cached, value); |
|---|
| 2559 | + if (fast_switch) |
|---|
| 2560 | + wrmsrl(MSR_HWP_REQUEST, value); |
|---|
| 2561 | + else |
|---|
| 2562 | + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); |
|---|
| 2563 | +} |
|---|
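intel_cpufreq_adjust_hwp() edits two fields of one cached 64-bit MSR image before writing it back. A standalone sketch of the same read-modify-write, with the HWP_MIN_PERF()/HWP_MAX_PERF() field layout (min in bits 7:0, max in bits 15:8) redefined locally so the example compiles on its own; the sample register value is made up:

```c
/* Read-modify-write of a cached MSR_HWP_REQUEST image, as done above. */
#include <stdint.h>
#include <stdio.h>

#define HWP_MIN_PERF(x) ((uint64_t)((x) & 0xffULL))
#define HWP_MAX_PERF(x) ((uint64_t)((x) & 0xffULL) << 8)

static uint64_t adjust_hwp(uint64_t value, uint32_t target, uint32_t max)
{
	value &= ~HWP_MIN_PERF(~0ULL);  /* clear bits 7:0 */
	value |= HWP_MIN_PERF(target);
	value &= ~HWP_MAX_PERF(~0ULL);  /* clear bits 15:8 */
	value |= HWP_MAX_PERF(max);
	return value;
}

int main(void)
{
	/* EPP=0x80, desired=0x20, max=0x10, min=0x08 */
	uint64_t req = 0x0000000080201008ULL;

	req = adjust_hwp(req, 0x0c, 0x28);
	printf("0x%016llx\n", (unsigned long long)req); /* 0x000000008020280c */
	return 0;
}
```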
| 2564 | + |
|---|
| 2565 | +static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu, |
|---|
| 2566 | + u32 target_pstate, bool fast_switch) |
|---|
| 2567 | +{ |
|---|
| 2568 | + if (fast_switch) |
|---|
| 2569 | + wrmsrl(MSR_IA32_PERF_CTL, |
|---|
| 2570 | + pstate_funcs.get_val(cpu, target_pstate)); |
|---|
| 2571 | + else |
|---|
| 2572 | + wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, |
|---|
| 2573 | + pstate_funcs.get_val(cpu, target_pstate)); |
|---|
| 2574 | +} |
|---|
| 2575 | + |
|---|
| 2576 | +static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, |
|---|
| 2577 | + int target_pstate, bool fast_switch) |
|---|
| 2578 | +{ |
|---|
| 2579 | + struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2580 | + int old_pstate = cpu->pstate.current_pstate; |
|---|
| 2581 | + |
|---|
| 2582 | + target_pstate = intel_pstate_prepare_request(cpu, target_pstate); |
|---|
| 2583 | + if (hwp_active) { |
|---|
| 2584 | + intel_cpufreq_adjust_hwp(cpu, target_pstate, |
|---|
| 2585 | + policy->strict_target, fast_switch); |
|---|
| 2586 | + cpu->pstate.current_pstate = target_pstate; |
|---|
| 2587 | + } else if (target_pstate != old_pstate) { |
|---|
| 2588 | + intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch); |
|---|
| 2589 | + cpu->pstate.current_pstate = target_pstate; |
|---|
| 2590 | + } |
|---|
| 2591 | + |
|---|
| 2592 | + intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : |
|---|
| 2593 | + INTEL_PSTATE_TRACE_TARGET, old_pstate); |
|---|
| 2594 | + |
|---|
| 2595 | + return target_pstate; |
|---|
| 2596 | +} |
|---|
| 2597 | + |
|---|
| 2185 | 2598 | static int intel_cpufreq_target(struct cpufreq_policy *policy, |
|---|
| 2186 | 2599 | unsigned int target_freq, |
|---|
| 2187 | 2600 | unsigned int relation) |
|---|
| 2188 | 2601 | { |
|---|
| 2189 | 2602 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2190 | 2603 | struct cpufreq_freqs freqs; |
|---|
| 2191 | | - int target_pstate, old_pstate; |
|---|
| 2604 | + int target_pstate; |
|---|
| 2192 | 2605 | |
|---|
| 2193 | 2606 | update_turbo_state(); |
|---|
| 2194 | 2607 | |
|---|
| .. | .. |
|---|
| 2196 | 2609 | freqs.new = target_freq; |
|---|
| 2197 | 2610 | |
|---|
| 2198 | 2611 | cpufreq_freq_transition_begin(policy, &freqs); |
|---|
| 2612 | + |
|---|
| 2199 | 2613 | switch (relation) { |
|---|
| 2200 | 2614 | case CPUFREQ_RELATION_L: |
|---|
| 2201 | 2615 | target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); |
|---|
| .. | .. |
|---|
| 2207 | 2621 | target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); |
|---|
| 2208 | 2622 | break; |
|---|
| 2209 | 2623 | } |
|---|
| 2210 | | - target_pstate = intel_pstate_prepare_request(cpu, target_pstate); |
|---|
| 2211 | | - old_pstate = cpu->pstate.current_pstate; |
|---|
| 2212 | | - if (target_pstate != cpu->pstate.current_pstate) { |
|---|
| 2213 | | - cpu->pstate.current_pstate = target_pstate; |
|---|
| 2214 | | - wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, |
|---|
| 2215 | | - pstate_funcs.get_val(cpu, target_pstate)); |
|---|
| 2216 | | - } |
|---|
| 2624 | + |
|---|
| 2625 | + target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false); |
|---|
| 2626 | + |
|---|
| 2217 | 2627 | freqs.new = target_pstate * cpu->pstate.scaling; |
|---|
| 2218 | | - intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate); |
|---|
| 2628 | + |
|---|
| 2219 | 2629 | cpufreq_freq_transition_end(policy, &freqs, false); |
|---|
| 2220 | 2630 | |
|---|
| 2221 | 2631 | return 0; |
|---|
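The relation switch above picks a rounding mode when converting the requested frequency into a P-state: CPUFREQ_RELATION_L rounds up to the lowest frequency at or above the target, the CPUFREQ_RELATION_H branch elided from this hunk rounds down with a plain division, and CPUFREQ_RELATION_C picks the closest step. A runnable check of the three roundings, with the kernel macros redefined locally and example numbers:

```c
/* Mapping a target frequency (kHz) to a P-state under the three relations. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int scaling = 100000;  /* 100 MHz per P-state step */
	unsigned int target  = 2240000; /* 2.24 GHz */

	printf("L (>= target): %u\n", DIV_ROUND_UP(target, scaling));      /* 23 */
	printf("H (<= target): %u\n", target / scaling);                   /* 22 */
	printf("C (closest):   %u\n", DIV_ROUND_CLOSEST(target, scaling)); /* 22 */
	return 0;
}
```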
| .. | .. |
|---|
| 2225 | 2635 | unsigned int target_freq) |
|---|
| 2226 | 2636 | { |
|---|
| 2227 | 2637 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
|---|
| 2228 | | - int target_pstate, old_pstate; |
|---|
| 2638 | + int target_pstate; |
|---|
| 2229 | 2639 | |
|---|
| 2230 | 2640 | update_turbo_state(); |
|---|
| 2231 | 2641 | |
|---|
| 2232 | 2642 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
|---|
| 2233 | | - target_pstate = intel_pstate_prepare_request(cpu, target_pstate); |
|---|
| 2234 | | - old_pstate = cpu->pstate.current_pstate; |
|---|
| 2235 | | - intel_pstate_update_pstate(cpu, target_pstate); |
|---|
| 2236 | | - intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); |
|---|
| 2643 | + |
|---|
| 2644 | + target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); |
|---|
| 2645 | + |
|---|
| 2237 | 2646 | return target_pstate * cpu->pstate.scaling; |
|---|
| 2238 | 2647 | } |
|---|
| 2239 | 2648 | |
|---|
| 2240 | 2649 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) |
|---|
| 2241 | 2650 | { |
|---|
| 2242 | | - int ret = __intel_pstate_cpu_init(policy); |
|---|
| 2651 | + int max_state, turbo_max, min_freq, max_freq, ret; |
|---|
| 2652 | + struct freq_qos_request *req; |
|---|
| 2653 | + struct cpudata *cpu; |
|---|
| 2654 | + struct device *dev; |
|---|
| 2243 | 2655 | |
|---|
| 2656 | + dev = get_cpu_device(policy->cpu); |
|---|
| 2657 | + if (!dev) |
|---|
| 2658 | + return -ENODEV; |
|---|
| 2659 | + |
|---|
| 2660 | + ret = __intel_pstate_cpu_init(policy); |
|---|
| 2244 | 2661 | if (ret) |
|---|
| 2245 | 2662 | return ret; |
|---|
| 2246 | 2663 | |
|---|
| 2247 | 2664 | policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; |
|---|
| 2248 | | - policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; |
|---|
| 2249 | 2665 | /* This reflects the intel_pstate_get_cpu_pstates() setting. */ |
|---|
| 2250 | 2666 | policy->cur = policy->cpuinfo.min_freq; |
|---|
| 2251 | 2667 | |
|---|
| 2668 | + req = kcalloc(2, sizeof(*req), GFP_KERNEL); |
|---|
| 2669 | + if (!req) { |
|---|
| 2670 | + ret = -ENOMEM; |
|---|
| 2671 | + goto pstate_exit; |
|---|
| 2672 | + } |
|---|
| 2673 | + |
|---|
| 2674 | + cpu = all_cpu_data[policy->cpu]; |
|---|
| 2675 | + |
|---|
| 2676 | + if (hwp_active) { |
|---|
| 2677 | + u64 value; |
|---|
| 2678 | + |
|---|
| 2679 | + intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state); |
|---|
| 2680 | + policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; |
|---|
| 2681 | + rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); |
|---|
| 2682 | + WRITE_ONCE(cpu->hwp_req_cached, value); |
|---|
| 2683 | + cpu->epp_cached = intel_pstate_get_epp(cpu, value); |
|---|
| 2684 | + } else { |
|---|
| 2685 | + turbo_max = cpu->pstate.turbo_pstate; |
|---|
| 2686 | + policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; |
|---|
| 2687 | + } |
|---|
| 2688 | + |
|---|
| 2689 | + min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); |
|---|
| 2690 | + min_freq *= cpu->pstate.scaling; |
|---|
| 2691 | + max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); |
|---|
| 2692 | + max_freq *= cpu->pstate.scaling; |
|---|
| 2693 | + |
|---|
| 2694 | + ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN, |
|---|
| 2695 | + min_freq); |
|---|
| 2696 | + if (ret < 0) { |
|---|
| 2697 | + dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); |
|---|
| 2698 | + goto free_req; |
|---|
| 2699 | + } |
|---|
| 2700 | + |
|---|
| 2701 | + ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX, |
|---|
| 2702 | + max_freq); |
|---|
| 2703 | + if (ret < 0) { |
|---|
| 2704 | + dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); |
|---|
| 2705 | + goto remove_min_req; |
|---|
| 2706 | + } |
|---|
| 2707 | + |
|---|
| 2708 | + policy->driver_data = req; |
|---|
| 2709 | + |
|---|
| 2252 | 2710 | return 0; |
|---|
| 2711 | + |
|---|
| 2712 | +remove_min_req: |
|---|
| 2713 | + freq_qos_remove_request(req); |
|---|
| 2714 | +free_req: |
|---|
| 2715 | + kfree(req); |
|---|
| 2716 | +pstate_exit: |
|---|
| 2717 | + intel_pstate_exit_perf_limits(policy); |
|---|
| 2718 | + |
|---|
| 2719 | + return ret; |
|---|
| 2720 | +} |
|---|
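Before registering the paired freq_qos requests, the init path above converts the global percent limits into kHz against the turbo ceiling. A self-contained check of that arithmetic (the ratio and scaling values are example numbers, not read from hardware):

```c
/* How the passive-mode init turns percent limits into the kHz values
 * handed to freq_qos_add_request(). */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int turbo_max = 40;     /* highest P-state ratio */
	int scaling   = 100000; /* kHz per ratio step */
	int min_pct = 25, max_pct = 100;

	int min_freq = DIV_ROUND_UP(turbo_max * min_pct, 100) * scaling;
	int max_freq = DIV_ROUND_UP(turbo_max * max_pct, 100) * scaling;

	printf("min=%d kHz max=%d kHz\n", min_freq, max_freq); /* 1000000 / 4000000 */
	return 0;
}
```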
| 2721 | + |
|---|
| 2722 | +static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
|---|
| 2723 | +{ |
|---|
| 2724 | + struct freq_qos_request *req; |
|---|
| 2725 | + |
|---|
| 2726 | + req = policy->driver_data; |
|---|
| 2727 | + |
|---|
| 2728 | + freq_qos_remove_request(req + 1); |
|---|
| 2729 | + freq_qos_remove_request(req); |
|---|
| 2730 | + kfree(req); |
|---|
| 2731 | + |
|---|
| 2732 | + return intel_pstate_cpu_exit(policy); |
|---|
| 2253 | 2733 | } |
|---|
| 2254 | 2734 | |
|---|
| 2255 | 2735 | static struct cpufreq_driver intel_cpufreq = { |
|---|
| .. | .. |
|---|
| 2258 | 2738 | .target = intel_cpufreq_target, |
|---|
| 2259 | 2739 | .fast_switch = intel_cpufreq_fast_switch, |
|---|
| 2260 | 2740 | .init = intel_cpufreq_cpu_init, |
|---|
| 2261 | | - .exit = intel_pstate_cpu_exit, |
|---|
| 2262 | | - .stop_cpu = intel_cpufreq_stop_cpu, |
|---|
| 2741 | + .exit = intel_cpufreq_cpu_exit, |
|---|
| 2742 | + .offline = intel_pstate_cpu_offline, |
|---|
| 2743 | + .online = intel_pstate_cpu_online, |
|---|
| 2744 | + .suspend = intel_pstate_suspend, |
|---|
| 2745 | + .resume = intel_pstate_resume, |
|---|
| 2746 | + .update_limits = intel_pstate_update_limits, |
|---|
| 2263 | 2747 | .name = "intel_cpufreq", |
|---|
| 2264 | 2748 | }; |
|---|
| 2265 | 2749 | |
|---|
| 2266 | | -static struct cpufreq_driver *default_driver = &intel_pstate; |
|---|
| 2750 | +static struct cpufreq_driver *default_driver; |
|---|
| 2267 | 2751 | |
|---|
| 2268 | 2752 | static void intel_pstate_driver_cleanup(void) |
|---|
| 2269 | 2753 | { |
|---|
| .. | .. |
|---|
| 2280 | 2764 | } |
|---|
| 2281 | 2765 | } |
|---|
| 2282 | 2766 | put_online_cpus(); |
|---|
| 2767 | + |
|---|
| 2283 | 2768 | intel_pstate_driver = NULL; |
|---|
| 2284 | 2769 | } |
|---|
| 2285 | 2770 | |
|---|
| 2286 | 2771 | static int intel_pstate_register_driver(struct cpufreq_driver *driver) |
|---|
| 2287 | 2772 | { |
|---|
| 2288 | 2773 | int ret; |
|---|
| 2774 | + |
|---|
| 2775 | + if (driver == &intel_pstate) |
|---|
| 2776 | + intel_pstate_sysfs_expose_hwp_dynamic_boost(); |
|---|
| 2289 | 2777 | |
|---|
| 2290 | 2778 | memset(&global, 0, sizeof(global)); |
|---|
| 2291 | 2779 | global.max_perf_pct = 100; |
|---|
| .. | .. |
|---|
| 2302 | 2790 | return 0; |
|---|
| 2303 | 2791 | } |
|---|
| 2304 | 2792 | |
|---|
| 2305 | | -static int intel_pstate_unregister_driver(void) |
|---|
| 2306 | | -{ |
|---|
| 2307 | | - if (hwp_active) |
|---|
| 2308 | | - return -EBUSY; |
|---|
| 2309 | | - |
|---|
| 2310 | | - cpufreq_unregister_driver(intel_pstate_driver); |
|---|
| 2311 | | - intel_pstate_driver_cleanup(); |
|---|
| 2312 | | - |
|---|
| 2313 | | - return 0; |
|---|
| 2314 | | -} |
|---|
| 2315 | | - |
|---|
| 2316 | 2793 | static ssize_t intel_pstate_show_status(char *buf) |
|---|
| 2317 | 2794 | { |
|---|
| 2318 | 2795 | if (!intel_pstate_driver) |
|---|
| .. | .. |
|---|
| 2324 | 2801 | |
|---|
| 2325 | 2802 | static int intel_pstate_update_status(const char *buf, size_t size) |
|---|
| 2326 | 2803 | { |
|---|
| 2327 | | - int ret; |
|---|
| 2328 | | - |
|---|
| 2329 | 2804 | if (size == 3 && !strncmp(buf, "off", size)) { |
|---|
| 2330 | 2805 | if (!intel_pstate_driver) |
|---|
| 2331 | 2806 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 2333 | 2808 | if (hwp_active) |
|---|
| 2334 | 2809 | return -EBUSY; |
|---|
| 2335 | 2810 | |
|---|
| 2336 | | - return intel_pstate_unregister_driver(); |
|---|
| 2811 | + cpufreq_unregister_driver(intel_pstate_driver); |
|---|
| 2812 | + intel_pstate_driver_cleanup(); |
|---|
| 2813 | + return 0; |
|---|
| 2337 | 2814 | } |
|---|
| 2338 | 2815 | |
|---|
| 2339 | 2816 | if (size == 6 && !strncmp(buf, "active", size)) { |
|---|
| .. | .. |
|---|
| 2341 | 2818 | if (intel_pstate_driver == &intel_pstate) |
|---|
| 2342 | 2819 | return 0; |
|---|
| 2343 | 2820 | |
|---|
| 2344 | | - ret = intel_pstate_unregister_driver(); |
|---|
| 2345 | | - if (ret) |
|---|
| 2346 | | - return ret; |
|---|
| 2821 | + cpufreq_unregister_driver(intel_pstate_driver); |
|---|
| 2347 | 2822 | } |
|---|
| 2348 | 2823 | |
|---|
| 2349 | 2824 | return intel_pstate_register_driver(&intel_pstate); |
|---|
| .. | .. |
|---|
| 2354 | 2829 | if (intel_pstate_driver == &intel_cpufreq) |
|---|
| 2355 | 2830 | return 0; |
|---|
| 2356 | 2831 | |
|---|
| 2357 | | - ret = intel_pstate_unregister_driver(); |
|---|
| 2358 | | - if (ret) |
|---|
| 2359 | | - return ret; |
|---|
| 2832 | + cpufreq_unregister_driver(intel_pstate_driver); |
|---|
| 2833 | + intel_pstate_sysfs_hide_hwp_dynamic_boost(); |
|---|
| 2360 | 2834 | } |
|---|
| 2361 | 2835 | |
|---|
| 2362 | 2836 | return intel_pstate_register_driver(&intel_cpufreq); |
|---|
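The strings parsed by intel_pstate_update_status() arrive through the driver's sysfs status attribute. A minimal root-only sketch that exercises the mode switch from userspace; the sysfs path is the one documented for intel_pstate:

```c
/* Switch intel_pstate between "off", "active" and "passive" via sysfs. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *mode = "passive"; /* "off", "active" or "passive" */
	FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/status", "w");

	if (!f) {
		perror("status");
		return 1;
	}
	if (fwrite(mode, 1, strlen(mode), f) != strlen(mode))
		perror("write");
	fclose(f);
	return 0;
}
```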
| .. | .. |
|---|
| 2420 | 2894 | kfree(pss); |
|---|
| 2421 | 2895 | } |
|---|
| 2422 | 2896 | |
|---|
| 2897 | + pr_debug("ACPI _PSS not found\n"); |
|---|
| 2423 | 2898 | return true; |
|---|
| 2424 | 2899 | } |
|---|
| 2425 | 2900 | |
|---|
| .. | .. |
|---|
| 2430 | 2905 | |
|---|
| 2431 | 2906 | status = acpi_get_handle(NULL, "\\_SB", &handle); |
|---|
| 2432 | 2907 | if (ACPI_FAILURE(status)) |
|---|
| 2433 | | - return true; |
|---|
| 2908 | + goto not_found; |
|---|
| 2434 | 2909 | |
|---|
| 2435 | | - return !acpi_has_method(handle, "PCCH"); |
|---|
| 2910 | + if (acpi_has_method(handle, "PCCH")) |
|---|
| 2911 | + return false; |
|---|
| 2912 | + |
|---|
| 2913 | +not_found: |
|---|
| 2914 | + pr_debug("ACPI PCCH not found\n"); |
|---|
| 2915 | + return true; |
|---|
| 2436 | 2916 | } |
|---|
| 2437 | 2917 | |
|---|
| 2438 | 2918 | static bool __init intel_pstate_has_acpi_ppc(void) |
|---|
| .. | .. |
|---|
| 2447 | 2927 | if (acpi_has_method(pr->handle, "_PPC")) |
|---|
| 2448 | 2928 | return true; |
|---|
| 2449 | 2929 | } |
|---|
| 2930 | + pr_debug("ACPI _PPC not found\n"); |
|---|
| 2450 | 2931 | return false; |
|---|
| 2451 | 2932 | } |
|---|
| 2452 | 2933 | |
|---|
| .. | .. |
|---|
| 2457 | 2938 | |
|---|
| 2458 | 2939 | /* Hardware vendor-specific info that has its own power management modes */ |
|---|
| 2459 | 2940 | static struct acpi_platform_list plat_info[] __initdata = { |
|---|
| 2460 | | - {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS}, |
|---|
| 2461 | | - {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2462 | | - {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2463 | | - {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2464 | | - {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2465 | | - {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2466 | | - {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2467 | | - {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2468 | | - {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2469 | | - {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2470 | | - {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2471 | | - {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2472 | | - {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2473 | | - {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2474 | | - {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC}, |
|---|
| 2941 | + {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, |
|---|
| 2942 | + {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2943 | + {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2944 | + {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2945 | + {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2946 | + {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2947 | + {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2948 | + {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2949 | + {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2950 | + {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2951 | + {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2952 | + {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2953 | + {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2954 | + {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2955 | + {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
|---|
| 2475 | 2956 | { } /* End */ |
|---|
| 2476 | 2957 | }; |
|---|
| 2958 | + |
|---|
| 2959 | +#define BITMASK_OOB (BIT(8) | BIT(18)) |
|---|
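BITMASK_OOB widens the old single-bit test so that either Out of Band indicator in MISC_PWR_MGMT disables the driver. A quick standalone check of the mask, with BIT() expanded locally:

```c
/* BITMASK_OOB covers bits 8 and 18 of the MISC_PWR_MGMT MSR. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))
#define BITMASK_OOB (BIT(8) | BIT(18))

int main(void)
{
	uint64_t misc_pwr = BIT(18); /* OOB signaled via bit 18 only */

	printf("mask=0x%llx oob=%d\n",
	       (unsigned long long)BITMASK_OOB, /* 0x40100 */
	       !!(misc_pwr & BITMASK_OOB));     /* 1 */
	return 0;
}
```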
| 2477 | 2960 | |
|---|
| 2478 | 2961 | static bool __init intel_pstate_platform_pwr_mgmt_exists(void) |
|---|
| 2479 | 2962 | { |
|---|
| .. | .. |
|---|
| 2484 | 2967 | id = x86_match_cpu(intel_pstate_cpu_oob_ids); |
|---|
| 2485 | 2968 | if (id) { |
|---|
| 2486 | 2969 | rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); |
|---|
| 2487 | | - if ( misc_pwr & (1 << 8)) |
|---|
| 2970 | + if (misc_pwr & BITMASK_OOB) { |
|---|
| 2971 | + pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); |
|---|
| 2972 | + pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); |
|---|
| 2488 | 2973 | return true; |
|---|
| 2974 | + } |
|---|
| 2489 | 2975 | } |
|---|
| 2490 | 2976 | |
|---|
| 2491 | 2977 | idx = acpi_match_platform_list(plat_info); |
|---|
| .. | .. |
|---|
| 2522 | 3008 | |
|---|
| 2523 | 3009 | #define INTEL_PSTATE_HWP_BROADWELL 0x01 |
|---|
| 2524 | 3010 | |
|---|
| 2525 | | -#define ICPU_HWP(model, hwp_mode) \ |
|---|
| 2526 | | - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode } |
|---|
| 3011 | +#define X86_MATCH_HWP(model, hwp_mode) \ |
|---|
| 3012 | + X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ |
|---|
| 3013 | + X86_FEATURE_HWP, hwp_mode) |
|---|
| 2527 | 3014 | |
|---|
| 2528 | 3015 | static const struct x86_cpu_id hwp_support_ids[] __initconst = { |
|---|
| 2529 | | - ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), |
|---|
| 2530 | | - ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), |
|---|
| 2531 | | - ICPU_HWP(X86_MODEL_ANY, 0), |
|---|
| 3016 | + X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), |
|---|
| 3017 | + X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), |
|---|
| 3018 | + X86_MATCH_HWP(ANY, 0), |
|---|
| 2532 | 3019 | {} |
|---|
| 2533 | 3020 | }; |
|---|
| 3021 | + |
|---|
| 3022 | +static bool intel_pstate_hwp_is_enabled(void) |
|---|
| 3023 | +{ |
|---|
| 3024 | + u64 value; |
|---|
| 3025 | + |
|---|
| 3026 | + rdmsrl(MSR_PM_ENABLE, value); |
|---|
| 3027 | + return !!(value & 0x1); |
|---|
| 3028 | +} |
|---|
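intel_pstate_hwp_is_enabled() just tests bit 0 of MSR_PM_ENABLE. A userspace analogue through the msr driver; the 0x770 register address is an assumption taken from the Intel SDM rather than from this file, and running it needs root plus `modprobe msr`:

```c
/* Read bit 0 of IA32_PM_ENABLE (assumed MSR 0x770) via /dev/cpu/0/msr. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t value;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &value, sizeof(value), 0x770) != sizeof(value)) {
		perror("msr");
		return 1;
	}
	printf("HWP %s\n", (value & 0x1) ? "enabled" : "disabled");
	close(fd);
	return 0;
}
```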
| 2534 | 3029 | |
|---|
| 2535 | 3030 | static int __init intel_pstate_init(void) |
|---|
| 2536 | 3031 | { |
|---|
| 2537 | 3032 | const struct x86_cpu_id *id; |
|---|
| 2538 | 3033 | int rc; |
|---|
| 2539 | 3034 | |
|---|
| 2540 | | - if (no_load) |
|---|
| 3035 | + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
|---|
| 2541 | 3036 | return -ENODEV; |
|---|
| 2542 | 3037 | |
|---|
| 2543 | 3038 | id = x86_match_cpu(hwp_support_ids); |
|---|
| 2544 | 3039 | if (id) { |
|---|
| 3040 | + bool hwp_forced = intel_pstate_hwp_is_enabled(); |
|---|
| 3041 | + |
|---|
| 3042 | + if (hwp_forced) |
|---|
| 3043 | + pr_info("HWP enabled by BIOS\n"); |
|---|
| 3044 | + else if (no_load) |
|---|
| 3045 | + return -ENODEV; |
|---|
| 3046 | + |
|---|
| 2545 | 3047 | copy_cpu_funcs(&core_funcs); |
|---|
| 2546 | | - if (!no_hwp) { |
|---|
| 3048 | + /* |
|---|
| 3049 | + * Avoid enabling HWP for processors without EPP support, |
|---|
| 3050 | + * because that means an incomplete HWP implementation, which is a
|---|
| 3051 | + * corner case and generally problematic to support.
|---|
| 3052 | + * |
|---|
| 3053 | + * If HWP is enabled already, though, there is no choice but to |
|---|
| 3054 | + * deal with it. |
|---|
| 3055 | + */ |
|---|
| 3056 | + if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { |
|---|
| 2547 | 3057 | hwp_active++; |
|---|
| 2548 | 3058 | hwp_mode_bdw = id->driver_data; |
|---|
| 2549 | 3059 | intel_pstate.attr = hwp_cpufreq_attrs; |
|---|
| 3060 | + intel_cpufreq.attr = hwp_cpufreq_attrs; |
|---|
| 3061 | + intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; |
|---|
| 3062 | + if (!default_driver) |
|---|
| 3063 | + default_driver = &intel_pstate; |
|---|
| 3064 | + |
|---|
| 2550 | 3065 | goto hwp_cpu_matched; |
|---|
| 2551 | 3066 | } |
|---|
| 3067 | + pr_info("HWP not enabled\n"); |
|---|
| 2552 | 3068 | } else { |
|---|
| 2553 | | - id = x86_match_cpu(intel_pstate_cpu_ids); |
|---|
| 2554 | | - if (!id) |
|---|
| 3069 | + if (no_load) |
|---|
| 2555 | 3070 | return -ENODEV; |
|---|
| 3071 | + |
|---|
| 3072 | + id = x86_match_cpu(intel_pstate_cpu_ids); |
|---|
| 3073 | + if (!id) { |
|---|
| 3074 | + pr_info("CPU model not supported\n"); |
|---|
| 3075 | + return -ENODEV; |
|---|
| 3076 | + } |
|---|
| 2556 | 3077 | |
|---|
| 2557 | 3078 | copy_cpu_funcs((struct pstate_funcs *)id->driver_data); |
|---|
| 2558 | 3079 | } |
|---|
| 2559 | 3080 | |
|---|
| 2560 | | - if (intel_pstate_msrs_not_valid()) |
|---|
| 3081 | + if (intel_pstate_msrs_not_valid()) { |
|---|
| 3082 | + pr_info("Invalid MSRs\n"); |
|---|
| 2561 | 3083 | return -ENODEV; |
|---|
| 3084 | + } |
|---|
| 3085 | + /* Without HWP, start in the passive mode. */
|---|
| 3086 | + if (!default_driver) |
|---|
| 3087 | + default_driver = &intel_cpufreq; |
|---|
| 2562 | 3088 | |
|---|
| 2563 | 3089 | hwp_cpu_matched: |
|---|
| 2564 | 3090 | /* |
|---|
| 2565 | 3091 | * The Intel pstate driver will be ignored if the platform |
|---|
| 2566 | 3092 | * firmware has its own power management modes. |
|---|
| 2567 | 3093 | */ |
|---|
| 2568 | | - if (intel_pstate_platform_pwr_mgmt_exists()) |
|---|
| 3094 | + if (intel_pstate_platform_pwr_mgmt_exists()) { |
|---|
| 3095 | + pr_info("P-states controlled by the platform\n"); |
|---|
| 2569 | 3096 | return -ENODEV; |
|---|
| 3097 | + } |
|---|
| 2570 | 3098 | |
|---|
| 2571 | 3099 | if (!hwp_active && hwp_only) |
|---|
| 2572 | 3100 | return -ENOTSUPP; |
|---|
| .. | .. |
|---|
| 2584 | 3112 | mutex_lock(&intel_pstate_driver_lock); |
|---|
| 2585 | 3113 | rc = intel_pstate_register_driver(default_driver); |
|---|
| 2586 | 3114 | mutex_unlock(&intel_pstate_driver_lock); |
|---|
| 2587 | | - if (rc) |
|---|
| 3115 | + if (rc) { |
|---|
| 3116 | + intel_pstate_sysfs_remove(); |
|---|
| 2588 | 3117 | return rc; |
|---|
| 3118 | + } |
|---|
| 2589 | 3119 | |
|---|
| 2590 | | - if (hwp_active) |
|---|
| 3120 | + if (hwp_active) { |
|---|
| 3121 | + const struct x86_cpu_id *id; |
|---|
| 3122 | + |
|---|
| 3123 | + id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); |
|---|
| 3124 | + if (id) { |
|---|
| 3125 | + set_power_ctl_ee_state(false); |
|---|
| 3126 | + pr_info("Disabling energy efficiency optimization\n"); |
|---|
| 3127 | + } |
|---|
| 3128 | + |
|---|
| 2591 | 3129 | pr_info("HWP enabled\n"); |
|---|
| 3130 | + } |
|---|
| 2592 | 3131 | |
|---|
| 2593 | 3132 | return 0; |
|---|
| 2594 | 3133 | } |
|---|
| .. | .. |
|---|
| 2599 | 3138 | if (!str) |
|---|
| 2600 | 3139 | return -EINVAL; |
|---|
| 2601 | 3140 | |
|---|
| 2602 | | - if (!strcmp(str, "disable")) { |
|---|
| 3141 | + if (!strcmp(str, "disable")) |
|---|
| 2603 | 3142 | no_load = 1; |
|---|
| 2604 | | - } else if (!strcmp(str, "passive")) { |
|---|
| 2605 | | - pr_info("Passive mode enabled\n"); |
|---|
| 3143 | + else if (!strcmp(str, "active")) |
|---|
| 3144 | + default_driver = &intel_pstate; |
|---|
| 3145 | + else if (!strcmp(str, "passive")) |
|---|
| 2606 | 3146 | default_driver = &intel_cpufreq; |
|---|
| 3147 | + |
|---|
| 3148 | + if (!strcmp(str, "no_hwp")) |
|---|
| 2607 | 3149 | no_hwp = 1; |
|---|
| 2608 | | - } |
|---|
| 2609 | | - if (!strcmp(str, "no_hwp")) { |
|---|
| 2610 | | - pr_info("HWP disabled\n"); |
|---|
| 2611 | | - no_hwp = 1; |
|---|
| 2612 | | - } |
|---|
| 3150 | + |
|---|
| 2613 | 3151 | if (!strcmp(str, "force")) |
|---|
| 2614 | 3152 | force_load = 1; |
|---|
| 2615 | 3153 | if (!strcmp(str, "hwp_only")) |
|---|