hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/acpi/cppc_acpi.c
....@@ -1,13 +1,9 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
34 *
45 * (C) Copyright 2014, 2015 Linaro Ltd.
56 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
6
- *
7
- * This program is free software; you can redistribute it and/or
8
- * modify it under the terms of the GNU General Public License
9
- * as published by the Free Software Foundation; version 2
10
- * of the License.
117 *
128 * CPPC describes a few methods for controlling CPU performance using
139 * information from a per CPU table called CPC. This table is described in
....@@ -81,9 +77,9 @@
8177 int refcount;
8278 };
8379
84
-/* Array to represent the PCC channel per subspace id */
80
+/* Array to represent the PCC channel per subspace ID */
8581 static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
86
-/* The cpu_pcc_subspace_idx containsper CPU subspace id */
82
+/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
8783 static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
8884
8985 /*
....@@ -346,7 +342,7 @@
346342 *(u16 *)msg, ret);
347343 }
348344
349
-struct mbox_client cppc_mbox_cl = {
345
+static struct mbox_client cppc_mbox_cl = {
350346 .tx_done = cppc_chan_tx_done,
351347 .knows_txdone = true,
352348 };
....@@ -430,17 +426,14 @@
430426 return -ENOMEM;
431427
432428 /*
433
- * Now that we have _PSD data from all CPUs, lets setup P-state
429
+ * Now that we have _PSD data from all CPUs, let's setup P-state
434430 * domain info.
435431 */
436432 for_each_possible_cpu(i) {
437
- pr = all_cpu_data[i];
438
- if (!pr)
439
- continue;
440
-
441433 if (cpumask_test_cpu(i, covered_cpus))
442434 continue;
443435
436
+ pr = all_cpu_data[i];
444437 cpc_ptr = per_cpu(cpc_desc_ptr, i);
445438 if (!cpc_ptr) {
446439 retval = -EFAULT;
....@@ -491,44 +484,28 @@
491484 cpumask_set_cpu(j, pr->shared_cpu_map);
492485 }
493486
494
- for_each_possible_cpu(j) {
487
+ for_each_cpu(j, pr->shared_cpu_map) {
495488 if (i == j)
496489 continue;
497490
498491 match_pr = all_cpu_data[j];
499
- if (!match_pr)
500
- continue;
501
-
502
- match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
503
- if (!match_cpc_ptr) {
504
- retval = -EFAULT;
505
- goto err_ret;
506
- }
507
-
508
- match_pdomain = &(match_cpc_ptr->domain_info);
509
- if (match_pdomain->domain != pdomain->domain)
510
- continue;
511
-
512492 match_pr->shared_type = pr->shared_type;
513493 cpumask_copy(match_pr->shared_cpu_map,
514494 pr->shared_cpu_map);
515495 }
516496 }
497
+ goto out;
517498
518499 err_ret:
519500 for_each_possible_cpu(i) {
520501 pr = all_cpu_data[i];
521
- if (!pr)
522
- continue;
523502
524503 /* Assume no coordination on any error parsing domain info */
525
- if (retval) {
526
- cpumask_clear(pr->shared_cpu_map);
527
- cpumask_set_cpu(i, pr->shared_cpu_map);
528
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
529
- }
504
+ cpumask_clear(pr->shared_cpu_map);
505
+ cpumask_set_cpu(i, pr->shared_cpu_map);
506
+ pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
530507 }
531
-
508
+out:
532509 free_cpumask_var(covered_cpus);
533510 return retval;
534511 }
....@@ -582,7 +559,7 @@
582559 return -ENOMEM;
583560 }
584561
585
- /* Set flag so that we dont come here for each CPU. */
562
+ /* Set flag so that we don't come here for each CPU. */
586563 pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
587564 }
588565
....@@ -607,12 +584,12 @@
607584 *
608585 * Check and allocate the cppc_pcc_data memory.
609586 * In some processor configurations it is possible that same subspace
610
- * is shared between multiple CPU's. This is seen especially in CPU's
587
+ * is shared between multiple CPUs. This is seen especially in CPUs
611588 * with hardware multi-threading support.
612589 *
613590 * Return: 0 for success, errno for failure
614591 */
615
-int pcc_data_alloc(int pcc_ss_id)
592
+static int pcc_data_alloc(int pcc_ss_id)
616593 {
617594 if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
618595 return -EINVAL;
....@@ -628,33 +605,6 @@
628605 }
629606
630607 return 0;
631
-}
632
-
633
-/* Check if CPPC revision + num_ent combination is supported */
634
-static bool is_cppc_supported(int revision, int num_ent)
635
-{
636
- int expected_num_ent;
637
-
638
- switch (revision) {
639
- case CPPC_V2_REV:
640
- expected_num_ent = CPPC_V2_NUM_ENT;
641
- break;
642
- case CPPC_V3_REV:
643
- expected_num_ent = CPPC_V3_NUM_ENT;
644
- break;
645
- default:
646
- pr_debug("Firmware exports unsupported CPPC revision: %d\n",
647
- revision);
648
- return false;
649
- }
650
-
651
- if (expected_num_ent != num_ent) {
652
- pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
653
- num_ent, expected_num_ent, revision);
654
- return false;
655
- }
656
-
657
- return true;
658608 }
659609
660610 /*
....@@ -705,7 +655,7 @@
705655
706656 /**
707657 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
708
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
658
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
709659 *
710660 * Return: 0 for success or negative value for err.
711661 */
....@@ -722,7 +672,7 @@
722672 acpi_status status;
723673 int ret = -EFAULT;
724674
725
- /* Parse the ACPI _CPC table for this cpu. */
675
+ /* Parse the ACPI _CPC table for this CPU. */
726676 status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
727677 ACPI_TYPE_PACKAGE);
728678 if (ACPI_FAILURE(status)) {
....@@ -742,12 +692,16 @@
742692 cpc_obj = &out_obj->package.elements[0];
743693 if (cpc_obj->type == ACPI_TYPE_INTEGER) {
744694 num_ent = cpc_obj->integer.value;
695
+ if (num_ent <= 1) {
696
+ pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
697
+ num_ent, pr->id);
698
+ goto out_free;
699
+ }
745700 } else {
746701 pr_debug("Unexpected entry type(%d) for NumEntries\n",
747702 cpc_obj->type);
748703 goto out_free;
749704 }
750
- cpc_ptr->num_entries = num_ent;
751705
752706 /* Second entry should be revision. */
753707 cpc_obj = &out_obj->package.elements[1];
....@@ -758,10 +712,32 @@
758712 cpc_obj->type);
759713 goto out_free;
760714 }
761
- cpc_ptr->version = cpc_rev;
762715
763
- if (!is_cppc_supported(cpc_rev, num_ent))
716
+ if (cpc_rev < CPPC_V2_REV) {
717
+ pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
718
+ pr->id);
764719 goto out_free;
720
+ }
721
+
722
+ /*
723
+	 * Disregard _CPC if the number of entries in the return package is not
724
+ * as expected, but support future revisions being proper supersets of
725
+ * the v3 and only causing more entries to be returned by _CPC.
726
+ */
727
+ if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
728
+ (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
729
+ (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
730
+ pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
731
+ num_ent, pr->id);
732
+ goto out_free;
733
+ }
734
+ if (cpc_rev > CPPC_V3_REV) {
735
+ num_ent = CPPC_V3_NUM_ENT;
736
+ cpc_rev = CPPC_V3_REV;
737
+ }
738
+
739
+ cpc_ptr->num_entries = num_ent;
740
+ cpc_ptr->version = cpc_rev;
765741
766742 /* Iterate through remaining entries in _CPC */
767743 for (i = 2; i < num_ent; i++) {
....@@ -834,7 +810,7 @@
834810 if (ret)
835811 goto out_free;
836812
837
- /* Register PCC channel once for all PCC subspace id. */
813
+ /* Register PCC channel once for all PCC subspace ID. */
838814 if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
839815 ret = register_pcc_channel(pcc_subspace_id);
840816 if (ret)
....@@ -854,7 +830,7 @@
854830 goto out_free;
855831 }
856832
857
- /* Plug PSD data into this CPUs CPC descriptor. */
833
+ /* Plug PSD data into this CPU's CPC descriptor. */
858834 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
859835
860836 ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
....@@ -886,7 +862,7 @@
886862
887863 /**
888864 * acpi_cppc_processor_exit - Cleanup CPC structs.
889
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
865
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
890866 *
891867 * Return: Void
892868 */
....@@ -926,7 +902,7 @@
926902
927903 /**
928904 * cpc_read_ffh() - Read FFH register
929
- * @cpunum: cpu number to read
905
+ * @cpunum: CPU number to read
930906 * @reg: cppc register information
931907 * @val: place holder for return value
932908 *
....@@ -941,7 +917,7 @@
941917
942918 /**
943919 * cpc_write_ffh() - Write FFH register
944
- * @cpunum: cpu number to write
920
+ * @cpunum: CPU number to write
945921 * @reg: cppc register information
946922 * @val: value to write
947923 *
....@@ -1046,7 +1022,49 @@
10461022 }
10471023
10481024 /**
1049
- * cppc_get_perf_caps - Get a CPUs performance capabilities.
1025
+ * cppc_get_desired_perf - Get the value of desired performance register.
1026
+ * @cpunum: CPU from which to get desired performance.
1027
+ * @desired_perf: address of a variable to store the returned desired performance
1028
+ *
1029
+ * Return: 0 for success, -EIO otherwise.
1030
+ */
1031
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
1032
+{
1033
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1034
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1035
+ struct cpc_register_resource *desired_reg;
1036
+ struct cppc_pcc_data *pcc_ss_data = NULL;
1037
+
1038
+ desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1039
+
1040
+ if (CPC_IN_PCC(desired_reg)) {
1041
+ int ret = 0;
1042
+
1043
+ if (pcc_ss_id < 0)
1044
+ return -EIO;
1045
+
1046
+ pcc_ss_data = pcc_data[pcc_ss_id];
1047
+
1048
+ down_write(&pcc_ss_data->pcc_lock);
1049
+
1050
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
1051
+ cpc_read(cpunum, desired_reg, desired_perf);
1052
+ else
1053
+ ret = -EIO;
1054
+
1055
+ up_write(&pcc_ss_data->pcc_lock);
1056
+
1057
+ return ret;
1058
+ }
1059
+
1060
+ cpc_read(cpunum, desired_reg, desired_perf);
1061
+
1062
+ return 0;
1063
+}
1064
+EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
1065
+
1066
+/**
1067
+ * cppc_get_perf_caps - Get a CPU's performance capabilities.
10501068 * @cpunum: CPU from which to get capabilities info.
10511069 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
10521070 *
....@@ -1056,9 +1074,9 @@
10561074 {
10571075 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
10581076 struct cpc_register_resource *highest_reg, *lowest_reg,
1059
- *lowest_non_linear_reg, *nominal_reg,
1077
+ *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
10601078 *low_freq_reg = NULL, *nom_freq_reg = NULL;
1061
- u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0;
1079
+ u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
10621080 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
10631081 struct cppc_pcc_data *pcc_ss_data = NULL;
10641082 int ret = 0, regs_in_pcc = 0;
....@@ -1074,6 +1092,7 @@
10741092 nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
10751093 low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
10761094 nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
1095
+ guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
10771096
10781097 /* Are any of the regs PCC ?*/
10791098 if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
....@@ -1102,6 +1121,14 @@
11021121 cpc_read(cpunum, nominal_reg, &nom);
11031122 perf_caps->nominal_perf = nom;
11041123
1124
+ if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
1125
+ IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1126
+ perf_caps->guaranteed_perf = 0;
1127
+ } else {
1128
+ cpc_read(cpunum, guaranteed_reg, &guaranteed);
1129
+ perf_caps->guaranteed_perf = guaranteed;
1130
+ }
1131
+
11051132 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
11061133 perf_caps->lowest_nonlinear_perf = min_nonlinear;
11071134
....@@ -1127,7 +1154,7 @@
11271154 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
11281155
11291156 /**
1130
- * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
1157
+ * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
11311158 * @cpunum: CPU from which to read counters.
11321159 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
11331160 *
....@@ -1154,7 +1181,7 @@
11541181 ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
11551182
11561183 /*
1157
- * If refernce perf register is not supported then we should
1184
+ * If reference perf register is not supported then we should
11581185 * use the nominal perf value
11591186 */
11601187 if (!CPC_SUPPORTED(ref_perf_reg))
....@@ -1207,7 +1234,7 @@
12071234 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
12081235
12091236 /**
1210
- * cppc_set_perf - Set a CPUs performance controls.
1237
+ * cppc_set_perf - Set a CPU's performance controls.
12111238 * @cpu: CPU for which to set performance controls.
12121239 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
12131240 *
....@@ -1288,7 +1315,7 @@
12881315 * executing the Phase-II.
12891316 * 2. Some other CPU has beaten this CPU to successfully execute the
12901317 * write_trylock and has already acquired the write_lock. We know for a
1291
- * fact it(other CPU acquiring the write_lock) couldn't have happened
1318
+ * fact it (other CPU acquiring the write_lock) couldn't have happened
12921319 * before this CPU's Phase-I as we held the read_lock.
12931320 * 3. Some other CPU executing pcc CMD_READ has stolen the
12941321 * down_write, in which case, send_pcc_cmd will check for pending