.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
1 | 2 | /* |
2 | 3 | * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers. |
3 | 4 | * |
4 | 5 | * (C) Copyright 2014, 2015 Linaro Ltd. |
5 | 6 | * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> |
6 | | - * |
7 | | - * This program is free software; you can redistribute it and/or |
8 | | - * modify it under the terms of the GNU General Public License |
9 | | - * as published by the Free Software Foundation; version 2 |
10 | | - * of the License. |
11 | 7 | * |
12 | 8 | * CPPC describes a few methods for controlling CPU performance using |
13 | 9 | * information from a per CPU table called CPC. This table is described in |
.. | .. |
81 | 77 | int refcount; |
82 | 78 | }; |
83 | 79 | |
84 | | -/* Array to represent the PCC channel per subspace id */ |
| 80 | +/* Array to represent the PCC channel per subspace ID */ |
85 | 81 | static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES]; |
86 | | -/* The cpu_pcc_subspace_idx containsper CPU subspace id */ |
| 82 | +/* The cpu_pcc_subspace_idx contains per CPU subspace ID */ |
87 | 83 | static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx); |
88 | 84 | |
89 | 85 | /* |
.. | .. |
346 | 342 | *(u16 *)msg, ret); |
347 | 343 | } |
348 | 344 | |
349 | | -struct mbox_client cppc_mbox_cl = { |
| 345 | +static struct mbox_client cppc_mbox_cl = { |
350 | 346 | .tx_done = cppc_chan_tx_done, |
351 | 347 | .knows_txdone = true, |
352 | 348 | }; |
.. | .. |
430 | 426 | return -ENOMEM; |
431 | 427 | |
432 | 428 | /* |
433 | | - * Now that we have _PSD data from all CPUs, lets setup P-state |
| 429 | + * Now that we have _PSD data from all CPUs, let's setup P-state |
434 | 430 | * domain info. |
435 | 431 | */ |
436 | 432 | for_each_possible_cpu(i) { |
437 | | - pr = all_cpu_data[i]; |
438 | | - if (!pr) |
439 | | - continue; |
440 | | - |
441 | 433 | if (cpumask_test_cpu(i, covered_cpus)) |
442 | 434 | continue; |
443 | 435 | |
| 436 | + pr = all_cpu_data[i]; |
444 | 437 | cpc_ptr = per_cpu(cpc_desc_ptr, i); |
445 | 438 | if (!cpc_ptr) { |
446 | 439 | retval = -EFAULT; |
.. | .. |
491 | 484 | cpumask_set_cpu(j, pr->shared_cpu_map); |
492 | 485 | } |
493 | 486 | |
494 | | - for_each_possible_cpu(j) { |
| 487 | + for_each_cpu(j, pr->shared_cpu_map) { |
495 | 488 | if (i == j) |
496 | 489 | continue; |
497 | 490 | |
498 | 491 | match_pr = all_cpu_data[j]; |
499 | | - if (!match_pr) |
500 | | - continue; |
501 | | - |
502 | | - match_cpc_ptr = per_cpu(cpc_desc_ptr, j); |
503 | | - if (!match_cpc_ptr) { |
504 | | - retval = -EFAULT; |
505 | | - goto err_ret; |
506 | | - } |
507 | | - |
508 | | - match_pdomain = &(match_cpc_ptr->domain_info); |
509 | | - if (match_pdomain->domain != pdomain->domain) |
510 | | - continue; |
511 | | - |
512 | 492 | match_pr->shared_type = pr->shared_type; |
513 | 493 | cpumask_copy(match_pr->shared_cpu_map, |
514 | 494 | pr->shared_cpu_map); |
515 | 495 | } |
516 | 496 | } |
| 497 | + goto out; |
517 | 498 | |
518 | 499 | err_ret: |
519 | 500 | for_each_possible_cpu(i) { |
520 | 501 | pr = all_cpu_data[i]; |
521 | | - if (!pr) |
522 | | - continue; |
523 | 502 | |
524 | 503 | /* Assume no coordination on any error parsing domain info */ |
525 | | - if (retval) { |
526 | | - cpumask_clear(pr->shared_cpu_map); |
527 | | - cpumask_set_cpu(i, pr->shared_cpu_map); |
528 | | - pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; |
529 | | - } |
| 504 | + cpumask_clear(pr->shared_cpu_map); |
| 505 | + cpumask_set_cpu(i, pr->shared_cpu_map); |
| 506 | + pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; |
530 | 507 | } |
531 | | - |
| 508 | +out: |
532 | 509 | free_cpumask_var(covered_cpus); |
533 | 510 | return retval; |
534 | 511 | } |
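The restructuring of acpi_get_psd_map() above simplifies its error handling: the err_ret loop is now reached only on failure (so its retval check is gone), and the success path skips it entirely through the new out label. A condensed, illustrative outline of the resulting control flow, reusing names from the hunk and eliding the _PSD parsing, looks roughly like this:

```c
	for_each_possible_cpu(i) {
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pr = all_cpu_data[i];
		/* ... parse _PSD for CPU i and fill pr->shared_cpu_map,
		 * jumping to err_ret if anything goes wrong ... */
	}
	goto out;	/* every coordination domain parsed successfully */

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];

		/* Assume no coordination on any error parsing domain info */
		cpumask_clear(pr->shared_cpu_map);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	}
out:
	free_cpumask_var(covered_cpus);
	return retval;
```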
.. | .. |
582 | 559 | return -ENOMEM; |
583 | 560 | } |
584 | 561 | |
585 | | - /* Set flag so that we dont come here for each CPU. */ |
| 562 | + /* Set flag so that we don't come here for each CPU. */ |
586 | 563 | pcc_data[pcc_ss_idx]->pcc_channel_acquired = true; |
587 | 564 | } |
588 | 565 | |
.. | .. |
607 | 584 | * |
608 | 585 | * Check and allocate the cppc_pcc_data memory. |
609 | 586 | * In some processor configurations it is possible that same subspace |
610 | | - * is shared between multiple CPU's. This is seen especially in CPU's |
| 587 | + * is shared between multiple CPUs. This is seen especially in CPUs |
611 | 588 | * with hardware multi-threading support. |
612 | 589 | * |
613 | 590 | * Return: 0 for success, errno for failure |
614 | 591 | */ |
615 | | -int pcc_data_alloc(int pcc_ss_id) |
| 592 | +static int pcc_data_alloc(int pcc_ss_id) |
616 | 593 | { |
617 | 594 | if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES) |
618 | 595 | return -EINVAL; |
.. | .. |
628 | 605 | } |
629 | 606 | |
630 | 607 | return 0; |
631 | | -} |
632 | | - |
633 | | -/* Check if CPPC revision + num_ent combination is supported */ |
634 | | -static bool is_cppc_supported(int revision, int num_ent) |
635 | | -{ |
636 | | - int expected_num_ent; |
637 | | - |
638 | | - switch (revision) { |
639 | | - case CPPC_V2_REV: |
640 | | - expected_num_ent = CPPC_V2_NUM_ENT; |
641 | | - break; |
642 | | - case CPPC_V3_REV: |
643 | | - expected_num_ent = CPPC_V3_NUM_ENT; |
644 | | - break; |
645 | | - default: |
646 | | - pr_debug("Firmware exports unsupported CPPC revision: %d\n", |
647 | | - revision); |
648 | | - return false; |
649 | | - } |
650 | | - |
651 | | - if (expected_num_ent != num_ent) { |
652 | | - pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n", |
653 | | - num_ent, expected_num_ent, revision); |
654 | | - return false; |
655 | | - } |
656 | | - |
657 | | - return true; |
658 | 608 | } |
659 | 609 | |
660 | 610 | /* |
.. | .. |
705 | 655 | |
706 | 656 | /** |
707 | 657 | * acpi_cppc_processor_probe - Search for per CPU _CPC objects. |
708 | | - * @pr: Ptr to acpi_processor containing this CPUs logical Id. |
| 658 | + * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
709 | 659 | * |
710 | 660 | * Return: 0 for success or negative value for err. |
711 | 661 | */ |
.. | .. |
722 | 672 | acpi_status status; |
723 | 673 | int ret = -EFAULT; |
724 | 674 | |
725 | | - /* Parse the ACPI _CPC table for this cpu. */ |
| 675 | + /* Parse the ACPI _CPC table for this CPU. */ |
726 | 676 | status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output, |
727 | 677 | ACPI_TYPE_PACKAGE); |
728 | 678 | if (ACPI_FAILURE(status)) { |
.. | .. |
742 | 692 | cpc_obj = &out_obj->package.elements[0]; |
743 | 693 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { |
744 | 694 | num_ent = cpc_obj->integer.value; |
| 695 | + if (num_ent <= 1) { |
| 696 | + pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", |
| 697 | + num_ent, pr->id); |
| 698 | + goto out_free; |
| 699 | + } |
745 | 700 | } else { |
746 | 701 | pr_debug("Unexpected entry type(%d) for NumEntries\n", |
747 | 702 | cpc_obj->type); |
748 | 703 | goto out_free; |
749 | 704 | } |
750 | | - cpc_ptr->num_entries = num_ent; |
751 | 705 | |
752 | 706 | /* Second entry should be revision. */ |
753 | 707 | cpc_obj = &out_obj->package.elements[1]; |
.. | .. |
758 | 712 | cpc_obj->type); |
759 | 713 | goto out_free; |
760 | 714 | } |
761 | | - cpc_ptr->version = cpc_rev; |
762 | 715 | |
763 | | - if (!is_cppc_supported(cpc_rev, num_ent)) |
| 716 | + if (cpc_rev < CPPC_V2_REV) { |
| 717 | + pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev, |
| 718 | + pr->id); |
764 | 719 | goto out_free; |
| 720 | + } |
| 721 | + |
| 722 | + /* |
| 723 | + * Disregard _CPC if the number of entries in the return package is not |
| 724 | + * as expected, but support future revisions being proper supersets of |
| 725 | + * the v3 and only causing more entries to be returned by _CPC. |
| 726 | + */ |
| 727 | + if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) || |
| 728 | + (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) || |
| 729 | + (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) { |
| 730 | + pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n", |
| 731 | + num_ent, pr->id); |
| 732 | + goto out_free; |
| 733 | + } |
| 734 | + if (cpc_rev > CPPC_V3_REV) { |
| 735 | + num_ent = CPPC_V3_NUM_ENT; |
| 736 | + cpc_rev = CPPC_V3_REV; |
| 737 | + } |
| 738 | + |
| 739 | + cpc_ptr->num_entries = num_ent; |
| 740 | + cpc_ptr->version = cpc_rev; |
765 | 741 | |
766 | 742 | /* Iterate through remaining entries in _CPC */ |
767 | 743 | for (i = 2; i < num_ent; i++) { |
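For readers cross-checking the new inline validation, here is the same acceptance rule re-expressed as a standalone predicate. This helper does not exist in the patch; it is a sketch that assumes the CPPC_V2_*/CPPC_V3_* macros from include/acpi/cppc_acpi.h keep their usual values (21 entries for a v2 _CPC package, 23 for v3):

```c
#include <linux/types.h>
#include <acpi/cppc_acpi.h>	/* assumed to provide CPPC_V{2,3}_REV and CPPC_V{2,3}_NUM_ENT */

/* Hypothetical helper mirroring the inline checks in acpi_cppc_processor_probe(). */
static bool cpc_package_is_usable(int cpc_rev, int num_ent)
{
	if (cpc_rev < CPPC_V2_REV)
		return false;				/* revisions below 2 are not supported */
	if (cpc_rev == CPPC_V2_REV)
		return num_ent == CPPC_V2_NUM_ENT;	/* v2 must have exactly the v2 entry count */
	if (cpc_rev == CPPC_V3_REV)
		return num_ent == CPPC_V3_NUM_ENT;	/* v3 must have exactly the v3 entry count */
	/* Future revisions are accepted only as proper supersets of the v3 package. */
	return num_ent > CPPC_V3_NUM_ENT;
}
```

A revision newer than v3 that passes this check is then clamped to the v3 layout (num_ent = CPPC_V3_NUM_ENT, cpc_rev = CPPC_V3_REV), as the hunk above shows.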
.. | .. |
834 | 810 | if (ret) |
835 | 811 | goto out_free; |
836 | 812 | |
837 | | - /* Register PCC channel once for all PCC subspace id. */ |
| 813 | + /* Register PCC channel once for all PCC subspace ID. */ |
838 | 814 | if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) { |
839 | 815 | ret = register_pcc_channel(pcc_subspace_id); |
840 | 816 | if (ret) |
.. | .. |
854 | 830 | goto out_free; |
855 | 831 | } |
856 | 832 | |
857 | | - /* Plug PSD data into this CPUs CPC descriptor. */ |
| 833 | + /* Plug PSD data into this CPU's CPC descriptor. */ |
858 | 834 | per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; |
859 | 835 | |
860 | 836 | ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, |
.. | .. |
886 | 862 | |
887 | 863 | /** |
888 | 864 | * acpi_cppc_processor_exit - Cleanup CPC structs. |
889 | | - * @pr: Ptr to acpi_processor containing this CPUs logical Id. |
| 865 | + * @pr: Ptr to acpi_processor containing this CPU's logical ID. |
890 | 866 | * |
891 | 867 | * Return: Void |
892 | 868 | */ |
.. | .. |
926 | 902 | |
927 | 903 | /** |
928 | 904 | * cpc_read_ffh() - Read FFH register |
929 | | - * @cpunum: cpu number to read |
| 905 | + * @cpunum: CPU number to read |
930 | 906 | * @reg: cppc register information |
931 | 907 | * @val: place holder for return value |
932 | 908 | * |
.. | .. |
941 | 917 | |
942 | 918 | /** |
943 | 919 | * cpc_write_ffh() - Write FFH register |
944 | | - * @cpunum: cpu number to write |
| 920 | + * @cpunum: CPU number to write |
945 | 921 | * @reg: cppc register information |
946 | 922 | * @val: value to write |
947 | 923 | * |
.. | .. |
1046 | 1022 | } |
1047 | 1023 | |
1048 | 1024 | /** |
1049 | | - * cppc_get_perf_caps - Get a CPUs performance capabilities. |
| 1025 | + * cppc_get_desired_perf - Get the value of desired performance register. |
| 1026 | + * @cpunum: CPU from which to get desired performance. |
| 1027 | + * @desired_perf: address of a variable to store the returned desired performance |
| 1028 | + * |
| 1029 | + * Return: 0 for success, -EIO otherwise. |
| 1030 | + */ |
| 1031 | +int cppc_get_desired_perf(int cpunum, u64 *desired_perf) |
| 1032 | +{ |
| 1033 | + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
| 1034 | + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
| 1035 | + struct cpc_register_resource *desired_reg; |
| 1036 | + struct cppc_pcc_data *pcc_ss_data = NULL; |
| 1037 | + |
| 1038 | + desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; |
| 1039 | + |
| 1040 | + if (CPC_IN_PCC(desired_reg)) { |
| 1041 | + int ret = 0; |
| 1042 | + |
| 1043 | + if (pcc_ss_id < 0) |
| 1044 | + return -EIO; |
| 1045 | + |
| 1046 | + pcc_ss_data = pcc_data[pcc_ss_id]; |
| 1047 | + |
| 1048 | + down_write(&pcc_ss_data->pcc_lock); |
| 1049 | + |
| 1050 | + if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) |
| 1051 | + cpc_read(cpunum, desired_reg, desired_perf); |
| 1052 | + else |
| 1053 | + ret = -EIO; |
| 1054 | + |
| 1055 | + up_write(&pcc_ss_data->pcc_lock); |
| 1056 | + |
| 1057 | + return ret; |
| 1058 | + } |
| 1059 | + |
| 1060 | + cpc_read(cpunum, desired_reg, desired_perf); |
| 1061 | + |
| 1062 | + return 0; |
| 1063 | +} |
| 1064 | +EXPORT_SYMBOL_GPL(cppc_get_desired_perf); |
| 1065 | + |
| 1066 | +/** |
| 1067 | + * cppc_get_perf_caps - Get a CPU's performance capabilities. |
1050 | 1068 | * @cpunum: CPU from which to get capabilities info. |
1051 | 1069 | * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h |
1052 | 1070 | * |
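The hunk above introduces cppc_get_desired_perf() and exports it for cpufreq drivers. As a rough usage sketch (the helper name and messages below are hypothetical, not part of the patch), a driver could report the last requested performance level like this:

```c
#include <linux/kernel.h>
#include <acpi/cppc_acpi.h>

/* Hypothetical debug helper: print the currently requested CPPC performance. */
static void example_show_desired_perf(int cpu)
{
	u64 desired_perf;

	/* cppc_get_desired_perf() returns 0 on success and -EIO on failure. */
	if (cppc_get_desired_perf(cpu, &desired_perf))
		pr_warn("CPU%d: failed to read desired performance\n", cpu);
	else
		pr_info("CPU%d: desired performance = %llu\n",
			cpu, (unsigned long long)desired_perf);
}
```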
.. | .. |
1056 | 1074 | { |
1057 | 1075 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
1058 | 1076 | struct cpc_register_resource *highest_reg, *lowest_reg, |
1059 | | - *lowest_non_linear_reg, *nominal_reg, |
| 1077 | + *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg, |
1060 | 1078 | *low_freq_reg = NULL, *nom_freq_reg = NULL; |
1061 | | - u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0; |
| 1079 | + u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0; |
1062 | 1080 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); |
1063 | 1081 | struct cppc_pcc_data *pcc_ss_data = NULL; |
1064 | 1082 | int ret = 0, regs_in_pcc = 0; |
.. | .. |
1074 | 1092 | nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; |
1075 | 1093 | low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ]; |
1076 | 1094 | nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ]; |
| 1095 | + guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF]; |
1077 | 1096 | |
1078 | 1097 | /* Are any of the regs PCC ?*/ |
1079 | 1098 | if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || |
.. | .. |
1102 | 1121 | cpc_read(cpunum, nominal_reg, &nom); |
1103 | 1122 | perf_caps->nominal_perf = nom; |
1104 | 1123 | |
| 1124 | + if (guaranteed_reg->type != ACPI_TYPE_BUFFER || |
| 1125 | + IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) { |
| 1126 | + perf_caps->guaranteed_perf = 0; |
| 1127 | + } else { |
| 1128 | + cpc_read(cpunum, guaranteed_reg, &guaranteed); |
| 1129 | + perf_caps->guaranteed_perf = guaranteed; |
| 1130 | + } |
| 1131 | + |
1105 | 1132 | cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); |
1106 | 1133 | perf_caps->lowest_nonlinear_perf = min_nonlinear; |
1107 | 1134 | |
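With the guaranteed performance register wired up above, cppc_get_perf_caps() now also fills perf_caps->guaranteed_perf, leaving it at 0 when firmware does not expose the optional register. A minimal, hypothetical consumer (not part of this patch) could fall back to nominal performance in that case:

```c
#include <acpi/cppc_acpi.h>

/* Hypothetical helper: prefer guaranteed performance, else fall back to nominal. */
static u32 example_guaranteed_or_nominal(int cpu)
{
	struct cppc_perf_caps caps;

	if (cppc_get_perf_caps(cpu, &caps))
		return 0;	/* _CPC capabilities could not be read */

	/* guaranteed_perf is 0 when the register is absent or NULL. */
	return caps.guaranteed_perf ? caps.guaranteed_perf : caps.nominal_perf;
}
```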
.. | .. |
1127 | 1154 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); |
1128 | 1155 | |
1129 | 1156 | /** |
1130 | | - * cppc_get_perf_ctrs - Read a CPUs performance feedback counters. |
| 1157 | + * cppc_get_perf_ctrs - Read a CPU's performance feedback counters. |
1131 | 1158 | * @cpunum: CPU from which to read counters. |
1132 | 1159 | * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h |
1133 | 1160 | * |
.. | .. |
1154 | 1181 | ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; |
1155 | 1182 | |
1156 | 1183 | /* |
1157 | | - * If refernce perf register is not supported then we should |
| 1184 | + * If reference perf register is not supported then we should |
1158 | 1185 | * use the nominal perf value |
1159 | 1186 | */ |
1160 | 1187 | if (!CPC_SUPPORTED(ref_perf_reg)) |
.. | .. |
1207 | 1234 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); |
1208 | 1235 | |
1209 | 1236 | /** |
1210 | | - * cppc_set_perf - Set a CPUs performance controls. |
| 1237 | + * cppc_set_perf - Set a CPU's performance controls. |
1211 | 1238 | * @cpu: CPU for which to set performance controls. |
1212 | 1239 | * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h |
1213 | 1240 | * |
.. | .. |
1288 | 1315 | * executing the Phase-II. |
1289 | 1316 | * 2. Some other CPU has beaten this CPU to successfully execute the |
1290 | 1317 | * write_trylock and has already acquired the write_lock. We know for a |
1291 | | - * fact it(other CPU acquiring the write_lock) couldn't have happened |
| 1318 | + * fact it (other CPU acquiring the write_lock) couldn't have happened |
1292 | 1319 | * before this CPU's Phase-I as we held the read_lock. |
1293 | 1320 | * 3. Some other CPU executing pcc CMD_READ has stolen the |
1294 | 1321 | * down_write, in which case, send_pcc_cmd will check for pending |
---|