hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/perf/arm_smmuv3_pmu.c
....@@ -95,6 +95,7 @@
9595 #define SMMU_PMCG_PA_SHIFT 12
9696
9797 #define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
98
+#define SMMU_PMCG_HARDEN_DISABLE BIT(1)
9899
99100 static int cpuhp_state_num;
100101
....@@ -138,12 +139,42 @@
138139 writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
139140 }
140141
142
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
143
+ struct perf_event *event, int idx);
144
+
145
+static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
146
+{
147
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
148
+ unsigned int idx;
149
+
150
+ for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
151
+ smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
152
+
153
+ smmu_pmu_enable(pmu);
154
+}
155
+
141156 static inline void smmu_pmu_disable(struct pmu *pmu)
142157 {
143158 struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
144159
145160 writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
146161 writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
162
+}
163
+
164
+static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
165
+{
166
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
167
+ unsigned int idx;
168
+
169
+ /*
170
+ * The global disable of the PMU sometimes fails to stop the counting.
171
+ * Harden this by writing an invalid event type to each used counter
172
+ * to forcibly stop counting.
173
+ */
174
+ for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
175
+ writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
176
+
177
+ smmu_pmu_disable(pmu);
147178 }
148179
149180 static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
....@@ -719,7 +750,10 @@
719750 switch (model) {
720751 case IORT_SMMU_V3_PMCG_HISI_HIP08:
721752 /* HiSilicon Erratum 162001800 */
722
- smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
753
+ smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
754
+ break;
755
+ case IORT_SMMU_V3_PMCG_HISI_HIP09:
756
+ smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
723757 break;
724758 }
725759
....@@ -806,6 +840,16 @@
806840
807841 smmu_pmu_get_acpi_options(smmu_pmu);
808842
843
+ /*
844
+ * For platforms that suffer this quirk, the PMU disable sometimes fails to
845
+ * stop the counters. This will lead to inaccurate or erroneous counting.
846
+ * Forcibly disable the counters with these quirk handlers.
847
+ */
848
+ if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
849
+ smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
850
+ smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
851
+ }
852
+
809853 /* Pick one CPU to be the preferred one to use */
810854 smmu_pmu->on_cpu = raw_smp_processor_id();
811855 WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
....@@ -870,6 +914,8 @@
870914
871915 static int __init arm_smmu_pmu_init(void)
872916 {
917
+ int ret;
918
+
873919 cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
874920 "perf/arm/pmcg:online",
875921 NULL,
....@@ -877,7 +923,11 @@
877923 if (cpuhp_state_num < 0)
878924 return cpuhp_state_num;
879925
880
- return platform_driver_register(&smmu_pmu_driver);
926
+ ret = platform_driver_register(&smmu_pmu_driver);
927
+ if (ret)
928
+ cpuhp_remove_multi_state(cpuhp_state_num);
929
+
930
+ return ret;
881931 }
882932 module_init(arm_smmu_pmu_init);
883933