2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
diff --git a/kernel/drivers/cpufreq/scmi-cpufreq.c b/kernel/drivers/cpufreq/scmi-cpufreq.c
--- a/kernel/drivers/cpufreq/scmi-cpufreq.c
+++ b/kernel/drivers/cpufreq/scmi-cpufreq.c
@@ -8,10 +8,10 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk-provider.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
 #include <linux/energy_model.h>
 #include <linux/export.h>
 #include <linux/module.h>
@@ -23,20 +23,19 @@
 struct scmi_data {
 	int domain_id;
 	struct device *cpu_dev;
-	struct thermal_cooling_device *cdev;
 };
 
-static const struct scmi_handle *handle;
+static struct scmi_protocol_handle *ph;
+static const struct scmi_perf_proto_ops *perf_ops;
 
 static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
-	struct scmi_perf_ops *perf_ops = handle->perf_ops;
 	struct scmi_data *priv = policy->driver_data;
 	unsigned long rate;
 	int ret;
 
-	ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
+	ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
 	if (ret)
 		return 0;
 	return rate / 1000;
@@ -50,30 +49,20 @@
 static int
 scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	int ret;
 	struct scmi_data *priv = policy->driver_data;
-	struct scmi_perf_ops *perf_ops = handle->perf_ops;
 	u64 freq = policy->freq_table[index].frequency;
 
-	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
-	if (!ret)
-		arch_set_freq_scale(policy->related_cpus, freq,
-				    policy->cpuinfo.max_freq);
-	return ret;
+	return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
 }
 
 static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
 					     unsigned int target_freq)
 {
 	struct scmi_data *priv = policy->driver_data;
-	struct scmi_perf_ops *perf_ops = handle->perf_ops;
 
-	if (!perf_ops->freq_set(handle, priv->domain_id,
-				target_freq * 1000, true)) {
-		arch_set_freq_scale(policy->related_cpus, target_freq,
-				     policy->cpuinfo.max_freq);
+	if (!perf_ops->freq_set(ph, priv->domain_id,
+				target_freq * 1000, true))
 		return target_freq;
-	}
 
 	return 0;
 }
@@ -84,7 +73,7 @@
 	int cpu, domain, tdomain;
 	struct device *tcpu_dev;
 
-	domain = handle->perf_ops->device_domain_id(cpu_dev);
+	domain = perf_ops->device_domain_id(cpu_dev);
 	if (domain < 0)
 		return domain;
 
@@ -96,7 +85,7 @@
 		if (!tcpu_dev)
 			continue;
 
-		tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
+		tdomain = perf_ops->device_domain_id(tcpu_dev);
 		if (tdomain == domain)
 			cpumask_set_cpu(cpu, cpumask);
 	}
@@ -105,24 +94,19 @@
 }
 
 static int __maybe_unused
-scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
+		   struct device *cpu_dev)
 {
-	struct device *cpu_dev = get_cpu_device(cpu);
 	unsigned long Hz;
 	int ret, domain;
 
-	if (!cpu_dev) {
-		pr_err("failed to get cpu%d device\n", cpu);
-		return -ENODEV;
-	}
-
-	domain = handle->perf_ops->device_domain_id(cpu_dev);
+	domain = perf_ops->device_domain_id(cpu_dev);
 	if (domain < 0)
 		return domain;
 
 	/* Get the power cost of the performance domain. */
 	Hz = *KHz * 1000;
-	ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
+	ret = perf_ops->est_power_get(ph, domain, &Hz, power);
 	if (ret)
 		return ret;
 
@@ -140,6 +124,7 @@
 	struct scmi_data *priv;
 	struct cpufreq_frequency_table *freq_table;
 	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+	bool power_scale_mw;
 
 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {
@@ -147,7 +132,7 @@
 		return -ENODEV;
 	}
 
-	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
+	ret = perf_ops->device_opps_add(ph, cpu_dev);
 	if (ret) {
 		dev_warn(cpu_dev, "failed to add opps to the device\n");
 		return ret;
@@ -166,13 +151,12 @@
 		return ret;
 	}
 
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0) {
 		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
 		ret = -EPROBE_DEFER;
 		goto out_free_opp;
 	}
-	nr_opp = ret;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
@@ -187,7 +171,7 @@
 	}
 
 	priv->cpu_dev = cpu_dev;
-	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);
+	priv->domain_id = perf_ops->device_domain_id(cpu_dev);
 
 	policy->driver_data = priv;
 	policy->freq_table = freq_table;
@@ -195,22 +179,25 @@
 	/* SCMI allows DVFS request for any domain from any CPU */
 	policy->dvfs_possible_from_any_cpu = true;
 
-	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
+	latency = perf_ops->transition_latency_get(ph, cpu_dev);
 	if (!latency)
 		latency = CPUFREQ_ETERNAL;
 
 	policy->cpuinfo.transition_latency = latency;
 
-	policy->fast_switch_possible = true;
+	policy->fast_switch_possible =
+		perf_ops->fast_switch_possible(ph, cpu_dev);
 
-	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+	power_scale_mw = perf_ops->power_scale_mw_get(ph);
+	em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
+				    power_scale_mw);
 
 	return 0;
 
 out_free_priv:
 	kfree(priv);
 out_free_opp:
-	dev_pm_opp_cpumask_remove_table(policy->cpus);
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
 
 	return ret;
 }
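
For context on the energy-model registration this hunk moves to: the per-device EM API takes the EM_DATA_CB()-wrapped callback, the policy's cpumask, and a power-scale flag. The sketch below is not part of this patch; it is a minimal, hypothetical consumer (example_get_power and example_register_em are made-up names) assuming the callback and em_dev_register_perf_domain() signatures visible above.

#include <linux/cpufreq.h>
#include <linux/energy_model.h>

/*
 * Hypothetical sketch: the active_power callback now receives the CPU
 * device directly instead of a CPU number, matching scmi_get_cpu_power().
 */
static int example_get_power(unsigned long *power, unsigned long *khz,
			     struct device *cpu_dev)
{
	/*
	 * Report the cost of running at *khz; the unit (mW vs. an abstract
	 * scale) is declared by the flag passed at registration time.
	 */
	*power = 0;
	return 0;
}

static int example_register_em(struct cpufreq_policy *policy,
			       struct device *cpu_dev, int nr_opp)
{
	struct em_data_callback em_cb = EM_DATA_CB(example_get_power);

	/* Last argument tells the EM core whether *power is in milliwatts. */
	return em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
					   policy->cpus, true);
}
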
@@ -219,25 +206,18 @@
 {
 	struct scmi_data *priv = policy->driver_data;
 
-	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 	kfree(priv);
-	dev_pm_opp_cpumask_remove_table(policy->related_cpus);
 
 	return 0;
-}
-
-static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
-{
-	struct scmi_data *priv = policy->driver_data;
-
-	priv->cdev = of_cpufreq_cooling_register(policy);
 }
 
 static struct cpufreq_driver scmi_cpufreq_driver = {
 	.name	= "scmi",
 	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
-		  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+		  CPUFREQ_IS_COOLING_DEV,
 	.verify	= cpufreq_generic_frequency_table_verify,
 	.attr	= cpufreq_generic_attr,
 	.target_index	= scmi_cpufreq_set_target,
@@ -245,21 +225,32 @@
 	.get	= scmi_cpufreq_get_rate,
 	.init	= scmi_cpufreq_init,
 	.exit	= scmi_cpufreq_exit,
-	.ready	= scmi_cpufreq_ready,
 };
 
 static int scmi_cpufreq_probe(struct scmi_device *sdev)
 {
 	int ret;
+	struct device *dev = &sdev->dev;
+	const struct scmi_handle *handle;
 
 	handle = sdev->handle;
 
-	if (!handle || !handle->perf_ops)
+	if (!handle)
 		return -ENODEV;
+
+	perf_ops = handle->devm_get_protocol(sdev, SCMI_PROTOCOL_PERF, &ph);
+	if (IS_ERR(perf_ops))
+		return PTR_ERR(perf_ops);
+
+#ifdef CONFIG_COMMON_CLK
+	/* dummy clock provider as needed by OPP if clocks property is used */
+	if (of_find_property(dev->of_node, "#clock-cells", NULL))
+		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+#endif
 
 	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
 	if (ret) {
-		dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
+		dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
 			__func__, ret);
 	}
 
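
To illustrate the acquisition pattern this probe hunk adopts: a driver obtains a protocol's ops together with a per-instance scmi_protocol_handle via devm_get_protocol(), and every later op takes that handle. The sketch below is a minimal, hypothetical consumer (example_scmi_probe and the placeholder domain id are invented), assuming only the interface shown in the hunk above.

#include <linux/err.h>
#include <linux/scmi_protocol.h>

static const struct scmi_perf_proto_ops *perf_ops;
static struct scmi_protocol_handle *ph;

/* Hypothetical sketch of an SCMI perf consumer's probe. */
static int example_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	unsigned long rate;
	int ret;

	if (!handle)
		return -ENODEV;

	/* devm-managed: the protocol reference is dropped with the device. */
	perf_ops = handle->devm_get_protocol(sdev, SCMI_PROTOCOL_PERF, &ph);
	if (IS_ERR(perf_ops))
		return PTR_ERR(perf_ops);

	/* Perf ops now take the protocol handle rather than the scmi_handle. */
	ret = perf_ops->freq_get(ph, 0 /* placeholder domain id */, &rate, false);
	if (ret)
		return ret;

	dev_info(&sdev->dev, "domain 0 running at %lu Hz\n", rate);
	return 0;
}
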
@@ -272,7 +263,7 @@
 }
 
 static const struct scmi_device_id scmi_id_table[] = {
-	{ SCMI_PROTOCOL_PERF },
+	{ SCMI_PROTOCOL_PERF, "cpufreq" },
 	{ },
 };
 MODULE_DEVICE_TABLE(scmi, scmi_id_table);
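
The id-table entry now also carries a name ("cpufreq") for the SCMI core to match against a named scmi_device. As a rough illustration of how such a table is consumed, the sketch below (example_scmi_cpufreq_driver and its name string are hypothetical, and the entry is trimmed to the probe shown in this diff) assumes the standard scmi_driver/module_scmi_driver registration helpers.

/* Hypothetical sketch: wiring the id table above to the SCMI bus. */
static struct scmi_driver example_scmi_cpufreq_driver = {
	.name		= "example-scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.id_table	= scmi_id_table,
};
module_scmi_driver(example_scmi_cpufreq_driver);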