2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
--- a/kernel/drivers/cpufreq/cpufreq-dt.c
+++ b/kernel/drivers/cpufreq/cpufreq-dt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012 Freescale Semiconductor, Inc.
  *
  * Copyright (C) 2014 Linaro.
  * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -15,8 +12,8 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
-#include <linux/energy_model.h>
 #include <linux/err.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pm_opp.h>
@@ -28,20 +25,19 @@
 #include "cpufreq-dt.h"
 #ifdef CONFIG_ARCH_ROCKCHIP
 #include "rockchip-cpufreq.h"
-#include <soc/rockchip/rockchip_ipa.h>
-#include <soc/rockchip/rockchip_system_monitor.h>
 #endif
 
 struct private_data {
-	struct opp_table *opp_table;
+	struct list_head node;
+
+	cpumask_var_t cpus;
 	struct device *cpu_dev;
-#ifdef CONFIG_ARCH_ROCKCHIP
-	struct monitor_dev_info *mdev_info;
-	struct monitor_dev_profile *mdevp;
-#endif
-	const char *reg_name;
+	struct opp_table *opp_table;
+	struct cpufreq_frequency_table *freq_table;
 	bool have_static_opps;
 };
+
+static LIST_HEAD(priv_list);
 
 static struct freq_attr *cpufreq_dt_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
@@ -49,28 +45,28 @@
 	NULL,
 };
 
+static struct private_data *cpufreq_dt_find_data(int cpu)
+{
+	struct private_data *priv;
+
+	list_for_each_entry(priv, &priv_list, node) {
+		if (cpumask_test_cpu(cpu, priv->cpus))
+			return priv;
+	}
+
+	return NULL;
+}
+
 static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct private_data *priv = policy->driver_data;
 	unsigned long freq = policy->freq_table[index].frequency;
-	int ret;
 
-#ifdef CONFIG_ROCKCHIP_SYSTEM_MONITOR
-	if (priv->mdev_info)
-		ret = rockchip_monitor_opp_set_rate(priv->mdev_info,
-						    freq * 1000);
-	else
-		ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
+#ifdef CONFIG_ARCH_ROCKCHIP
+	return rockchip_cpufreq_opp_set_rate(priv->cpu_dev, freq * 1000);
 #else
-	ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
+	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
 #endif
-
-	if (!ret) {
-		arch_set_freq_scale(policy->related_cpus, freq,
-				    policy->cpuinfo.max_freq);
-	}
-
-	return ret;
 }
 
 /*
@@ -111,79 +107,20 @@
 	return name;
 }
 
-static int resources_available(void)
-{
-	struct device *cpu_dev;
-	struct regulator *cpu_reg;
-	struct clk *cpu_clk;
-	int ret = 0;
-	const char *name;
-
-	cpu_dev = get_cpu_device(0);
-	if (!cpu_dev) {
-		pr_err("failed to get cpu0 device\n");
-		return -ENODEV;
-	}
-
-	cpu_clk = clk_get(cpu_dev, NULL);
-	ret = PTR_ERR_OR_ZERO(cpu_clk);
-	if (ret) {
-		/*
-		 * If cpu's clk node is present, but clock is not yet
-		 * registered, we should try defering probe.
-		 */
-		if (ret == -EPROBE_DEFER)
-			dev_dbg(cpu_dev, "clock not ready, retry\n");
-		else
-			dev_err(cpu_dev, "failed to get clock: %d\n", ret);
-
-		return ret;
-	}
-
-	clk_put(cpu_clk);
-
-	name = find_supply_name(cpu_dev);
-	/* Platform doesn't require regulator */
-	if (!name)
-		return 0;
-
-	cpu_reg = regulator_get_optional(cpu_dev, name);
-	ret = PTR_ERR_OR_ZERO(cpu_reg);
-	if (ret) {
-		/*
-		 * If cpu's regulator supply node is present, but regulator is
-		 * not yet registered, we should try defering probe.
-		 */
-		if (ret == -EPROBE_DEFER)
-			dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
-		else
-			dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
-
-		return ret;
-	}
-
-	regulator_put(cpu_reg);
-	return 0;
-}
-
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
-	struct em_data_callback em_cb = EM_DATA_CB(of_dev_pm_opp_get_cpu_power);
-	struct cpufreq_frequency_table *freq_table;
-	struct opp_table *opp_table = NULL;
 	struct private_data *priv;
 	struct device *cpu_dev;
 	struct clk *cpu_clk;
 	unsigned int transition_latency;
-	bool fallback = false;
-	const char *name;
-	int ret, nr_opp;
+	int ret;
 
-	cpu_dev = get_cpu_device(policy->cpu);
-	if (!cpu_dev) {
-		pr_err("failed to get cpu%d device\n", policy->cpu);
+	priv = cpufreq_dt_find_data(policy->cpu);
+	if (!priv) {
+		pr_err("failed to find data for cpu%d\n", policy->cpu);
 		return -ENODEV;
 	}
+	cpu_dev = priv->cpu_dev;
 
 	cpu_clk = clk_get(cpu_dev, NULL);
 	if (IS_ERR(cpu_clk)) {
@@ -192,195 +129,55 @@
 		return ret;
 	}
 
-	/* Get OPP-sharing information from "operating-points-v2" bindings */
-	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
-	if (ret) {
-		if (ret != -ENOENT)
-			goto out_put_clk;
+	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+	if (!transition_latency)
+		transition_latency = CPUFREQ_ETERNAL;
 
-		/*
-		 * operating-points-v2 not supported, fallback to old method of
-		 * finding shared-OPPs for backward compatibility if the
-		 * platform hasn't set sharing CPUs.
-		 */
-		if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
-			fallback = true;
-	}
-
-	/*
-	 * OPP layer will be taking care of regulators now, but it needs to know
-	 * the name of the regulator first.
-	 */
-	name = find_supply_name(cpu_dev);
-	if (name) {
-		opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
-		if (IS_ERR(opp_table)) {
-			ret = PTR_ERR(opp_table);
-			dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
-				policy->cpu, ret);
-			goto out_put_clk;
-		}
-	}
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		ret = -ENOMEM;
-		goto out_put_regulator;
-	}
-
-	priv->reg_name = name;
-	priv->opp_table = opp_table;
-
-	/*
-	 * Initialize OPP tables for all policy->cpus. They will be shared by
-	 * all CPUs which have marked their CPUs shared with OPP bindings.
-	 *
-	 * For platforms not using operating-points-v2 bindings, we do this
-	 * before updating policy->cpus. Otherwise, we will end up creating
-	 * duplicate OPPs for policy->cpus.
-	 *
-	 * OPPs might be populated at runtime, don't check for error here
-	 */
-#ifdef CONFIG_ARCH_ROCKCHIP
-	rockchip_cpufreq_set_opp_info(cpu_dev);
-	ret = dev_pm_opp_of_add_table(cpu_dev);
-	if (ret) {
-		dev_err(cpu_dev, "couldn't find opp table for cpu:%d, %d\n",
-			policy->cpu, ret);
-	} else {
-		struct cpumask cpus;
-
-		cpumask_copy(&cpus, policy->cpus);
-		cpumask_clear_cpu(policy->cpu, &cpus);
-		if (!cpumask_empty(&cpus)) {
-			if (!dev_pm_opp_of_cpumask_add_table(&cpus))
-				priv->have_static_opps = true;
-			else
-				dev_pm_opp_of_remove_table(cpu_dev);
-		} else {
-			priv->have_static_opps = true;
-		}
-	}
-	rockchip_cpufreq_adjust_power_scale(cpu_dev);
-#else
-	if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
-		priv->have_static_opps = true;
-#endif
-
-	/*
-	 * But we need OPP table to function so if it is not there let's
-	 * give platform code chance to provide it for us.
-	 */
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
-		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
-		ret = -EPROBE_DEFER;
-		goto out_free_opp;
-	}
-	nr_opp = ret;
-
-	if (fallback) {
-		cpumask_setall(policy->cpus);
-
-		/*
-		 * OPP tables are initialized only for policy->cpu, do it for
-		 * others as well.
-		 */
-		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
-		if (ret)
-			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
-				__func__, ret);
-	}
-
-	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
-	if (ret) {
-		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-		goto out_free_opp;
-	}
-
-	priv->cpu_dev = cpu_dev;
+	cpumask_copy(policy->cpus, priv->cpus);
 	policy->driver_data = priv;
 	policy->clk = cpu_clk;
-	policy->freq_table = freq_table;
-
+	policy->freq_table = priv->freq_table;
 	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
+	policy->cpuinfo.transition_latency = transition_latency;
+	policy->dvfs_possible_from_any_cpu = true;
 
 	/* Support turbo/boost mode */
 	if (policy_has_boost_freq(policy)) {
 		/* This gets disabled by core on driver unregister */
 		ret = cpufreq_enable_boost_support();
 		if (ret)
-			goto out_free_cpufreq_table;
+			goto out_clk_put;
 		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
 	}
 
-	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
-	if (!transition_latency)
-		transition_latency = CPUFREQ_ETERNAL;
+	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 
-	policy->cpuinfo.transition_latency = transition_latency;
-	policy->dvfs_possible_from_any_cpu = true;
-
-	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
-
-#ifdef CONFIG_ARCH_ROCKCHIP
-	priv->mdevp = kzalloc(sizeof(*priv->mdevp), GFP_KERNEL);
-	if (!priv->mdevp)
-		goto check_rate_volt;
-	priv->mdevp->type = MONITOR_TPYE_CPU;
-	priv->mdevp->low_temp_adjust = rockchip_monitor_cpu_low_temp_adjust;
-	priv->mdevp->high_temp_adjust = rockchip_monitor_cpu_high_temp_adjust;
-	cpumask_copy(&priv->mdevp->allowed_cpus, policy->cpus);
-	priv->mdev_info = rockchip_system_monitor_register(cpu_dev,
-							   priv->mdevp);
-	if (IS_ERR(priv->mdev_info)) {
-		kfree(priv->mdevp);
-		priv->mdevp = NULL;
-		dev_dbg(priv->cpu_dev,
-			"running cpufreq without system monitor\n");
-		priv->mdev_info = NULL;
-	}
-check_rate_volt:
-	rockchip_cpufreq_check_rate_volt(cpu_dev);
-#endif
 	return 0;
 
-out_free_cpufreq_table:
-	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_free_opp:
-	if (priv->have_static_opps)
-		dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-	kfree(priv);
-out_put_regulator:
-	if (name)
-		dev_pm_opp_put_regulators(opp_table);
-out_put_clk:
+out_clk_put:
 	clk_put(cpu_clk);
 
 	return ret;
 }
 
+static int cpufreq_online(struct cpufreq_policy *policy)
+{
+	/* We did light-weight tear down earlier, nothing to do here */
+	return 0;
+}
+
+static int cpufreq_offline(struct cpufreq_policy *policy)
+{
+	/*
+	 * Preserve policy->driver_data and don't free resources on light-weight
+	 * tear down.
+	 */
+	return 0;
+}
+
 static int cpufreq_exit(struct cpufreq_policy *policy)
 {
-	struct private_data *priv = policy->driver_data;
-
-#ifdef CONFIG_ARCH_ROCKCHIP
-	rockchip_cpufreq_suspend(policy);
-	rockchip_system_monitor_unregister(priv->mdev_info);
-	kfree(priv->mdevp);
-	priv->mdevp = NULL;
-#endif
-	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-	if (priv->have_static_opps)
-		dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
-	if (priv->reg_name)
-		dev_pm_opp_put_regulators(priv->opp_table);
-#ifdef CONFIG_ARCH_ROCKCHIP
-	rockchip_cpufreq_put_opp_info(priv->cpu_dev);
-#endif
 	clk_put(policy->clk);
-	kfree(priv);
-
 	return 0;
 }
 
@@ -392,26 +189,152 @@
 	.get = cpufreq_generic_get,
 	.init = cpufreq_init,
 	.exit = cpufreq_exit,
+	.online = cpufreq_online,
+	.offline = cpufreq_offline,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
 	.suspend = cpufreq_generic_suspend,
 };
 
+static int dt_cpufreq_early_init(struct device *dev, int cpu)
+{
+	struct private_data *priv;
+	struct device *cpu_dev;
+	bool fallback = false;
+	const char *reg_name;
+	int ret;
+
+	/* Check if this CPU is already covered by some other policy */
+	if (cpufreq_dt_find_data(cpu))
+		return 0;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return -EPROBE_DEFER;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_set_cpu(cpu, priv->cpus);
+	priv->cpu_dev = cpu_dev;
+
+	/*
+	 * OPP layer will be taking care of regulators now, but it needs to know
+	 * the name of the regulator first.
+	 */
+	reg_name = find_supply_name(cpu_dev);
+	if (reg_name) {
+		priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
+							    1);
+		if (IS_ERR(priv->opp_table)) {
+			ret = PTR_ERR(priv->opp_table);
+			if (ret != -EPROBE_DEFER)
+				dev_err(cpu_dev, "failed to set regulators: %d\n",
+					ret);
+			goto free_cpumask;
+		}
+	}
+
+	/* Get OPP-sharing information from "operating-points-v2" bindings */
+	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
+	if (ret) {
+		if (ret != -ENOENT)
+			goto out;
+
+		/*
+		 * operating-points-v2 not supported, fallback to all CPUs share
+		 * OPP for backward compatibility if the platform hasn't set
+		 * sharing CPUs.
+		 */
+		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
+			fallback = true;
+	}
+
+	/*
+	 * Initialize OPP tables for all priv->cpus. They will be shared by
+	 * all CPUs which have marked their CPUs shared with OPP bindings.
+	 *
+	 * For platforms not using operating-points-v2 bindings, we do this
+	 * before updating priv->cpus. Otherwise, we will end up creating
+	 * duplicate OPPs for the CPUs.
+	 *
+	 * OPPs might be populated at runtime, don't check for error here.
+	 */
+	if (!dev_pm_opp_of_cpumask_add_table(priv->cpus))
+		priv->have_static_opps = true;
+
+	/*
+	 * The OPP table must be initialized, statically or dynamically, by this
+	 * point.
+	 */
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		dev_err(cpu_dev, "OPP table can't be empty\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (fallback) {
+		cpumask_setall(priv->cpus);
+		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
+		if (ret)
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
+	}
+
+#ifdef CONFIG_ARCH_ROCKCHIP
+	rockchip_cpufreq_adjust_power_scale(cpu_dev);
+#endif
+
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
+	if (ret) {
+		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+		goto out;
+	}
+
+	list_add(&priv->node, &priv_list);
+	return 0;
+
+out:
+	if (priv->have_static_opps)
+		dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+	if (priv->opp_table)
+		dev_pm_opp_put_regulators(priv->opp_table);
+free_cpumask:
+	free_cpumask_var(priv->cpus);
+	return ret;
+}
+
+static void dt_cpufreq_release(void)
+{
+	struct private_data *priv, *tmp;
+
+	list_for_each_entry_safe(priv, tmp, &priv_list, node) {
+		dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
+		if (priv->have_static_opps)
+			dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+		if (priv->opp_table)
+			dev_pm_opp_put_regulators(priv->opp_table);
+		free_cpumask_var(priv->cpus);
+		list_del(&priv->node);
+	}
+}
+
 static int dt_cpufreq_probe(struct platform_device *pdev)
 {
 	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
-	int ret;
+	int ret, cpu;
 
-	/*
-	 * All per-cluster (CPUs sharing clock/voltages) initialization is done
-	 * from ->init(). In probe(), we just need to make sure that clk and
-	 * regulators are available. Else defer probe and retry.
-	 *
-	 * FIXME: Is checking this only for CPU0 sufficient ?
-	 */
-	ret = resources_available();
-	if (ret)
-		return ret;
+	/* Request resources early so we can return in case of -EPROBE_DEFER */
+	for_each_possible_cpu(cpu) {
+		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
+		if (ret)
+			goto err;
+	}
 
 	if (data) {
 		if (data->have_governor_per_policy)
@@ -420,18 +343,28 @@
 		dt_cpufreq_driver.resume = data->resume;
 		if (data->suspend)
 			dt_cpufreq_driver.suspend = data->suspend;
+		if (data->get_intermediate) {
+			dt_cpufreq_driver.target_intermediate = data->target_intermediate;
+			dt_cpufreq_driver.get_intermediate = data->get_intermediate;
+		}
 	}
 
 	ret = cpufreq_register_driver(&dt_cpufreq_driver);
-	if (ret)
+	if (ret) {
 		dev_err(&pdev->dev, "failed register driver: %d\n", ret);
+		goto err;
+	}
 
+	return 0;
+err:
+	dt_cpufreq_release();
 	return ret;
 }
 
 static int dt_cpufreq_remove(struct platform_device *pdev)
 {
 	cpufreq_unregister_driver(&dt_cpufreq_driver);
+	dt_cpufreq_release();
 	return 0;
 }
 
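For reference, the `data->get_intermediate` handling added to dt_cpufreq_probe() above is consumed by SoC glue drivers that register the "cpufreq-dt" platform device with platform data (the pattern used by tegra20-cpufreq upstream). The sketch below is illustrative only and not part of this patch: struct cpufreq_dt_platform_data and platform_device_register_data() are the real interfaces, while all foo_* names and the 216000 kHz backup rate are hypothetical.

/*
 * Minimal sketch of hypothetical platform glue for the intermediate-frequency
 * hooks. Not part of the patch above; foo_* names are invented.
 */
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "cpufreq-dt.h"

/*
 * Intermediate frequency (kHz) to run from while the CPU PLL relocks.
 * Returning 0 skips the intermediate step for this transition.
 */
static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
					 unsigned int index)
{
	return 216000;
}

/* Reparent the CPU clock to a stable backup source before set_target() runs */
static int foo_target_intermediate(struct cpufreq_policy *policy,
				   unsigned int index)
{
	return 0;
}

static struct cpufreq_dt_platform_data foo_cpufreq_pdata = {
	.get_intermediate	= foo_get_intermediate,
	.target_intermediate	= foo_target_intermediate,
};

static int __init foo_cpufreq_init(void)
{
	struct platform_device *pdev;

	/*
	 * cpufreq-dt binds to this device; dt_cpufreq_probe() copies the
	 * callbacks into dt_cpufreq_driver before registering it.
	 */
	pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
					     &foo_cpufreq_pdata,
					     sizeof(foo_cpufreq_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
module_init(foo_cpufreq_init);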