2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/cpufreq/rockchip-cpufreq.c
@@ -16,48 +16,46 @@
 #include <linux/clk.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/nvmem-consumer.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
-#include <linux/regulator/consumer.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
-#include <linux/soc/rockchip/pvtm.h>
-#include <linux/thermal.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
 #include <linux/rockchip/cpu.h>
 #include <soc/rockchip/rockchip_opp_select.h>
 #include <soc/rockchip/rockchip_system_monitor.h>
 
 #include "cpufreq-dt.h"
 #include "rockchip-cpufreq.h"
-#include "../clk/rockchip/clk.h"
-
-#define LEAKAGE_INVALID	0xff
 
 struct cluster_info {
-	struct opp_table *opp_table;
	struct list_head list_head;
+	struct monitor_dev_info *mdev_info;
+	struct rockchip_opp_info opp_info;
+	struct freq_qos_request dsu_qos_req;
 	cpumask_t cpus;
-	unsigned int reboot_freq;
-	unsigned int threshold_freq;
-	unsigned int scale_rate;
-	unsigned int temp_limit_rate;
-	int volt_sel;
+	unsigned int idle_threshold_freq;
 	int scale;
-	int process;
-	bool offline;
-	bool freq_limit;
-	bool is_check_init;
+	bool is_idle_disabled;
+	bool is_opp_shared_dsu;
+	unsigned int regulator_count;
+	unsigned long rate;
+	unsigned long volt, mem_volt;
 };
 static LIST_HEAD(cluster_info_list);
 
-static __maybe_unused int px30_get_soc_info(struct device *dev,
-					    struct device_node *np,
-					    int *bin, int *process)
+static int px30_get_soc_info(struct device *dev, struct device_node *np,
+			     int *bin, int *process)
 {
 	int ret = 0;
 	u8 value = 0;
@@ -80,9 +78,8 @@
 	return ret;
 }
 
-static __maybe_unused int rk3288_get_soc_info(struct device *dev,
-					      struct device_node *np,
-					      int *bin, int *process)
+static int rk3288_get_soc_info(struct device *dev, struct device_node *np,
+			       int *bin, int *process)
 {
 	int ret = 0;
 	u8 value = 0;
@@ -141,9 +138,8 @@
 	return ret;
 }
 
-static __maybe_unused int rk3399_get_soc_info(struct device *dev,
-					      struct device_node *np,
-					      int *bin, int *process)
+static int rk3399_get_soc_info(struct device *dev, struct device_node *np,
+			       int *bin, int *process)
 {
 	int ret = 0;
 	u8 value = 0;
@@ -191,9 +187,147 @@
 	return ret;
 }
 
-static __maybe_unused int rv1126_get_soc_info(struct device *dev,
-					      struct device_node *np,
-					      int *bin, int *process)
+static int rk3588_get_soc_info(struct device *dev, struct device_node *np,
+			       int *bin, int *process)
+{
+	int ret = 0;
+	u8 value = 0;
+
+	if (!bin)
+		return 0;
+
+	if (of_property_match_string(np, "nvmem-cell-names",
+				     "specification_serial_number") >= 0) {
+		ret = rockchip_nvmem_cell_read_u8(np,
+						  "specification_serial_number",
+						  &value);
+		if (ret) {
+			dev_err(dev,
+				"Failed to get specification_serial_number\n");
+			return ret;
+		}
+		/* RK3588M */
+		if (value == 0xd)
+			*bin = 1;
+		/* RK3588J */
+		else if (value == 0xa)
+			*bin = 2;
+	}
+	if (*bin < 0)
+		*bin = 0;
+	dev_info(dev, "bin=%d\n", *bin);
+
+	return ret;
+}
+
+static int rk3588_change_length(struct device *dev, struct device_node *np,
+				int bin, int process, int volt_sel)
+{
+	struct clk *clk;
+	unsigned long old_rate;
+	unsigned int low_len_sel;
+	u32 opp_flag = 0;
+	int ret = 0;
+
+	clk = clk_get(dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_warn(dev, "failed to get cpu clk\n");
+		return PTR_ERR(clk);
+	}
+
+	/* RK3588 low speed grade should change to low length */
+	if (of_property_read_u32(np, "rockchip,pvtm-low-len-sel",
+				 &low_len_sel))
+		goto out;
+	if (volt_sel > low_len_sel)
+		goto out;
+	opp_flag = OPP_LENGTH_LOW;
+
+	old_rate = clk_get_rate(clk);
+	ret = clk_set_rate(clk, old_rate | opp_flag);
+	if (ret) {
+		dev_err(dev, "failed to change length\n");
+		goto out;
+	}
+	clk_set_rate(clk, old_rate);
+out:
+	clk_put(clk);
+
+	return ret;
+}
+
+static int rk3588_set_supported_hw(struct device *dev, struct device_node *np,
+				   int bin, int process, int volt_sel)
+{
+	struct opp_table *opp_table;
+	u32 supported_hw[2];
+
+	if (!of_property_read_bool(np, "rockchip,supported-hw"))
+		return 0;
+
+	/* SoC Version */
+	supported_hw[0] = BIT(bin);
+	/* Speed Grade */
+	supported_hw[1] = BIT(volt_sel);
+	opp_table = dev_pm_opp_set_supported_hw(dev, supported_hw, 2);
+	if (IS_ERR(opp_table)) {
+		dev_err(dev, "failed to set supported opp\n");
+		return PTR_ERR(opp_table);
+	}
+
+	return 0;
+}
+
+static int rk3588_set_soc_info(struct device *dev, struct device_node *np,
+			       int bin, int process, int volt_sel)
+{
+	if (volt_sel < 0)
+		return 0;
+	if (bin < 0)
+		bin = 0;
+
+	rk3588_change_length(dev, np, bin, process, volt_sel);
+	rk3588_set_supported_hw(dev, np, bin, process, volt_sel);
+
+	return 0;
+}
+
+static int rk3588_cpu_set_read_margin(struct device *dev,
+				      struct rockchip_opp_info *opp_info,
+				      u32 rm)
+{
+	if (!opp_info->volt_rm_tbl)
+		return 0;
+	if (rm == opp_info->current_rm || rm == UINT_MAX)
+		return 0;
+
+	dev_dbg(dev, "set rm to %d\n", rm);
+	if (opp_info->grf) {
+		regmap_write(opp_info->grf, 0x20, 0x001c0000 | (rm << 2));
+		regmap_write(opp_info->grf, 0x28, 0x003c0000 | (rm << 2));
+		regmap_write(opp_info->grf, 0x2c, 0x003c0000 | (rm << 2));
+		regmap_write(opp_info->grf, 0x30, 0x00200020);
+		udelay(1);
+		regmap_write(opp_info->grf, 0x30, 0x00200000);
+	}
+	if (opp_info->dsu_grf) {
+		regmap_write(opp_info->dsu_grf, 0x20, 0x001c0000 | (rm << 2));
+		regmap_write(opp_info->dsu_grf, 0x28, 0x003c0000 | (rm << 2));
+		regmap_write(opp_info->dsu_grf, 0x2c, 0x003c0000 | (rm << 2));
+		regmap_write(opp_info->dsu_grf, 0x30, 0x001c0000 | (rm << 2));
+		regmap_write(opp_info->dsu_grf, 0x38, 0x001c0000 | (rm << 2));
+		regmap_write(opp_info->dsu_grf, 0x18, 0x40004000);
+		udelay(1);
+		regmap_write(opp_info->dsu_grf, 0x18, 0x40000000);
+	}
+
+	opp_info->current_rm = rm;
+
+	return 0;
+}
+
+static int rv1126_get_soc_info(struct device *dev, struct device_node *np,
+			       int *bin, int *process)
 {
 	int ret = 0;
 	u8 value = 0;
@@ -215,45 +349,61 @@
 	return ret;
 }
 
+static const struct rockchip_opp_data px30_cpu_opp_data = {
+	.get_soc_info = px30_get_soc_info,
+};
+
+static const struct rockchip_opp_data rk3288_cpu_opp_data = {
+	.get_soc_info = rk3288_get_soc_info,
+};
+
+static const struct rockchip_opp_data rk3399_cpu_opp_data = {
+	.get_soc_info = rk3399_get_soc_info,
+};
+
+static const struct rockchip_opp_data rk3588_cpu_opp_data = {
+	.get_soc_info = rk3588_get_soc_info,
+	.set_soc_info = rk3588_set_soc_info,
+	.set_read_margin = rk3588_cpu_set_read_margin,
+};
+
+static const struct rockchip_opp_data rv1126_cpu_opp_data = {
+	.get_soc_info = rv1126_get_soc_info,
+};
+
 static const struct of_device_id rockchip_cpufreq_of_match[] = {
-#ifdef CONFIG_CPU_PX30
 	{
 		.compatible = "rockchip,px30",
-		.data = (void *)&px30_get_soc_info,
+		.data = (void *)&px30_cpu_opp_data,
 	},
-#endif
-#ifdef CONFIG_CPU_RK3288
 	{
 		.compatible = "rockchip,rk3288",
-		.data = (void *)&rk3288_get_soc_info,
+		.data = (void *)&rk3288_cpu_opp_data,
 	},
 	{
 		.compatible = "rockchip,rk3288w",
-		.data = (void *)&rk3288_get_soc_info,
+		.data = (void *)&rk3288_cpu_opp_data,
 	},
-#endif
-#ifdef CONFIG_CPU_PX30
 	{
 		.compatible = "rockchip,rk3326",
-		.data = (void *)&px30_get_soc_info,
+		.data = (void *)&px30_cpu_opp_data,
 	},
-#endif
-#ifdef CONFIG_CPU_RK3399
 	{
 		.compatible = "rockchip,rk3399",
-		.data = (void *)&rk3399_get_soc_info,
+		.data = (void *)&rk3399_cpu_opp_data,
 	},
-#endif
-#ifdef CONFIG_CPU_RV1126
+	{
+		.compatible = "rockchip,rk3588",
+		.data = (void *)&rk3588_cpu_opp_data,
+	},
 	{
 		.compatible = "rockchip,rv1109",
-		.data = (void *)&rv1126_get_soc_info,
+		.data = (void *)&rv1126_cpu_opp_data,
 	},
 	{
 		.compatible = "rockchip,rv1126",
-		.data = (void *)&rv1126_get_soc_info,
+		.data = (void *)&rv1126_cpu_opp_data,
 	},
-#endif
 	{},
 };
 
@@ -269,51 +419,153 @@
 	return NULL;
 }
 
-static struct cluster_info *rockchip_cluster_lookup_by_dev(struct device *dev)
+static int rockchip_cpufreq_set_volt(struct device *dev,
+				     struct regulator *reg,
+				     struct dev_pm_opp_supply *supply,
+				     char *reg_name)
 {
-	struct cluster_info *cluster;
-	struct device *cpu_dev;
-	int cpu;
+	int ret;
 
-	list_for_each_entry(cluster, &cluster_info_list, list_head) {
-		for_each_cpu(cpu, &cluster->cpus) {
-			cpu_dev = get_cpu_device(cpu);
-			if (!cpu_dev)
-				continue;
-			if (cpu_dev == dev)
-				return cluster;
+	dev_dbg(dev, "%s: %s voltages (uV): %lu %lu %lu\n", __func__, reg_name,
+		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
+
+	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
+					    supply->u_volt, supply->u_volt_max);
+	if (ret)
+		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
+			__func__, supply->u_volt_min, supply->u_volt,
+			supply->u_volt_max, ret);
+
+	return ret;
+}
+
+static int cpu_opp_helper(struct dev_pm_set_opp_data *data)
+{
+	struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+	struct dev_pm_opp_supply *old_supply_mem = &data->old_opp.supplies[1];
+	struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+	struct dev_pm_opp_supply *new_supply_mem = &data->new_opp.supplies[1];
+	struct regulator *vdd_reg = data->regulators[0];
+	struct regulator *mem_reg = data->regulators[1];
+	struct device *dev = data->dev;
+	struct clk *clk = data->clk;
+	struct cluster_info *cluster;
+	struct rockchip_opp_info *opp_info;
+	unsigned long old_freq = data->old_opp.rate;
+	unsigned long new_freq = data->new_opp.rate;
+	u32 target_rm = UINT_MAX;
+	int ret = 0;
+
+	cluster = rockchip_cluster_info_lookup(dev->id);
+	if (!cluster)
+		return -EINVAL;
+	opp_info = &cluster->opp_info;
+	rockchip_get_read_margin(dev, opp_info, new_supply_vdd->u_volt,
+				 &target_rm);
+
+	/* Change frequency */
+	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
+		old_freq, new_freq);
+	/* Scaling up? Scale voltage before frequency */
+	if (new_freq >= old_freq) {
+		ret = rockchip_set_intermediate_rate(dev, opp_info, clk,
+						     old_freq, new_freq,
+						     true, true);
+		if (ret) {
+			dev_err(dev, "%s: failed to set clk rate: %lu\n",
+				__func__, new_freq);
+			return -EINVAL;
 		}
+		ret = rockchip_cpufreq_set_volt(dev, mem_reg, new_supply_mem,
+						"mem");
+		if (ret)
+			goto restore_voltage;
+		ret = rockchip_cpufreq_set_volt(dev, vdd_reg, new_supply_vdd,
+						"vdd");
+		if (ret)
+			goto restore_voltage;
+		rockchip_set_read_margin(dev, opp_info, target_rm, true);
+		ret = clk_set_rate(clk, new_freq);
+		if (ret) {
+			dev_err(dev, "%s: failed to set clk rate: %lu %d\n",
+				__func__, new_freq, ret);
+			goto restore_rm;
+		}
+	/* Scaling down? Scale voltage after frequency */
+	} else {
+		ret = rockchip_set_intermediate_rate(dev, opp_info, clk,
+						     old_freq, new_freq,
+						     false, true);
+		if (ret) {
+			dev_err(dev, "%s: failed to set clk rate: %lu\n",
+				__func__, new_freq);
+			return -EINVAL;
+		}
+		rockchip_set_read_margin(dev, opp_info, target_rm, true);
+		ret = clk_set_rate(clk, new_freq);
+		if (ret) {
+			dev_err(dev, "%s: failed to set clk rate: %lu %d\n",
+				__func__, new_freq, ret);
+			goto restore_rm;
+		}
+		ret = rockchip_cpufreq_set_volt(dev, vdd_reg, new_supply_vdd,
+						"vdd");
+		if (ret)
+			goto restore_freq;
+		ret = rockchip_cpufreq_set_volt(dev, mem_reg, new_supply_mem,
+						"mem");
+		if (ret)
+			goto restore_freq;
 	}
 
-	return NULL;
+	cluster->volt = new_supply_vdd->u_volt;
+	cluster->mem_volt = new_supply_mem->u_volt;
+
+	return 0;
+
+restore_freq:
+	if (clk_set_rate(clk, old_freq))
+		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+			__func__, old_freq);
+restore_rm:
+	rockchip_get_read_margin(dev, opp_info, old_supply_vdd->u_volt,
+				 &target_rm);
+	rockchip_set_read_margin(dev, opp_info, target_rm, true);
restore_voltage:
+	rockchip_cpufreq_set_volt(dev, mem_reg, old_supply_mem, "mem");
+	rockchip_cpufreq_set_volt(dev, vdd_reg, old_supply_vdd, "vdd");
+
+	return ret;
 }
 
 static int rockchip_cpufreq_cluster_init(int cpu, struct cluster_info *cluster)
 {
+	struct rockchip_opp_info *opp_info = &cluster->opp_info;
+	struct opp_table *pname_table = NULL;
+	struct opp_table *reg_table = NULL;
+	struct opp_table *opp_table;
 	struct device_node *np;
-	struct property *pp;
 	struct device *dev;
+	const char * const reg_names[] = {"cpu", "mem"};
 	char *reg_name = NULL;
-	int ret = 0, bin = -EINVAL;
-
-	cluster->process = -EINVAL;
-	cluster->volt_sel = -EINVAL;
-	cluster->scale = 0;
+	int bin = -EINVAL;
+	int process = -EINVAL;
+	int volt_sel = -EINVAL;
+	int ret = 0;
+	u32 freq = 0;
 
 	dev = get_cpu_device(cpu);
 	if (!dev)
 		return -ENODEV;
 
-	pp = of_find_property(dev->of_node, "cpu-supply", NULL);
-	if (pp) {
+	opp_info->dev = dev;
+
+	if (of_find_property(dev->of_node, "cpu-supply", NULL))
 		reg_name = "cpu";
-	} else {
-		pp = of_find_property(dev->of_node, "cpu0-supply", NULL);
-		if (pp)
-			reg_name = "cpu0";
-		else
-			return -ENOENT;
-	}
+	else if (of_find_property(dev->of_node, "cpu0-supply", NULL))
+		reg_name = "cpu0";
+	else
+		return -ENOENT;
 
 	np = of_parse_phandle(dev->of_node, "operating-points-v2", 0);
 	if (!np) {
@@ -321,181 +573,391 @@
 		return -ENOENT;
 	}
 
+	opp_info->grf = syscon_regmap_lookup_by_phandle(np,
+							"rockchip,grf");
+	if (IS_ERR(opp_info->grf))
+		opp_info->grf = NULL;
+
 	ret = dev_pm_opp_of_get_sharing_cpus(dev, &cluster->cpus);
 	if (ret) {
 		dev_err(dev, "Failed to get sharing cpus\n");
 		goto np_err;
 	}
 
-	of_property_read_u32(np, "rockchip,threshold-freq",
-			     &cluster->threshold_freq);
-	cluster->freq_limit = of_property_read_bool(np, "rockchip,freq-limit");
+	cluster->is_opp_shared_dsu = of_property_read_bool(np, "rockchip,opp-shared-dsu");
+	if (!of_property_read_u32(np, "rockchip,idle-threshold-freq", &freq))
+		cluster->idle_threshold_freq = freq;
+	rockchip_get_opp_data(rockchip_cpufreq_of_match, opp_info);
+	if (opp_info->data && opp_info->data->set_read_margin) {
+		opp_info->current_rm = UINT_MAX;
+		opp_info->target_rm = UINT_MAX;
+		opp_info->dsu_grf =
+			syscon_regmap_lookup_by_phandle(np, "rockchip,dsu-grf");
+		if (IS_ERR(opp_info->dsu_grf))
+			opp_info->dsu_grf = NULL;
+		rockchip_get_volt_rm_table(dev, np, "volt-mem-read-margin",
+					   &opp_info->volt_rm_tbl);
+		of_property_read_u32(np, "low-volt-mem-read-margin",
+				     &opp_info->low_rm);
+		if (!of_property_read_u32(np, "intermediate-threshold-freq", &freq))
+			opp_info->intermediate_threshold_freq = freq * 1000;
+		rockchip_init_read_margin(dev, opp_info, reg_name);
+	}
+	if (opp_info->data && opp_info->data->get_soc_info)
+		opp_info->data->get_soc_info(dev, np, &bin, &process);
+	rockchip_get_soc_info(dev, np, &bin, &process);
+	rockchip_init_pvtpll_table(&cluster->opp_info, bin);
+	rockchip_get_scale_volt_sel(dev, "cpu_leakage", reg_name, bin, process,
+				    &cluster->scale, &volt_sel);
+	if (opp_info->data && opp_info->data->set_soc_info)
+		opp_info->data->set_soc_info(dev, np, bin, process, volt_sel);
+	pname_table = rockchip_set_opp_prop_name(dev, process, volt_sel);
+	rockchip_set_opp_supported_hw(dev, np, bin, volt_sel);
 
-	rockchip_get_soc_info(dev, rockchip_cpufreq_of_match,
-			      &bin, &cluster->process);
-	rockchip_get_scale_volt_sel(dev, "cpu_leakage", reg_name,
-				    bin, cluster->process,
-				    &cluster->scale, &cluster->volt_sel);
-np_err:
-	of_node_put(np);
-	return ret;
-}
-
-int rockchip_cpufreq_check_rate_volt(struct device *dev)
-{
-	struct cluster_info *cluster;
-
-	cluster = rockchip_cluster_lookup_by_dev(dev);
-	if (!cluster)
-		return -EINVAL;
-	if (cluster->is_check_init)
-		return 0;
-	dev_pm_opp_check_rate_volt(dev, true);
-	cluster->is_check_init = true;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rockchip_cpufreq_check_rate_volt);
-
-int rockchip_cpufreq_set_opp_info(struct device *dev)
-{
-	struct cluster_info *cluster;
-
-	cluster = rockchip_cluster_lookup_by_dev(dev);
-	if (!cluster)
-		return -EINVAL;
-	cluster->opp_table = rockchip_set_opp_prop_name(dev,
-							cluster->process,
-							cluster->volt_sel);
-	if (IS_ERR(cluster->opp_table)) {
-		dev_err(dev, "Failed to set prop name\n");
-		return PTR_ERR(cluster->opp_table);
+	if (of_find_property(dev->of_node, "cpu-supply", NULL) &&
+	    of_find_property(dev->of_node, "mem-supply", NULL)) {
+		cluster->regulator_count = 2;
+		reg_table = dev_pm_opp_set_regulators(dev, reg_names,
+						      ARRAY_SIZE(reg_names));
+		if (IS_ERR(reg_table)) {
+			ret = PTR_ERR(reg_table);
+			goto pname_opp_table;
+		}
+		opp_table = dev_pm_opp_register_set_opp_helper(dev,
+							       cpu_opp_helper);
+		if (IS_ERR(opp_table)) {
+			ret = PTR_ERR(opp_table);
+			goto reg_opp_table;
+		}
+	} else {
+		cluster->regulator_count = 1;
 	}
 
+	of_node_put(np);
+
 	return 0;
-}
-EXPORT_SYMBOL_GPL(rockchip_cpufreq_set_opp_info);
 
-void rockchip_cpufreq_put_opp_info(struct device *dev)
-{
-	struct cluster_info *cluster;
+reg_opp_table:
+	if (reg_table)
+		dev_pm_opp_put_regulators(reg_table);
+pname_opp_table:
+	if (!IS_ERR_OR_NULL(pname_table))
+		dev_pm_opp_put_prop_name(pname_table);
+np_err:
+	of_node_put(np);
 
-	cluster = rockchip_cluster_lookup_by_dev(dev);
-	if (!cluster)
-		return;
-	if (!IS_ERR_OR_NULL(cluster->opp_table))
-		dev_pm_opp_put_prop_name(cluster->opp_table);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(rockchip_cpufreq_put_opp_info);
 
 int rockchip_cpufreq_adjust_power_scale(struct device *dev)
 {
 	struct cluster_info *cluster;
 
-	cluster = rockchip_cluster_lookup_by_dev(dev);
+	cluster = rockchip_cluster_info_lookup(dev->id);
 	if (!cluster)
 		return -EINVAL;
 	rockchip_adjust_power_scale(dev, cluster->scale);
+	rockchip_pvtpll_calibrate_opp(&cluster->opp_info);
+	rockchip_pvtpll_add_length(&cluster->opp_info);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rockchip_cpufreq_adjust_power_scale);
 
-int rockchip_cpufreq_suspend(struct cpufreq_policy *policy)
+int rockchip_cpufreq_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+	struct cluster_info *cluster;
+	struct dev_pm_opp *opp;
+	unsigned long freq;
+	int ret = 0;
+
+	cluster = rockchip_cluster_info_lookup(dev->id);
+	if (!cluster)
+		return -EINVAL;
+
+	rockchip_monitor_volt_adjust_lock(cluster->mdev_info);
+	ret = dev_pm_opp_set_rate(dev, target_freq);
+	if (!ret) {
+		cluster->rate = target_freq;
+		if (cluster->regulator_count == 1) {
+			freq = target_freq;
+			opp = dev_pm_opp_find_freq_ceil(cluster->opp_info.dev, &freq);
+			if (!IS_ERR(opp)) {
+				cluster->volt = dev_pm_opp_get_voltage(opp);
+				dev_pm_opp_put(opp);
+			}
+		}
+	}
+	rockchip_monitor_volt_adjust_unlock(cluster->mdev_info);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rockchip_cpufreq_opp_set_rate);
+
+static int rockchip_cpufreq_suspend(struct cpufreq_policy *policy)
 {
 	int ret = 0;
 
 	ret = cpufreq_generic_suspend(policy);
 	if (!ret)
 		rockchip_monitor_suspend_low_temp_adjust(policy->cpu);
+
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rockchip_cpufreq_suspend);
 
-static struct cpufreq_policy *rockchip_get_policy(struct cluster_info *cluster)
+static int rockchip_cpufreq_add_monitor(struct cluster_info *cluster,
+					struct cpufreq_policy *policy)
 {
-	int first_cpu;
+	struct device *dev = cluster->opp_info.dev;
+	struct monitor_dev_profile *mdevp = NULL;
+	struct monitor_dev_info *mdev_info = NULL;
 
-	first_cpu = cpumask_first_and(&cluster->cpus, cpu_online_mask);
-	if (first_cpu >= nr_cpu_ids)
-		return NULL;
+	mdevp = kzalloc(sizeof(*mdevp), GFP_KERNEL);
+	if (!mdevp)
+		return -ENOMEM;
 
-	return cpufreq_cpu_get(first_cpu);
+	mdevp->type = MONITOR_TYPE_CPU;
+	mdevp->low_temp_adjust = rockchip_monitor_cpu_low_temp_adjust;
+	mdevp->high_temp_adjust = rockchip_monitor_cpu_high_temp_adjust;
+	mdevp->update_volt = rockchip_monitor_check_rate_volt;
+	mdevp->data = (void *)policy;
+	mdevp->opp_info = &cluster->opp_info;
+	cpumask_copy(&mdevp->allowed_cpus, policy->cpus);
+	mdev_info = rockchip_system_monitor_register(dev, mdevp);
+	if (IS_ERR(mdev_info)) {
+		kfree(mdevp);
+		dev_err(dev, "failed to register system monitor\n");
+		return -EINVAL;
+	}
+	mdev_info->devp = mdevp;
+	cluster->mdev_info = mdev_info;
+
+	return 0;
 }
 
-/**
- * rockchip_cpufreq_adjust_target() - Adjust cpu target frequency
- * @cpu:	CPU number
- * @freq:	Expected target frequency
- *
- * This adjusts cpu target frequency for reducing power consumption.
- * Only one cluster can eanble frequency limit, and the cluster's
- * maximum frequency will be limited to its threshold frequency, if the
- * other cluster's frequency is geater than or equal to its threshold
- * frequency.
- */
-unsigned int rockchip_cpufreq_adjust_target(int cpu, unsigned int freq)
+static int rockchip_cpufreq_remove_monitor(struct cluster_info *cluster)
 {
-	struct cpufreq_policy *policy;
-	struct cluster_info *cluster, *temp;
+	if (cluster->mdev_info) {
+		kfree(cluster->mdev_info->devp);
+		rockchip_system_monitor_unregister(cluster->mdev_info);
+		cluster->mdev_info = NULL;
+	}
 
-	cluster = rockchip_cluster_info_lookup(cpu);
-	if (!cluster || !cluster->threshold_freq)
-		goto adjust_out;
+	return 0;
+}
 
-	if (cluster->freq_limit) {
-		if (freq <= cluster->threshold_freq)
-			goto adjust_out;
+static int rockchip_cpufreq_remove_dsu_qos(struct cluster_info *cluster)
+{
+	struct cluster_info *ci;
 
-		list_for_each_entry(temp, &cluster_info_list, list_head) {
-			if (temp->freq_limit || temp == cluster ||
-			    temp->offline)
-				continue;
+	if (!cluster->is_opp_shared_dsu)
+		return 0;
 
-			policy = rockchip_get_policy(temp);
-			if (!policy)
-				continue;
+	list_for_each_entry(ci, &cluster_info_list, list_head) {
+		if (ci->is_opp_shared_dsu)
+			continue;
+		if (freq_qos_request_active(&ci->dsu_qos_req))
+			freq_qos_remove_request(&ci->dsu_qos_req);
+	}
 
-			if (temp->threshold_freq &&
-			    temp->threshold_freq <= policy->cur) {
-				cpufreq_cpu_put(policy);
-				return cluster->threshold_freq;
-			}
-			cpufreq_cpu_put(policy);
-		}
-	} else {
-		if (freq < cluster->threshold_freq)
-			goto adjust_out;
+	return 0;
+}

-		list_for_each_entry(temp, &cluster_info_list, list_head) {
-			if (!temp->freq_limit || temp == cluster ||
-			    temp->offline)
-				continue;
+static int rockchip_cpufreq_add_dsu_qos_req(struct cluster_info *cluster,
+					    struct cpufreq_policy *policy)
+{
+	struct device *dev = cluster->opp_info.dev;
+	struct cluster_info *ci;
+	int ret;
 
-			policy = rockchip_get_policy(temp);
-			if (!policy)
-				continue;
+	if (!cluster->is_opp_shared_dsu)
+		return 0;
 
-			if (temp->threshold_freq &&
-			    temp->threshold_freq < policy->cur)
-				cpufreq_driver_target(policy,
-						      temp->threshold_freq,
-						      CPUFREQ_RELATION_H);
-			cpufreq_cpu_put(policy);
+	list_for_each_entry(ci, &cluster_info_list, list_head) {
+		if (ci->is_opp_shared_dsu)
+			continue;
+		ret = freq_qos_add_request(&policy->constraints,
+					   &ci->dsu_qos_req,
+					   FREQ_QOS_MIN,
+					   FREQ_QOS_MIN_DEFAULT_VALUE);
+		if (ret < 0) {
+			dev_err(dev, "failed to add dsu freq constraint\n");
+			goto error;
 		}
 	}
 
-adjust_out:
+	return 0;
 
-	return freq;
+error:
+	rockchip_cpufreq_remove_dsu_qos(cluster);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(rockchip_cpufreq_adjust_target);
+
+static int rockchip_cpufreq_notifier(struct notifier_block *nb,
+				     unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+	struct cluster_info *cluster;
+
+	cluster = rockchip_cluster_info_lookup(policy->cpu);
+	if (!cluster)
+		return NOTIFY_BAD;
+
+	if (event == CPUFREQ_CREATE_POLICY) {
+		if (rockchip_cpufreq_add_monitor(cluster, policy))
+			return NOTIFY_BAD;
+		if (rockchip_cpufreq_add_dsu_qos_req(cluster, policy))
+			return NOTIFY_BAD;
+	} else if (event == CPUFREQ_REMOVE_POLICY) {
+		rockchip_cpufreq_remove_monitor(cluster);
+		rockchip_cpufreq_remove_dsu_qos(cluster);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block rockchip_cpufreq_notifier_block = {
+	.notifier_call = rockchip_cpufreq_notifier,
+};
+
+#ifdef MODULE
+static struct pm_qos_request idle_pm_qos;
+static int idle_disable_refcnt;
+static DEFINE_MUTEX(idle_disable_lock);
+
+static int rockchip_cpufreq_idle_state_disable(struct cpumask *cpumask,
+					       int index, bool disable)
+{
+	mutex_lock(&idle_disable_lock);
+
+	if (disable) {
+		if (idle_disable_refcnt == 0)
+			cpu_latency_qos_update_request(&idle_pm_qos, 0);
+		idle_disable_refcnt++;
+	} else {
+		if (--idle_disable_refcnt == 0)
+			cpu_latency_qos_update_request(&idle_pm_qos,
+						       PM_QOS_DEFAULT_VALUE);
+	}
+
+	mutex_unlock(&idle_disable_lock);
+
+	return 0;
+}
+#else
+static int rockchip_cpufreq_idle_state_disable(struct cpumask *cpumask,
+					       int index, bool disable)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask) {
+		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+		if (!dev || !drv)
+			continue;
+		if (index >= drv->state_count)
+			continue;
+		cpuidle_driver_state_disabled(drv, index, disable);
+	}
+
+	if (disable) {
+		preempt_disable();
+		for_each_cpu(cpu, cpumask) {
+			if (cpu != smp_processor_id() && cpu_online(cpu))
+				wake_up_if_idle(cpu);
+		}
+		preempt_enable();
+	}
+
+	return 0;
+}
+#endif
+
+#define cpu_to_dsu_freq(freq) ((freq) * 4 / 5)
+
+static int rockchip_cpufreq_update_dsu_req(struct cluster_info *cluster,
+					   unsigned int freq)
+{
+	struct device *dev = cluster->opp_info.dev;
+	unsigned int dsu_freq = rounddown(cpu_to_dsu_freq(freq), 100000);
+
+	if (cluster->is_opp_shared_dsu ||
+	    !freq_qos_request_active(&cluster->dsu_qos_req))
+		return 0;
+
+	dev_dbg(dev, "cpu to dsu: %u -> %u\n", freq, dsu_freq);
+
+	return freq_qos_update_request(&cluster->dsu_qos_req, dsu_freq);
+}
+
+static int rockchip_cpufreq_transition_notifier(struct notifier_block *nb,
+						unsigned long event, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+	struct cpufreq_policy *policy = freqs->policy;
+	struct cluster_info *cluster;
+
+	cluster = rockchip_cluster_info_lookup(policy->cpu);
+	if (!cluster)
+		return NOTIFY_BAD;
+
+	if (event == CPUFREQ_PRECHANGE) {
+		if (cluster->idle_threshold_freq &&
+		    freqs->new >= cluster->idle_threshold_freq &&
+		    !cluster->is_idle_disabled) {
+			rockchip_cpufreq_idle_state_disable(policy->cpus, 1,
+							    true);
+			cluster->is_idle_disabled = true;
+		}
+	} else if (event == CPUFREQ_POSTCHANGE) {
+		if (cluster->idle_threshold_freq &&
+		    freqs->new < cluster->idle_threshold_freq &&
+		    cluster->is_idle_disabled) {
+			rockchip_cpufreq_idle_state_disable(policy->cpus, 1,
+							    false);
+			cluster->is_idle_disabled = false;
+		}
+		rockchip_cpufreq_update_dsu_req(cluster, freqs->new);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block rockchip_cpufreq_transition_notifier_block = {
+	.notifier_call = rockchip_cpufreq_transition_notifier,
+};
+
+static int rockchip_cpufreq_panic_notifier(struct notifier_block *nb,
+					   unsigned long v, void *p)
+{
+	struct cluster_info *ci;
+	struct device *dev;
+
+	list_for_each_entry(ci, &cluster_info_list, list_head) {
+		dev = ci->opp_info.dev;
+
+		if (ci->regulator_count == 1)
+			dev_info(dev, "cur_freq: %lu Hz, volt: %lu uV\n",
+				 ci->rate, ci->volt);
+		else
+			dev_info(dev, "cur_freq: %lu Hz, volt_vdd: %lu uV, volt_mem: %lu uV\n",
+				 ci->rate, ci->volt, ci->mem_volt);
+	}
+
+	return 0;
+}
+
+static struct notifier_block rockchip_cpufreq_panic_notifier_block = {
+	.notifier_call = rockchip_cpufreq_panic_notifier,
+};
 
 static int __init rockchip_cpufreq_driver_init(void)
 {
 	struct cluster_info *cluster, *pos;
 	struct cpufreq_dt_platform_data pdata = {0};
-	int cpu, ret, i = 0;
+	int cpu, ret;
 
 	for_each_possible_cpu(cpu) {
 		cluster = rockchip_cluster_info_lookup(cpu);
@@ -510,33 +972,41 @@
 
 		ret = rockchip_cpufreq_cluster_init(cpu, cluster);
 		if (ret) {
-			if (ret != -ENOENT) {
-				pr_err("Failed to initialize dvfs info cpu%d\n",
-				       cpu);
-				goto release_cluster_info;
-			}
-
-			/*
-			 * As the OPP document said, only one OPP binding
-			 * should be used per device.
-			 * And if there are multiple clusters on rockchip
-			 * platforms, we should use operating-points-v2.
-			 * So if don't support operating-points-v2, there must
-			 * be only one cluster, the list shuold be null.
-			 */
-			list_for_each_entry(pos, &cluster_info_list, list_head)
-				i++;
-			if (i)
-				goto release_cluster_info;
-			list_add(&cluster->list_head, &cluster_info_list);
-			goto next;
+			pr_err("Failed to initialize dvfs info cpu%d\n", cpu);
+			goto release_cluster_info;
 		}
 		list_add(&cluster->list_head, &cluster_info_list);
 	}
 
-next:
 	pdata.have_governor_per_policy = true;
 	pdata.suspend = rockchip_cpufreq_suspend;
+
+	ret = cpufreq_register_notifier(&rockchip_cpufreq_notifier_block,
+					CPUFREQ_POLICY_NOTIFIER);
+	if (ret) {
+		pr_err("failed to register cpufreq notifier\n");
+		goto release_cluster_info;
+	}
+
+	if (of_machine_is_compatible("rockchip,rk3588")) {
+		ret = cpufreq_register_notifier(&rockchip_cpufreq_transition_notifier_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+		if (ret) {
+			cpufreq_unregister_notifier(&rockchip_cpufreq_notifier_block,
+						    CPUFREQ_POLICY_NOTIFIER);
+			pr_err("failed to register cpufreq notifier\n");
+			goto release_cluster_info;
+		}
+#ifdef MODULE
+		cpu_latency_qos_add_request(&idle_pm_qos, PM_QOS_DEFAULT_VALUE);
+#endif
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+					     &rockchip_cpufreq_panic_notifier_block);
+	if (ret)
+		pr_err("failed to register cpufreq panic notifier\n");
+
 	return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
 							     -1, (void *)&pdata,
 							     sizeof(struct cpufreq_dt_platform_data)));