2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/drivers/devfreq/rockchip_dmc.c
....@@ -1,20 +1,13 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
2
- * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd.
3
- * Author: Lin Huang <hl@rock-chips.com>
3
+ * Rockchip Generic dmc support.
44 *
5
- * This program is free software; you can redistribute it and/or modify it
6
- * under the terms and conditions of the GNU General Public License,
7
- * version 2, as published by the Free Software Foundation.
8
- *
9
- * This program is distributed in the hope it will be useful, but WITHOUT
10
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12
- * more details.
5
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
6
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
137 */
148
159 #include <dt-bindings/clock/rockchip-ddr.h>
1610 #include <dt-bindings/soc/rockchip-system-status.h>
17
-#include <drm/drmP.h>
1811 #include <drm/drm_modeset_lock.h>
1912 #include <linux/arm-smccc.h>
2013 #include <linux/clk.h>
....@@ -50,11 +43,13 @@
5043 #include <soc/rockchip/rockchip_system_monitor.h>
5144 #include <soc/rockchip/rockchip-system-status.h>
5245 #include <soc/rockchip/rockchip_opp_select.h>
53
-#include <soc/rockchip/scpi.h>
5446 #include <uapi/drm/drm_mode.h>
5547
5648 #include "governor.h"
5749 #include "rockchip_dmc_timing.h"
50
+#include "../clk/rockchip/clk.h"
51
+#include "../gpu/drm/rockchip/rockchip_drm_drv.h"
52
+#include "../opp/opp.h"
5853
5954 #define system_status_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, \
6055 status_nb)
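
The system_status_to_dmcfreq() macro above (like the reboot/boost/input variants in the next hunk) relies on container_of() to recover the enclosing struct rockchip_dmcfreq from a pointer to one of its embedded members, here the status notifier_block. A minimal, self-contained illustration of that pattern follows; the my_driver and my_notifier_cb names are hypothetical, only the technique is taken from the driver.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel pieces, so the sketch builds on its own. */
struct notifier_block { int priority; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_driver {
	int id;
	struct notifier_block status_nb;	/* embedded member, as in rockchip_dmcfreq */
};

/* The callback only receives the notifier_block; container_of() finds its owner. */
static int my_notifier_cb(struct notifier_block *nb)
{
	struct my_driver *drv = container_of(nb, struct my_driver, status_nb);

	printf("notified driver id=%d\n", drv->id);
	return 0;
}

int main(void)
{
	struct my_driver drv = { .id = 42 };

	return my_notifier_cb(&drv.status_nb);
}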
....@@ -62,35 +57,17 @@
6257 reboot_nb)
6358 #define boost_to_dmcfreq(work) container_of(work, struct rockchip_dmcfreq, \
6459 boost_work)
65
-#define msch_rl_to_dmcfreq(work) container_of(to_delayed_work(work), \
66
- struct rockchip_dmcfreq, \
67
- msch_rl_work)
6860 #define input_hd_to_dmcfreq(hd) container_of(hd, struct rockchip_dmcfreq, \
6961 input_handler)
7062
7163 #define VIDEO_1080P_SIZE (1920 * 1080)
72
-#define FIQ_INIT_HANDLER (0x1)
73
-#define FIQ_CPU_TGT_BOOT (0x0) /* to booting cpu */
74
-#define FIQ_NUM_FOR_DCF (143) /* NA irq map to fiq for dcf */
7564 #define DTS_PAR_OFFSET (4096)
76
-#define MSCH_RL_DELAY_TIME 50 /* ms */
7765
7866 #define FALLBACK_STATIC_TEMPERATURE 55000
7967
80
-struct freq_map_table {
81
- unsigned int min;
82
- unsigned int max;
83
- unsigned long freq;
84
-};
85
-
86
-struct rl_map_table {
87
- unsigned int pn; /* panel number */
88
- unsigned int rl; /* readlatency */
89
-};
90
-
9168 struct dmc_freq_table {
9269 unsigned long freq;
93
- unsigned long volt;
70
+ struct dev_pm_opp_supply supplies[2];
9471 };
9572
9673 struct share_params {
....@@ -116,7 +93,9 @@
11693
11794 u32 freq_count;
11895 u32 freq_info_mhz[6];
119
- /* if need, add parameter after */
96
+ u32 wait_mode;
97
+ u32 vop_scan_line_time_ns;
98
+ /* if need, add parameter after */
12099 };
121100
122101 static struct share_params *ddr_psci_param;
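
ddr_psci_param above points into a page shared with the trusted firmware: the per-SoC init paths later in this patch request that page with sip_smc_request_share_mem(), fill struct share_params (including the wait_mode and vop_scan_line_time_ns fields added here), and then issue ROCKCHIP_SIP_CONFIG_DRAM_INIT. The sketch below condenses those steps into one hypothetical helper; it assumes the driver's own symbols (ddr_psci_param, sip_smc_*, SHARE_PAGE_TYPE_DDR), and the error handling is illustrative rather than copied from any single init function.

/*
 * Condensed sketch of the share-page handshake used by the *_dmc_init()
 * functions in this file.
 */
static int example_share_param_init(struct device *dev)
{
	struct arm_smccc_res res;

	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(*ddr_psci_param),
						     4096),
					SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0)
		return -ENOMEM;
	ddr_psci_param = (struct share_params *)res.a1;

	ddr_psci_param->hz = 0;
	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
	ddr_psci_param->wait_mode = 0;		/* field added by this patch */

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
			   ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(dev, "dram init SMC failed: %lx\n", res.a0);
		return -ENXIO;
	}

	return 0;
}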
....@@ -128,28 +107,27 @@
128107
129108 struct rockchip_dmcfreq {
130109 struct device *dev;
131
- struct devfreq *devfreq;
110
+ struct dmcfreq_common_info info;
132111 struct rockchip_dmcfreq_ondemand_data ondemand_data;
133112 struct clk *dmc_clk;
134113 struct devfreq_event_dev **edev;
135114 struct mutex lock; /* serializes access to video_info_list */
136115 struct dram_timing *timing;
137116 struct regulator *vdd_center;
117
+ struct regulator *mem_reg;
138118 struct notifier_block status_nb;
119
+ struct notifier_block panic_nb;
139120 struct list_head video_info_list;
140
- struct freq_map_table *vop_bw_tbl;
141
- struct freq_map_table *vop_frame_bw_tbl;
142121 struct freq_map_table *cpu_bw_tbl;
143122 struct work_struct boost_work;
144123 struct input_handler input_handler;
145124 struct monitor_dev_info *mdev_info;
146
- struct rl_map_table *vop_pn_rl_tbl;
147
- struct delayed_work msch_rl_work;
125
+ struct share_params *set_rate_params;
148126
149127 unsigned long *nocp_bw;
150
- unsigned long rate, target_rate;
151
- unsigned long volt, target_volt;
152
-
128
+ unsigned long rate;
129
+ unsigned long volt, mem_volt;
130
+ unsigned long sleep_volt, sleep_mem_volt;
153131 unsigned long auto_min_rate;
154132 unsigned long status_rate;
155133 unsigned long normal_rate;
....@@ -157,15 +135,16 @@
157135 unsigned long video_4k_rate;
158136 unsigned long video_4k_10b_rate;
159137 unsigned long video_4k_60p_rate;
138
+ unsigned long video_svep_rate;
160139 unsigned long performance_rate;
161140 unsigned long hdmi_rate;
141
+ unsigned long hdmirx_rate;
162142 unsigned long idle_rate;
163143 unsigned long suspend_rate;
164144 unsigned long reboot_rate;
165145 unsigned long boost_rate;
166146 unsigned long fixed_rate;
167147 unsigned long low_power_rate;
168
- unsigned long vop_req_rate;
169148
170149 unsigned long freq_count;
171150 unsigned long freq_info_rate[6];
....@@ -175,17 +154,14 @@
175154 unsigned long rate_high;
176155
177156 unsigned int min_cpu_freq;
178
- unsigned int auto_freq_en;
179157 unsigned int system_status_en;
180158 unsigned int refresh;
181
- unsigned int last_refresh;
182
- unsigned int read_latency;
183159 int edev_count;
184160 int dfi_id;
185161 int nocp_cpu_id;
162
+ int regulator_count;
186163
187164 bool is_fixed;
188
- bool is_msch_rl_work_started;
189165 bool is_set_rate_direct;
190166
191167 struct thermal_cooling_device *devfreq_cooling;
....@@ -197,12 +173,19 @@
197173 u64 touchboostpulse_endtime;
198174
199175 int (*set_auto_self_refresh)(u32 en);
200
- int (*set_msch_readlatency)(unsigned int rl);
201176 };
202177
203178 static struct pm_qos_request pm_qos;
204179
205
-static DECLARE_RWSEM(rockchip_dmcfreq_sem);
180
+static int rockchip_dmcfreq_opp_helper(struct dev_pm_set_opp_data *data);
181
+
182
+static struct monitor_dev_profile dmc_mdevp = {
183
+ .type = MONITOR_TYPE_DEV,
184
+ .low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
185
+ .high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
186
+ .update_volt = rockchip_monitor_check_rate_volt,
187
+ .set_opp = rockchip_dmcfreq_opp_helper,
188
+};
206189
207190 static inline unsigned long is_dualview(unsigned long status)
208191 {
....@@ -215,24 +198,6 @@
215198 (status & SYS_STATUS_CIF0) ||
216199 (status & SYS_STATUS_CIF1);
217200 }
218
-
219
-void rockchip_dmcfreq_lock(void)
220
-{
221
- down_read(&rockchip_dmcfreq_sem);
222
-}
223
-EXPORT_SYMBOL(rockchip_dmcfreq_lock);
224
-
225
-void rockchip_dmcfreq_lock_nested(void)
226
-{
227
- down_read_nested(&rockchip_dmcfreq_sem, SINGLE_DEPTH_NESTING);
228
-}
229
-EXPORT_SYMBOL(rockchip_dmcfreq_lock_nested);
230
-
231
-void rockchip_dmcfreq_unlock(void)
232
-{
233
- up_read(&rockchip_dmcfreq_sem);
234
-}
235
-EXPORT_SYMBOL(rockchip_dmcfreq_unlock);
236201
237202 /*
238203 * function: packaging de-skew setting to px30_ddr_dts_config_timing,
....@@ -347,21 +312,8 @@
347312
348313 static int rk_drm_get_lcdc_type(void)
349314 {
350
- struct drm_device *drm;
351
- u32 lcdc_type = 0;
315
+ u32 lcdc_type = rockchip_drm_get_sub_dev_type();
352316
353
- drm = drm_device_get_by_name("rockchip");
354
- if (drm) {
355
- struct drm_connector *conn;
356
-
357
- list_for_each_entry(conn, &drm->mode_config.connector_list,
358
- head) {
359
- if (conn->encoder) {
360
- lcdc_type = conn->connector_type;
361
- break;
362
- }
363
- }
364
- }
365317 switch (lcdc_type) {
366318 case DRM_MODE_CONNECTOR_DPI:
367319 case DRM_MODE_CONNECTOR_LVDS:
....@@ -397,6 +349,7 @@
397349
398350 ddr_psci_param->hz = target_rate;
399351 ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
352
+ ddr_psci_param->vop_scan_line_time_ns = rockchip_drm_get_scan_line_time_ns();
400353 ddr_psci_param->wait_flag1 = 1;
401354 ddr_psci_param->wait_flag0 = 1;
402355
....@@ -409,48 +362,47 @@
409362 return res.a0;
410363 }
411364
412
-static int rockchip_dmcfreq_target(struct device *dev, unsigned long *freq,
413
- u32 flags)
365
+static int rockchip_dmcfreq_set_volt(struct device *dev, struct regulator *reg,
366
+ struct dev_pm_opp_supply *supply,
367
+ char *reg_name)
414368 {
369
+ int ret;
370
+
371
+ dev_dbg(dev, "%s: %s voltages (mV): %lu %lu %lu\n", __func__, reg_name,
372
+ supply->u_volt_min, supply->u_volt, supply->u_volt_max);
373
+ ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
374
+ supply->u_volt, INT_MAX);
375
+ if (ret)
376
+ dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
377
+ __func__, supply->u_volt_min, supply->u_volt,
378
+ supply->u_volt_max, ret);
379
+
380
+ return ret;
381
+}
382
+
383
+static int rockchip_dmcfreq_opp_helper(struct dev_pm_set_opp_data *data)
384
+{
385
+ struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
386
+ struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
387
+ struct regulator *vdd_reg = data->regulators[0];
388
+ struct dev_pm_opp_supply *old_supply_mem;
389
+ struct dev_pm_opp_supply *new_supply_mem;
390
+ struct regulator *mem_reg;
391
+ struct device *dev = data->dev;
392
+ struct clk *clk = data->clk;
415393 struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
416
- struct dev_pm_opp *opp;
417394 struct cpufreq_policy *policy;
418
- unsigned long old_clk_rate = dmcfreq->rate;
419
- unsigned long target_volt, target_rate;
420
- unsigned int cpu_cur, cpufreq_cur;
395
+ unsigned long old_freq = data->old_opp.rate;
396
+ unsigned long freq = data->new_opp.rate;
397
+ unsigned int reg_count = data->regulator_count;
421398 bool is_cpufreq_changed = false;
422
- int err = 0;
399
+ unsigned int cpu_cur, cpufreq_cur;
400
+ int ret = 0;
423401
424
- opp = devfreq_recommended_opp(dev, freq, flags);
425
- if (IS_ERR(opp)) {
426
- dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
427
- return PTR_ERR(opp);
428
- }
429
- target_volt = dev_pm_opp_get_voltage(opp);
430
- dev_pm_opp_put(opp);
431
-
432
- if (dmcfreq->is_set_rate_direct) {
433
- target_rate = *freq;
434
- } else {
435
- target_rate = clk_round_rate(dmcfreq->dmc_clk, *freq);
436
- if ((long)target_rate <= 0)
437
- target_rate = *freq;
438
- }
439
-
440
- if (dmcfreq->rate == target_rate) {
441
- if (dmcfreq->volt == target_volt)
442
- return 0;
443
- err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
444
- INT_MAX);
445
- if (err) {
446
- dev_err(dev, "Cannot set voltage %lu uV\n",
447
- target_volt);
448
- return err;
449
- }
450
- dmcfreq->volt = target_volt;
451
- return 0;
452
- } else if (!dmcfreq->volt) {
453
- dmcfreq->volt = regulator_get_voltage(dmcfreq->vdd_center);
402
+ if (reg_count > 1) {
403
+ old_supply_mem = &data->old_opp.supplies[1];
404
+ new_supply_mem = &data->new_opp.supplies[1];
405
+ mem_reg = data->regulators[1];
454406 }
455407
456408 /*
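
rockchip_dmcfreq_set_volt() introduced above is essentially a logging wrapper around regulator_set_voltage_triplet(): the OPP's u_volt_min bounds undershoot, u_volt is the target, and INT_MAX is passed as the ceiling so the regulator core may round up to the nearest supported step. A hedged stand-alone version of the same idea (the set_supply_voltage name is hypothetical):

#include <linux/device.h>
#include <linux/limits.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

/* Program one supply from a dev_pm_opp_supply, mirroring the helper above. */
static int set_supply_voltage(struct device *dev, struct regulator *reg,
			      const struct dev_pm_opp_supply *supply)
{
	int ret;

	/* min = u_volt_min, target = u_volt, max = "anything at or above target" */
	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, INT_MAX);
	if (ret)
		dev_err(dev, "failed to set %lu uV: %d\n", supply->u_volt, ret);

	return ret;
}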
....@@ -460,47 +412,54 @@
460412 * Do this before taking the policy rwsem to avoid deadlocks between the
461413 * mutex that is locked/unlocked in cpu_hotplug_disable/enable. And it
462414 * can also avoid deadlocks between the mutex that is locked/unlocked
463
- * in get/put_online_cpus (such as store_scaling_max_freq()).
415
+ * in cpus_read_lock/unlock (such as store_scaling_max_freq()).
464416 */
465
- get_online_cpus();
417
+ cpus_read_lock();
466418
467
- /*
468
- * Go to specified cpufreq and block other cpufreq changes since
469
- * set_rate needs to complete during vblank.
470
- */
471
- cpu_cur = raw_smp_processor_id();
472
- policy = cpufreq_cpu_get(cpu_cur);
473
- if (!policy) {
474
- dev_err(dev, "cpu%d policy NULL\n", cpu_cur);
475
- goto cpufreq;
476
- }
477
- down_write(&policy->rwsem);
478
- cpufreq_cur = cpufreq_quick_get(cpu_cur);
419
+ if (dmcfreq->min_cpu_freq) {
420
+ /*
421
+ * Go to specified cpufreq and block other cpufreq changes since
422
+ * set_rate needs to complete during vblank.
423
+ */
424
+ cpu_cur = raw_smp_processor_id();
425
+ policy = cpufreq_cpu_get(cpu_cur);
426
+ if (!policy) {
427
+ dev_err(dev, "cpu%d policy NULL\n", cpu_cur);
428
+ ret = -EINVAL;
429
+ goto cpufreq;
430
+ }
431
+ down_write(&policy->rwsem);
432
+ cpufreq_cur = cpufreq_quick_get(cpu_cur);
479433
480
- /* If we're thermally throttled; don't change; */
481
- if (dmcfreq->min_cpu_freq && cpufreq_cur < dmcfreq->min_cpu_freq) {
482
- if (policy->max >= dmcfreq->min_cpu_freq) {
483
- __cpufreq_driver_target(policy, dmcfreq->min_cpu_freq,
484
- CPUFREQ_RELATION_L);
485
- is_cpufreq_changed = true;
486
- } else {
487
- dev_dbg(dev, "CPU may too slow for DMC (%d MHz)\n",
488
- policy->max);
434
+ /* If we're thermally throttled; don't change; */
435
+ if (cpufreq_cur < dmcfreq->min_cpu_freq) {
436
+ if (policy->max >= dmcfreq->min_cpu_freq) {
437
+ __cpufreq_driver_target(policy,
438
+ dmcfreq->min_cpu_freq,
439
+ CPUFREQ_RELATION_L);
440
+ is_cpufreq_changed = true;
441
+ } else {
442
+ dev_dbg(dev,
443
+ "CPU may too slow for DMC (%d MHz)\n",
444
+ policy->max);
445
+ }
489446 }
490447 }
491448
492
- /*
493
- * If frequency scaling from low to high, adjust voltage first.
494
- * If frequency scaling from high to low, adjust frequency first.
495
- */
496
- if (old_clk_rate < target_rate) {
497
- err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
498
- INT_MAX);
499
- if (err) {
500
- dev_err(dev, "Cannot set voltage %lu uV\n",
501
- target_volt);
449
+ /* Scaling up? Scale voltage before frequency */
450
+ if (freq >= old_freq) {
451
+ if (reg_count > 1) {
452
+ ret = rockchip_dmcfreq_set_volt(dev, mem_reg,
453
+ new_supply_mem, "mem");
454
+ if (ret)
455
+ goto restore_voltage;
456
+ }
457
+ ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd,
458
+ "vdd");
459
+ if (ret)
460
+ goto restore_voltage;
461
+ if (freq == old_freq)
502462 goto out;
503
- }
504463 }
505464
506465 /*
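
The "Scaling up? Scale voltage before frequency" branch above, together with the scaling-down branch in the next hunk, implements the usual DVFS ordering: the rail must always be high enough for the faster of the two rates, so it is raised before a clock increase and lowered only after a clock decrease, with a rollback path when anything fails. A self-contained model of that ordering in plain C; set_clock_hz()/set_volt_uv() are hypothetical stubs and the numbers are made up.

#include <stdio.h>

static int set_clock_hz(unsigned long hz) { printf("clk  -> %lu Hz\n", hz); return 0; }
static int set_volt_uv(unsigned long uv)  { printf("volt -> %lu uV\n", uv); return 0; }

/*
 * Raise voltage before a frequency increase, lower it after a frequency
 * decrease, and restore the old voltage if the clock change fails so the
 * rail is never left below what the current rate needs.
 */
static int dvfs_transition(unsigned long old_hz, unsigned long new_hz,
			   unsigned long old_uv, unsigned long new_uv)
{
	int ret;

	if (new_hz >= old_hz) {			/* scaling up: volt first */
		ret = set_volt_uv(new_uv);
		if (ret)
			return ret;
	}

	ret = set_clock_hz(new_hz);
	if (ret) {
		set_volt_uv(old_uv);		/* roll back the supply */
		return ret;
	}

	if (new_hz < old_hz)			/* scaling down: volt last */
		ret = set_volt_uv(new_uv);

	return ret;
}

int main(void)
{
	return dvfs_transition(528000000UL, 1056000000UL, 825000UL, 900000UL);
}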
....@@ -510,61 +469,119 @@
510469 * As a (suboptimal) workaround, let writer to spin until it gets the
511470 * lock.
512471 */
513
- while (!down_write_trylock(&rockchip_dmcfreq_sem))
472
+ while (!rockchip_dmcfreq_write_trylock())
514473 cond_resched();
515
- dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_rate);
474
+ dev_dbg(dev, "%lu Hz --> %lu Hz\n", old_freq, freq);
475
+
476
+ if (dmcfreq->set_rate_params) {
477
+ dmcfreq->set_rate_params->lcdc_type = rk_drm_get_lcdc_type();
478
+ dmcfreq->set_rate_params->wait_flag1 = 1;
479
+ dmcfreq->set_rate_params->wait_flag0 = 1;
480
+ }
516481
517482 if (dmcfreq->is_set_rate_direct)
518
- err = rockchip_ddr_set_rate(target_rate);
483
+ ret = rockchip_ddr_set_rate(freq);
519484 else
520
- err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
485
+ ret = clk_set_rate(clk, freq);
521486
522
- up_write(&rockchip_dmcfreq_sem);
523
- if (err) {
524
- dev_err(dev, "Cannot set frequency %lu (%d)\n",
525
- target_rate, err);
526
- regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
527
- INT_MAX);
528
- goto out;
487
+ rockchip_dmcfreq_write_unlock();
488
+ if (ret) {
489
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
490
+ ret);
491
+ goto restore_voltage;
529492 }
530493
531494 /*
532495 * Check the dpll rate,
533496 * There only two result we will get,
534497 * 1. Ddr frequency scaling fail, we still get the old rate.
535
- * 2. Ddr frequency scaling sucessful, we get the rate we set.
498
+ * 2. Ddr frequency scaling successful, we get the rate we set.
536499 */
537
- dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);
500
+ dmcfreq->rate = clk_get_rate(clk);
538501
539502 /* If get the incorrect rate, set voltage to old value. */
540
- if (dmcfreq->rate != target_rate) {
503
+ if (dmcfreq->rate != freq) {
541504 dev_err(dev, "Get wrong frequency, Request %lu, Current %lu\n",
542
- target_rate, dmcfreq->rate);
543
- regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
544
- INT_MAX);
545
- goto out;
546
- } else if (old_clk_rate > target_rate) {
547
- err = regulator_set_voltage(dmcfreq->vdd_center, target_volt,
548
- INT_MAX);
549
- if (err) {
550
- dev_err(dev, "Cannot set vol %lu uV\n", target_volt);
551
- goto out;
552
- }
505
+ freq, dmcfreq->rate);
506
+ ret = -EINVAL;
507
+ goto restore_voltage;
553508 }
554509
555
- if (dmcfreq->devfreq)
556
- dmcfreq->devfreq->last_status.current_frequency = *freq;
510
+ /* Scaling down? Scale voltage after frequency */
511
+ if (freq < old_freq) {
512
+ ret = rockchip_dmcfreq_set_volt(dev, vdd_reg, new_supply_vdd,
513
+ "vdd");
514
+ if (ret)
515
+ goto restore_freq;
516
+ if (reg_count > 1) {
517
+ ret = rockchip_dmcfreq_set_volt(dev, mem_reg,
518
+ new_supply_mem, "mem");
519
+ if (ret)
520
+ goto restore_freq;
521
+ }
522
+ }
523
+ dmcfreq->volt = new_supply_vdd->u_volt;
524
+ if (reg_count > 1)
525
+ dmcfreq->mem_volt = new_supply_mem->u_volt;
557526
558
- dmcfreq->volt = target_volt;
527
+ goto out;
528
+
529
+restore_freq:
530
+ if (dmcfreq->is_set_rate_direct)
531
+ ret = rockchip_ddr_set_rate(freq);
532
+ else
533
+ ret = clk_set_rate(clk, freq);
534
+ if (ret)
535
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
536
+ __func__, old_freq);
537
+restore_voltage:
538
+ if (reg_count > 1 && old_supply_mem->u_volt)
539
+ rockchip_dmcfreq_set_volt(dev, mem_reg, old_supply_mem, "mem");
540
+ if (old_supply_vdd->u_volt)
541
+ rockchip_dmcfreq_set_volt(dev, vdd_reg, old_supply_vdd, "vdd");
559542 out:
560
- if (is_cpufreq_changed)
561
- __cpufreq_driver_target(policy, cpufreq_cur,
562
- CPUFREQ_RELATION_L);
563
- up_write(&policy->rwsem);
564
- cpufreq_cpu_put(policy);
543
+ if (dmcfreq->min_cpu_freq) {
544
+ if (is_cpufreq_changed)
545
+ __cpufreq_driver_target(policy, cpufreq_cur,
546
+ CPUFREQ_RELATION_L);
547
+ up_write(&policy->rwsem);
548
+ cpufreq_cpu_put(policy);
549
+ }
565550 cpufreq:
566
- put_online_cpus();
567
- return err;
551
+ cpus_read_unlock();
552
+
553
+ return ret;
554
+}
555
+
556
+static int rockchip_dmcfreq_target(struct device *dev, unsigned long *freq,
557
+ u32 flags)
558
+{
559
+ struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
560
+ struct devfreq *devfreq;
561
+ struct dev_pm_opp *opp;
562
+ int ret = 0;
563
+
564
+ if (!dmc_mdevp.is_checked)
565
+ return -EINVAL;
566
+
567
+ opp = devfreq_recommended_opp(dev, freq, flags);
568
+ if (IS_ERR(opp)) {
569
+ dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
570
+ return PTR_ERR(opp);
571
+ }
572
+ dev_pm_opp_put(opp);
573
+
574
+ rockchip_monitor_volt_adjust_lock(dmcfreq->mdev_info);
575
+ ret = dev_pm_opp_set_rate(dev, *freq);
576
+ if (!ret) {
577
+ if (dmcfreq->info.devfreq) {
578
+ devfreq = dmcfreq->info.devfreq;
579
+ devfreq->last_status.current_frequency = *freq;
580
+ }
581
+ }
582
+ rockchip_monitor_volt_adjust_unlock(dmcfreq->mdev_info);
583
+
584
+ return ret;
568585 }
569586
570587 static int rockchip_dmcfreq_get_dev_status(struct device *dev,
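
After this refactor, rockchip_dmcfreq_target() above only validates the request against the OPP table and hands the whole transition to dev_pm_opp_set_rate(), which in turn runs the registered set_opp helper for clock and regulator sequencing. A minimal, generic devfreq ->target() callback of that shape (without the Rockchip-specific monitor locking) might look like the sketch below.

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

/* Sketch: a devfreq target callback that defers the transition to the OPP core. */
static int example_dmc_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;

	/* Clamp *freq to a real OPP, honouring DEVFREQ_FLAG_LEAST_UPPER_BOUND. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	/* Clock and regulator sequencing happens behind this call. */
	return dev_pm_opp_set_rate(dev, *freq);
}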
....@@ -574,15 +591,20 @@
574591 struct devfreq_event_data edata;
575592 int i, ret = 0;
576593
577
- if (!dmcfreq->auto_freq_en)
594
+ if (!dmcfreq->info.auto_freq_en)
578595 return -EINVAL;
579596
597
+ /*
598
+ * RK3588 platform may crash if the CPU and MCU access the DFI/DMC
599
+ * registers at same time.
600
+ */
601
+ rockchip_monitor_volt_adjust_lock(dmcfreq->mdev_info);
580602 for (i = 0; i < dmcfreq->edev_count; i++) {
581603 ret = devfreq_event_get_event(dmcfreq->edev[i], &edata);
582604 if (ret < 0) {
583605 dev_err(dev, "failed to get event %s\n",
584606 dmcfreq->edev[i]->desc->name);
585
- return ret;
607
+ goto out;
586608 }
587609 if (i == dmcfreq->dfi_id) {
588610 stat->busy_time = edata.load_count;
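
In rockchip_dmcfreq_get_dev_status() above, the DFI event counters provide busy_time and total_time for the sample window, and an ondemand-style governor later compares the resulting load with the upthreshold/downdifferential values this driver reads from the device tree. The tiny self-contained model below shows roughly how that threshold comparison works; the real policy lives in the devfreq governor and also scales the target frequency, and all numbers here are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned long busy_time = 450000;	/* e.g. DFI load_count  */
	unsigned long total_time = 1000000;	/* e.g. DFI total count */
	unsigned int upthreshold = 40;		/* percent, as in the DT property */
	unsigned int downdifferential = 20;

	unsigned int load = (unsigned int)(busy_time * 100 / total_time);

	if (load > upthreshold)
		printf("load %u%% > %u%%: scale DMC up\n", load, upthreshold);
	else if (load < upthreshold - downdifferential)
		printf("load %u%% low: scale DMC down\n", load);
	else
		printf("load %u%%: keep current rate\n", load);

	return 0;
}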
....@@ -592,7 +614,10 @@
592614 }
593615 }
594616
595
- return 0;
617
+out:
618
+ rockchip_monitor_volt_adjust_unlock(dmcfreq->mdev_info);
619
+
620
+ return ret;
596621 }
597622
598623 static int rockchip_dmcfreq_get_cur_freq(struct device *dev,
....@@ -963,70 +988,6 @@
963988 of_node_put(np_tim);
964989 }
965990
966
-static struct rk3368_dram_timing *of_get_rk3368_timings(struct device *dev,
967
- struct device_node *np)
968
-{
969
- struct rk3368_dram_timing *timing = NULL;
970
- struct device_node *np_tim;
971
- int ret = 0;
972
-
973
- np_tim = of_parse_phandle(np, "ddr_timing", 0);
974
- if (np_tim) {
975
- timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
976
- if (!timing)
977
- goto err;
978
-
979
- ret |= of_property_read_u32(np_tim, "dram_spd_bin",
980
- &timing->dram_spd_bin);
981
- ret |= of_property_read_u32(np_tim, "sr_idle",
982
- &timing->sr_idle);
983
- ret |= of_property_read_u32(np_tim, "pd_idle",
984
- &timing->pd_idle);
985
- ret |= of_property_read_u32(np_tim, "dram_dll_disb_freq",
986
- &timing->dram_dll_dis_freq);
987
- ret |= of_property_read_u32(np_tim, "phy_dll_disb_freq",
988
- &timing->phy_dll_dis_freq);
989
- ret |= of_property_read_u32(np_tim, "dram_odt_disb_freq",
990
- &timing->dram_odt_dis_freq);
991
- ret |= of_property_read_u32(np_tim, "phy_odt_disb_freq",
992
- &timing->phy_odt_dis_freq);
993
- ret |= of_property_read_u32(np_tim, "ddr3_drv",
994
- &timing->ddr3_drv);
995
- ret |= of_property_read_u32(np_tim, "ddr3_odt",
996
- &timing->ddr3_odt);
997
- ret |= of_property_read_u32(np_tim, "lpddr3_drv",
998
- &timing->lpddr3_drv);
999
- ret |= of_property_read_u32(np_tim, "lpddr3_odt",
1000
- &timing->lpddr3_odt);
1001
- ret |= of_property_read_u32(np_tim, "lpddr2_drv",
1002
- &timing->lpddr2_drv);
1003
- ret |= of_property_read_u32(np_tim, "phy_clk_drv",
1004
- &timing->phy_clk_drv);
1005
- ret |= of_property_read_u32(np_tim, "phy_cmd_drv",
1006
- &timing->phy_cmd_drv);
1007
- ret |= of_property_read_u32(np_tim, "phy_dqs_drv",
1008
- &timing->phy_dqs_drv);
1009
- ret |= of_property_read_u32(np_tim, "phy_odt",
1010
- &timing->phy_odt);
1011
- ret |= of_property_read_u32(np_tim, "ddr_2t",
1012
- &timing->ddr_2t);
1013
- if (ret) {
1014
- devm_kfree(dev, timing);
1015
- goto err;
1016
- }
1017
- of_node_put(np_tim);
1018
- return timing;
1019
- }
1020
-
1021
-err:
1022
- if (timing) {
1023
- devm_kfree(dev, timing);
1024
- timing = NULL;
1025
- }
1026
- of_node_put(np_tim);
1027
- return timing;
1028
-}
1029
-
1030991 static struct rk3399_dram_timing *of_get_rk3399_timings(struct device *dev,
1031992 struct device_node *np)
1032993 {
....@@ -1177,7 +1138,7 @@
11771138 * CPUs only enter WFI when idle to make sure that
11781139 * FIQn can quick response.
11791140 */
1180
- pm_qos_update_request(&pm_qos, 0);
1141
+ cpu_latency_qos_update_request(&pm_qos, 0);
11811142
11821143 if (wait_ctrl.dcf_en == 1) {
11831144 /* start dcf */
....@@ -1193,7 +1154,17 @@
11931154 wait_event_timeout(wait_ctrl.wait_wq, (wait_ctrl.wait_flag == 0),
11941155 msecs_to_jiffies(wait_ctrl.wait_time_out_ms));
11951156
1196
- pm_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
1157
+ /*
1158
+ * If waiting for wait_ctrl.complt_irq times out, clear the IRQ and stop the MCU by
1159
+ * sip_smc_dram(DRAM_POST_SET_RATE).
1160
+ */
1161
+ if (wait_ctrl.dcf_en == 2 && wait_ctrl.wait_flag != 0) {
1162
+ res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE);
1163
+ if (res.a0)
1164
+ pr_err("%s: dram post set rate error:%lx\n", __func__, res.a0);
1165
+ }
1166
+
1167
+ cpu_latency_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
11971168 disable_irq(wait_ctrl.complt_irq);
11981169
11991170 return 0;
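
The hunks above switch from the old pm_qos_update_request() calls to the cpu_latency_qos_*() API (the rename that landed in v5.7): a 0 us constraint keeps every CPU in shallow WFI-level idle while the switch waits for the completion interrupt, and PM_QOS_DEFAULT_VALUE lifts the constraint afterwards. A bare sketch of that bracket with a hypothetical critical section:

#include <linux/pm_qos.h>

static struct pm_qos_request example_qos;

static void example_init(void)
{
	/* Start with no constraint. */
	cpu_latency_qos_add_request(&example_qos, PM_QOS_DEFAULT_VALUE);
}

/* Keep CPUs out of deep C-states for the duration of a latency-critical section. */
static void example_latency_critical_section(void)
{
	cpu_latency_qos_update_request(&example_qos, 0);

	/* ... e.g. wait for the DCF "complete" IRQ with a timeout ... */

	cpu_latency_qos_update_request(&example_qos, PM_QOS_DEFAULT_VALUE);
}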
....@@ -1231,7 +1202,7 @@
12311202 return ret;
12321203 }
12331204
1234
- freq_table = kmalloc(sizeof(struct dmc_freq_table) * count, GFP_KERNEL);
1205
+ freq_table = kzalloc(sizeof(*freq_table) * count, GFP_KERNEL);
12351206 for (i = 0, rate = 0; i < count; i++, rate++) {
12361207 /* find next rate */
12371208 opp = dev_pm_opp_find_freq_ceil(dmcfreq->dev, &rate);
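
The allocation above moves from kmalloc() to kzalloc() so the table starts zeroed; for an array sized as sizeof(*elem) * count, the overflow-checked kcalloc() spelling would be the more idiomatic equivalent, as sketched below (illustrative only, not part of the patch).

#include <linux/slab.h>

/* Zeroed array allocation with a built-in multiplication overflow check. */
static struct dmc_freq_table *alloc_freq_table(unsigned int count)
{
	return kcalloc(count, sizeof(struct dmc_freq_table), GFP_KERNEL);
}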
....@@ -1241,7 +1212,7 @@
12411212 goto out;
12421213 }
12431214 freq_table[i].freq = rate;
1244
- freq_table[i].volt = dev_pm_opp_get_voltage(opp);
1215
+ freq_table[i].supplies[0].u_volt = dev_pm_opp_get_voltage(opp);
12451216 dev_pm_opp_put(opp);
12461217
12471218 for (j = 0; j < dmcfreq->freq_count; j++) {
....@@ -1249,7 +1220,7 @@
12491220 break;
12501221 }
12511222 if (j == dmcfreq->freq_count)
1252
- dev_pm_opp_remove(dmcfreq->dev, rate);
1223
+ dev_pm_opp_disable(dmcfreq->dev, rate);
12531224 }
12541225
12551226 for (i = 0; i < dmcfreq->freq_count; i++) {
....@@ -1258,7 +1229,7 @@
12581229 break;
12591230 } else if (dmcfreq->freq_info_rate[i] < freq_table[j].freq) {
12601231 dev_pm_opp_add(dmcfreq->dev, dmcfreq->freq_info_rate[i],
1261
- freq_table[j].volt);
1232
+ freq_table[j].supplies[0].u_volt);
12621233 break;
12631234 }
12641235 }
....@@ -1272,6 +1243,103 @@
12721243 goto out;
12731244 }
12741245 }
1246
+
1247
+out:
1248
+ kfree(freq_table);
1249
+ return ret;
1250
+}
1251
+
1252
+static __maybe_unused int
1253
+rockchip_dmcfreq_adjust_opp_table(struct rockchip_dmcfreq *dmcfreq)
1254
+{
1255
+ struct device *dev = dmcfreq->dev;
1256
+ struct arm_smccc_res res;
1257
+ struct dev_pm_opp *opp;
1258
+ struct opp_table *opp_table;
1259
+ struct dmc_freq_table *freq_table;
1260
+ int i, j, count = 0, ret = 0;
1261
+
1262
+ res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
1263
+ ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO);
1264
+ if (res.a0) {
1265
+ dev_err(dev, "rockchip_sip_config_dram_get_freq_info error:%lx\n",
1266
+ res.a0);
1267
+ return -ENOMEM;
1268
+ }
1269
+
1270
+ if (ddr_psci_param->freq_count == 0 || ddr_psci_param->freq_count > 6) {
1271
+ dev_err(dev, "there is no available frequencies!\n");
1272
+ return -EPERM;
1273
+ }
1274
+
1275
+ for (i = 0; i < ddr_psci_param->freq_count; i++)
1276
+ dmcfreq->freq_info_rate[i] = ddr_psci_param->freq_info_mhz[i] * 1000000;
1277
+ dmcfreq->freq_count = ddr_psci_param->freq_count;
1278
+
1279
+ count = dev_pm_opp_get_opp_count(dev);
1280
+ if (count <= 0) {
1281
+ dev_err(dev, "there is no available opp\n");
1282
+ ret = count ? count : -ENODATA;
1283
+ return ret;
1284
+ }
1285
+
1286
+ freq_table = kzalloc(sizeof(*freq_table) * count, GFP_KERNEL);
1287
+ opp_table = dev_pm_opp_get_opp_table(dev);
1288
+ if (!opp_table) {
1289
+ ret = -ENOMEM;
1290
+ goto out;
1291
+ }
1292
+
1293
+ mutex_lock(&opp_table->lock);
1294
+ i = 0;
1295
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
1296
+ if (!opp->available)
1297
+ continue;
1298
+
1299
+ freq_table[i].freq = opp->rate;
1300
+ freq_table[i].supplies[0] = opp->supplies[0];
1301
+ if (dmcfreq->regulator_count > 1)
1302
+ freq_table[i].supplies[1] = opp->supplies[1];
1303
+
1304
+ i++;
1305
+ }
1306
+
1307
+ i = 0;
1308
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
1309
+ if (!opp->available)
1310
+ continue;
1311
+
1312
+ if (i >= dmcfreq->freq_count) {
1313
+ opp->available = false;
1314
+ continue;
1315
+ }
1316
+
1317
+ for (j = 0; j < count; j++) {
1318
+ if (dmcfreq->freq_info_rate[i] <= freq_table[j].freq) {
1319
+ opp->rate = dmcfreq->freq_info_rate[i];
1320
+ opp->supplies[0] = freq_table[j].supplies[0];
1321
+ if (dmcfreq->regulator_count > 1)
1322
+ opp->supplies[1] = freq_table[j].supplies[1];
1323
+
1324
+ break;
1325
+ }
1326
+ }
1327
+ if (j == count) {
1328
+ dev_err(dmcfreq->dev, "failed to match dmc_opp_table for %ld\n",
1329
+ dmcfreq->freq_info_rate[i]);
1330
+ if (i == 0) {
1331
+ ret = -EPERM;
1332
+ goto out;
1333
+ } else {
1334
+ opp->available = false;
1335
+ dmcfreq->freq_count = i;
1336
+ }
1337
+ }
1338
+ i++;
1339
+ }
1340
+
1341
+ mutex_unlock(&opp_table->lock);
1342
+ dev_pm_opp_put_opp_table(opp_table);
12751343
12761344 out:
12771345 kfree(freq_table);
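
The new rockchip_dmcfreq_adjust_opp_table() walks the OPP list and, for every rate reported by the firmware, adopts the supplies of the first device-tree OPP whose frequency is greater than or equal to it, marking the leftovers unavailable. The matching rule itself can be modelled in a few lines of plain C; the arrays below stand in for the OPP list and every number is made up.

#include <stdio.h>

#define N_DT   5
#define N_FW   3

int main(void)
{
	/* DT-provided OPPs: rate (MHz) and voltage (uV). */
	unsigned long dt_rate[N_DT] = { 328, 528, 780, 924, 1056 };
	unsigned long dt_volt[N_DT] = { 800000, 825000, 850000, 875000, 900000 };
	/* Rates actually supported by the firmware for this board/DRAM. */
	unsigned long fw_rate[N_FW] = { 528, 780, 1056 };

	for (int i = 0; i < N_FW; i++) {
		for (int j = 0; j < N_DT; j++) {
			if (fw_rate[i] <= dt_rate[j]) {
				/* Adopt the ceiling OPP's supply for the firmware rate. */
				printf("%4lu MHz -> %lu uV (from DT %lu MHz)\n",
				       fw_rate[i], dt_volt[j], dt_rate[j]);
				break;
			}
		}
	}
	return 0;
}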
....@@ -1337,6 +1405,10 @@
13371405 complt_irq_data = irq_get_irq_data(complt_irq);
13381406 complt_hwirq = irqd_to_hwirq(complt_irq_data);
13391407 ddr_psci_param->complt_hwirq = complt_hwirq;
1408
+
1409
+ dmcfreq->set_rate_params = ddr_psci_param;
1410
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1411
+ rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
13401412
13411413 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
13421414 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
....@@ -1411,6 +1483,10 @@
14111483 }
14121484 disable_irq(complt_irq);
14131485
1486
+ dmcfreq->set_rate_params = ddr_psci_param;
1487
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1488
+ rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
1489
+
14141490 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
14151491 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
14161492 if (res.a0) {
....@@ -1428,12 +1504,6 @@
14281504 struct rockchip_dmcfreq *dmcfreq)
14291505 {
14301506 struct arm_smccc_res res;
1431
- struct drm_device *drm = drm_device_get_by_name("rockchip");
1432
-
1433
- if (!drm) {
1434
- dev_err(&pdev->dev, "Get drm_device fail\n");
1435
- return -EPROBE_DEFER;
1436
- }
14371507
14381508 res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(
14391509 struct rk3128_ddr_dts_config_timing),
....@@ -1448,6 +1518,10 @@
14481518
14491519 ddr_psci_param->hz = 0;
14501520 ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
1521
+
1522
+ dmcfreq->set_rate_params = ddr_psci_param;
1523
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1524
+
14511525 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
14521526 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
14531527
....@@ -1481,6 +1555,10 @@
14811555 return -ENOMEM;
14821556
14831557 ddr_psci_param->hz = 0;
1558
+
1559
+ dmcfreq->set_rate_params = ddr_psci_param;
1560
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1561
+
14841562 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
14851563 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
14861564
....@@ -1501,13 +1579,7 @@
15011579 struct device *dev = &pdev->dev;
15021580 struct clk *pclk_phy, *pclk_upctl, *dmc_clk;
15031581 struct arm_smccc_res res;
1504
- struct drm_device *drm = drm_device_get_by_name("rockchip");
15051582 int ret;
1506
-
1507
- if (!drm) {
1508
- dev_err(dev, "Get drm_device fail\n");
1509
- return -EPROBE_DEFER;
1510
- }
15111583
15121584 dmc_clk = devm_clk_get(dev, "dmc_clk");
15131585 if (IS_ERR(dmc_clk)) {
....@@ -1576,6 +1648,10 @@
15761648
15771649 ddr_psci_param->hz = 0;
15781650 ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
1651
+
1652
+ dmcfreq->set_rate_params = ddr_psci_param;
1653
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1654
+
15791655 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
15801656 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
15811657
....@@ -1622,6 +1698,9 @@
16221698 of_get_rk3328_timings(&pdev->dev, pdev->dev.of_node,
16231699 (uint32_t *)ddr_psci_param);
16241700
1701
+ dmcfreq->set_rate_params = ddr_psci_param;
1702
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1703
+
16251704 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
16261705 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
16271706 if (res.a0) {
....@@ -1631,86 +1710,6 @@
16311710 }
16321711
16331712 dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
1634
-
1635
- return 0;
1636
-}
1637
-
1638
-static __maybe_unused int rk3368_dmc_init(struct platform_device *pdev,
1639
- struct rockchip_dmcfreq *dmcfreq)
1640
-{
1641
- struct device *dev = &pdev->dev;
1642
- struct device_node *np = pdev->dev.of_node;
1643
- struct arm_smccc_res res;
1644
- struct rk3368_dram_timing *dram_timing;
1645
- struct clk *pclk_phy, *pclk_upctl;
1646
- struct drm_device *drm = drm_device_get_by_name("rockchip");
1647
- int ret;
1648
- u32 dram_spd_bin;
1649
- u32 addr_mcu_el3;
1650
- u32 dclk_mode;
1651
- u32 lcdc_type;
1652
-
1653
- if (!drm) {
1654
- dev_err(dev, "Get drm_device fail\n");
1655
- return -EPROBE_DEFER;
1656
- }
1657
-
1658
- pclk_phy = devm_clk_get(dev, "pclk_phy");
1659
- if (IS_ERR(pclk_phy)) {
1660
- dev_err(dev, "Cannot get the clk pclk_phy\n");
1661
- return PTR_ERR(pclk_phy);
1662
- }
1663
- ret = clk_prepare_enable(pclk_phy);
1664
- if (ret < 0) {
1665
- dev_err(dev, "failed to prepare/enable pclk_phy\n");
1666
- return ret;
1667
- }
1668
- pclk_upctl = devm_clk_get(dev, "pclk_upctl");
1669
- if (IS_ERR(pclk_upctl)) {
1670
- dev_err(dev, "Cannot get the clk pclk_upctl\n");
1671
- return PTR_ERR(pclk_upctl);
1672
- }
1673
- ret = clk_prepare_enable(pclk_upctl);
1674
- if (ret < 0) {
1675
- dev_err(dev, "failed to prepare/enable pclk_upctl\n");
1676
- return ret;
1677
- }
1678
-
1679
- /*
1680
- * Get dram timing and pass it to arm trust firmware,
1681
- * the dram drvier in arm trust firmware will get these
1682
- * timing and to do dram initial.
1683
- */
1684
- dram_timing = of_get_rk3368_timings(dev, np);
1685
- if (dram_timing) {
1686
- dram_spd_bin = dram_timing->dram_spd_bin;
1687
- if (scpi_ddr_send_timing((u32 *)dram_timing,
1688
- sizeof(struct rk3368_dram_timing)))
1689
- dev_err(dev, "send ddr timing timeout\n");
1690
- } else {
1691
- dev_err(dev, "get ddr timing from dts error\n");
1692
- dram_spd_bin = DDR3_DEFAULT;
1693
- }
1694
-
1695
- res = sip_smc_mcu_el3fiq(FIQ_INIT_HANDLER,
1696
- FIQ_NUM_FOR_DCF,
1697
- FIQ_CPU_TGT_BOOT);
1698
- if ((res.a0) || (res.a1 == 0) || (res.a1 > 0x80000))
1699
- dev_err(dev, "Trust version error, pls check trust version\n");
1700
- addr_mcu_el3 = res.a1;
1701
-
1702
- if (of_property_read_u32(np, "vop-dclk-mode", &dclk_mode) == 0)
1703
- scpi_ddr_dclk_mode(dclk_mode);
1704
-
1705
- lcdc_type = rk_drm_get_lcdc_type();
1706
-
1707
- if (scpi_ddr_init(dram_spd_bin, 0, lcdc_type,
1708
- addr_mcu_el3))
1709
- dev_err(dev, "ddr init error\n");
1710
- else
1711
- dev_dbg(dev, ("%s out\n"), __func__);
1712
-
1713
- dmcfreq->set_auto_self_refresh = scpi_ddr_set_auto_self_refresh;
17141713
17151714 return 0;
17161715 }
....@@ -1757,11 +1756,17 @@
17571756 }
17581757 }
17591758
1759
+ dmcfreq->set_rate_params =
1760
+ devm_kzalloc(dev, sizeof(struct share_params), GFP_KERNEL);
1761
+ if (!dmcfreq->set_rate_params)
1762
+ return -ENOMEM;
1763
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
1764
+
17601765 arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
17611766 ROCKCHIP_SIP_CONFIG_DRAM_INIT,
17621767 0, 0, 0, 0, &res);
17631768
1764
- dmcfreq->set_msch_readlatency = rk3399_set_msch_readlatency;
1769
+ dmcfreq->info.set_msch_readlatency = rk3399_set_msch_readlatency;
17651770
17661771 return 0;
17671772 }
....@@ -1911,6 +1916,91 @@
19111916 return 0;
19121917 }
19131918
1919
+static __maybe_unused int rk3588_dmc_init(struct platform_device *pdev,
1920
+ struct rockchip_dmcfreq *dmcfreq)
1921
+{
1922
+ struct arm_smccc_res res;
1923
+ struct dev_pm_opp *opp;
1924
+ unsigned long opp_rate;
1925
+ int ret;
1926
+ int complt_irq;
1927
+
1928
+ res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
1929
+ dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
1930
+ if (res.a0) {
1931
+ dev_err(&pdev->dev, "trusted firmware unsupported, please update.\n");
1932
+ return -ENXIO;
1933
+ }
1934
+
1935
+ /*
1936
+ * first 4KB is used for interface parameters
1937
+ * after 4KB is dts parameters
1938
+ * request share memory size 4KB * 2
1939
+ */
1940
+ res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
1941
+ if (res.a0 != 0) {
1942
+ dev_err(&pdev->dev, "no ATF memory for init\n");
1943
+ return -ENOMEM;
1944
+ }
1945
+ ddr_psci_param = (struct share_params *)res.a1;
1946
+ /* Clear ddr_psci_param, size is 4KB * 2 */
1947
+ memset_io(ddr_psci_param, 0x0, 4096 * 2);
1948
+
1949
+ /* start mcu with sip_smc_dram */
1950
+ wait_ctrl.dcf_en = 2;
1951
+
1952
+ init_waitqueue_head(&wait_ctrl.wait_wq);
1953
+ wait_ctrl.wait_en = 1;
1954
+ wait_ctrl.wait_time_out_ms = 17 * 5;
1955
+
1956
+ complt_irq = platform_get_irq_byname(pdev, "complete");
1957
+ if (complt_irq < 0) {
1958
+ dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
1959
+ return complt_irq;
1960
+ }
1961
+ wait_ctrl.complt_irq = complt_irq;
1962
+
1963
+ ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq,
1964
+ 0, dev_name(&pdev->dev), &wait_ctrl);
1965
+ if (ret < 0) {
1966
+ dev_err(&pdev->dev, "cannot request complt_irq\n");
1967
+ return ret;
1968
+ }
1969
+ disable_irq(complt_irq);
1970
+
1971
+ res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
1972
+ if (res.a0) {
1973
+ dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
1974
+ return -ENOMEM;
1975
+ }
1976
+
1977
+ ret = rockchip_dmcfreq_adjust_opp_table(dmcfreq);
1978
+ if (ret < 0) {
1979
+ dev_err(&pdev->dev, "cannot get frequency info\n");
1980
+ return ret;
1981
+ }
1982
+ dmcfreq->is_set_rate_direct = true;
1983
+
1984
+ /* Config the dmcfreq->sleep_volt for deepsleep */
1985
+ opp_rate = dmcfreq->freq_info_rate[dmcfreq->freq_count - 1];
1986
+ opp = devfreq_recommended_opp(&pdev->dev, &opp_rate, 0);
1987
+ if (IS_ERR(opp)) {
1988
+ dev_err(&pdev->dev, "Failed to find opp for %lu Hz\n", opp_rate);
1989
+ return PTR_ERR(opp);
1990
+ }
1991
+ dmcfreq->sleep_volt = opp->supplies[0].u_volt;
1992
+ if (dmcfreq->regulator_count > 1)
1993
+ dmcfreq->sleep_mem_volt = opp->supplies[1].u_volt;
1994
+ dev_pm_opp_put(opp);
1995
+
1996
+ if (of_property_read_u32(pdev->dev.of_node, "wait-mode", &ddr_psci_param->wait_mode))
1997
+ ddr_psci_param->wait_mode = 0;
1998
+
1999
+ dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;
2000
+
2001
+ return 0;
2002
+}
2003
+
19142004 static __maybe_unused int rv1126_dmc_init(struct platform_device *pdev,
19152005 struct rockchip_dmcfreq *dmcfreq)
19162006 {
....@@ -1979,6 +2069,10 @@
19792069 &ddr_psci_param->update_deskew_cfg))
19802070 ddr_psci_param->update_deskew_cfg = 0;
19812071
2072
+ dmcfreq->set_rate_params = ddr_psci_param;
2073
+ rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
2074
+ rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);
2075
+
19822076 res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0,
19832077 ROCKCHIP_SIP_CONFIG_DRAM_INIT);
19842078 if (res.a0) {
....@@ -1993,40 +2087,43 @@
19932087 }
19942088
19952089 static const struct of_device_id rockchip_dmcfreq_of_match[] = {
1996
-#ifdef CONFIG_CPU_PX30
2090
+#if IS_ENABLED(CONFIG_CPU_PX30)
19972091 { .compatible = "rockchip,px30-dmc", .data = px30_dmc_init },
19982092 #endif
1999
-#ifdef CONFIG_CPU_RK1808
2093
+#if IS_ENABLED(CONFIG_CPU_RK1808)
20002094 { .compatible = "rockchip,rk1808-dmc", .data = rk1808_dmc_init },
20012095 #endif
2002
-#ifdef CONFIG_CPU_RK312X
2096
+#if IS_ENABLED(CONFIG_CPU_RK312X)
20032097 { .compatible = "rockchip,rk3128-dmc", .data = rk3128_dmc_init },
20042098 #endif
2005
-#ifdef CONFIG_CPU_RK322X
2099
+#if IS_ENABLED(CONFIG_CPU_RK322X)
20062100 { .compatible = "rockchip,rk3228-dmc", .data = rk3228_dmc_init },
20072101 #endif
2008
-#ifdef CONFIG_CPU_RK3288
2102
+#if IS_ENABLED(CONFIG_CPU_RK3288)
20092103 { .compatible = "rockchip,rk3288-dmc", .data = rk3288_dmc_init },
20102104 #endif
2011
-#ifdef CONFIG_CPU_RK3308
2105
+#if IS_ENABLED(CONFIG_CPU_RK3308)
20122106 { .compatible = "rockchip,rk3308-dmc", .data = NULL },
20132107 #endif
2014
-#ifdef CONFIG_CPU_RK3328
2108
+#if IS_ENABLED(CONFIG_CPU_RK3328)
20152109 { .compatible = "rockchip,rk3328-dmc", .data = rk3328_dmc_init },
20162110 #endif
2017
-#ifdef CONFIG_CPU_RK3368
2018
- { .compatible = "rockchip,rk3368-dmc", .data = rk3368_dmc_init },
2019
-#endif
2020
-#ifdef CONFIG_CPU_RK3399
2111
+#if IS_ENABLED(CONFIG_CPU_RK3399)
20212112 { .compatible = "rockchip,rk3399-dmc", .data = rk3399_dmc_init },
20222113 #endif
2023
-#ifdef CONFIG_CPU_RK3528
2114
+#if IS_ENABLED(CONFIG_CPU_RK3528)
20242115 { .compatible = "rockchip,rk3528-dmc", .data = rk3528_dmc_init },
20252116 #endif
2026
-#ifdef CONFIG_CPU_RK3568
2117
+#if IS_ENABLED(CONFIG_CPU_RK3562)
2118
+ { .compatible = "rockchip,rk3562-dmc", .data = rk3568_dmc_init },
2119
+#endif
2120
+#if IS_ENABLED(CONFIG_CPU_RK3568)
20272121 { .compatible = "rockchip,rk3568-dmc", .data = rk3568_dmc_init },
20282122 #endif
2029
-#ifdef CONFIG_CPU_RV1126
2123
+#if IS_ENABLED(CONFIG_CPU_RK3588)
2124
+ { .compatible = "rockchip,rk3588-dmc", .data = rk3588_dmc_init },
2125
+#endif
2126
+#if IS_ENABLED(CONFIG_CPU_RV1126)
20302127 { .compatible = "rockchip,rv1126-dmc", .data = rv1126_dmc_init },
20312128 #endif
20322129 { },
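
The match-table hunk above replaces the plain #ifdef CONFIG_CPU_* guards with #if IS_ENABLED(...), which evaluates true for both built-in (=y) and modular (=m) configurations, so compatible entries are no longer silently dropped when a SoC option is built as a module. A minimal illustration with a hypothetical CONFIG_FOO symbol:

#include <linux/kconfig.h>

/*
 * With CONFIG_FOO=m, Kconfig defines CONFIG_FOO_MODULE instead of CONFIG_FOO:
 *   #ifdef CONFIG_FOO          -> false, the guarded code disappears
 *   #if IS_ENABLED(CONFIG_FOO) -> true for both =y and =m
 */
#if IS_ENABLED(CONFIG_FOO)
static const char foo_state[] = "built-in or module";
#else
static const char foo_state[] = "disabled";
#endif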
....@@ -2070,7 +2167,7 @@
20702167
20712168 tbl[i].min = 0;
20722169 tbl[i].max = 0;
2073
- tbl[i].freq = CPUFREQ_TABLE_END;
2170
+ tbl[i].freq = DMCFREQ_TABLE_END;
20742171
20752172 *table = tbl;
20762173
....@@ -2109,7 +2206,7 @@
21092206 }
21102207
21112208 tbl[i].pn = 0;
2112
- tbl[i].rl = CPUFREQ_TABLE_END;
2209
+ tbl[i].rl = DMCFREQ_TABLE_END;
21132210
21142211 *table = tbl;
21152212
....@@ -2160,11 +2257,17 @@
21602257 case SYS_STATUS_VIDEO_4K_10B:
21612258 dmcfreq->video_4k_10b_rate = freq * 1000;
21622259 break;
2260
+ case SYS_STATUS_VIDEO_SVEP:
2261
+ dmcfreq->video_svep_rate = freq * 1000;
2262
+ break;
21632263 case SYS_STATUS_PERFORMANCE:
21642264 dmcfreq->performance_rate = freq * 1000;
21652265 break;
21662266 case SYS_STATUS_HDMI:
21672267 dmcfreq->hdmi_rate = freq * 1000;
2268
+ break;
2269
+ case SYS_STATUS_HDMIRX:
2270
+ dmcfreq->hdmirx_rate = freq * 1000;
21682271 break;
21692272 case SYS_STATUS_IDLE:
21702273 dmcfreq->idle_rate = freq * 1000;
....@@ -2271,6 +2374,8 @@
22712374 return -EINVAL;
22722375 }
22732376
2377
+ dmcfreq->auto_min_rate = dmcfreq->rate_low;
2378
+
22742379 for (i = 0; i < count / 2; i++) {
22752380 of_property_read_u32_index(np, porp_name, 2 * i,
22762381 &status);
....@@ -2304,6 +2409,11 @@
23042409 dev_info(dmcfreq->dev, "video_4k_60p_rate = %ld\n",
23052410 dmcfreq->video_4k_60p_rate);
23062411 break;
2412
+ case SYS_STATUS_VIDEO_SVEP:
2413
+ dmcfreq->video_svep_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2414
+ dev_info(dmcfreq->dev, "video_svep_rate = %ld\n",
2415
+ dmcfreq->video_svep_rate);
2416
+ break;
23072417 case SYS_STATUS_PERFORMANCE:
23082418 dmcfreq->performance_rate = rockchip_freq_level_2_rate(dmcfreq, level);
23092419 dev_info(dmcfreq->dev, "performance_rate = %ld\n",
....@@ -2312,6 +2422,10 @@
23122422 case SYS_STATUS_HDMI:
23132423 dmcfreq->hdmi_rate = rockchip_freq_level_2_rate(dmcfreq, level);
23142424 dev_info(dmcfreq->dev, "hdmi_rate = %ld\n", dmcfreq->hdmi_rate);
2425
+ break;
2426
+ case SYS_STATUS_HDMIRX:
2427
+ dmcfreq->hdmirx_rate = rockchip_freq_level_2_rate(dmcfreq, level);
2428
+ dev_info(dmcfreq->dev, "hdmirx_rate = %ld\n", dmcfreq->hdmirx_rate);
23152429 break;
23162430 case SYS_STATUS_IDLE:
23172431 dmcfreq->idle_rate = rockchip_freq_level_2_rate(dmcfreq, level);
....@@ -2351,19 +2465,11 @@
23512465
23522466 static void rockchip_dmcfreq_update_target(struct rockchip_dmcfreq *dmcfreq)
23532467 {
2354
- struct devfreq *df = dmcfreq->devfreq;
2468
+ struct devfreq *devfreq = dmcfreq->info.devfreq;
23552469
2356
- mutex_lock(&df->lock);
2357
-
2358
- if (dmcfreq->last_refresh != dmcfreq->refresh) {
2359
- if (dmcfreq->set_auto_self_refresh)
2360
- dmcfreq->set_auto_self_refresh(dmcfreq->refresh);
2361
- dmcfreq->last_refresh = dmcfreq->refresh;
2362
- }
2363
-
2364
- update_devfreq(df);
2365
-
2366
- mutex_unlock(&df->lock);
2470
+ mutex_lock(&devfreq->lock);
2471
+ update_devfreq(devfreq);
2472
+ mutex_unlock(&devfreq->lock);
23672473 }
23682474
23692475 static int rockchip_dmcfreq_system_status_notifier(struct notifier_block *nb,
....@@ -2384,8 +2490,8 @@
23842490 }
23852491
23862492 if (dmcfreq->reboot_rate && (status & SYS_STATUS_REBOOT)) {
2387
- if (dmcfreq->auto_freq_en)
2388
- devfreq_monitor_stop(dmcfreq->devfreq);
2493
+ if (dmcfreq->info.auto_freq_en)
2494
+ devfreq_monitor_stop(dmcfreq->info.devfreq);
23892495 target_rate = dmcfreq->reboot_rate;
23902496 goto next;
23912497 }
....@@ -2411,6 +2517,11 @@
24112517 target_rate = dmcfreq->hdmi_rate;
24122518 }
24132519
2520
+ if (dmcfreq->hdmirx_rate && (status & SYS_STATUS_HDMIRX)) {
2521
+ if (dmcfreq->hdmirx_rate > target_rate)
2522
+ target_rate = dmcfreq->hdmirx_rate;
2523
+ }
2524
+
24142525 if (dmcfreq->video_4k_rate && (status & SYS_STATUS_VIDEO_4K)) {
24152526 if (dmcfreq->video_4k_rate > target_rate)
24162527 target_rate = dmcfreq->video_4k_rate;
....@@ -2431,15 +2542,41 @@
24312542 target_rate = dmcfreq->video_1080p_rate;
24322543 }
24332544
2545
+ if (dmcfreq->video_svep_rate && (status & SYS_STATUS_VIDEO_SVEP)) {
2546
+ if (dmcfreq->video_svep_rate > target_rate)
2547
+ target_rate = dmcfreq->video_svep_rate;
2548
+ }
2549
+
24342550 next:
24352551
2436
- dev_dbg(&dmcfreq->devfreq->dev, "status=0x%x\n", (unsigned int)status);
2437
- dmcfreq->refresh = refresh;
2552
+ dev_dbg(dmcfreq->dev, "status=0x%x\n", (unsigned int)status);
24382553 dmcfreq->is_fixed = is_fixed;
24392554 dmcfreq->status_rate = target_rate;
2555
+ if (dmcfreq->refresh != refresh) {
2556
+ if (dmcfreq->set_auto_self_refresh)
2557
+ dmcfreq->set_auto_self_refresh(refresh);
2558
+ dmcfreq->refresh = refresh;
2559
+ }
24402560 rockchip_dmcfreq_update_target(dmcfreq);
24412561
24422562 return NOTIFY_OK;
2563
+}
2564
+
2565
+static int rockchip_dmcfreq_panic_notifier(struct notifier_block *nb,
2566
+ unsigned long v, void *p)
2567
+{
2568
+ struct rockchip_dmcfreq *dmcfreq =
2569
+ container_of(nb, struct rockchip_dmcfreq, panic_nb);
2570
+ struct device *dev = dmcfreq->dev;
2571
+
2572
+ if (dmcfreq->regulator_count == 1)
2573
+ dev_info(dev, "cur_freq: %lu Hz, volt: %lu uV\n",
2574
+ dmcfreq->rate, dmcfreq->volt);
2575
+ else
2576
+ dev_info(dev, "cur_freq: %lu Hz, volt_vdd: %lu uV, volt_mem: %lu uV\n",
2577
+ dmcfreq->rate, dmcfreq->volt, dmcfreq->mem_volt);
2578
+
2579
+ return 0;
24432580 }
24442581
24452582 static ssize_t rockchip_dmcfreq_status_show(struct device *dev,
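
The rockchip_dmcfreq_panic_notifier() added above prints the current DMC rate and rail voltages into the panic output, which helps when a crash follows a DDR frequency transition. Hooking into panic_notifier_list goes through the generic atomic notifier API; a bare-bones registration is sketched below with hypothetical names (note that panic_notifier_list is declared in linux/kernel.h on this kernel generation and in linux/panic_notifier.h on newer ones).

#include <linux/kernel.h>	/* panic_notifier_list on ~5.10 kernels */
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_panic_cb(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	pr_emerg("example: dumping device state on panic\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_cb,
};

static void example_register_panic_nb(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
}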
....@@ -2525,133 +2662,6 @@
25252662
25262663 static DEVICE_ATTR_RW(downdifferential);
25272664
2528
-static void rockchip_dmcfreq_set_msch_rl(struct rockchip_dmcfreq *dmcfreq,
2529
- unsigned int readlatency)
2530
-
2531
-{
2532
- down_read(&rockchip_dmcfreq_sem);
2533
- dev_dbg(dmcfreq->dev, "rl 0x%x -> 0x%x\n",
2534
- dmcfreq->read_latency, readlatency);
2535
- if (!dmcfreq->set_msch_readlatency(readlatency))
2536
- dmcfreq->read_latency = readlatency;
2537
- else
2538
- dev_err(dmcfreq->dev, "failed to set msch rl\n");
2539
- up_read(&rockchip_dmcfreq_sem);
2540
-}
2541
-
2542
-static void rockchip_dmcfreq_set_msch_rl_work(struct work_struct *work)
2543
-{
2544
- struct rockchip_dmcfreq *dmcfreq = msch_rl_to_dmcfreq(work);
2545
-
2546
- rockchip_dmcfreq_set_msch_rl(dmcfreq, 0);
2547
- dmcfreq->is_msch_rl_work_started = false;
2548
-}
2549
-
2550
-static void rockchip_dmcfreq_msch_rl_init(struct rockchip_dmcfreq *dmcfreq)
2551
-{
2552
- if (!dmcfreq->set_msch_readlatency)
2553
- return;
2554
- INIT_DELAYED_WORK(&dmcfreq->msch_rl_work,
2555
- rockchip_dmcfreq_set_msch_rl_work);
2556
-}
2557
-
2558
-void rockchip_dmcfreq_vop_bandwidth_update(struct devfreq *devfreq,
2559
- unsigned int line_bw_mbyte,
2560
- unsigned int frame_bw_mbyte,
2561
- unsigned int plane_num)
2562
-{
2563
- struct device *dev;
2564
- struct rockchip_dmcfreq *dmcfreq;
2565
- unsigned long vop_last_rate, target = 0;
2566
- unsigned int readlatency = 0;
2567
- int i;
2568
-
2569
- if (!devfreq)
2570
- return;
2571
-
2572
- dev = devfreq->dev.parent;
2573
- dmcfreq = dev_get_drvdata(dev);
2574
- if (!dmcfreq)
2575
- return;
2576
-
2577
- dev_dbg(dmcfreq->dev, "line bw=%u, frame bw=%u, pn=%u\n",
2578
- line_bw_mbyte, frame_bw_mbyte, plane_num);
2579
-
2580
- if (!dmcfreq->vop_pn_rl_tbl || !dmcfreq->set_msch_readlatency)
2581
- goto vop_bw_tbl;
2582
- for (i = 0; dmcfreq->vop_pn_rl_tbl[i].rl != CPUFREQ_TABLE_END; i++) {
2583
- if (plane_num >= dmcfreq->vop_pn_rl_tbl[i].pn)
2584
- readlatency = dmcfreq->vop_pn_rl_tbl[i].rl;
2585
- }
2586
- if (readlatency) {
2587
- cancel_delayed_work_sync(&dmcfreq->msch_rl_work);
2588
- dmcfreq->is_msch_rl_work_started = false;
2589
- if (dmcfreq->read_latency != readlatency)
2590
- rockchip_dmcfreq_set_msch_rl(dmcfreq, readlatency);
2591
- } else if (dmcfreq->read_latency &&
2592
- !dmcfreq->is_msch_rl_work_started) {
2593
- dmcfreq->is_msch_rl_work_started = true;
2594
- schedule_delayed_work(&dmcfreq->msch_rl_work,
2595
- msecs_to_jiffies(MSCH_RL_DELAY_TIME));
2596
- }
2597
-
2598
-vop_bw_tbl:
2599
- if (!dmcfreq->auto_freq_en || !dmcfreq->vop_bw_tbl)
2600
- goto vop_frame_bw_tbl;
2601
- for (i = 0; dmcfreq->vop_bw_tbl[i].freq != CPUFREQ_TABLE_END; i++) {
2602
- if (line_bw_mbyte >= dmcfreq->vop_bw_tbl[i].min)
2603
- target = dmcfreq->vop_bw_tbl[i].freq;
2604
- }
2605
-
2606
-vop_frame_bw_tbl:
2607
- if (!dmcfreq->auto_freq_en || !dmcfreq->vop_frame_bw_tbl)
2608
- goto next;
2609
- for (i = 0; dmcfreq->vop_frame_bw_tbl[i].freq != CPUFREQ_TABLE_END;
2610
- i++) {
2611
- if (frame_bw_mbyte >= dmcfreq->vop_frame_bw_tbl[i].min) {
2612
- if (target < dmcfreq->vop_frame_bw_tbl[i].freq)
2613
- target = dmcfreq->vop_frame_bw_tbl[i].freq;
2614
- }
2615
- }
2616
-
2617
-next:
2618
- vop_last_rate = dmcfreq->vop_req_rate;
2619
- dmcfreq->vop_req_rate = target;
2620
- if (target > vop_last_rate)
2621
- rockchip_dmcfreq_update_target(dmcfreq);
2622
-}
2623
-EXPORT_SYMBOL(rockchip_dmcfreq_vop_bandwidth_update);
2624
-
2625
-int rockchip_dmcfreq_vop_bandwidth_request(struct devfreq *devfreq,
2626
- unsigned int bw_mbyte)
2627
-{
2628
- struct device *dev;
2629
- struct rockchip_dmcfreq *dmcfreq;
2630
- unsigned long target = 0;
2631
- int i;
2632
-
2633
- if (!devfreq)
2634
- return 0;
2635
-
2636
- dev = devfreq->dev.parent;
2637
- dmcfreq = dev_get_drvdata(dev);
2638
-
2639
- if (!dmcfreq || !dmcfreq->auto_freq_en || !dmcfreq->vop_bw_tbl)
2640
- return 0;
2641
-
2642
- for (i = 0; dmcfreq->vop_bw_tbl[i].freq != CPUFREQ_TABLE_END; i++) {
2643
- if (bw_mbyte <= dmcfreq->vop_bw_tbl[i].max) {
2644
- target = dmcfreq->vop_bw_tbl[i].freq;
2645
- break;
2646
- }
2647
- }
2648
- if (target)
2649
- return 0;
2650
- else
2651
- return -EINVAL;
2652
-}
2653
-EXPORT_SYMBOL(rockchip_dmcfreq_vop_bandwidth_request);
2654
-
26552665 static unsigned long get_nocp_req_rate(struct rockchip_dmcfreq *dmcfreq)
26562666 {
26572667 unsigned long target = 0, cpu_bw = 0;
....@@ -2684,14 +2694,14 @@
26842694 unsigned long target_freq = 0, nocp_req_rate = 0;
26852695 u64 now;
26862696
2687
- if (dmcfreq->auto_freq_en && !dmcfreq->is_fixed) {
2697
+ if (dmcfreq->info.auto_freq_en && !dmcfreq->is_fixed) {
26882698 if (dmcfreq->status_rate)
26892699 target_freq = dmcfreq->status_rate;
26902700 else if (dmcfreq->auto_min_rate)
26912701 target_freq = dmcfreq->auto_min_rate;
26922702 nocp_req_rate = get_nocp_req_rate(dmcfreq);
26932703 target_freq = max3(target_freq, nocp_req_rate,
2694
- dmcfreq->vop_req_rate);
2704
+ dmcfreq->info.vop_req_rate);
26952705 now = ktime_to_us(ktime_get());
26962706 if (now < dmcfreq->touchboostpulse_endtime)
26972707 target_freq = max(target_freq, dmcfreq->boost_rate);
....@@ -2702,7 +2712,7 @@
27022712 target_freq = dmcfreq->normal_rate;
27032713 if (target_freq)
27042714 *freq = target_freq;
2705
- if (dmcfreq->auto_freq_en && !devfreq_update_stats(df))
2715
+ if (dmcfreq->info.auto_freq_en && !devfreq_update_stats(df))
27062716 return 0;
27072717 goto reset_last_status;
27082718 }
....@@ -2773,7 +2783,7 @@
27732783 {
27742784 struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(devfreq->dev.parent);
27752785
2776
- if (!dmcfreq->auto_freq_en)
2786
+ if (!dmcfreq->info.auto_freq_en)
27772787 return 0;
27782788
27792789 switch (event) {
....@@ -2785,8 +2795,8 @@
27852795 devfreq_monitor_stop(devfreq);
27862796 break;
27872797
2788
- case DEVFREQ_GOV_INTERVAL:
2789
- devfreq_interval_update(devfreq, (unsigned int *)data);
2798
+ case DEVFREQ_GOV_UPDATE_INTERVAL:
2799
+ devfreq_update_interval(devfreq, (unsigned int *)data);
27902800 break;
27912801
27922802 case DEVFREQ_GOV_SUSPEND:
....@@ -2814,7 +2824,7 @@
28142824 {
28152825 int i, ret;
28162826
2817
- if (!dmcfreq->auto_freq_en)
2827
+ if (!dmcfreq->info.auto_freq_en)
28182828 return 0;
28192829
28202830 for (i = 0; i < dmcfreq->edev_count; i++) {
....@@ -2833,7 +2843,7 @@
28332843 {
28342844 int i, ret;
28352845
2836
- if (!dmcfreq->auto_freq_en)
2846
+ if (!dmcfreq->info.auto_freq_en)
28372847 return 0;
28382848
28392849 for (i = 0; i < dmcfreq->edev_count; i++) {
....@@ -2869,7 +2879,7 @@
28692879 struct device_node *events_np, *np = dev->of_node;
28702880 int i, j, count, available_count = 0;
28712881
2872
- count = devfreq_event_get_edev_count(dev);
2882
+ count = devfreq_event_get_edev_count(dev, "devfreq-events");
28732883 if (count < 0) {
28742884 dev_dbg(dev, "failed to get count of devfreq-event dev\n");
28752885 return 0;
....@@ -2904,7 +2914,7 @@
29042914 return -EINVAL;
29052915 }
29062916 dmcfreq->edev[j] =
2907
- devfreq_event_get_edev_by_phandle(dev, i);
2917
+ devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i);
29082918 if (IS_ERR(dmcfreq->edev[j]))
29092919 return -EPROBE_DEFER;
29102920 j++;
....@@ -2912,7 +2922,7 @@
29122922 of_node_put(events_np);
29132923 }
29142924 }
2915
- dmcfreq->auto_freq_en = true;
2925
+ dmcfreq->info.auto_freq_en = true;
29162926 dmcfreq->dfi_id = rockchip_get_edev_id(dmcfreq, "dfi");
29172927 dmcfreq->nocp_cpu_id = rockchip_get_edev_id(dmcfreq, "nocp-cpu");
29182928 dmcfreq->nocp_bw =
....@@ -2927,21 +2937,61 @@
29272937 static int rockchip_dmcfreq_power_control(struct rockchip_dmcfreq *dmcfreq)
29282938 {
29292939 struct device *dev = dmcfreq->dev;
2940
+ struct device_node *np = dev->of_node;
2941
+ struct opp_table *opp_table = NULL, *reg_opp_table = NULL;
2942
+ const char * const reg_names[] = {"center", "mem"};
2943
+ int ret = 0;
2944
+
2945
+ if (of_find_property(np, "mem-supply", NULL))
2946
+ dmcfreq->regulator_count = 2;
2947
+ else
2948
+ dmcfreq->regulator_count = 1;
2949
+ reg_opp_table = dev_pm_opp_set_regulators(dev, reg_names,
2950
+ dmcfreq->regulator_count);
2951
+ if (IS_ERR(reg_opp_table)) {
2952
+ dev_err(dev, "failed to set regulators\n");
2953
+ return PTR_ERR(reg_opp_table);
2954
+ }
2955
+ opp_table = dev_pm_opp_register_set_opp_helper(dev, rockchip_dmcfreq_opp_helper);
2956
+ if (IS_ERR(opp_table)) {
2957
+ dev_err(dev, "failed to set opp helper\n");
2958
+ ret = PTR_ERR(opp_table);
2959
+ goto reg_opp_table;
2960
+ }
29302961
29312962 dmcfreq->vdd_center = devm_regulator_get_optional(dev, "center");
29322963 if (IS_ERR(dmcfreq->vdd_center)) {
29332964 dev_err(dev, "Cannot get the regulator \"center\"\n");
2934
- return PTR_ERR(dmcfreq->vdd_center);
2965
+ ret = PTR_ERR(dmcfreq->vdd_center);
2966
+ goto opp_table;
2967
+ }
2968
+ if (dmcfreq->regulator_count > 1) {
2969
+ dmcfreq->mem_reg = devm_regulator_get_optional(dev, "mem");
2970
+ if (IS_ERR(dmcfreq->mem_reg)) {
2971
+ dev_err(dev, "Cannot get the regulator \"mem\"\n");
2972
+ ret = PTR_ERR(dmcfreq->mem_reg);
2973
+ goto opp_table;
2974
+ }
29352975 }
29362976
29372977 dmcfreq->dmc_clk = devm_clk_get(dev, "dmc_clk");
29382978 if (IS_ERR(dmcfreq->dmc_clk)) {
29392979 dev_err(dev, "Cannot get the clk dmc_clk. If using SCMI, trusted firmware need update to V1.01 and above.\n");
2940
- return PTR_ERR(dmcfreq->dmc_clk);
2980
+ ret = PTR_ERR(dmcfreq->dmc_clk);
2981
+ goto opp_table;
29412982 }
29422983 dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);
29432984
29442985 return 0;
2986
+
2987
+opp_table:
2988
+ if (opp_table)
2989
+ dev_pm_opp_unregister_set_opp_helper(opp_table);
2990
+reg_opp_table:
2991
+ if (reg_opp_table)
2992
+ dev_pm_opp_put_regulators(reg_opp_table);
2993
+
2994
+ return ret;
29452995 }
29462996
29472997 static int rockchip_dmcfreq_dmc_init(struct platform_device *pdev,
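
rockchip_dmcfreq_power_control() above now hands the "center" (and optional "mem") supplies to the OPP core with dev_pm_opp_set_regulators() and installs rockchip_dmcfreq_opp_helper() through dev_pm_opp_register_set_opp_helper(), so every dev_pm_opp_set_rate() call runs the custom sequencing shown earlier in this patch. These are the v5.10-era signatures (later kernels replaced the set_opp helper with config-based APIs); a trimmed sketch of the registration, with simplified error handling and a hypothetical wrapper name:

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_register_opp_helpers(struct device *dev, bool has_mem_supply)
{
	static const char * const reg_names[] = { "center", "mem" };
	struct opp_table *reg_table, *helper_table;

	reg_table = dev_pm_opp_set_regulators(dev, reg_names,
					      has_mem_supply ? 2 : 1);
	if (IS_ERR(reg_table))
		return PTR_ERR(reg_table);

	/* rockchip_dmcfreq_opp_helper() then runs on every dev_pm_opp_set_rate(). */
	helper_table = dev_pm_opp_register_set_opp_helper(dev,
							  rockchip_dmcfreq_opp_helper);
	if (IS_ERR(helper_table)) {
		dev_pm_opp_put_regulators(reg_table);
		return PTR_ERR(helper_table);
	}

	return 0;
}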
....@@ -2981,24 +3031,26 @@
29813031 &dmcfreq->ondemand_data.upthreshold);
29823032 of_property_read_u32(np, "downdifferential",
29833033 &dmcfreq->ondemand_data.downdifferential);
2984
- if (dmcfreq->auto_freq_en)
3034
+ if (dmcfreq->info.auto_freq_en)
29853035 of_property_read_u32(np, "auto-freq-en",
2986
- &dmcfreq->auto_freq_en);
2987
- of_property_read_u32(np, "auto-min-freq",
2988
- (u32 *)&dmcfreq->auto_min_rate);
2989
- dmcfreq->auto_min_rate *= 1000;
3036
+ &dmcfreq->info.auto_freq_en);
3037
+ if (!dmcfreq->auto_min_rate) {
3038
+ of_property_read_u32(np, "auto-min-freq",
3039
+ (u32 *)&dmcfreq->auto_min_rate);
3040
+ dmcfreq->auto_min_rate *= 1000;
3041
+ }
29903042
29913043 if (rockchip_get_freq_map_talbe(np, "cpu-bw-dmc-freq",
29923044 &dmcfreq->cpu_bw_tbl))
29933045 dev_dbg(dev, "failed to get cpu bandwidth to dmc rate\n");
29943046 if (rockchip_get_freq_map_talbe(np, "vop-frame-bw-dmc-freq",
2995
- &dmcfreq->vop_frame_bw_tbl))
3047
+ &dmcfreq->info.vop_frame_bw_tbl))
29963048 dev_dbg(dev, "failed to get vop frame bandwidth to dmc rate\n");
29973049 if (rockchip_get_freq_map_talbe(np, "vop-bw-dmc-freq",
2998
- &dmcfreq->vop_bw_tbl))
3050
+ &dmcfreq->info.vop_bw_tbl))
29993051 dev_err(dev, "failed to get vop bandwidth to dmc rate\n");
30003052 if (rockchip_get_rl_map_talbe(np, "vop-pn-msch-readlatency",
3001
- &dmcfreq->vop_pn_rl_tbl))
3053
+ &dmcfreq->info.vop_pn_rl_tbl))
30023054 dev_err(dev, "failed to get vop pn to msch rl\n");
30033055
30043056 of_property_read_u32(np, "touchboost_duration",
@@ -3009,35 +3061,12 @@
 		dmcfreq->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
 }
 
-static int rockchip_dmcfreq_set_volt_only(struct rockchip_dmcfreq *dmcfreq)
-{
-	struct device *dev = dmcfreq->dev;
-	struct dev_pm_opp *opp;
-	unsigned long opp_volt, opp_rate = dmcfreq->rate;
-	int ret;
-
-	opp = devfreq_recommended_opp(dev, &opp_rate, 0);
-	if (IS_ERR(opp)) {
-		dev_err(dev, "Failed to find opp for %lu Hz\n", opp_rate);
-		return PTR_ERR(opp);
-	}
-	opp_volt = dev_pm_opp_get_voltage(opp);
-	dev_pm_opp_put(opp);
-
-	ret = regulator_set_voltage(dmcfreq->vdd_center, opp_volt, INT_MAX);
-	if (ret) {
-		dev_err(dev, "Cannot set voltage %lu uV\n", opp_volt);
-		return ret;
-	}
-
-	return 0;
-}
-
 static int rockchip_dmcfreq_add_devfreq(struct rockchip_dmcfreq *dmcfreq)
 {
 	struct devfreq_dev_profile *devp = &rockchip_devfreq_dmc_profile;
 	struct device *dev = dmcfreq->dev;
 	struct dev_pm_opp *opp;
+	struct devfreq *devfreq;
 	unsigned long opp_rate = dmcfreq->rate;
 
 	opp = devfreq_recommended_opp(dev, &opp_rate, 0);
@@ -3048,52 +3077,46 @@
 	dev_pm_opp_put(opp);
 
 	devp->initial_freq = dmcfreq->rate;
-	dmcfreq->devfreq = devm_devfreq_add_device(dev, devp,
-						   "dmc_ondemand",
-						   &dmcfreq->ondemand_data);
-	if (IS_ERR(dmcfreq->devfreq)) {
+	devfreq = devm_devfreq_add_device(dev, devp, "dmc_ondemand",
+					  &dmcfreq->ondemand_data);
+	if (IS_ERR(devfreq)) {
 		dev_err(dev, "failed to add devfreq\n");
-		return PTR_ERR(dmcfreq->devfreq);
+		return PTR_ERR(devfreq);
 	}
 
-	devm_devfreq_register_opp_notifier(dev, dmcfreq->devfreq);
+	devm_devfreq_register_opp_notifier(dev, devfreq);
 
-	dmcfreq->devfreq->last_status.current_frequency = opp_rate;
+	devfreq->last_status.current_frequency = opp_rate;
 
-	reset_last_status(dmcfreq->devfreq);
+	reset_last_status(devfreq);
+
+	dmcfreq->info.devfreq = devfreq;
 
 	return 0;
 }
-
-static int rockchip_dmcfreq_low_temp_adjust_volt(struct monitor_dev_info *mdev_info)
-{
-	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(mdev_info->dev);
-
-	return rockchip_dmcfreq_set_volt_only(dmcfreq);
-}
-
-static struct monitor_dev_profile dmc_mdevp = {
-	.type = MONITOR_TPYE_DEV,
-	.low_temp_adjust_volt = rockchip_dmcfreq_low_temp_adjust_volt,
-	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
-	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
-};
 
 static void rockchip_dmcfreq_register_notifier(struct rockchip_dmcfreq *dmcfreq)
 {
 	int ret;
 
-	if (dmcfreq->system_status_en || dmcfreq->auto_freq_en) {
+	if (dmcfreq->system_status_en || dmcfreq->info.auto_freq_en) {
 		if (vop_register_dmc())
 			dev_err(dmcfreq->dev, "fail to register notify to vop.\n");
 
-		dmcfreq->status_nb.notifier_call = rockchip_dmcfreq_system_status_notifier;
+		dmcfreq->status_nb.notifier_call =
+			rockchip_dmcfreq_system_status_notifier;
 		ret = rockchip_register_system_status_notifier(&dmcfreq->status_nb);
 		if (ret)
 			dev_err(dmcfreq->dev, "failed to register system_status nb\n");
 	}
 
-	dmc_mdevp.data = dmcfreq->devfreq;
+	dmcfreq->panic_nb.notifier_call = rockchip_dmcfreq_panic_notifier;
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+					     &dmcfreq->panic_nb);
+	if (ret)
+		dev_err(dmcfreq->dev, "failed to register panic nb\n");
+
+	dmc_mdevp.data = dmcfreq->info.devfreq;
 	dmcfreq->mdev_info = rockchip_system_monitor_register(dmcfreq->dev,
 							      &dmc_mdevp);
 	if (IS_ERR(dmcfreq->mdev_info)) {
@@ -3104,18 +3127,19 @@
 
 static void rockchip_dmcfreq_add_interface(struct rockchip_dmcfreq *dmcfreq)
 {
-	if (sysfs_create_file(&dmcfreq->devfreq->dev.kobj,
-			      &dev_attr_upthreshold.attr))
+	struct devfreq *devfreq = dmcfreq->info.devfreq;
+
+	if (sysfs_create_file(&devfreq->dev.kobj, &dev_attr_upthreshold.attr))
 		dev_err(dmcfreq->dev,
 			"failed to register upthreshold sysfs file\n");
-	if (sysfs_create_file(&dmcfreq->devfreq->dev.kobj,
+	if (sysfs_create_file(&devfreq->dev.kobj,
 			      &dev_attr_downdifferential.attr))
 		dev_err(dmcfreq->dev,
 			"failed to register downdifferential sysfs file\n");
 
-	if (!rockchip_add_system_status_interface(&dmcfreq->devfreq->dev))
+	if (!rockchip_add_system_status_interface(&devfreq->dev))
 		return;
-	if (sysfs_create_file(&dmcfreq->devfreq->dev.kobj,
+	if (sysfs_create_file(&devfreq->dev.kobj,
 			      &dev_attr_system_status.attr))
 		dev_err(dmcfreq->dev,
 			"failed to register system_status sysfs file\n");
@@ -3145,7 +3169,7 @@
 		return;
 	dmcfreq->touchboostpulse_endtime = endtime;
 
-	schedule_work(&dmcfreq->boost_work);
+	queue_work(system_freezable_wq, &dmcfreq->boost_work);
 }
 
 static int rockchip_dmcfreq_input_connect(struct input_handler *handler,
@@ -3334,7 +3358,7 @@
 		return;
 	dmcfreq->devfreq_cooling =
 		of_devfreq_cooling_register_power(dmcfreq->dev->of_node,
-						  dmcfreq->devfreq,
+						  dmcfreq->info.devfreq,
 						  &ddr_cooling_power_data);
 	if (IS_ERR(dmcfreq->devfreq_cooling)) {
 		ret = PTR_ERR(dmcfreq->devfreq_cooling);
@@ -3355,6 +3379,7 @@
 		return -ENOMEM;
 
 	data->dev = dev;
+	data->info.dev = dev;
 	mutex_init(&data->lock);
 	INIT_LIST_HEAD(&data->video_info_list);
 
@@ -3375,15 +3400,16 @@
 		return ret;
 
 	rockchip_dmcfreq_parse_dt(data);
+
 	platform_set_drvdata(pdev, data);
-	if (!data->system_status_en && !data->auto_freq_en) {
+
+	if (!data->system_status_en && !data->info.auto_freq_en) {
 		dev_info(dev, "don't add devfreq feature\n");
 		rockchip_dmcfreq_register_notifier(data);
-		return rockchip_dmcfreq_set_volt_only(data);
+		return 0;
 	}
 
-	pm_qos_add_request(&pm_qos, PM_QOS_CPU_DMA_LATENCY,
-			   PM_QOS_DEFAULT_VALUE);
+	cpu_latency_qos_add_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
 
 	ret = devfreq_add_governor(&devfreq_dmc_ondemand);
 	if (ret)
@@ -3400,7 +3426,7 @@
 	rockchip_dmcfreq_register_notifier(data);
 	rockchip_dmcfreq_add_interface(data);
 	rockchip_dmcfreq_boost_init(data);
-	rockchip_dmcfreq_msch_rl_init(data);
+	rockchip_dmcfreq_vop_bandwidth_init(&data->info);
 	rockchip_dmcfreq_register_cooling_device(data);
 
 	rockchip_set_system_status(SYS_STATUS_NORMAL);
@@ -3420,10 +3446,31 @@
 	if (ret)
 		return ret;
 
-	if (dmcfreq->devfreq) {
-		ret = devfreq_suspend_device(dmcfreq->devfreq);
+	if (dmcfreq->info.devfreq) {
+		ret = devfreq_suspend_device(dmcfreq->info.devfreq);
 		if (ret < 0) {
 			dev_err(dev, "failed to suspend the devfreq devices\n");
+			return ret;
+		}
+	}
+
+	/* set voltage to sleep_volt if need */
+	if (dmcfreq->sleep_volt && dmcfreq->sleep_volt != dmcfreq->volt) {
+		ret = regulator_set_voltage(dmcfreq->vdd_center,
+					    dmcfreq->sleep_volt, INT_MAX);
+		if (ret) {
+			dev_err(dev, "Cannot set vdd voltage %lu uV\n",
+				dmcfreq->sleep_volt);
+			return ret;
+		}
+	}
+	if (dmcfreq->sleep_mem_volt &&
+	    dmcfreq->sleep_mem_volt != dmcfreq->mem_volt) {
+		ret = regulator_set_voltage(dmcfreq->mem_reg,
+					    dmcfreq->sleep_mem_volt, INT_MAX);
+		if (ret) {
+			dev_err(dev, "Cannot set mem voltage %lu uV\n",
+				dmcfreq->sleep_mem_volt);
 			return ret;
 		}
 	}
@@ -3439,12 +3486,33 @@
 	if (!dmcfreq)
 		return 0;
 
+	/* restore voltage if it is sleep_volt */
+	if (dmcfreq->sleep_volt && dmcfreq->sleep_volt != dmcfreq->volt) {
+		ret = regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt,
+					    INT_MAX);
+		if (ret) {
+			dev_err(dev, "Cannot set vdd voltage %lu uV\n",
+				dmcfreq->volt);
+			return ret;
+		}
+	}
+	if (dmcfreq->sleep_mem_volt &&
+	    dmcfreq->sleep_mem_volt != dmcfreq->mem_volt) {
+		ret = regulator_set_voltage(dmcfreq->mem_reg, dmcfreq->mem_volt,
+					    INT_MAX);
+		if (ret) {
+			dev_err(dev, "Cannot set mem voltage %lu uV\n",
+				dmcfreq->mem_volt);
+			return ret;
+		}
+	}
+
 	ret = rockchip_dmcfreq_enable_event(dmcfreq);
 	if (ret)
 		return ret;
 
-	if (dmcfreq->devfreq) {
-		ret = devfreq_resume_device(dmcfreq->devfreq);
+	if (dmcfreq->info.devfreq) {
+		ret = devfreq_resume_device(dmcfreq->info.devfreq);
 		if (ret < 0) {
 			dev_err(dev, "failed to resume the devfreq devices\n");
 			return ret;
@@ -3466,6 +3534,6 @@
 };
 module_platform_driver(rockchip_dmcfreq_driver);
 
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
+MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
 MODULE_DESCRIPTION("rockchip dmcfreq driver with devfreq framework");
+MODULE_LICENSE("GPL v2");