hc
2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2.c
@@ -17,8 +17,10 @@
 
 #include <linux/devfreq_cooling.h>
 #include <soc/rockchip/rockchip_ipa.h>
+#include <soc/rockchip/rockchip_dmc.h>
 #include <soc/rockchip/rockchip_opp_select.h>
 #include <soc/rockchip/rockchip_system_monitor.h>
+#include <soc/rockchip/rockchip_iommu.h>
 
 #ifdef CONFIG_PM_DEVFREQ
 #include "../../../devfreq/governor.h"
@@ -304,6 +306,7 @@
 static int rkvdec2_run(struct mpp_dev *mpp, struct mpp_task *mpp_task)
 {
 	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
+	u32 timing_en = mpp->srv->timing_en;
 	u32 reg_en = mpp_task->hw_info->reg_en;
 	/* set cache size */
 	u32 reg = RKVDEC_CACHE_PERMIT_CACHEABLE_ACCESS |
@@ -332,12 +335,20 @@
 		e = s + req->size / sizeof(u32);
 		mpp_write_req(mpp, task->reg, s, e, reg_en);
 	}
+
+	/* flush tlb before starting hardware */
+	mpp_iommu_flush_tlb(mpp->iommu_info);
+
 	/* init current task */
 	mpp->cur_task = mpp_task;
-	mpp_time_record(mpp_task);
+
+	mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
+
 	/* Flush the register before the start the device */
 	wmb();
 	mpp_write(mpp, RKVDEC_REG_START_EN_BASE, task->reg[reg_en] | RKVDEC_START_EN);
+
+	mpp_task_run_end(mpp_task, timing_en);
 
 	mpp_debug_leave();
 
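Note on the timing pair: mpp_task_run_begin()/mpp_task_run_end() are mpp-core helpers gated by the per-service timing_en flag, and the MPP_WORK_TIMEOUT_DELAY argument presumably bounds how long a run may take before the timeout path kicks in. A minimal sketch of what such a begin/end pair typically records, assuming ktime-based bookkeeping and hypothetical names (not the driver's actual implementation):

#include <linux/ktime.h>
#include <linux/types.h>

/* Hypothetical stand-in for the task's timing fields; the real struct
 * mpp_task keeps its own bookkeeping. */
struct example_run_timing {
	ktime_t on_run;
	ktime_t on_run_end;
};

static inline void example_run_begin(struct example_run_timing *t, u32 timing_en)
{
	if (timing_en)
		t->on_run = ktime_get();
}

static inline void example_run_end(struct example_run_timing *t, u32 timing_en)
{
	if (timing_en)
		t->on_run_end = ktime_get();
}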
@@ -381,13 +392,15 @@
 	u32 err_mask;
 	struct rkvdec2_task *task = NULL;
 	struct mpp_task *mpp_task = mpp->cur_task;
+	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
 
 	/* FIXME use a spin lock here */
 	if (!mpp_task) {
 		dev_err(mpp->dev, "no current task\n");
 		return IRQ_HANDLED;
 	}
-	mpp_time_diff(mpp_task);
+	mpp_task->hw_cycles = mpp_read(mpp, RKVDEC_PERF_WORKING_CNT);
+	mpp_time_diff_with_hw_time(mpp_task, dec->core_clk_info.real_rate_hz);
 	mpp->cur_task = NULL;
 	task = to_rkvdec2_task(mpp_task);
 	task->irq_status = mpp->irq_status;
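The RKVDEC_PERF_WORKING_CNT value is a raw cycle count; it only becomes a time once combined with the core clock rate passed to mpp_time_diff_with_hw_time(). As a rough illustration of that conversion (a sketch, not the mpp core's actual helper): cycles divided by the clock rate give the hardware busy time, e.g. 3,000,000 cycles at 600 MHz correspond to 5 ms.

#include <linux/math64.h>
#include <linux/time64.h>
#include <linux/types.h>

/* Illustration only: turn a hardware cycle count into microseconds of busy
 * time, given the decoder core clock rate in Hz. */
static inline u64 example_hw_busy_us(u32 hw_cycles, unsigned long rate_hz)
{
	if (!rate_hz)
		return 0;

	return div_u64((u64)hw_cycles * USEC_PER_SEC, (u32)rate_hz);
}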
@@ -552,6 +565,16 @@
 			}
 		}
 	} break;
+	case MPP_CMD_SET_ERR_REF_HACK: {
+		struct rkvdec2_dev *dec = to_rkvdec2_dev(session->mpp);
+		u32 err_ref_hack_en = 0;
+
+		if (copy_from_user(&err_ref_hack_en, req->data, sizeof(u32))) {
+			mpp_err("copy_from_user failed\n");
+			return -EINVAL;
+		}
+		dec->err_ref_hack = err_ref_hack_en;
+	} break;
 	default: {
 		mpp_err("unknown mpp ioctl cmd %x\n", req->cmd);
 	} break;
@@ -617,6 +640,10 @@
 		dec->procfs = NULL;
 		return -EIO;
 	}
+
+	/* for common mpp_dev options */
+	mpp_procfs_create_common(dec->procfs, mpp);
+
 	mpp_procfs_create_u32("aclk", 0644,
 			      dec->procfs, &dec->aclk_info.debug_rate_hz);
 	mpp_procfs_create_u32("clk_core", 0644,
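Aside: the err_ref_hack flag added in the ioctl hunk above is a plain integer on the device, so it could also be surfaced here as a debug knob with the same helper used for the clock rates. A hedged sketch only (it assumes dec->err_ref_hack is declared as a u32; adjust if the field type differs):

	/* hypothetical extra entry alongside the "aclk"/"clk_core" ones above */
	mpp_procfs_create_u32("err_ref_hack", 0644,
			      dec->procfs, &dec->err_ref_hack);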
@@ -1067,17 +1094,41 @@
 	return 0;
 }
 
+static int rkvdec2_soft_reset(struct mpp_dev *mpp)
+{
+	int ret = 0;
+
+	/*
+	 * for rk3528 and rk3562
+	 * use mmu reset instead of rkvdec soft reset
+	 * rkvdec will reset together when rkvdec_mmu force reset
+	 */
+	ret = rockchip_iommu_force_reset(mpp->dev);
+	if (ret)
+		mpp_err("soft mmu reset fail, ret %d\n", ret);
+	mpp_write(mpp, RKVDEC_REG_INT_EN, 0);
+
+	return ret;
+
+}
+
 static int rkvdec2_reset(struct mpp_dev *mpp)
 {
 	struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
+	int ret = 0;
 
 	mpp_debug_enter();
 #ifdef CONFIG_PM_DEVFREQ
 	if (dec->devfreq)
 		mutex_lock(&dec->devfreq->lock);
 #endif
-	if (dec->rst_a && dec->rst_h) {
-		rockchip_pmu_idle_request(mpp->dev, true);
+	/* soft reset first */
+	ret = rkvdec2_soft_reset(mpp);
+
+	/* cru reset */
+	if (ret && dec->rst_a && dec->rst_h) {
+		mpp_err("soft reset timeout, use cru reset\n");
+		mpp_pmu_idle_request(mpp, true);
 		mpp_safe_reset(dec->rst_niu_a);
 		mpp_safe_reset(dec->rst_niu_h);
 		mpp_safe_reset(dec->rst_a);
@@ -1093,12 +1144,30 @@
 		mpp_safe_unreset(dec->rst_core);
 		mpp_safe_unreset(dec->rst_cabac);
 		mpp_safe_unreset(dec->rst_hevc_cabac);
-		rockchip_pmu_idle_request(mpp->dev, false);
+		mpp_pmu_idle_request(mpp, false);
 	}
 #ifdef CONFIG_PM_DEVFREQ
 	if (dec->devfreq)
 		mutex_unlock(&dec->devfreq->lock);
 #endif
+	mpp_debug_leave();
+
+	return 0;
+}
+
+static int rkvdec2_sip_reset(struct mpp_dev *mpp)
+{
+	mpp_debug_enter();
+
+	if (IS_REACHABLE(CONFIG_ROCKCHIP_SIP)) {
+		/* sip reset */
+		rockchip_dmcfreq_lock();
+		sip_smc_vpu_reset(0, 0, 0);
+		rockchip_dmcfreq_unlock();
+	} else {
+		rkvdec2_reset(mpp);
+	}
+
 	mpp_debug_leave();
 
 	return 0;
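IS_REACHABLE(CONFIG_ROCKCHIP_SIP) keeps this path buildable when the SIP support is disabled, or built as a module that built-in code cannot call. Roughly (simplified from include/linux/kconfig.h), it is true in these cases:

/* Simplified: true when CONFIG_ROCKCHIP_SIP is built in, or when it is a
 * module and the calling code is itself module code. */
#if defined(CONFIG_ROCKCHIP_SIP) || \
    (defined(CONFIG_ROCKCHIP_SIP_MODULE) && defined(MODULE))
	/* sip_smc_vpu_reset() is callable */
#else
	/* fall back to rkvdec2_reset() */
#endif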
@@ -1120,7 +1189,7 @@
 	.clk_off = rkvdec2_clk_off,
 	.get_freq = rkvdec2_get_freq,
 	.set_freq = rkvdec2_set_freq,
-	.reset = rkvdec2_reset,
+	.reset = rkvdec2_sip_reset,
 };
 
 static struct mpp_dev_ops rkvdec_v2_dev_ops = {