forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/video/rockchip/mpp/mpp_rkvenc2.c
....@@ -32,6 +32,7 @@
3232 #include <soc/rockchip/rockchip_ipa.h>
3333 #include <soc/rockchip/rockchip_opp_select.h>
3434 #include <soc/rockchip/rockchip_system_monitor.h>
35 +#include <soc/rockchip/rockchip_iommu.h>
3536
3637 #include "mpp_debug.h"
3738 #include "mpp_iommu.h"
....@@ -44,6 +45,8 @@
4445 #define RKVENC_MAX_DCHS_ID 4
4546 #define RKVENC_MAX_SLICE_FIFO_LEN 256
4647 #define RKVENC_SCLR_DONE_STA BIT(2)
48 +#define RKVENC_WDG 0x38
49 +#define TIMEOUT_MS 100
4750
4851 #define to_rkvenc_info(info) \
4952 container_of(info, struct rkvenc_hw_info, hw)
....@@ -128,6 +131,11 @@
128131 #define INT_STA_RBUS_ERR_STA BIT(7)
129132 #define INT_STA_WDG_STA BIT(8)
130133
134 +#define INT_STA_ERROR (INT_STA_BRSP_OTSD_STA | \
135 + INT_STA_WBUS_ERR_STA | \
136 + INT_STA_RBUS_ERR_STA | \
137 + INT_STA_WDG_STA)
138 +
131139 #define DCHS_REG_OFFSET (0x304)
132140 #define DCHS_CLASS_OFFSET (33)
133141 #define DCHS_TXE (0x10)
....@@ -177,6 +185,12 @@
177185
178186 #define RKVENC2_REG_SLICE_NUM_BASE (0x4034)
179187 #define RKVENC2_REG_SLICE_LEN_BASE (0x4038)
188 +
189 +#define RKVENC2_REG_ST_BSB (0x402c)
190 +#define RKVENC2_REG_ADR_BSBT (0x2b0)
191 +#define RKVENC2_REG_ADR_BSBB (0x2b4)
192 +#define RKVENC2_REG_ADR_BSBR (0x2b8)
193 +#define RKVENC2_REG_ADR_BSBS (0x2bc)
180194
181195 union rkvenc2_slice_len_info {
182196 u32 val;
....@@ -283,9 +297,12 @@
283297 u32 sram_enabled;
284298 struct page *rcb_page;
285299
300 + u32 bs_overflow;
301 +
286302 #ifdef CONFIG_PM_DEVFREQ
287303 struct rockchip_opp_info opp_info;
288304 struct monitor_dev_info *mdev_info;
305 + struct opp_table *opp_table;
289306 #endif
290307 };
291308
....@@ -1186,6 +1203,7 @@
11861203 struct rkvenc_task *task = to_rkvenc_task(mpp_task);
11871204 struct rkvenc_hw_info *hw = enc->hw_info;
11881205 u32 timing_en = mpp->srv->timing_en;
1206 + u32 timeout_thd;
11891207
11901208 mpp_debug_enter();
11911209
....@@ -1234,11 +1252,18 @@
12341252 /* init current task */
12351253 mpp->cur_task = mpp_task;
12361254
1255 + /*
1256 + * Reconfigure the timeout threshold.
1257 + * Bits 0-23 count units of 1024 core clock cycles.
1258 + */
1259 + timeout_thd = mpp_read(mpp, RKVENC_WDG) & 0xff000000;
1260 + timeout_thd |= TIMEOUT_MS * clk_get_rate(enc->core_clk_info.clk) / 1024000;
1261 + mpp_write(mpp, RKVENC_WDG, timeout_thd);
1262 +
12371263 mpp_task_run_begin(mpp_task, timing_en, MPP_WORK_TIMEOUT_DELAY);
12381264
12391265 /* Flush the register before the start the device */
12401266 wmb();
1241 -
12421267 mpp_write(mpp, enc->hw_info->enc_start_base, start_val);
12431268
12441269 mpp_task_run_end(mpp_task, timing_en);
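
For reference, the threshold written to RKVENC_WDG above is just TIMEOUT_MS converted into units of 1024 core clock cycles, with the top byte of the register preserved. A standalone sketch of that arithmetic (plain C; the 594 MHz rate and the wdg_threshold() helper are made up for illustration, the real driver takes the rate from enc->core_clk_info.clk):

#include <stdint.h>
#include <stdio.h>

/* Bits [23:0] of the watchdog register count units of 1024 core clock cycles;
 * bits [31:24] are preserved from the old register value. */
static uint32_t wdg_threshold(uint32_t old_reg, uint64_t core_clk_hz, uint32_t timeout_ms)
{
	uint64_t units = (uint64_t)timeout_ms * core_clk_hz / 1024000ULL;

	return (old_reg & 0xff000000u) | ((uint32_t)units & 0x00ffffffu);
}

int main(void)
{
	/* e.g. a 594 MHz core clock and the 100 ms window from TIMEOUT_MS */
	printf("WDG = 0x%08x\n", wdg_threshold(0x01000000u, 594000000ULL, 100));
	return 0;
}
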
....@@ -1248,9 +1273,9 @@
12481273 return 0;
12491274 }
12501275
1251 -static void rkvenc2_read_slice_len(struct mpp_dev *mpp, struct rkvenc_task *task)
1276 +static void rkvenc2_read_slice_len(struct mpp_dev *mpp, struct rkvenc_task *task,
1277 + u32 last)
12521278 {
1253 - u32 last = mpp_read_relaxed(mpp, 0x002c) & INT_STA_ENC_DONE_STA;
12541279 u32 sli_num = mpp_read_relaxed(mpp, RKVENC2_REG_SLICE_NUM_BASE);
12551280 union rkvenc2_slice_len_info slice_info;
12561281 u32 task_id = task->mpp_task.task_id;
....@@ -1290,47 +1315,97 @@
12901315 struct rkvenc_hw_info *hw = enc->hw_info;
12911316 struct mpp_task *mpp_task = NULL;
12921317 struct rkvenc_task *task = NULL;
1318 + u32 irq_status;
12931319 int ret = IRQ_NONE;
12941320
12951321 mpp_debug_enter();
12961322
1297 - mpp->irq_status = mpp_read(mpp, hw->int_sta_base);
1298 - if (!mpp->irq_status)
1323 + irq_status = mpp_read(mpp, hw->int_sta_base);
1324 +
1325 + mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n",
1326 + dev_name(mpp->dev), irq_status);
1327 +
1328 + if (!irq_status)
12991329 return ret;
1330 +
1331 + /* clear int first */
1332 + mpp_write(mpp, hw->int_clr_base, irq_status);
1333 +
1334 + /*
1335 + * Prevent a watchdog irq storm.
1336 + * The encoder does not stop working when the watchdog interrupt triggers;
1337 + * it keeps checking the timeout and re-raising the watchdog irq.
1338 + */
1339 + if (irq_status & INT_STA_WDG_STA)
1340 + mpp_write(mpp, hw->int_mask_base, INT_STA_WDG_STA);
13001341
13011342 if (mpp->cur_task) {
13021343 mpp_task = mpp->cur_task;
13031344 task = to_rkvenc_task(mpp_task);
13041345 }
13051346
1306 - if (mpp->irq_status & INT_STA_ENC_DONE_STA) {
1307 - if (task) {
1308 - if (task->task_split)
1309 - rkvenc2_read_slice_len(mpp, task);
1347 + /* 1. read slice number and slice length */
1348 + if (task && task->task_split &&
1349 + (irq_status & (INT_STA_SLC_DONE_STA | INT_STA_ENC_DONE_STA))) {
1350 + mpp_time_part_diff(mpp_task);
1351 + rkvenc2_read_slice_len(mpp, task, irq_status & INT_STA_ENC_DONE_STA);
1352 + wake_up(&mpp_task->wait);
1353 + }
13101354
1311 - wake_up(&mpp_task->wait);
1355 + /* 2. process slice irq */
1356 + if (irq_status & INT_STA_SLC_DONE_STA)
1357 + ret = IRQ_HANDLED;
1358 +
1359 + /* 3. process bitstream overflow */
1360 + if (irq_status & INT_STA_BSF_OFLW_STA) {
1361 + u32 bs_rd = mpp_read(mpp, RKVENC2_REG_ADR_BSBR);
1362 + u32 bs_wr = mpp_read(mpp, RKVENC2_REG_ST_BSB);
1363 + u32 bs_top = mpp_read(mpp, RKVENC2_REG_ADR_BSBT);
1364 + u32 bs_bot = mpp_read(mpp, RKVENC2_REG_ADR_BSBB);
1365 +
1366 + if (mpp_task)
1367 + dev_err(mpp->dev, "task %d found bitstream overflow [%#08x %#08x %#08x %#08x]\n",
1368 + mpp_task->task_index, bs_top, bs_bot, bs_wr, bs_rd);
1369 + bs_wr += 128;
1370 + if (bs_wr >= bs_top)
1371 + bs_wr = bs_bot;
1372 +
1373 + /* update write addr for enc continue */
1374 + mpp_write(mpp, RKVENC2_REG_ADR_BSBS, bs_wr);
1375 + enc->bs_overflow = 1;
1376 +
1377 + ret = IRQ_HANDLED;
1378 + }
1379 +
1380 + /* 4. process frame irq */
1381 + if (irq_status & INT_STA_ENC_DONE_STA) {
1382 + mpp->irq_status = irq_status;
1383 +
1384 + if (enc->bs_overflow) {
1385 + mpp->irq_status |= INT_STA_BSF_OFLW_STA;
1386 + enc->bs_overflow = 0;
13121387 }
1313 -
1314 - mpp_write(mpp, hw->int_mask_base, 0x100);
1315 - mpp_write(mpp, hw->int_clr_base, 0xffffffff);
1316 - udelay(5);
1317 - mpp_write(mpp, hw->int_sta_base, 0);
13181388
13191389 ret = IRQ_WAKE_THREAD;
1320 - } else if (mpp->irq_status & INT_STA_SLC_DONE_STA) {
1321 - if (task && task->task_split) {
1322 - mpp_time_part_diff(mpp_task);
1390 + }
13231391
1324 - rkvenc2_read_slice_len(mpp, task);
1325 - wake_up(&mpp_task->wait);
1326 - }
1392 + /* 5. process error irq */
1393 + if (irq_status & INT_STA_ERROR) {
1394 + mpp->irq_status = irq_status;
13271395
1328 - mpp_write(mpp, hw->int_clr_base, INT_STA_SLC_DONE_STA);
1396 + dev_err(mpp->dev, "found error status %08x\n", irq_status);
1397 +
1398 + ret = IRQ_WAKE_THREAD;
13291399 }
13301400
13311401 mpp_debug_leave();
13321402
13331403 return ret;
1404 +}
1405 +
1406 +static int vepu540c_irq(struct mpp_dev *mpp)
1407 +{
1408 + return rkvenc_irq(mpp);
13341409 }
13351410
13361411 static int rkvenc_isr(struct mpp_dev *mpp)
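
The bitstream-overflow branch above bumps the hardware write pointer past the full region and wraps it to the buffer bottom so encoding can continue. A minimal standalone model of that pointer arithmetic (the advance_bs_writer() helper and the example addresses are invented; only the +128 step and the wrap rule follow the patch):

#include <stdint.h>
#include <stdio.h>

/* Advance the bitstream write pointer past the overflowed region and wrap it
 * back to the buffer bottom when it passes the top, as in the irq branch above. */
static uint32_t advance_bs_writer(uint32_t bs_wr, uint32_t bs_top, uint32_t bs_bot)
{
	bs_wr += 128;
	if (bs_wr >= bs_top)
		bs_wr = bs_bot;

	return bs_wr;	/* value the driver writes back to the BSBS register */
}

int main(void)
{
	uint32_t top = 0x10100000u, bot = 0x10000000u;

	printf("near top  : 0x%08x\n", advance_bs_writer(0x100fffc0u, top, bot));
	printf("mid buffer: 0x%08x\n", advance_bs_writer(0x10040000u, top, bot));
	return 0;
}
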
....@@ -1360,9 +1435,6 @@
13601435 task->irq_status = mpp->irq_status;
13611436
13621437 rkvenc2_update_dchs(enc, task);
1363 -
1364 - mpp_debug(DEBUG_IRQ_STATUS, "%s irq_status: %08x\n",
1365 - dev_name(mpp->dev), task->irq_status);
13661438
13671439 if (task->irq_status & enc->hw_info->err_mask) {
13681440 atomic_inc(&mpp->reset_request);
....@@ -1413,7 +1485,7 @@
14131485 if (task->bs_buf) {
14141486 u32 bs_size = mpp_read(mpp, 0x4064);
14151487
1416 - mpp_dma_buf_sync(task->bs_buf, 0, bs_size / 8 + task->offset_bs,
1488 + mpp_dma_buf_sync(task->bs_buf, 0, bs_size + task->offset_bs,
14171489 DMA_FROM_DEVICE, true);
14181490 }
14191491
....@@ -1740,16 +1812,19 @@
17401812 if (IS_ERR(reg_table))
17411813 return PTR_ERR(reg_table);
17421814 }
1815 + enc->opp_table = reg_table;
17431816
17441817 clk_table = dev_pm_opp_set_clkname(dev, "clk_core");
1745 - if (IS_ERR(clk_table))
1746 - return PTR_ERR(clk_table);
1818 + if (IS_ERR(clk_table)) {
1819 + ret = PTR_ERR(clk_table);
1820 + goto put_opp_reg;
1821 + }
17471822
17481823 rockchip_get_opp_data(rockchip_rkvenc_of_match, &enc->opp_info);
17491824 ret = rockchip_init_opp_table(dev, &enc->opp_info, "leakage", "venc");
17501825 if (ret) {
17511826 dev_err(dev, "failed to init_opp_table\n");
1752 - return ret;
1827 + goto put_opp_clk;
17531828 }
17541829
17551830 enc->mdev_info = rockchip_system_monitor_register(dev, &venc_mdevp);
....@@ -1758,6 +1833,14 @@
17581833 enc->mdev_info = NULL;
17591834 }
17601835
1836 + return 0;
1837 +
1838 +put_opp_clk:
1839 + dev_pm_opp_put_clkname(enc->opp_table);
1840 +put_opp_reg:
1841 + dev_pm_opp_put_regulators(enc->opp_table);
1842 + enc->opp_table = NULL;
1843 +
17611844 return ret;
17621845 }
17631846
....@@ -1765,8 +1848,16 @@
17651848 {
17661849 struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
17671850
1768 - if (enc->mdev_info)
1851 + if (enc->mdev_info) {
17691852 rockchip_system_monitor_unregister(enc->mdev_info);
1853 + enc->mdev_info = NULL;
1854 + }
1855 + if (enc->opp_table) {
1856 + rockchip_uninit_opp_table(mpp->dev, &enc->opp_info);
1857 + dev_pm_opp_put_clkname(enc->opp_table);
1858 + dev_pm_opp_put_regulators(enc->opp_table);
1859 + enc->opp_table = NULL;
1860 + }
17701861
17711862 return 0;
17721863 }
....@@ -1835,7 +1926,7 @@
18351926
18361927 /* safe reset */
18371928 mpp_write(mpp, hw->int_mask_base, 0x3FF);
1838 - mpp_write(mpp, hw->enc_clr_base, 0x1);
1929 + mpp_write(mpp, hw->enc_clr_base, 0x3);
18391930 ret = readl_relaxed_poll_timeout(mpp->reg_base + hw->int_sta_base,
18401931 rst_status,
18411932 rst_status & RKVENC_SCLR_DONE_STA,
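
The safe-reset path keeps re-reading the interrupt status register until the self-clear-done bit appears or the poll times out. A rough userspace model of that pattern (fake_status() is a made-up stand-in for the register read; the driver itself uses readl_relaxed_poll_timeout()):

#include <stdint.h>
#include <stdio.h>

#define SCLR_DONE (1u << 2)	/* mirrors RKVENC_SCLR_DONE_STA = BIT(2) */

/* Made-up register read: the done bit shows up on the third poll. */
static uint32_t fake_status(void)
{
	static int calls;

	return (++calls >= 3) ? SCLR_DONE : 0;
}

int main(void)
{
	uint32_t sta = 0;
	int tries;

	for (tries = 0; tries < 1000; tries++) {
		sta = fake_status();
		if (sta & SCLR_DONE)
			break;
	}

	if (sta & SCLR_DONE)
		printf("reset done after %d polls\n", tries + 1);
	else
		printf("reset timed out\n");
	return 0;
}
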
....@@ -2004,38 +2095,31 @@
20042095
20052096 if (!enc_task->task_split || enc_task->task_split_done) {
20062097 task_done_ret:
2007 - ret = wait_event_timeout(task->wait,
2008 - test_bit(TASK_STATE_DONE, &task->state),
2009 - msecs_to_jiffies(RKVENC2_WAIT_TIMEOUT_DELAY));
2098 + ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state));
2099 + if (ret == -ERESTARTSYS)
2100 + mpp_err("wait task break by signal in normal mode\n");
20102101
2011 - if (ret > 0)
2012 - return rkvenc2_task_default_process(mpp, task);
2102 + return rkvenc2_task_default_process(mpp, task);
20132103
2014 - rkvenc2_task_timeout_process(session, task);
2015 - return ret;
20162104 }
20172105
20182106 /* not slice return just wait all slice length */
20192107 if (!req) {
20202108 do {
2021 - ret = wait_event_timeout(task->wait,
2022 - kfifo_out(&enc_task->slice_info, &slice_info, 1),
2023 - msecs_to_jiffies(RKVENC2_WORK_TIMEOUT_DELAY));
2024 - if (ret > 0) {
2025 - mpp_dbg_slice("task %d rd %3d len %d %s\n",
2026 - task_id, enc_task->slice_rd_cnt, slice_info.slice_len,
2027 - slice_info.last ? "last" : "");
2028 -
2029 - enc_task->slice_rd_cnt++;
2030 -
2031 - if (slice_info.last)
2032 - goto task_done_ret;
2033 -
2034 - continue;
2109 + ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info,
2110 + &slice_info, 1));
2111 + if (ret == -ERESTARTSYS) {
2112 + mpp_err("wait task break by signal in slice all mode\n");
2113 + return 0;
20352114 }
2115 + mpp_dbg_slice("task %d rd %3d len %d %s\n",
2116 + task_id, enc_task->slice_rd_cnt, slice_info.slice_len,
2117 + slice_info.last ? "last" : "");
20362118
2037 - rkvenc2_task_timeout_process(session, task);
2038 - return ret;
2119 + enc_task->slice_rd_cnt++;
2120 +
2121 + if (slice_info.last)
2122 + goto task_done_ret;
20392123 } while (1);
20402124 }
20412125
....@@ -2050,40 +2134,41 @@
20502134
20512135 /* handle slice mode poll return */
20522136 do {
2053 - ret = wait_event_timeout(task->wait,
2054 - kfifo_out(&enc_task->slice_info, &slice_info, 1),
2055 - msecs_to_jiffies(RKVENC2_WORK_TIMEOUT_DELAY));
2056 - if (ret > 0) {
2057 - mpp_dbg_slice("core %d task %d rd %3d len %d %s\n", task_id,
2058 - mpp->core_id, enc_task->slice_rd_cnt, slice_info.slice_len,
2059 - slice_info.last ? "last" : "");
2060 - enc_task->slice_rd_cnt++;
2061 - if (cfg.count_ret < cfg.count_max) {
2062 - struct rkvenc_poll_slice_cfg __user *ucfg =
2063 - (struct rkvenc_poll_slice_cfg __user *)(req->data);
2064 - u32 __user *dst = (u32 __user *)(ucfg + 1);
2065 -
2066 - /* Do NOT return here when put_user error. Just continue */
2067 - if (put_user(slice_info.val, dst + cfg.count_ret))
2068 - ret = -EFAULT;
2069 -
2070 - cfg.count_ret++;
2071 - if (put_user(cfg.count_ret, &ucfg->count_ret))
2072 - ret = -EFAULT;
2073 - }
2074 -
2075 - if (slice_info.last) {
2076 - enc_task->task_split_done = 1;
2077 - goto task_done_ret;
2078 - }
2079 -
2080 - if (cfg.count_ret >= cfg.count_max)
2081 - return 0;
2082 -
2083 - if (ret < 0)
2084 - return ret;
2137 + ret = wait_event_interruptible(task->wait, kfifo_out(&enc_task->slice_info,
2138 + &slice_info, 1));
2139 + if (ret == -ERESTARTSYS) {
2140 + mpp_err("wait task break by signal in slice one mode\n");
2141 + return 0;
20852142 }
2086 - } while (ret > 0);
2143 + mpp_dbg_slice("core %d task %d rd %3d len %d %s\n", task_id,
2144 + mpp->core_id, enc_task->slice_rd_cnt, slice_info.slice_len,
2145 + slice_info.last ? "last" : "");
2146 + enc_task->slice_rd_cnt++;
2147 + if (cfg.count_ret < cfg.count_max) {
2148 + struct rkvenc_poll_slice_cfg __user *ucfg =
2149 + (struct rkvenc_poll_slice_cfg __user *)(req->data);
2150 + u32 __user *dst = (u32 __user *)(ucfg + 1);
2151 +
2152 + /* Do NOT return here when put_user error. Just continue */
2153 + if (put_user(slice_info.val, dst + cfg.count_ret))
2154 + ret = -EFAULT;
2155 +
2156 + cfg.count_ret++;
2157 + if (put_user(cfg.count_ret, &ucfg->count_ret))
2158 + ret = -EFAULT;
2159 + }
2160 +
2161 + if (slice_info.last) {
2162 + enc_task->task_split_done = 1;
2163 + goto task_done_ret;
2164 + }
2165 +
2166 + if (cfg.count_ret >= cfg.count_max)
2167 + return 0;
2168 +
2169 + if (ret < 0)
2170 + return ret;
2171 + } while (!ret);
20872172
20882173 rkvenc2_task_timeout_process(session, task);
20892174
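
The reworked wait loops above drop the timeout-based wait: wait_event_interruptible() returns 0 once a slice record can be pulled from the kfifo and -ERESTARTSYS if a signal arrives, and the loop keeps reading until the record flagged last. A small control-flow sketch of that pattern (wait_for_slice() and ERESTART_LIKE are made-up stand-ins, not the driver API):

#include <stdio.h>

#define ERESTART_LIKE (-1)	/* stand-in for -ERESTARTSYS */

struct slice_info {
	int len;
	int last;
};

/* Made-up stand-in for wait_event_interruptible() + kfifo_out(): hands out
 * three slice records, the third one flagged as the last of the frame. */
static int wait_for_slice(struct slice_info *out)
{
	static const struct slice_info fifo[] = {
		{ 4096, 0 },
		{ 2048, 0 },
		{ 1024, 1 },
	};
	static unsigned int idx;

	if (idx >= 3)
		return ERESTART_LIKE;	/* nothing left: pretend a signal arrived */

	*out = fifo[idx++];
	return 0;
}

int main(void)
{
	struct slice_info info;

	do {
		if (wait_for_slice(&info) == ERESTART_LIKE) {
			puts("interrupted, giving up");
			return 0;
		}
		printf("slice len %d%s\n", info.len, info.last ? " (last)" : "");
	} while (!info.last);

	puts("all slices read");
	return 0;
}
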
....@@ -2130,6 +2215,20 @@
21302215 .dump_session = rkvenc_dump_session,
21312216 };
21322217
2218 +static struct mpp_dev_ops vepu540c_dev_ops_v2 = {
2219 + .wait_result = rkvenc2_wait_result,
2220 + .alloc_task = rkvenc_alloc_task,
2221 + .run = rkvenc_run,
2222 + .irq = vepu540c_irq,
2223 + .isr = rkvenc_isr,
2224 + .finish = rkvenc_finish,
2225 + .result = rkvenc_result,
2226 + .free_task = rkvenc_free_task,
2227 + .ioctl = rkvenc_control,
2228 + .init_session = rkvenc_init_session,
2229 + .free_session = rkvenc_free_session,
2230 + .dump_session = rkvenc_dump_session,
2231 +};
21332232
21342233 static const struct mpp_dev_var rkvenc_v2_data = {
21352234 .device_type = MPP_DEVICE_RKVENC,
....@@ -2144,7 +2243,7 @@
21442243 .hw_info = &rkvenc_540c_hw_info.hw,
21452244 .trans_info = trans_rkvenc_540c,
21462245 .hw_ops = &rkvenc_hw_ops,
2147 - .dev_ops = &rkvenc_dev_ops_v2,
2246 + .dev_ops = &vepu540c_dev_ops_v2,
21482247 };
21492248
21502249 static const struct mpp_dev_var rkvenc_ccu_data = {
....@@ -2243,8 +2342,10 @@
22432342 ccu_info = ccu->main_core->iommu_info;
22442343 cur_info = enc->mpp.iommu_info;
22452344
2246 - cur_info->domain = ccu_info->domain;
2247 - cur_info->rw_sem = ccu_info->rw_sem;
2345 + if (cur_info) {
2346 + cur_info->domain = ccu_info->domain;
2347 + cur_info->rw_sem = ccu_info->rw_sem;
2348 + }
22482349 mpp_iommu_attach(cur_info);
22492350
22502351 /* increase main core message capacity */
....@@ -2363,13 +2464,32 @@
23632464 {
23642465 struct mpp_dev *mpp = (struct mpp_dev *)arg;
23652466 struct rkvenc_dev *enc = to_rkvenc_dev(mpp);
2366 - struct mpp_task *mpp_task = mpp->cur_task;
2467 + struct mpp_task *mpp_task;
2468 + struct rkvenc_ccu *ccu = enc->ccu;
23672469
2470 + if (ccu) {
2471 + struct rkvenc_dev *core = NULL, *n;
2472 +
2473 + list_for_each_entry_safe(core, n, &ccu->core_list, core_link) {
2474 + if (core->mpp.iommu_info &&
2475 + (&core->mpp.iommu_info->pdev->dev == iommu_dev)) {
2476 + mpp = &core->mpp;
2477 + break;
2478 + }
2479 + }
2480 + }
2481 + mpp_task = mpp->cur_task;
23682482 dev_info(mpp->dev, "core %d page fault found dchs %08x\n",
23692483 mpp->core_id, mpp_read_relaxed(&enc->mpp, DCHS_REG_OFFSET));
23702484
23712485 if (mpp_task)
23722486 mpp_task_dump_mem_region(mpp, mpp_task);
2487 +
2488 + /*
2489 + * Mask the iommu irq so the iommu does not keep re-triggering the page fault
2490 + * until the faulting task is ended by the hw timeout.
2491 + */
2492 + rockchip_iommu_mask_irq(mpp->dev);
23732493
23742494 return 0;
23752495 }
....@@ -2414,7 +2534,7 @@
24142534 ret = devm_request_threaded_irq(dev, mpp->irq,
24152535 mpp_dev_irq,
24162536 mpp_dev_isr_sched,
2417 - IRQF_SHARED,
2537 + IRQF_ONESHOT,
24182538 dev_name(dev), mpp);
24192539 if (ret) {
24202540 dev_err(dev, "register interrupter runtime failed\n");
....@@ -2422,7 +2542,7 @@
24222542 }
24232543 mpp->session_max_buffers = RKVENC_SESSION_MAX_BUFFERS;
24242544 enc->hw_info = to_rkvenc_info(mpp->var->hw_info);
2425 - mpp->iommu_info->hdl = rkvenc2_iommu_fault_handle;
2545 + mpp->fault_handler = rkvenc2_iommu_fault_handle;
24262546 rkvenc_procfs_init(mpp);
24272547 rkvenc_procfs_ccu_init(mpp);
24282548