hc
2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/drivers/video/rockchip/rga3/rga2_reg_info.c
@@ -7,7 +7,6 @@
 
 #define pr_fmt(fmt) "rga2_reg: " fmt
 
-#include "rga_job.h"
 #include "rga2_reg_info.h"
 #include "rga_dma_buf.h"
 #include "rga_iommu.h"
@@ -165,12 +164,8 @@
 
 	bRGA_MODE_CTL = (u32 *) (base + RGA2_MODE_CTRL_OFFSET);
 
-	if (msg->render_mode == 4)
-		render_mode = 3;
-
-	/* In slave mode, the current frame completion interrupt must be enabled. */
-	if (!RGA2_USE_MASTER_MODE)
-		msg->CMD_fin_int_enable = 1;
+	if (msg->render_mode == UPDATE_PALETTE_TABLE_MODE)
+		render_mode = 0x3;
 
 	reg =
 		((reg & (~m_RGA2_MODE_CTRL_SW_RENDER_MODE)) |
@@ -231,6 +226,7 @@
 	u32 sw, sh;
 	u32 dw, dh;
 	u8 rotate_mode;
+	u8 vsp_scale_mode = 0;
 	u8 scale_w_flag, scale_h_flag;
 
 	bRGA_SRC_INFO = (u32 *) (base + RGA2_SRC_INFO_OFFSET);
@@ -292,6 +288,18 @@
 		/* uvvds need to force tile mode. */
 		if (msg->uvvds_mode && scale_w_flag == 0)
 			scale_w_flag = 3;
+	}
+
+	/* VSP scale mode select, HSD > VSD > VSP > HSP */
+	if (scale_h_flag == 0x2) {
+		/* After HSD, VSP needs to check dst_width */
+		if ((scale_w_flag == 0x1) && (dw < RGA2_VSP_BICUBIC_LIMIT))
+			vsp_scale_mode = 0x0;
+		else if (sw < RGA2_VSP_BICUBIC_LIMIT)
+			vsp_scale_mode = 0x0;
+		else
+			/* default select bilinear */
+			vsp_scale_mode = 0x1;
 	}
 
 	switch (msg->src.format) {
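Together with the s_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL change later in this patch, the hunk above moves the vertical-scale filter choice into the driver: in the VSP case (scale_h_flag == 0x2 per the in-hunk comments) bicubic (0x0) is used while the governing width stays below RGA2_VSP_BICUBIC_LIMIT, otherwise the driver falls back to bilinear (0x1); after HSD (scale_w_flag == 0x1) it is the destination width that has to be checked. The stand-alone C sketch below only models that decision. The limit value and the sample inputs are placeholders for illustration; the real RGA2_VSP_BICUBIC_LIMIT is defined in the driver's headers.

#include <stdint.h>
#include <stdio.h>

/* Assumption: placeholder for the driver's RGA2_VSP_BICUBIC_LIMIT. */
#define VSP_BICUBIC_LIMIT_PLACEHOLDER 1996

/* Per the hunk's comments: scale_w_flag == 0x1 is the HSD case,
 * scale_h_flag == 0x2 is the VSP case; other encodings are left opaque here. */
static unsigned int pick_vsp_scale_mode(uint8_t scale_w_flag, uint8_t scale_h_flag,
					uint32_t sw, uint32_t dw)
{
	unsigned int vsp_scale_mode = 0;

	if (scale_h_flag == 0x2) {
		/* After HSD, judge VSP against the destination width. */
		if (scale_w_flag == 0x1 && dw < VSP_BICUBIC_LIMIT_PLACEHOLDER)
			vsp_scale_mode = 0x0;	/* bicubic */
		else if (sw < VSP_BICUBIC_LIMIT_PLACEHOLDER)
			vsp_scale_mode = 0x0;	/* bicubic */
		else
			vsp_scale_mode = 0x1;	/* fall back to bilinear */
	}

	return vsp_scale_mode;
}

int main(void)
{
	printf("narrow dst after HSD -> mode %u\n", pick_vsp_scale_mode(0x1, 0x2, 4096, 1280));
	printf("wide source          -> mode %u\n", pick_vsp_scale_mode(0x0, 0x2, 4096, 4096));
	return 0;
}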
@@ -564,8 +572,7 @@
 		 ((msg->alpha_rop_flag >> 4) & 0x1)));
 	reg =
 		((reg & (~m_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL)) |
-		 (s_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL((
-			msg->scale_bicu_mode >> 4))));
+		 (s_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL((vsp_scale_mode))));
 	reg =
 		((reg & (~m_RGA2_SRC_INFO_SW_SW_YUV10_E)) |
 		 (s_RGA2_SRC_INFO_SW_SW_YUV10_E((yuv10))));
@@ -1715,7 +1722,7 @@
 	*bRGA_MMU_ELS_BASE = (u32) (msg->mmu_info.els_base_addr) >> 4;
 }
 
-int rga2_gen_reg_info(u8 *base, struct rga2_req *msg)
+static int rga2_gen_reg_info(u8 *base, struct rga2_req *msg)
 {
 	u8 dst_nn_quantize_en = 0;
 
@@ -1847,9 +1854,6 @@
 		req->rotate_mode |= (3 << 4);
 		break;
 	}
-
-	if ((req->dst.act_w > 2048) && (req->src.act_h < req->dst.act_h))
-		req->scale_bicu_mode |= (1 << 4);
 
 	req->LUT_addr = req_rga->LUT_addr;
 	req->rop_mask_addr = req_rga->rop_mask_addr;
@@ -2060,16 +2064,18 @@
 	}
 }
 
-void rga2_soft_reset(struct rga_scheduler_t *scheduler)
+static void rga2_soft_reset(struct rga_scheduler_t *scheduler)
 {
 	u32 i;
 	u32 reg;
-	u32 iommu_dte_addr;
+	u32 iommu_dte_addr = 0;
 
 	if (scheduler->data->mmu == RGA_IOMMU)
-		iommu_dte_addr = rga_read(0xf00, scheduler);
+		iommu_dte_addr = rga_read(RGA_IOMMU_DTE_ADDR, scheduler);
 
-	rga_write((1 << 3) | (1 << 4) | (1 << 6), RGA2_SYS_CTRL, scheduler);
+	rga_write(m_RGA2_SYS_CTRL_ACLK_SRESET_P | m_RGA2_SYS_CTRL_CCLK_SRESET_P |
+		  m_RGA2_SYS_CTRL_RST_PROTECT_P,
+		  RGA2_SYS_CTRL, scheduler);
 
 	for (i = 0; i < RGA_RESET_TIMEOUT; i++) {
 		/* RGA_SYS_CTRL */
@@ -2082,13 +2088,16 @@
 	}
 
 	if (scheduler->data->mmu == RGA_IOMMU) {
-		rga_write(iommu_dte_addr, 0xf00, scheduler);
+		rga_write(iommu_dte_addr, RGA_IOMMU_DTE_ADDR, scheduler);
 		/* enable iommu */
-		rga_write(0, 0xf08, scheduler);
+		rga_write(RGA_IOMMU_CMD_ENABLE_PAGING, RGA_IOMMU_COMMAND, scheduler);
 	}
 
 	if (i == RGA_RESET_TIMEOUT)
-		pr_err("soft reset timeout.\n");
+		pr_err("RGA2 soft reset timeout.\n");
+	else
+		pr_info("RGA2 soft reset complete.\n");
+
 }
 
 static int rga2_check_param(const struct rga_hw_data *data, const struct rga2_req *req)
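The two rga2_soft_reset() hunks above replace raw offsets and shift constants with the named RGA_IOMMU_* and m_RGA2_SYS_CTRL_* macros and add a completion log; the sequence itself is unchanged: save the IOMMU DTE address when an IOMMU sits in front of the core, pulse the reset bits in RGA2_SYS_CTRL, poll until the core reports idle or RGA_RESET_TIMEOUT iterations pass, then restore the DTE address and re-enable paging because the reset clears the IOMMU state. The self-contained sketch below models only that ordering; the simulated registers, bit values, and timeout are placeholders, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define RESET_TIMEOUT 50		/* placeholder for RGA_RESET_TIMEOUT */

/* Toy "hardware": bit0 of the status register means busy, dte_reg holds IOMMU state. */
static unsigned int busy_polls_left = 3;
static uint32_t dte_reg = 0x12345670;

static uint32_t read_status(void)
{
	return busy_polls_left ? (busy_polls_left--, 0x1) : 0x0;
}

int main(void)
{
	uint32_t saved_dte = dte_reg;	/* 1. save the IOMMU DTE before reset */
	uint32_t i;

	dte_reg = 0;			/* 2. the soft reset clears IOMMU state */

	for (i = 0; i < RESET_TIMEOUT; i++)	/* 3. poll for reset completion */
		if ((read_status() & 0x1) == 0)
			break;

	if (i == RESET_TIMEOUT) {
		printf("soft reset timeout\n");
		return 1;
	}

	dte_reg = saved_dte;		/* 4. restore the DTE and re-enable paging */
	printf("soft reset complete after %u polls, dte=0x%x restored\n", i + 1, dte_reg);
	return 0;
}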
@@ -2215,7 +2224,7 @@
 		pr_info("yuv2rgb mode is %x\n", req->yuv2rgb_mode);
 }
 
-int rga2_init_reg(struct rga_job *job)
+static int rga2_init_reg(struct rga_job *job)
 {
 	struct rga2_req req;
 	int ret = 0;
@@ -2266,6 +2275,10 @@
 			return -EFAULT;
 		}
 	}
+
+	/* In slave mode, the current frame completion interrupt must be enabled. */
+	if (scheduler->data->mmu == RGA_IOMMU)
+		req.CMD_fin_int_enable = 1;
 
 	if (rga2_gen_reg_info((uint8_t *)job->cmd_reg, &req) == -1) {
 		pr_err("gen reg info error\n");
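This hunk, together with the removal of the RGA2_USE_MASTER_MODE compile-time switch earlier in the file and the master_mode_en logic added to rga2_set_reg() later, turns the submission mode into a per-scheduler runtime decision: an IOMMU-backed core has no iova for the command buffer, so it must run in slave mode, and slave mode in turn needs the current-frame completion interrupt enabled. The sketch below only illustrates that decision chain with made-up type names; it is not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's scheduler->data->mmu values. */
enum mmu_type { MMU_NONE, MMU_RGA_MMU, MMU_RGA_IOMMU };

struct submit_cfg {
	bool master_mode;		/* core fetches CMD from DDR vs. CPU writes registers */
	bool cmd_fin_int_enable;	/* per-frame completion interrupt */
};

static struct submit_cfg pick_submit_cfg(enum mmu_type mmu)
{
	struct submit_cfg cfg;

	/* No iova is allocated for the command buffer on IOMMU devices,
	 * so they cannot let the core fetch commands itself (master mode). */
	cfg.master_mode = (mmu != MMU_RGA_IOMMU);
	/* Slave mode needs the current-frame completion interrupt enabled. */
	cfg.cmd_fin_int_enable = !cfg.master_mode;

	return cfg;
}

int main(void)
{
	struct submit_cfg cfg = pick_submit_cfg(MMU_RGA_IOMMU);

	printf("iommu: master=%d cmd_fin_int=%d\n", cfg.master_mode, cfg.cmd_fin_int_enable);
	cfg = pick_submit_cfg(MMU_RGA_MMU);
	printf("mmu:   master=%d cmd_fin_int=%d\n", cfg.master_mode, cfg.cmd_fin_int_enable);
	return 0;
}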
@@ -2338,7 +2351,7 @@
 			cmd_reg[2 + i * 4], cmd_reg[3 + i * 4]);
 }
 
-void rga2_dump_read_back_reg(struct rga_scheduler_t *scheduler)
+static void rga2_dump_read_back_reg(struct rga_scheduler_t *scheduler)
 {
 	rga2_dump_read_back_sys_reg(scheduler);
 	rga2_dump_read_back_csc_reg(scheduler);
@@ -2351,28 +2364,28 @@
 
 	if (job->pre_intr_info.read_intr_en) {
 		reg = s_RGA2_READ_LINE_SW_INTR_LINE_RD_TH(job->pre_intr_info.read_threshold);
-		rga_write(reg, RGA2_READ_LINE_CNT_OFFSET, scheduler);
+		rga_write(reg, RGA2_READ_LINE_CNT, scheduler);
 	}
 
 	if (job->pre_intr_info.write_intr_en) {
 		reg = s_RGA2_WRITE_LINE_SW_INTR_LINE_WR_START(job->pre_intr_info.write_start);
 		reg = ((reg & (~m_RGA2_WRITE_LINE_SW_INTR_LINE_WR_STEP)) |
 		       (s_RGA2_WRITE_LINE_SW_INTR_LINE_WR_STEP(job->pre_intr_info.write_step)));
-		rga_write(reg, RGA2_WRITE_LINE_CNT_OFFSET, scheduler);
+		rga_write(reg, RGA2_WRITE_LINE_CNT, scheduler);
 	}
 
-	reg = rga_read(RGA2_SYS_CTRL_OFFSET, scheduler);
-	reg = ((reg & (~m_RGA2_SYS_HOLD_MODE_EN)) |
-	       (s_RGA2_SYS_HOLD_MODE_EN(job->pre_intr_info.read_hold_en)));
-	rga_write(reg, RGA2_SYS_CTRL_OFFSET, scheduler);
+	reg = rga_read(RGA2_SYS_CTRL, scheduler);
+	reg = ((reg & (~m_RGA2_SYS_CTRL_HOLD_MODE_EN)) |
+	       (s_RGA2_SYS_CTRL_HOLD_MODE_EN(job->pre_intr_info.read_hold_en)));
+	rga_write(reg, RGA2_SYS_CTRL, scheduler);
 
-	reg = rga_read(RGA2_INT_OFFSET, scheduler);
+	reg = rga_read(RGA2_INT, scheduler);
 	reg = (reg | s_RGA2_INT_LINE_RD_CLEAR(0x1) | s_RGA2_INT_LINE_WR_CLEAR(0x1));
 	reg = ((reg & (~m_RGA2_INT_LINE_RD_EN)) |
 	       (s_RGA2_INT_LINE_RD_EN(job->pre_intr_info.read_intr_en)));
 	reg = ((reg & (~m_RGA2_INT_LINE_WR_EN)) |
 	       (s_RGA2_INT_LINE_WR_EN(job->pre_intr_info.write_intr_en)));
-	rga_write(reg, RGA2_INT_OFFSET, scheduler);
+	rga_write(reg, RGA2_INT, scheduler);
 }
 
 static void rga2_set_reg_full_csc(struct rga_job *job, struct rga_scheduler_t *scheduler)
@@ -2388,29 +2401,40 @@
 	/* full csc coefficient */
 	/* Y coefficient */
 	rga_write(job->full_csc.coe_y.r_v | (clip_y_max << 16) | (clip_y_min << 24),
-		  RGA2_DST_CSC_00_OFFSET, scheduler);
+		  RGA2_DST_CSC_00, scheduler);
 	rga_write(job->full_csc.coe_y.g_y | (clip_uv_max << 16) | (clip_uv_min << 24),
-		  RGA2_DST_CSC_01_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_y.b_u, RGA2_DST_CSC_02_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_y.off, RGA2_DST_CSC_OFF0_OFFSET, scheduler);
+		  RGA2_DST_CSC_01, scheduler);
+	rga_write(job->full_csc.coe_y.b_u, RGA2_DST_CSC_02, scheduler);
+	rga_write(job->full_csc.coe_y.off, RGA2_DST_CSC_OFF0, scheduler);
 
 	/* U coefficient */
-	rga_write(job->full_csc.coe_u.r_v, RGA2_DST_CSC_10_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_u.g_y, RGA2_DST_CSC_11_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_u.b_u, RGA2_DST_CSC_12_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_u.off, RGA2_DST_CSC_OFF1_OFFSET, scheduler);
+	rga_write(job->full_csc.coe_u.r_v, RGA2_DST_CSC_10, scheduler);
+	rga_write(job->full_csc.coe_u.g_y, RGA2_DST_CSC_11, scheduler);
+	rga_write(job->full_csc.coe_u.b_u, RGA2_DST_CSC_12, scheduler);
+	rga_write(job->full_csc.coe_u.off, RGA2_DST_CSC_OFF1, scheduler);
 
 	/* V coefficient */
-	rga_write(job->full_csc.coe_v.r_v, RGA2_DST_CSC_20_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_v.g_y, RGA2_DST_CSC_21_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_v.b_u, RGA2_DST_CSC_22_OFFSET, scheduler);
-	rga_write(job->full_csc.coe_v.off, RGA2_DST_CSC_OFF2_OFFSET, scheduler);
+	rga_write(job->full_csc.coe_v.r_v, RGA2_DST_CSC_20, scheduler);
+	rga_write(job->full_csc.coe_v.g_y, RGA2_DST_CSC_21, scheduler);
+	rga_write(job->full_csc.coe_v.b_u, RGA2_DST_CSC_22, scheduler);
+	rga_write(job->full_csc.coe_v.off, RGA2_DST_CSC_OFF2, scheduler);
 }
 
-int rga2_set_reg(struct rga_job *job, struct rga_scheduler_t *scheduler)
+static int rga2_set_reg(struct rga_job *job, struct rga_scheduler_t *scheduler)
 {
-	ktime_t now = ktime_get();
 	int i;
+	bool master_mode_en;
+	uint32_t sys_ctrl;
+	ktime_t now = ktime_get();
+
+	/*
+	 * Currently there is no iova allocated for storing cmd for the IOMMU device,
+	 * so the iommu device needs to use the slave mode.
+	 */
+	if (scheduler->data->mmu != RGA_IOMMU)
+		master_mode_en = true;
+	else
+		master_mode_en = false;
 
 	if (job->pre_intr_info.enable)
 		rga2_set_pre_intr_reg(job, scheduler);
@@ -2419,7 +2443,7 @@
 		rga2_set_reg_full_csc(job, scheduler);
 
 	if (DEBUGGER_EN(REG)) {
-		int32_t *p;
+		uint32_t *p;
 
 		rga2_dump_read_back_sys_reg(scheduler);
 		rga2_dump_read_back_csc_reg(scheduler);
@@ -2434,42 +2458,44 @@
 
 	/* All CMD finish int */
 	rga_write(rga_read(RGA2_INT, scheduler) |
-		  (0x1 << 10) | (0x1 << 9) | (0x1 << 8), RGA2_INT, scheduler);
+		  m_RGA2_INT_ERROR_ENABLE_MASK | m_RGA2_INT_ALL_CMD_DONE_INT_EN,
+		  RGA2_INT, scheduler);
 
 	/* sys_reg init */
-	rga_write((0x1 << 2) | (0x1 << 5) | (0x1 << 6) | (0x1 << 11) | (0x1 << 12),
-		  RGA2_SYS_CTRL, scheduler);
+	sys_ctrl = m_RGA2_SYS_CTRL_AUTO_CKG | m_RGA2_SYS_CTRL_AUTO_RST |
+		   m_RGA2_SYS_CTRL_RST_PROTECT_P | m_RGA2_SYS_CTRL_DST_WR_OPT_DIS |
+		   m_RGA2_SYS_CTRL_SRC0YUV420SP_RD_OPT_DIS;
 
-	if (RGA2_USE_MASTER_MODE) {
+	if (master_mode_en) {
 		/* master mode */
-		rga_write(rga_read(RGA2_SYS_CTRL, scheduler) | (0x1 << 1),
-			  RGA2_SYS_CTRL, scheduler);
+		sys_ctrl |= s_RGA2_SYS_CTRL_CMD_MODE(1);
 
 		/* cmd buffer flush cache to ddr */
 		rga_dma_sync_flush_range(&job->cmd_reg[0], &job->cmd_reg[32], scheduler);
 
 		/* set cmd_addr */
 		rga_write(virt_to_phys(job->cmd_reg), RGA2_CMD_BASE, scheduler);
-
-		rga_write(1, RGA2_CMD_CTRL, scheduler);
+		rga_write(sys_ctrl, RGA2_SYS_CTRL, scheduler);
+		rga_write(m_RGA2_CMD_CTRL_CMD_LINE_ST_P, RGA2_CMD_CTRL, scheduler);
 	} else {
 		/* slave mode */
-		rga_write(rga_read(RGA2_SYS_CTRL, scheduler) | (0x0 << 1),
-			  RGA2_SYS_CTRL, scheduler);
+		sys_ctrl |= s_RGA2_SYS_CTRL_CMD_MODE(0) | m_RGA2_SYS_CTRL_CMD_OP_ST_P;
 
 		/* set cmd_reg */
 		for (i = 0; i <= 32; i++)
			rga_write(job->cmd_reg[i], 0x100 + i * 4, scheduler);
 
-		rga_write(rga_read(RGA2_SYS_CTRL, scheduler) | 0x1, RGA2_SYS_CTRL, scheduler);
+		rga_write(sys_ctrl, RGA2_SYS_CTRL, scheduler);
 	}
 
-	if (DEBUGGER_EN(TIME)) {
-		pr_info("sys_ctrl = %x, int = %x, set cmd use time = %lld\n",
+	if (DEBUGGER_EN(REG))
+		pr_info("sys_ctrl = %x, int = %x\n",
 			rga_read(RGA2_SYS_CTRL, scheduler),
-			rga_read(RGA2_INT, scheduler),
+			rga_read(RGA2_INT, scheduler));
+
+	if (DEBUGGER_EN(TIME))
+		pr_info("set cmd use time = %lld\n",
 			ktime_us_delta(now, job->timestamp));
-	}
 
 	job->hw_running_time = now;
 	job->hw_recoder_time = now;
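The rewritten submission path above composes the whole RGA2_SYS_CTRL value in a local sys_ctrl variable and writes it once, instead of the old chain of read-modify-write accesses. In master mode the command buffer is flushed to DDR and its physical address handed to the core, which fetches the 32-word command itself once RGA2_CMD_CTRL is kicked; in slave mode the CPU writes each command word into the register file and the operation-start bit goes out together with sys_ctrl. The sketch below models that flow against a printf-backed register bus; the offsets and bit positions are placeholders for the RGA2_* and m_RGA2_* macros in rga2_reg_info.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder offsets/bits standing in for the driver's register macros. */
#define REG_SYS_CTRL	0x000
#define REG_CMD_CTRL	0x004
#define REG_CMD_BASE	0x008
#define CMD_REG_BASE	0x100

#define SYS_AUTO_CKG	(1u << 0)
#define SYS_AUTO_RST	(1u << 1)
#define SYS_CMD_MODE	(1u << 2)	/* 1 = master, 0 = slave */
#define SYS_CMD_OP_ST	(1u << 3)	/* slave-mode start bit */
#define CMD_LINE_START	(1u << 0)

static void reg_write(uint32_t val, uint32_t offset)
{
	printf("write 0x%08x -> 0x%03x\n", val, offset);
}

static void submit(const uint32_t *cmd, bool master_mode)
{
	uint32_t sys_ctrl = SYS_AUTO_CKG | SYS_AUTO_RST;
	int i;

	if (master_mode) {
		sys_ctrl |= SYS_CMD_MODE;
		/* flush cmd[] to DDR here, then hand its bus address to the core */
		reg_write(0xdeadbeef /* phys addr placeholder */, REG_CMD_BASE);
		reg_write(sys_ctrl, REG_SYS_CTRL);
		reg_write(CMD_LINE_START, REG_CMD_CTRL);
	} else {
		sys_ctrl |= SYS_CMD_OP_ST;
		/* slave mode: the CPU writes every command word itself */
		for (i = 0; i <= 32; i++)
			reg_write(cmd[i], CMD_REG_BASE + i * 4);
		reg_write(sys_ctrl, REG_SYS_CTRL);
	}
}

int main(void)
{
	uint32_t cmd[33] = { 0 };

	submit(cmd, true);
	submit(cmd, false);
	return 0;
}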
@@ -2480,7 +2506,7 @@
 	return 0;
 }
 
-int rga2_get_version(struct rga_scheduler_t *scheduler)
+static int rga2_get_version(struct rga_scheduler_t *scheduler)
 {
 	u32 major_version, minor_version, svn_version;
 	u32 reg_version;
@@ -2511,3 +2537,96 @@
 
 	return 0;
 }
+
+static int rga2_read_back_reg(struct rga_job *job, struct rga_scheduler_t *scheduler)
+{
+	if (job->rga_command_base.osd_info.enable) {
+		job->rga_command_base.osd_info.cur_flags0 = rga_read(RGA2_OSD_CUR_FLAGS0,
+								     scheduler);
+		job->rga_command_base.osd_info.cur_flags1 = rga_read(RGA2_OSD_CUR_FLAGS1,
+								     scheduler);
+	}
+
+	return 0;
+}
+
+static int rga2_irq(struct rga_scheduler_t *scheduler)
+{
+	struct rga_job *job = scheduler->running_job;
+
+	/* The hardware interrupt top-half doesn't need to lock the scheduler. */
+	if (job == NULL)
+		return IRQ_HANDLED;
+
+	if (test_bit(RGA_JOB_STATE_INTR_ERR, &job->state))
+		return IRQ_WAKE_THREAD;
+
+	job->intr_status = rga_read(RGA2_INT, scheduler);
+	job->hw_status = rga_read(RGA2_STATUS2, scheduler);
+	job->cmd_status = rga_read(RGA2_STATUS1, scheduler);
+
+	if (DEBUGGER_EN(INT_FLAG))
+		pr_info("irq handler, INTR[0x%x], HW_STATUS[0x%x], CMD_STATUS[0x%x]\n",
+			job->intr_status, job->hw_status, job->cmd_status);
+
+	if (job->intr_status &
+	    (m_RGA2_INT_CUR_CMD_DONE_INT_FLAG | m_RGA2_INT_ALL_CMD_DONE_INT_FLAG)) {
+		set_bit(RGA_JOB_STATE_FINISH, &job->state);
+	} else if (job->intr_status & m_RGA2_INT_ERROR_FLAG_MASK) {
+		set_bit(RGA_JOB_STATE_INTR_ERR, &job->state);
+
+		pr_err("irq handler err! INTR[0x%x], HW_STATUS[0x%x], CMD_STATUS[0x%x]\n",
+		       job->intr_status, job->hw_status, job->cmd_status);
+		scheduler->ops->soft_reset(scheduler);
+	}
+
+	/* clear INTR */
+	rga_write(rga_read(RGA2_INT, scheduler) |
+		  (m_RGA2_INT_ERROR_CLEAR_MASK |
+		   m_RGA2_INT_ALL_CMD_DONE_INT_CLEAR | m_RGA2_INT_NOW_CMD_DONE_INT_CLEAR |
+		   m_RGA2_INT_LINE_RD_CLEAR | m_RGA2_INT_LINE_WR_CLEAR),
+		  RGA2_INT, scheduler);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static int rga2_isr_thread(struct rga_job *job, struct rga_scheduler_t *scheduler)
+{
+	if (DEBUGGER_EN(INT_FLAG))
+		pr_info("isr thread, INTR[0x%x], HW_STATUS[0x%x], CMD_STATUS[0x%x]\n",
+			rga_read(RGA2_INT, scheduler),
+			rga_read(RGA2_STATUS2, scheduler),
+			rga_read(RGA2_STATUS1, scheduler));
+
+	if (test_bit(RGA_JOB_STATE_INTR_ERR, &job->state)) {
+		if (job->hw_status & m_RGA2_STATUS2_RPP_ERROR)
+			pr_err("RGA current status: rpp error!\n");
+		if (job->hw_status & m_RGA2_STATUS2_BUS_ERROR)
+			pr_err("RGA current status: bus error!\n");
+
+		if (job->intr_status & m_RGA2_INT_ERROR_INT_FLAG) {
+			pr_err("RGA bus error intr, please check your configuration and buffer.\n");
+			job->ret = -EFAULT;
+		} else if (job->intr_status & m_RGA2_INT_MMU_INT_FLAG) {
+			pr_err("mmu failed, please check size of the buffer or whether the buffer has been freed.\n");
+			job->ret = -EACCES;
+		}
+
+		if (job->ret == 0) {
+			pr_err("rga intr error[0x%x]!\n", job->intr_status);
+			job->ret = -EFAULT;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+const struct rga_backend_ops rga2_ops = {
+	.get_version = rga2_get_version,
+	.set_reg = rga2_set_reg,
+	.init_reg = rga2_init_reg,
+	.soft_reset = rga2_soft_reset,
+	.read_back_reg = rga2_read_back_reg,
+	.irq = rga2_irq,
+	.isr_thread = rga2_isr_thread,
+};
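The new rga2_irq()/rga2_isr_thread() pair splits interrupt handling in the usual top-half/bottom-half way: the hard IRQ handler latches RGA2_INT and the two status registers into the job, marks it finished or errored, clears the interrupt bits, and wakes the thread, which then turns the latched bits into an errno for the submitter. The stand-alone sketch below mirrors only that last translation step; the bit masks are placeholders for the m_RGA2_INT_* flags defined in rga2_reg_info.h.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bits standing in for m_RGA2_INT_ERROR_INT_FLAG / m_RGA2_INT_MMU_INT_FLAG. */
#define INT_ERROR_FLAG	(1u << 0)
#define INT_MMU_FLAG	(1u << 1)

/* Mirrors rga2_isr_thread(): map latched interrupt bits to the job's return code. */
static int decode_job_error(uint32_t intr_status)
{
	if (intr_status & INT_ERROR_FLAG)
		return -EFAULT;	/* bus error: bad configuration or buffer */
	if (intr_status & INT_MMU_FLAG)
		return -EACCES;	/* mmu fault: undersized or already-freed buffer */
	return -EFAULT;		/* error interrupt with no recognized cause */
}

int main(void)
{
	printf("bus error -> %d\n", decode_job_error(INT_ERROR_FLAG));
	printf("mmu fault -> %d\n", decode_job_error(INT_MMU_FLAG));
	return 0;
}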