From 7d07b3ae8ddad407913c5301877e694430a3263f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 23 Nov 2023 08:24:31 +0000
Subject: [PATCH] rga2: use register macros, rework VSP scale mode selection and interrupt handling
---
kernel/drivers/video/rockchip/rga3/rga2_reg_info.c | 241 ++++++++++++++++++++++++++++++++++++------------
1 file changed, 180 insertions(+), 61 deletions(-)
diff --git a/kernel/drivers/video/rockchip/rga3/rga2_reg_info.c b/kernel/drivers/video/rockchip/rga3/rga2_reg_info.c
index 923c1f3..a624778 100644
--- a/kernel/drivers/video/rockchip/rga3/rga2_reg_info.c
+++ b/kernel/drivers/video/rockchip/rga3/rga2_reg_info.c
@@ -7,7 +7,6 @@
#define pr_fmt(fmt) "rga2_reg: " fmt
-#include "rga_job.h"
#include "rga2_reg_info.h"
#include "rga_dma_buf.h"
#include "rga_iommu.h"
@@ -165,12 +164,8 @@
bRGA_MODE_CTL = (u32 *) (base + RGA2_MODE_CTRL_OFFSET);
- if (msg->render_mode == 4)
- render_mode = 3;
-
- /* In slave mode, the current frame completion interrupt must be enabled. */
- if (!RGA2_USE_MASTER_MODE)
- msg->CMD_fin_int_enable = 1;
+ if (msg->render_mode == UPDATE_PALETTE_TABLE_MODE)
+ render_mode = 0x3;
reg =
((reg & (~m_RGA2_MODE_CTRL_SW_RENDER_MODE)) |
@@ -231,6 +226,7 @@
u32 sw, sh;
u32 dw, dh;
u8 rotate_mode;
+ u8 vsp_scale_mode = 0;
u8 scale_w_flag, scale_h_flag;
bRGA_SRC_INFO = (u32 *) (base + RGA2_SRC_INFO_OFFSET);
@@ -292,6 +288,18 @@
/* uvvds need to force tile mode. */
if (msg->uvvds_mode && scale_w_flag == 0)
scale_w_flag = 3;
+ }
+
+ /* VSP scale mode select, HSD > VSD > VSP > HSP */
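+ /*
+ * HSD/VSD presumably stand for horizontal/vertical scale-down and HSP/VSP
+ * for horizontal/vertical scale-up; 0x0 appears to pick the bicubic filter
+ * (hence the RGA2_VSP_BICUBIC_LIMIT width checks), 0x1 picks bilinear.
+ */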
+ if (scale_h_flag == 0x2) {
+ /* After HSD, VSP needs to check dst_width */
+ if ((scale_w_flag == 0x1) && (dw < RGA2_VSP_BICUBIC_LIMIT))
+ vsp_scale_mode = 0x0;
+ else if (sw < RGA2_VSP_BICUBIC_LIMIT)
+ vsp_scale_mode = 0x0;
+ else
+ /* default select bilinear */
+ vsp_scale_mode = 0x1;
}
switch (msg->src.format) {
@@ -564,8 +572,7 @@
((msg->alpha_rop_flag >> 4) & 0x1)));
reg =
((reg & (~m_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL)) |
- (s_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL((
- msg->scale_bicu_mode >> 4))));
+ (s_RGA2_SRC_INFO_SW_SW_VSP_MODE_SEL((vsp_scale_mode))));
reg =
((reg & (~m_RGA2_SRC_INFO_SW_SW_YUV10_E)) |
(s_RGA2_SRC_INFO_SW_SW_YUV10_E((yuv10))));
@@ -1715,7 +1722,7 @@
*bRGA_MMU_ELS_BASE = (u32) (msg->mmu_info.els_base_addr) >> 4;
}
-int rga2_gen_reg_info(u8 *base, struct rga2_req *msg)
+static int rga2_gen_reg_info(u8 *base, struct rga2_req *msg)
{
u8 dst_nn_quantize_en = 0;
@@ -1847,9 +1854,6 @@
req->rotate_mode |= (3 << 4);
break;
}
-
- if ((req->dst.act_w > 2048) && (req->src.act_h < req->dst.act_h))
- req->scale_bicu_mode |= (1 << 4);
req->LUT_addr = req_rga->LUT_addr;
req->rop_mask_addr = req_rga->rop_mask_addr;
@@ -2060,16 +2064,18 @@
}
}
-void rga2_soft_reset(struct rga_scheduler_t *scheduler)
+static void rga2_soft_reset(struct rga_scheduler_t *scheduler)
{
u32 i;
u32 reg;
- u32 iommu_dte_addr;
+ u32 iommu_dte_addr = 0;
if (scheduler->data->mmu == RGA_IOMMU)
- iommu_dte_addr = rga_read(0xf00, scheduler);
+ iommu_dte_addr = rga_read(RGA_IOMMU_DTE_ADDR, scheduler);
- rga_write((1 << 3) | (1 << 4) | (1 << 6), RGA2_SYS_CTRL, scheduler);
+ rga_write(m_RGA2_SYS_CTRL_ACLK_SRESET_P | m_RGA2_SYS_CTRL_CCLK_SRESET_P |
+ m_RGA2_SYS_CTRL_RST_PROTECT_P,
+ RGA2_SYS_CTRL, scheduler);
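+ /*
+ * Poll RGA2_SYS_CTRL below until the reset completes or RGA_RESET_TIMEOUT
+ * iterations elapse.
+ */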
for (i = 0; i < RGA_RESET_TIMEOUT; i++) {
/* RGA_SYS_CTRL */
@@ -2082,13 +2088,16 @@
}
if (scheduler->data->mmu == RGA_IOMMU) {
- rga_write(iommu_dte_addr, 0xf00, scheduler);
+ rga_write(iommu_dte_addr, RGA_IOMMU_DTE_ADDR, scheduler);
/* enable iommu */
- rga_write(0, 0xf08, scheduler);
+ rga_write(RGA_IOMMU_CMD_ENABLE_PAGING, RGA_IOMMU_COMMAND, scheduler);
}
if (i == RGA_RESET_TIMEOUT)
- pr_err("soft reset timeout.\n");
+ pr_err("RGA2 soft reset timeout.\n");
+ else
+ pr_info("RGA2 soft reset complete.\n");
}
static int rga2_check_param(const struct rga_hw_data *data, const struct rga2_req *req)
@@ -2215,7 +2224,7 @@
pr_info("yuv2rgb mode is %x\n", req->yuv2rgb_mode);
}
-int rga2_init_reg(struct rga_job *job)
+static int rga2_init_reg(struct rga_job *job)
{
struct rga2_req req;
int ret = 0;
@@ -2266,6 +2275,10 @@
return -EFAULT;
}
}
+
+ /* In slave mode, the current frame completion interrupt must be enabled. */
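+ /* IOMMU-backed devices are driven in slave mode (see rga2_set_reg below). */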
+ if (scheduler->data->mmu == RGA_IOMMU)
+ req.CMD_fin_int_enable = 1;
if (rga2_gen_reg_info((uint8_t *)job->cmd_reg, &req) == -1) {
pr_err("gen reg info error\n");
@@ -2338,7 +2351,7 @@
cmd_reg[2 + i * 4], cmd_reg[3 + i * 4]);
}
-void rga2_dump_read_back_reg(struct rga_scheduler_t *scheduler)
+static void rga2_dump_read_back_reg(struct rga_scheduler_t *scheduler)
{
rga2_dump_read_back_sys_reg(scheduler);
rga2_dump_read_back_csc_reg(scheduler);
@@ -2351,28 +2364,28 @@
if (job->pre_intr_info.read_intr_en) {
reg = s_RGA2_READ_LINE_SW_INTR_LINE_RD_TH(job->pre_intr_info.read_threshold);
- rga_write(reg, RGA2_READ_LINE_CNT_OFFSET, scheduler);
+ rga_write(reg, RGA2_READ_LINE_CNT, scheduler);
}
if (job->pre_intr_info.write_intr_en) {
reg = s_RGA2_WRITE_LINE_SW_INTR_LINE_WR_START(job->pre_intr_info.write_start);
reg = ((reg & (~m_RGA2_WRITE_LINE_SW_INTR_LINE_WR_STEP)) |
(s_RGA2_WRITE_LINE_SW_INTR_LINE_WR_STEP(job->pre_intr_info.write_step)));
- rga_write(reg, RGA2_WRITE_LINE_CNT_OFFSET, scheduler);
+ rga_write(reg, RGA2_WRITE_LINE_CNT, scheduler);
}
- reg = rga_read(RGA2_SYS_CTRL_OFFSET, scheduler);
- reg = ((reg & (~m_RGA2_SYS_HOLD_MODE_EN)) |
- (s_RGA2_SYS_HOLD_MODE_EN(job->pre_intr_info.read_hold_en)));
- rga_write(reg, RGA2_SYS_CTRL_OFFSET, scheduler);
+ reg = rga_read(RGA2_SYS_CTRL, scheduler);
+ reg = ((reg & (~m_RGA2_SYS_CTRL_HOLD_MODE_EN)) |
+ (s_RGA2_SYS_CTRL_HOLD_MODE_EN(job->pre_intr_info.read_hold_en)));
+ rga_write(reg, RGA2_SYS_CTRL, scheduler);
- reg = rga_read(RGA2_INT_OFFSET, scheduler);
+ reg = rga_read(RGA2_INT, scheduler);
reg = (reg | s_RGA2_INT_LINE_RD_CLEAR(0x1) | s_RGA2_INT_LINE_WR_CLEAR(0x1));
reg = ((reg & (~m_RGA2_INT_LINE_RD_EN)) |
(s_RGA2_INT_LINE_RD_EN(job->pre_intr_info.read_intr_en)));
reg = ((reg & (~m_RGA2_INT_LINE_WR_EN)) |
(s_RGA2_INT_LINE_WR_EN(job->pre_intr_info.write_intr_en)));
- rga_write(reg, RGA2_INT_OFFSET, scheduler);
+ rga_write(reg, RGA2_INT, scheduler);
}
static void rga2_set_reg_full_csc(struct rga_job *job, struct rga_scheduler_t *scheduler)
@@ -2388,29 +2401,40 @@
/* full csc coefficient */
/* Y coefficient */
rga_write(job->full_csc.coe_y.r_v | (clip_y_max << 16) | (clip_y_min << 24),
- RGA2_DST_CSC_00_OFFSET, scheduler);
+ RGA2_DST_CSC_00, scheduler);
rga_write(job->full_csc.coe_y.g_y | (clip_uv_max << 16) | (clip_uv_min << 24),
- RGA2_DST_CSC_01_OFFSET, scheduler);
- rga_write(job->full_csc.coe_y.b_u, RGA2_DST_CSC_02_OFFSET, scheduler);
- rga_write(job->full_csc.coe_y.off, RGA2_DST_CSC_OFF0_OFFSET, scheduler);
+ RGA2_DST_CSC_01, scheduler);
+ rga_write(job->full_csc.coe_y.b_u, RGA2_DST_CSC_02, scheduler);
+ rga_write(job->full_csc.coe_y.off, RGA2_DST_CSC_OFF0, scheduler);
/* U coefficient */
- rga_write(job->full_csc.coe_u.r_v, RGA2_DST_CSC_10_OFFSET, scheduler);
- rga_write(job->full_csc.coe_u.g_y, RGA2_DST_CSC_11_OFFSET, scheduler);
- rga_write(job->full_csc.coe_u.b_u, RGA2_DST_CSC_12_OFFSET, scheduler);
- rga_write(job->full_csc.coe_u.off, RGA2_DST_CSC_OFF1_OFFSET, scheduler);
+ rga_write(job->full_csc.coe_u.r_v, RGA2_DST_CSC_10, scheduler);
+ rga_write(job->full_csc.coe_u.g_y, RGA2_DST_CSC_11, scheduler);
+ rga_write(job->full_csc.coe_u.b_u, RGA2_DST_CSC_12, scheduler);
+ rga_write(job->full_csc.coe_u.off, RGA2_DST_CSC_OFF1, scheduler);
/* V coefficient */
- rga_write(job->full_csc.coe_v.r_v, RGA2_DST_CSC_20_OFFSET, scheduler);
- rga_write(job->full_csc.coe_v.g_y, RGA2_DST_CSC_21_OFFSET, scheduler);
- rga_write(job->full_csc.coe_v.b_u, RGA2_DST_CSC_22_OFFSET, scheduler);
- rga_write(job->full_csc.coe_v.off, RGA2_DST_CSC_OFF2_OFFSET, scheduler);
+ rga_write(job->full_csc.coe_v.r_v, RGA2_DST_CSC_20, scheduler);
+ rga_write(job->full_csc.coe_v.g_y, RGA2_DST_CSC_21, scheduler);
+ rga_write(job->full_csc.coe_v.b_u, RGA2_DST_CSC_22, scheduler);
+ rga_write(job->full_csc.coe_v.off, RGA2_DST_CSC_OFF2, scheduler);
}
-int rga2_set_reg(struct rga_job *job, struct rga_scheduler_t *scheduler)
+static int rga2_set_reg(struct rga_job *job, struct rga_scheduler_t *scheduler)
{
- ktime_t now = ktime_get();
int i;
+ bool master_mode_en;
+ uint32_t sys_ctrl;
+ ktime_t now = ktime_get();
+
+ /*
+ * Currently no IOVA is allocated for storing the command buffer on IOMMU
+ * devices, so the IOMMU device has to run in slave mode.
+ */
+ if (scheduler->data->mmu != RGA_IOMMU)
+ master_mode_en = true;
+ else
+ master_mode_en = false;
if (job->pre_intr_info.enable)
rga2_set_pre_intr_reg(job, scheduler);
@@ -2419,7 +2443,7 @@
rga2_set_reg_full_csc(job, scheduler);
if (DEBUGGER_EN(REG)) {
- int32_t *p;
+ uint32_t *p;
rga2_dump_read_back_sys_reg(scheduler);
rga2_dump_read_back_csc_reg(scheduler);
@@ -2434,42 +2458,44 @@
/* All CMD finish int */
rga_write(rga_read(RGA2_INT, scheduler) |
- (0x1 << 10) | (0x1 << 9) | (0x1 << 8), RGA2_INT, scheduler);
+ m_RGA2_INT_ERROR_ENABLE_MASK | m_RGA2_INT_ALL_CMD_DONE_INT_EN,
+ RGA2_INT, scheduler);
/* sys_reg init */
- rga_write((0x1 << 2) | (0x1 << 5) | (0x1 << 6) | (0x1 << 11) | (0x1 << 12),
- RGA2_SYS_CTRL, scheduler);
+ sys_ctrl = m_RGA2_SYS_CTRL_AUTO_CKG | m_RGA2_SYS_CTRL_AUTO_RST |
+ m_RGA2_SYS_CTRL_RST_PROTECT_P | m_RGA2_SYS_CTRL_DST_WR_OPT_DIS |
+ m_RGA2_SYS_CTRL_SRC0YUV420SP_RD_OPT_DIS;
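+ /* The command-mode bit (and, in slave mode, the start bit) is OR-ed into sys_ctrl below. */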
- if (RGA2_USE_MASTER_MODE) {
+ if (master_mode_en) {
/* master mode */
- rga_write(rga_read(RGA2_SYS_CTRL, scheduler) | (0x1 << 1),
- RGA2_SYS_CTRL, scheduler);
+ sys_ctrl |= s_RGA2_SYS_CTRL_CMD_MODE(1);
/* cmd buffer flush cache to ddr */
rga_dma_sync_flush_range(&job->cmd_reg[0], &job->cmd_reg[32], scheduler);
/* set cmd_addr */
rga_write(virt_to_phys(job->cmd_reg), RGA2_CMD_BASE, scheduler);
-
- rga_write(1, RGA2_CMD_CTRL, scheduler);
+ rga_write(sys_ctrl, RGA2_SYS_CTRL, scheduler);
+ rga_write(m_RGA2_CMD_CTRL_CMD_LINE_ST_P, RGA2_CMD_CTRL, scheduler);
} else {
/* slave mode */
- rga_write(rga_read(RGA2_SYS_CTRL, scheduler) | (0x0 << 1),
- RGA2_SYS_CTRL, scheduler);
+ sys_ctrl |= s_RGA2_SYS_CTRL_CMD_MODE(0) | m_RGA2_SYS_CTRL_CMD_OP_ST_P;
/* set cmd_reg */
for (i = 0; i <= 32; i++)
rga_write(job->cmd_reg[i], 0x100 + i * 4, scheduler);
- rga_write(rga_read(RGA2_SYS_CTRL, scheduler) | 0x1, RGA2_SYS_CTRL, scheduler);
+ rga_write(sys_ctrl, RGA2_SYS_CTRL, scheduler);
}
- if (DEBUGGER_EN(TIME)) {
- pr_info("sys_ctrl = %x, int = %x, set cmd use time = %lld\n",
+ if (DEBUGGER_EN(REG))
+ pr_info("sys_ctrl = %x, int = %x\n",
rga_read(RGA2_SYS_CTRL, scheduler),
- rga_read(RGA2_INT, scheduler),
+ rga_read(RGA2_INT, scheduler));
+
+ if (DEBUGGER_EN(TIME))
+ pr_info("set cmd use time = %lld\n",
ktime_us_delta(now, job->timestamp));
- }
job->hw_running_time = now;
job->hw_recoder_time = now;
@@ -2480,7 +2506,7 @@
return 0;
}
-int rga2_get_version(struct rga_scheduler_t *scheduler)
+static int rga2_get_version(struct rga_scheduler_t *scheduler)
{
u32 major_version, minor_version, svn_version;
u32 reg_version;
@@ -2511,3 +2537,96 @@
return 0;
}
+
+static int rga2_read_back_reg(struct rga_job *job, struct rga_scheduler_t *scheduler)
+{
+ if (job->rga_command_base.osd_info.enable) {
+ job->rga_command_base.osd_info.cur_flags0 = rga_read(RGA2_OSD_CUR_FLAGS0,
+ scheduler);
+ job->rga_command_base.osd_info.cur_flags1 = rga_read(RGA2_OSD_CUR_FLAGS1,
+ scheduler);
+ }
+
+ return 0;
+}
+
+static int rga2_irq(struct rga_scheduler_t *scheduler)
+{
+ struct rga_job *job = scheduler->running_job;
+
+ /* The hardware interrupt top-half doesn't need to lock the scheduler. */
+ if (job == NULL)
+ return IRQ_HANDLED;
+
+ if (test_bit(RGA_JOB_STATE_INTR_ERR, &job->state))
+ return IRQ_WAKE_THREAD;
+
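+ /* Snapshot the interrupt and status registers for the threaded ISR and debug output. */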
+ job->intr_status = rga_read(RGA2_INT, scheduler);
+ job->hw_status = rga_read(RGA2_STATUS2, scheduler);
+ job->cmd_status = rga_read(RGA2_STATUS1, scheduler);
+
+ if (DEBUGGER_EN(INT_FLAG))
+ pr_info("irq handler, INTR[0x%x], HW_STATUS[0x%x], CMD_STATUS[0x%x]\n",
+ job->intr_status, job->hw_status, job->cmd_status);
+
+ if (job->intr_status &
+ (m_RGA2_INT_CUR_CMD_DONE_INT_FLAG | m_RGA2_INT_ALL_CMD_DONE_INT_FLAG)) {
+ set_bit(RGA_JOB_STATE_FINISH, &job->state);
+ } else if (job->intr_status & m_RGA2_INT_ERROR_FLAG_MASK) {
+ set_bit(RGA_JOB_STATE_INTR_ERR, &job->state);
+
+ pr_err("irq handler err! INTR[0x%x], HW_STATUS[0x%x], CMD_STATUS[0x%x]\n",
+ job->intr_status, job->hw_status, job->cmd_status);
+ scheduler->ops->soft_reset(scheduler);
+ }
+
+ /* clear INTR */
+ rga_write(rga_read(RGA2_INT, scheduler) |
+ (m_RGA2_INT_ERROR_CLEAR_MASK |
+ m_RGA2_INT_ALL_CMD_DONE_INT_CLEAR | m_RGA2_INT_NOW_CMD_DONE_INT_CLEAR |
+ m_RGA2_INT_LINE_RD_CLEAR | m_RGA2_INT_LINE_WR_CLEAR),
+ RGA2_INT, scheduler);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int rga2_isr_thread(struct rga_job *job, struct rga_scheduler_t *scheduler)
+{
+ if (DEBUGGER_EN(INT_FLAG))
+ pr_info("isr thread, INTR[0x%x], HW_STATUS[0x%x], CMD_STATUS[0x%x]\n",
+ rga_read(RGA2_INT, scheduler),
+ rga_read(RGA2_STATUS2, scheduler),
+ rga_read(RGA2_STATUS1, scheduler));
+
+ if (test_bit(RGA_JOB_STATE_INTR_ERR, &job->state)) {
+ if (job->hw_status & m_RGA2_STATUS2_RPP_ERROR)
+ pr_err("RGA current status: rpp error!\n");
+ if (job->hw_status & m_RGA2_STATUS2_BUS_ERROR)
+ pr_err("RGA current status: bus error!\n");
+
+ if (job->intr_status & m_RGA2_INT_ERROR_INT_FLAG) {
+ pr_err("RGA bus error intr, please check your configuration and buffer.\n");
+ job->ret = -EFAULT;
+ } else if (job->intr_status & m_RGA2_INT_MMU_INT_FLAG) {
+ pr_err("mmu failed, please check the size of the buffer or whether the buffer has been freed.\n");
+ job->ret = -EACCES;
+ }
+
+ if (job->ret == 0) {
+ pr_err("rga intr error[0x%x]!\n", job->intr_status);
+ job->ret = -EFAULT;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
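+/* RGA2 backend hooks; the core scheduler dispatches into the hardware through these ops. */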
+const struct rga_backend_ops rga2_ops = {
+ .get_version = rga2_get_version,
+ .set_reg = rga2_set_reg,
+ .init_reg = rga2_init_reg,
+ .soft_reset = rga2_soft_reset,
+ .read_back_reg = rga2_read_back_reg,
+ .irq = rga2_irq,
+ .isr_thread = rga2_isr_thread,
+};
--
Gitblit v1.6.2