From 0d8657dd3056063fb115946b10157477b5c70451 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 20 Nov 2023 09:09:45 +0000
Subject: [PATCH] video: rockchip: mpp: rework rkvdec2 link table registers, irq and error handling
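
The link table layout now carries per-variant register offsets for the
cycle counter, the output iova and the reference frame iova range
(tb_reg_cycle/tb_reg_out/tb_reg_ref_s/tb_reg_ref_e), and the vdpu382
table grows from 218 to 222 registers with the read-back parts moved
accordingly.

Error handling is extended: before a reset triggered by a faulting
task, rkvdec2_check_err_ref() clears every running and pending
reference frame address that matches the recorded error iova, an iommu
fault handler is installed that masks the iommu irq on a page fault,
and enable_irq()/disable_irq() on the core irq is paired with
mpp_iommu_enable_irq()/mpp_iommu_disable_irq().

Task handling switches to mpp_task_run_begin()/mpp_task_run_end() with
the service timing_en flag, records hardware cycles via the per-variant
cycle register and reports them through mpp_time_diff_with_hw_time()
using the real aclk rate, flushes the iommu tlb before kicking the link
config, resets through mpp->hw_ops->reset() instead of the sip smc
call, releases task references when a slot is read back, and uses
mpp_session_cleanup_detach() and the iommu write lock for session
teardown.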
---
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c | 191 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 130 insertions(+), 61 deletions(-)
diff --git a/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c b/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
index dfe74e0..d4a4fdb 100644
--- a/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
@@ -60,6 +60,10 @@
/* interrupt read back in table buffer */
u32 tb_reg_int;
bool hack_setup;
+ u32 tb_reg_cycle;
+ u32 tb_reg_out;
+ u32 tb_reg_ref_s;
+ u32 tb_reg_ref_e;
struct rkvdec_link_status reg_status;
};
@@ -113,6 +117,7 @@
},
.tb_reg_int = 164,
.hack_setup = 1,
+ .tb_reg_cycle = 179,
.reg_status = {
.dec_num_mask = 0x3fffffff,
.err_flag_base = 0x010,
@@ -122,7 +127,7 @@
/* vdpu382 link hw info */
struct rkvdec_link_info rkvdec_link_v2_hw_info = {
- .tb_reg_num = 218,
+ .tb_reg_num = 222,
.tb_reg_next = 0,
.tb_reg_r = 1,
.tb_reg_second_en = 8,
@@ -162,21 +167,27 @@
.part_r[0] = {
.tb_reg_off = 180,
.reg_start = 224,
- .reg_num = 10,
+ .reg_num = 12,
},
.part_r[1] = {
- .tb_reg_off = 190,
+ .tb_reg_off = 192,
.reg_start = 258,
- .reg_num = 28,
+ .reg_num = 30,
},
- .tb_reg_int = 180,
- .hack_setup = 0,
+ .tb_reg_int = 180,
+ .hack_setup = 0,
+ .tb_reg_cycle = 197,
+ .tb_reg_out = 86,
+ .tb_reg_ref_s = 104,
+ .tb_reg_ref_e = 119,
.reg_status = {
.dec_num_mask = 0x000fffff,
.err_flag_base = 0x024,
.err_flag_bit = BIT(8),
},
};
+
+static void rkvdec2_link_free_task(struct kref *ref);
static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
{
@@ -426,8 +437,6 @@
memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
}
- /* setup error mode flag */
- tb_reg[9] |= BIT(18) | BIT(9);
tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
/* memset read registers */
@@ -514,6 +523,7 @@
}
if (!resend) {
+ u32 timing_en = dev->mpp->srv->timing_en;
u32 i;
for (i = 0; i < task_to_run; i++) {
@@ -523,10 +533,8 @@
if (!task_ddr)
continue;
- set_bit(TASK_STATE_START, &task_ddr->state);
- schedule_delayed_work(&task_ddr->timeout_work,
- msecs_to_jiffies(200));
- mpp_time_record(task_ddr);
+ mpp_task_run_begin(task_ddr, timing_en, MPP_WORK_TIMEOUT_DELAY);
+ mpp_task_run_end(task_ddr, timing_en);
}
} else {
if (task_total)
@@ -538,6 +546,8 @@
/* start config before all registers are set */
wmb();
+
+ mpp_iommu_flush_tlb(dev->mpp->iommu_info);
/* configure done */
writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
@@ -591,6 +601,7 @@
struct rkvdec_link_info *info = link_dec->info;
u32 *table_base = (u32 *)link_dec->table->vaddr;
int i;
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
for (i = 0; i < count; i++) {
int idx = rkvdec_link_get_task_read(link_dec);
@@ -599,15 +610,15 @@
u32 *regs = NULL;
u32 irq_status = 0;
- if (!mpp_task) {
+ if (!mpp_task && info->hack_setup) {
regs = table_base + idx * link_dec->link_reg_count;
mpp_dbg_link_flow("slot %d read task stuff\n", idx);
link_dec->stuff_total++;
if (link_dec->statistic_count &&
- regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
+ regs[info->tb_reg_cycle]) {
link_dec->stuff_cycle_sum +=
- regs[RKVDEC_LINK_REG_CYCLE_CNT];
+ regs[info->tb_reg_cycle];
link_dec->stuff_cnt++;
if (link_dec->stuff_cnt >=
link_dec->statistic_count) {
@@ -648,22 +659,27 @@
continue;
}
- mpp_time_diff(mpp_task);
+ if (!mpp_task)
+ return 0;
+
task = to_rkvdec2_task(mpp_task);
regs = table_base + idx * link_dec->link_reg_count;
+ link_dec->error_iova = regs[info->tb_reg_out];
irq_status = regs[info->tb_reg_int];
+ mpp_task->hw_cycles = regs[info->tb_reg_cycle];
+ mpp_time_diff_with_hw_time(mpp_task, dec->aclk_info.real_rate_hz);
mpp_dbg_link_flow("slot %d rd task %d\n", idx,
mpp_task->task_index);
task->irq_status = irq_status ? irq_status : mpp->irq_status;
-
+ mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
cancel_delayed_work_sync(&mpp_task->timeout_work);
set_bit(TASK_STATE_HANDLE, &mpp_task->state);
if (link_dec->statistic_count &&
- regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
+ regs[info->tb_reg_cycle]) {
link_dec->task_cycle_sum +=
- regs[RKVDEC_LINK_REG_CYCLE_CNT];
+ regs[info->tb_reg_cycle];
link_dec->task_cnt++;
if (link_dec->task_cnt >= link_dec->statistic_count) {
dev_info(link_dec->dev, "hw cycle %u\n",
@@ -691,6 +707,8 @@
set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
/* Wake up the GET thread */
wake_up(&task->wait);
+ kref_put(&mpp_task->ref, rkvdec2_link_free_task);
+ link_dec->tasks_hw[idx] = NULL;
}
return 0;
@@ -729,7 +747,6 @@
static int rkvdec2_link_reset(struct mpp_dev *mpp)
{
- struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
dev_info(mpp->dev, "resetting...\n");
@@ -740,11 +757,8 @@
rockchip_save_qos(mpp->dev);
- mutex_lock(&dec->sip_reset_lock);
- rockchip_dmcfreq_lock();
- sip_smc_vpu_reset(0, 0, 0);
- rockchip_dmcfreq_unlock();
- mutex_unlock(&dec->sip_reset_lock);
+ if (mpp->hw_ops->reset)
+ mpp->hw_ops->reset(mpp);
rockchip_restore_qos(mpp->dev);
@@ -762,6 +776,49 @@
return 0;
}
+static void rkvdec2_check_err_ref(struct mpp_dev *mpp)
+{
+ struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
+ struct rkvdec_link_dev *link_dec = dec->link_dec;
+ struct rkvdec_link_info *link_info = link_dec->info;
+ struct mpp_taskqueue *queue = mpp->queue;
+ struct mpp_task *mpp_task = NULL, *n;
+ struct rkvdec2_task *task;
+ int i;
+
+ if (!link_dec->error_iova || !dec->err_ref_hack)
+ return;
+
+ dev_err(mpp->dev, "err task iova %#08x\n", link_dec->error_iova);
+ list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
+ if (mpp_task) {
+ u32 *regs = NULL;
+ u32 *table_base = (u32 *)link_dec->table->vaddr;
+
+ task = to_rkvdec2_task(mpp_task);
+ regs = table_base + task->slot_idx * link_dec->link_reg_count;
+
+ for (i = link_info->tb_reg_ref_s; i <= link_info->tb_reg_ref_e; i++) {
+ if (regs[i] == link_dec->error_iova)
+ regs[i] = 0;
+ }
+ }
+ }
+
+ mutex_lock(&queue->pending_lock);
+ list_for_each_entry_safe(mpp_task, n, &queue->pending_list, queue_link) {
+ task = to_rkvdec2_task(mpp_task);
+
+ /* ref frame reg index start - end */
+ for (i = 164; i <= 179; i++) {
+ if (task->reg[i] == link_dec->error_iova)
+ task->reg[i] = 0;
+ }
+ }
+ mutex_unlock(&queue->pending_lock);
+ link_dec->error_iova = 0;
+}
+
static int rkvdec2_link_irq(struct mpp_dev *mpp)
{
struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
@@ -774,9 +831,6 @@
}
irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
-
- mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", irq_status);
- mpp_dbg_link_flow("link irq %08x\n", irq_status);
if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
@@ -795,7 +849,8 @@
writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
}
-
+ mpp_debug((DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE), "irq_status: %08x : %08x\n",
+ irq_status, mpp->irq_status);
return 0;
}
@@ -815,6 +870,7 @@
mpp_debug_enter();
disable_irq(mpp->irq);
+ mpp_iommu_disable_irq(mpp->iommu_info);
rkvdec_link_status_update(link_dec);
link_dec->irq_status = irq_status;
prev_dec_num = link_dec->task_decoded;
@@ -822,8 +878,10 @@
if (!link_dec->enabled || task_timeout) {
u32 val;
- if (task_timeout)
+ if (task_timeout) {
rkvdec_link_reg_dump("timeout", link_dec);
+ link_dec->decoded += task_timeout;
+ }
val = mpp_read(mpp, 224 * 4);
if (link_info->hack_setup && !(val & BIT(2))) {
@@ -838,6 +896,7 @@
if (link_dec->enabled && !count && !need_reset) {
/* process extra isr when task is processed */
enable_irq(mpp->irq);
+ mpp_iommu_enable_irq(mpp->iommu_info);
goto done;
}
@@ -851,15 +910,18 @@
goto do_reset;
enable_irq(mpp->irq);
+ mpp_iommu_enable_irq(mpp->iommu_info);
goto done;
do_reset:
+ rkvdec2_check_err_ref(mpp);
/* NOTE: irq may run with reset */
atomic_inc(&mpp->reset_request);
rkvdec2_link_reset(mpp);
link_dec->task_decoded = 0;
link_dec->task_total = 0;
enable_irq(mpp->irq);
+ mpp_iommu_enable_irq(mpp->iommu_info);
if (link_dec->total == link_dec->decoded)
goto done;
@@ -881,6 +943,26 @@
mpp_debug_leave();
return IRQ_HANDLED;
+}
+
+static int rkvdec2_link_iommu_handle(struct iommu_domain *iommu,
+ struct device *iommu_dev,
+ unsigned long iova,
+ int status, void *arg)
+{
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+ dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+ iova, status, arg);
+
+ if (!mpp) {
+ dev_err(iommu_dev, "pagefault without device to handle\n");
+ return 0;
+ }
+
+ rk_iommu_mask_irq(mpp->dev);
+
+ return 0;
}
int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
@@ -1016,7 +1098,8 @@
if (link_dec->info->hack_setup)
rkvdec2_link_hack_data_setup(dec->fix);
-
+ iommu_set_fault_handler(mpp->iommu_info->domain,
+ rkvdec2_link_iommu_handle, mpp);
link_dec->mpp = mpp;
link_dec->dev = dev;
atomic_set(&link_dec->task_timeout, 0);
@@ -1060,8 +1143,10 @@
}
session = task->session;
- mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
- session->index, task->task_index, task->state);
+ mpp_debug_func(DEBUG_TASK_INFO,
+ "session %d:%d task %d state 0x%lx abort_request %d\n",
+ session->device_type, session->index, task->task_index,
+ task->state, atomic_read(&task->abort_request));
if (!session->mpp) {
mpp_err("session %d session->mpp is null.\n", session->index);
return;
@@ -1112,6 +1197,7 @@
if (!link_dec->irq_enabled) {
enable_irq(mpp->irq);
+ mpp_iommu_enable_irq(mpp->iommu_info);
link_dec->irq_enabled = 1;
}
@@ -1146,6 +1232,7 @@
if (atomic_xchg(&link_dec->power_enabled, 0)) {
disable_irq(mpp->irq);
+ mpp_iommu_disable_irq(mpp->iommu_info);
link_dec->irq_enabled = 0;
if (mpp->hw_ops->clk_off)
@@ -1287,6 +1374,7 @@
u32 task_to_run = 0;
int slot_idx = 0;
int ret;
+ struct mpp_session *session = task->session;
mpp_debug_enter();
@@ -1301,8 +1389,10 @@
}
rkvdec2_link_power_on(mpp);
- mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
- task->session->pid, dev_name(mpp->dev));
+ mpp_debug_func(DEBUG_TASK_INFO,
+ "%s session %d:%d task=%d state=0x%lx\n",
+ dev_name(mpp->dev), session->device_type,
+ session->index, task->task_index, task->state);
/* prepare the task for running */
if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
@@ -1372,7 +1462,6 @@
struct mpp_task *task)
{
set_bit(TASK_STATE_DONE, &task->state);
- kref_put(&task->ref, rkvdec2_link_free_task);
return 0;
}
@@ -1479,10 +1568,12 @@
goto done;
disable_irq(mpp->irq);
+ mpp_iommu_disable_irq(mpp->iommu_info);
rkvdec2_link_reset(mpp);
link_dec->task_decoded = 0;
link_dec->task_total = 0;
enable_irq(mpp->irq);
+ mpp_iommu_enable_irq(mpp->iommu_info);
}
/*
* process pending queue to find the task to accept.
@@ -1500,7 +1591,6 @@
mutex_lock(&queue->pending_lock);
list_del_init(&task->queue_link);
- kref_get(&task->ref);
set_bit(TASK_STATE_ABORT_READY, &task->state);
set_bit(TASK_STATE_PROC_DONE, &task->state);
@@ -1546,28 +1636,7 @@
rkvdec2_link_power_off(mpp);
}
- mutex_lock(&queue->session_lock);
- while (queue->detach_count) {
- struct mpp_session *session = NULL;
-
- session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
- session_link);
- if (session) {
- list_del_init(&session->session_link);
- queue->detach_count--;
- }
-
- mutex_unlock(&queue->session_lock);
-
- if (session) {
- mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
- queue->detach_count);
- mpp_session_deinit(session);
- }
-
- mutex_lock(&queue->session_lock);
- }
- mutex_unlock(&queue->session_lock);
+ mpp_session_cleanup_detach(queue, work_s);
}
void rkvdec2_link_session_deinit(struct mpp_session *session)
@@ -1580,9 +1649,9 @@
if (session->dma) {
mpp_dbg_session("session %d destroy dma\n", session->index);
- mpp_iommu_down_read(mpp->iommu_info);
+ mpp_iommu_down_write(mpp->iommu_info);
mpp_dma_session_destroy(session->dma);
- mpp_iommu_up_read(mpp->iommu_info);
+ mpp_iommu_up_write(mpp->iommu_info);
session->dma = NULL;
}
if (session->srv) {
--
Gitblit v1.6.2