From 2f7c68cb55ecb7331f2381deb497c27155f32faf Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 03 Jan 2024 09:43:39 +0000
Subject: [PATCH] update kernel to 5.10.198

---
 kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c | 258 +++++++++++++++++++++------------------------
 1 file changed, 106 insertions(+), 152 deletions(-)

diff --git a/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c b/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
index eb0e620..7c5a58d 100644
--- a/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
@@ -18,8 +18,6 @@
 
 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
 
-#define WORK_TIMEOUT_MS (500)
-#define WAIT_TIMEOUT_MS (2000)
 #define RKVDEC2_LINK_HACK_TASK_FLAG (0xff)
 
 /* vdpu381 link hw info for rk3588 */
@@ -519,11 +517,6 @@
        struct rkvdec_link_dev *link_dec = dec->link_dec;
        u32 irq_status = 0;
 
-       if (!atomic_read(&link_dec->power_enabled)) {
-               dev_info(link_dec->dev, "irq on power off\n");
-               return -1;
-       }
-
        irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
 
        if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
@@ -980,6 +973,7 @@
        list_move_tail(&task->table->link, &link_dec->unused_list);
        list_del_init(&mpp_task->queue_link);
+       link_dec->task_running--;
 
        set_bit(TASK_STATE_HANDLE, &mpp_task->state);
        set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
 
@@ -988,13 +982,10 @@
        if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
                set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
 
-       wake_up(&mpp_task->wait);
-       kref_put(&mpp_task->ref, rkvdec2_link_free_task);
-       link_dec->task_running--;
-
        mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
                     mpp_task->session->index, mpp_task->task_index,
                     irq_status, timeout_flag, abort_flag);
+
        if (irq_status & RKVDEC_INT_ERROR_MASK) {
                dev_err(mpp->dev,
                        "session %d task %d irq_status %#08x timeout %u abort %u\n",
@@ -1003,6 +994,9 @@
                if (!reset_flag)
                        atomic_inc(&mpp->reset_request);
        }
+
+       wake_up(&mpp_task->wait);
+       kref_put(&mpp_task->ref, rkvdec2_link_free_task);
 }
 
 /* resend running task after reset */
@@ -1192,19 +1186,16 @@
                return -EIO;
        }
 
-       ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
-                                msecs_to_jiffies(WAIT_TIMEOUT_MS));
-       if (ret) {
-               ret = rkvdec2_result(mpp, mpp_task, msgs);
+       ret = wait_event_interruptible(mpp_task->wait, task_is_done(mpp_task));
+       if (ret == -ERESTARTSYS)
+               mpp_err("wait for task interrupted by signal\n");
 
-               mpp_session_pop_done(session, mpp_task);
-       } else {
-               mpp_err("task %d:%d state %lx timeout -> abort\n",
-                       session->index, mpp_task->task_id, mpp_task->state);
+       ret = rkvdec2_result(mpp, mpp_task, msgs);
 
-               atomic_inc(&mpp_task->abort_request);
-               set_bit(TASK_STATE_ABORT, &mpp_task->state);
-       }
+       mpp_session_pop_done(session, mpp_task);
+       mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n",
+                      session->device_type, session->index, atomic_read(&session->task_count),
+                      mpp_task->task_index, mpp_task->state);
 
        mpp_session_pop_pending(session, mpp_task);
 
        return ret;
@@ -1356,7 +1347,8 @@
                /* set the ccu-domain for current device */
                ccu_info = queue->cores[0]->iommu_info;
                cur_info = dec->mpp.iommu_info;
-               cur_info->domain = ccu_info->domain;
+               if (cur_info)
+                       cur_info->domain = ccu_info->domain;
                mpp_iommu_attach(cur_info);
        }
 
@@ -1563,7 +1555,7 @@
                if (mpp->disable)
                        continue;
 
-               dev_info(mpp->dev, "resetting...\n");
+               dev_info(mpp->dev, "resetting for err %#x\n", mpp->irq_status);
 
                disable_hardirq(mpp->irq);
                /* force idle, disconnect core and ccu */
@@ -1626,61 +1618,104 @@
        return &task->mpp_task;
 }
 
-static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
+static struct mpp_dev *rkvdec2_ccu_dev_match_by_iommu(struct mpp_taskqueue *queue,
+                                                     struct device *iommu_dev)
 {
-       u32 i = 0;
+       struct mpp_dev *mpp = NULL;
+       struct rkvdec2_dev *dec = NULL;
+       u32 mmu[2] = {0, 0x40};
+       u32 i;
 
-       for (i = 0; i < mpp->queue->core_count; i++) {
-               struct mpp_dev *core = mpp->queue->cores[i];
-               struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
-               void __iomem *mmu_base = dec->mmu_base;
-               u32 mmu0_st;
-               u32 mmu1_st;
-               u32 mmu0_pta;
-               u32 mmu1_pta;
+       for (i = 0; i < queue->core_count; i++) {
+               struct mpp_dev *core = queue->cores[i];
 
-               if (!mmu_base)
-                       return;
-
-               #define FAULT_STATUS 0x7e2
-               rkvdec2_ccu_power_on(mpp->queue, dec->ccu);
-
-               mmu0_st = readl(mmu_base + 0x4);
-               mmu1_st = readl(mmu_base + 0x44);
-               mmu0_pta = readl(mmu_base + 0xc);
-               mmu1_pta = readl(mmu_base + 0x4c);
-
-               dec->mmu0_st = mmu0_st;
-               dec->mmu1_st = mmu1_st;
-               dec->mmu0_pta = mmu0_pta;
-               dec->mmu1_pta = mmu1_pta;
-
-               pr_err("core %d mmu0 %08x %08x mm1 %08x %08x\n",
-                      core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
-               if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
-                   mmu0_pta || mmu1_pta) {
-                       dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
-                       dec->mmu_fault = 1;
-                       pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
-                       rockchip_iommu_mask_irq(core->dev);
-               } else {
-                       dec->mmu_fault = 0;
-                       dec->fault_iova = 0;
+               if (&core->iommu_info->pdev->dev == iommu_dev) {
+                       mpp = core;
+                       dec = to_rkvdec2_dev(mpp);
                }
        }
+
+       if (!dec || !dec->mmu_base)
+               goto out;
+
+       /* there are two iommus */
+       for (i = 0; i < 2; i++) {
+               u32 status = readl(dec->mmu_base + mmu[i] + 0x4);
+               u32 iova = readl(dec->mmu_base + mmu[i] + 0xc);
+               u32 is_write = (status & BIT(5)) ? 1 : 0;
+
+               if (status && iova)
+                       dev_err(iommu_dev, "core %d pagefault at iova %#08x type %s status %#x\n",
+                               mpp->core_id, iova, is_write ? "write" : "read", status);
+       }
+out:
+       return mpp;
 }
 
-int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
-                                  struct device *iommu_dev,
-                                  unsigned long iova, int status, void *arg)
+int rkvdec2_soft_ccu_iommu_fault_handle(struct iommu_domain *iommu,
+                                       struct device *iommu_dev,
+                                       unsigned long iova, int status, void *arg)
 {
        struct mpp_dev *mpp = (struct mpp_dev *)arg;
+       struct mpp_taskqueue *queue = mpp->queue;
+       struct mpp_task *mpp_task;
 
        mpp_debug_enter();
 
-       rkvdec2_ccu_check_pagefault_info(mpp);
+       mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
+       if (!mpp) {
+               dev_err(iommu_dev, "iommu fault, but no dev match\n");
+               return 0;
+       }
+       mpp_task = mpp->cur_task;
+       if (mpp_task)
+               mpp_task_dump_mem_region(mpp, mpp_task);
 
-       mpp->queue->iommu_fault = 1;
+       /*
+        * Mask the iommu irq so that the iommu does not repeatedly trigger
+        * the pagefault until the faulting task is finished by hw timeout.
+        */
+       rockchip_iommu_mask_irq(mpp->dev);
+       atomic_inc(&mpp->queue->reset_request);
+       kthread_queue_work(&mpp->queue->worker, &mpp->work);
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+int rkvdec2_hard_ccu_iommu_fault_handle(struct iommu_domain *iommu,
+                                       struct device *iommu_dev,
+                                       unsigned long iova, int status, void *arg)
+{
+       struct mpp_dev *mpp = (struct mpp_dev *)arg;
+       struct mpp_taskqueue *queue = mpp->queue;
+       struct mpp_task *mpp_task = NULL, *n;
+       struct rkvdec2_dev *dec;
+       u32 err_task_iova;
+
+       mpp_debug_enter();
+
+       mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
+       if (!mpp) {
+               dev_err(iommu_dev, "iommu fault, but no dev match\n");
+               return 0;
+       }
+
+       dec = to_rkvdec2_dev(mpp);
+       err_task_iova = readl(dec->link_dec->reg_base + 0x4);
+       dev_err(mpp->dev, "core %d err task iova %#08x\n", mpp->core_id, err_task_iova);
+       rockchip_iommu_mask_irq(mpp->dev);
+
+       list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
+               struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
+
+               if ((u32)task->table->iova == err_task_iova) {
+                       mpp_task_dump_mem_region(mpp, mpp_task);
+                       set_bit(TASK_STATE_ABORT, &mpp_task->state);
+                       break;
+               }
+       }
 
        atomic_inc(&mpp->queue->reset_request);
        kthread_queue_work(&mpp->queue->worker, &mpp->work);
@@ -1839,36 +1874,6 @@
        return flag;
 }
 
-static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
-                                          struct mpp_taskqueue *queue)
-{
-       mutex_lock(&queue->session_lock);
-       while (atomic_read(&queue->detach_count)) {
-               struct mpp_session *session = NULL;
-
-               session = list_first_entry_or_null(&queue->session_detach,
-                                                  struct mpp_session,
-                                                  session_link);
-               if (session) {
-                       list_del_init(&session->session_link);
-                       atomic_dec(&queue->detach_count);
-               }
-
-               mutex_unlock(&queue->session_lock);
-
-               if (session) {
-                       mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
-                                       atomic_read(&queue->detach_count));
-                       mpp_session_deinit(session);
-               }
-
-               mutex_lock(&queue->session_lock);
-       }
-       mutex_unlock(&queue->session_lock);
-
-       return 0;
-}
-
 void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
 {
        struct mpp_task *mpp_task;
@@ -1943,7 +1948,7 @@
        rkvdec2_ccu_power_off(queue, dec->ccu);
 
        /* 5. check session detach out of queue */
-       rkvdec2_ccu_link_session_detach(mpp, queue);
+       mpp_session_cleanup_detach(queue, work_s);
 
        mpp_debug_leave();
 }
@@ -2079,7 +2084,7 @@
        ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
        ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
        mpp_debug(DEBUG_IRQ_CHECK,
-                 "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
+                 "session %d task %d w:h[%d %d] err %d irq_status %#x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
                  mpp_task->session->index, mpp_task->task_index, task->width,
                  task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK),
                  irq_status, timeout_flag, abort_flag, (u32)task->table->iova,
@@ -2093,7 +2098,7 @@
        cancel_delayed_work(&mpp_task->timeout_work);
        mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
        mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
-       task->irq_status = irq_status;
+       task->irq_status = irq_status ? irq_status : RKVDEC_ERROR_STA;
 
        if (irq_status)
                rkvdec2_hard_ccu_finish(hw, task);
@@ -2119,7 +2124,7 @@
        /* Wake up the GET thread */
        wake_up(&mpp_task->wait);
        if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
-               pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
+               pr_err("session %d task %d irq_status %#x timeout=%u abort=%u\n",
                       mpp_task->session->index, mpp_task->task_index,
                       irq_status, timeout_flag, abort_flag);
                atomic_inc(&queue->reset_request);
@@ -2366,9 +2371,7 @@
        writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
 
        mpp_task_run_end(mpp_task, timing_en);
-       /* pending to running */
        set_bit(TASK_STATE_RUNNING, &mpp_task->state);
-       mpp_taskqueue_pending_to_run(queue, mpp_task);
        mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
                    mpp_task->session->index, mpp_task->task_index,
                    (u32)task->table->iova, mpp_task->state,
@@ -2377,51 +2380,6 @@
        mpp_debug_leave();
 
        return 0;
-}
-
-static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
-                                                  struct mpp_task *mpp_task)
-{
-       struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
-
-       mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
-                   mpp_task->session->index, mpp_task->task_index,
-                   task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
-                   dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);
-
-       set_bit(TASK_STATE_HANDLE, &mpp_task->state);
-       task->irq_status |= BIT(4);
-       cancel_delayed_work(&mpp_task->timeout_work);
-       rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
-       set_bit(TASK_STATE_FINISH, &mpp_task->state);
-       set_bit(TASK_STATE_DONE, &mpp_task->state);
-       list_move_tail(&task->table->link, &dec->ccu->unused_list);
-       list_del_init(&mpp_task->queue_link);
-       /* Wake up the GET thread */
-       wake_up(&mpp_task->wait);
-       kref_put(&mpp_task->ref, mpp_free_task);
-       dec->mmu_fault = 0;
-       dec->fault_iova = 0;
-}
-
-static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
-{
-       struct mpp_task *loop = NULL, *n;
-
-       list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
-               struct rkvdec2_task *task = to_rkvdec2_task(loop);
-               u32 iova = (u32)task->table->iova;
-               u32 i;
-
-               for (i = 0; i < queue->core_count; i++) {
-                       struct mpp_dev *core = queue->cores[i];
-                       struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
-
-                       if (!dec->mmu_fault || dec->fault_iova != iova)
-                               continue;
-                       rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
-               }
-       }
 }
 
 static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
@@ -2503,11 +2461,6 @@
        /* reset process */
        rkvdec2_hard_ccu_reset(queue, dec->ccu);
        atomic_set(&queue->reset_request, 0);
-       /* if iommu pagefault, find the fault task and drop it */
-       if (queue->iommu_fault) {
-               rkvdec2_hard_ccu_pagefault_proc(queue);
-               queue->iommu_fault = 0;
-       }
 
        /* relink running task iova in list, and resend them to hw */
        if (!list_empty(&queue->running_list))
@@ -2541,6 +2494,7 @@
        rkvdec2_ccu_power_on(queue, dec->ccu);
 
        rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
+       mpp_taskqueue_pending_to_run(queue, mpp_task);
 }
 
 /* 4. poweroff when running and pending list are empty */
-- 
Gitblit v1.6.2