.. | .. |
---|
18 | 18 | |
---|
19 | 19 | #include "hack/mpp_rkvdec2_link_hack_rk3568.c" |
---|
20 | 20 | |
---|
21 | | -#define WORK_TIMEOUT_MS (500) |
---|
22 | | -#define WAIT_TIMEOUT_MS (2000) |
---|
23 | 21 | #define RKVDEC2_LINK_HACK_TASK_FLAG (0xff) |
---|
24 | 22 | |
---|
25 | 23 | /* vdpu381 link hw info for rk3588 */ |
---|
.. | .. |
---|
519 | 517 | struct rkvdec_link_dev *link_dec = dec->link_dec; |
---|
520 | 518 | u32 irq_status = 0; |
---|
521 | 519 | |
---|
522 | | - if (!atomic_read(&link_dec->power_enabled)) { |
---|
523 | | - dev_info(link_dec->dev, "irq on power off\n"); |
---|
524 | | - return -1; |
---|
525 | | - } |
---|
526 | | - |
---|
527 | 520 | irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE); |
---|
528 | 521 | |
---|
529 | 522 | if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) { |
---|
.. | .. |
---|
980 | 973 | |
---|
981 | 974 | list_move_tail(&task->table->link, &link_dec->unused_list); |
---|
982 | 975 | list_del_init(&mpp_task->queue_link); |
---|
| 976 | + link_dec->task_running--; |
---|
983 | 977 | |
---|
984 | 978 | set_bit(TASK_STATE_HANDLE, &mpp_task->state); |
---|
985 | 979 | set_bit(TASK_STATE_PROC_DONE, &mpp_task->state); |
---|
.. | .. |
---|
988 | 982 | if (test_bit(TASK_STATE_ABORT, &mpp_task->state)) |
---|
989 | 983 | set_bit(TASK_STATE_ABORT_READY, &mpp_task->state); |
---|
990 | 984 | |
---|
991 | | - wake_up(&mpp_task->wait); |
---|
992 | | - kref_put(&mpp_task->ref, rkvdec2_link_free_task); |
---|
993 | | - link_dec->task_running--; |
---|
994 | | - |
---|
995 | 985 | mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n", |
---|
996 | 986 | mpp_task->session->index, mpp_task->task_index, |
---|
997 | 987 | irq_status, timeout_flag, abort_flag); |
---|
| 988 | + |
---|
998 | 989 | if (irq_status & RKVDEC_INT_ERROR_MASK) { |
---|
999 | 990 | dev_err(mpp->dev, |
---|
1000 | 991 | "session %d task %d irq_status %#08x timeout %u abort %u\n", |
---|
.. | .. |
---|
1003 | 994 | if (!reset_flag) |
---|
1004 | 995 | atomic_inc(&mpp->reset_request); |
---|
1005 | 996 | } |
---|
| 997 | + |
---|
| 998 | + wake_up(&mpp_task->wait); |
---|
| 999 | + kref_put(&mpp_task->ref, rkvdec2_link_free_task); |
---|
1006 | 1000 | } |
---|
1007 | 1001 | |
---|
1008 | 1002 | /* resend running task after reset */ |
---|
.. | .. |
---|
1192 | 1186 | return -EIO; |
---|
1193 | 1187 | } |
---|
1194 | 1188 | |
---|
1195 | | - ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task), |
---|
1196 | | - msecs_to_jiffies(WAIT_TIMEOUT_MS)); |
---|
1197 | | - if (ret) { |
---|
1198 | | - ret = rkvdec2_result(mpp, mpp_task, msgs); |
---|
| 1189 | + ret = wait_event_interruptible(mpp_task->wait, task_is_done(mpp_task)); |
---|
| 1190 | + if (ret == -ERESTARTSYS) |
---|
| 1191 | + mpp_err("wait task break by signal\n"); |
---|
1199 | 1192 | |
---|
1200 | | - mpp_session_pop_done(session, mpp_task); |
---|
1201 | | - } else { |
---|
1202 | | - mpp_err("task %d:%d state %lx timeout -> abort\n", |
---|
1203 | | - session->index, mpp_task->task_id, mpp_task->state); |
---|
| 1193 | + ret = rkvdec2_result(mpp, mpp_task, msgs); |
---|
1204 | 1194 | |
---|
1205 | | - atomic_inc(&mpp_task->abort_request); |
---|
1206 | | - set_bit(TASK_STATE_ABORT, &mpp_task->state); |
---|
1207 | | - } |
---|
| 1195 | + mpp_session_pop_done(session, mpp_task); |
---|
| 1196 | + mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n", |
---|
| 1197 | + session->device_type, session->index, atomic_read(&session->task_count), |
---|
| 1198 | + mpp_task->task_index, mpp_task->state); |
---|
1208 | 1199 | |
---|
1209 | 1200 | mpp_session_pop_pending(session, mpp_task); |
---|
1210 | 1201 | return ret; |
---|
.. | .. |
---|
1356 | 1347 | /* set the ccu-domain for current device */ |
---|
1357 | 1348 | ccu_info = queue->cores[0]->iommu_info; |
---|
1358 | 1349 | cur_info = dec->mpp.iommu_info; |
---|
1359 | | - cur_info->domain = ccu_info->domain; |
---|
| 1350 | + if (cur_info) |
---|
| 1351 | + cur_info->domain = ccu_info->domain; |
---|
1360 | 1352 | mpp_iommu_attach(cur_info); |
---|
1361 | 1353 | } |
---|
1362 | 1354 | |
---|
.. | .. |
---|
1563 | 1555 | if (mpp->disable) |
---|
1564 | 1556 | continue; |
---|
1565 | 1557 | |
---|
1566 | | - dev_info(mpp->dev, "resetting...\n"); |
---|
| 1558 | + dev_info(mpp->dev, "resetting for err %#x\n", mpp->irq_status); |
---|
1567 | 1559 | disable_hardirq(mpp->irq); |
---|
1568 | 1560 | |
---|
1569 | 1561 | /* foce idle, disconnect core and ccu */ |
---|
.. | .. |
---|
1626 | 1618 | return &task->mpp_task; |
---|
1627 | 1619 | } |
---|
1628 | 1620 | |
---|
1629 | | -static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp) |
---|
| 1621 | +static struct mpp_dev *rkvdec2_ccu_dev_match_by_iommu(struct mpp_taskqueue *queue, |
---|
| 1622 | + struct device *iommu_dev) |
---|
1630 | 1623 | { |
---|
1631 | | - u32 i = 0; |
---|
| 1624 | + struct mpp_dev *mpp = NULL; |
---|
| 1625 | + struct rkvdec2_dev *dec = NULL; |
---|
| 1626 | + u32 mmu[2] = {0, 0x40}; |
---|
| 1627 | + u32 i; |
---|
1632 | 1628 | |
---|
1633 | | - for (i = 0; i < mpp->queue->core_count; i++) { |
---|
1634 | | - struct mpp_dev *core = mpp->queue->cores[i]; |
---|
1635 | | - struct rkvdec2_dev *dec = to_rkvdec2_dev(core); |
---|
1636 | | - void __iomem *mmu_base = dec->mmu_base; |
---|
1637 | | - u32 mmu0_st; |
---|
1638 | | - u32 mmu1_st; |
---|
1639 | | - u32 mmu0_pta; |
---|
1640 | | - u32 mmu1_pta; |
---|
| 1629 | + for (i = 0; i < queue->core_count; i++) { |
---|
| 1630 | + struct mpp_dev *core = queue->cores[i]; |
---|
1641 | 1631 | |
---|
1642 | | - if (!mmu_base) |
---|
1643 | | - return; |
---|
1644 | | - |
---|
1645 | | - #define FAULT_STATUS 0x7e2 |
---|
1646 | | - rkvdec2_ccu_power_on(mpp->queue, dec->ccu); |
---|
1647 | | - |
---|
1648 | | - mmu0_st = readl(mmu_base + 0x4); |
---|
1649 | | - mmu1_st = readl(mmu_base + 0x44); |
---|
1650 | | - mmu0_pta = readl(mmu_base + 0xc); |
---|
1651 | | - mmu1_pta = readl(mmu_base + 0x4c); |
---|
1652 | | - |
---|
1653 | | - dec->mmu0_st = mmu0_st; |
---|
1654 | | - dec->mmu1_st = mmu1_st; |
---|
1655 | | - dec->mmu0_pta = mmu0_pta; |
---|
1656 | | - dec->mmu1_pta = mmu1_pta; |
---|
1657 | | - |
---|
1658 | | - pr_err("core %d mmu0 %08x %08x mm1 %08x %08x\n", |
---|
1659 | | - core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta); |
---|
1660 | | - if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) || |
---|
1661 | | - mmu0_pta || mmu1_pta) { |
---|
1662 | | - dec->fault_iova = readl(dec->link_dec->reg_base + 0x4); |
---|
1663 | | - dec->mmu_fault = 1; |
---|
1664 | | - pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova); |
---|
1665 | | - rockchip_iommu_mask_irq(core->dev); |
---|
1666 | | - } else { |
---|
1667 | | - dec->mmu_fault = 0; |
---|
1668 | | - dec->fault_iova = 0; |
---|
| 1632 | + if (&core->iommu_info->pdev->dev == iommu_dev) { |
---|
| 1633 | + mpp = core; |
---|
| 1634 | + dec = to_rkvdec2_dev(mpp); |
---|
1669 | 1635 | } |
---|
1670 | 1636 | } |
---|
| 1637 | + |
---|
| 1638 | + if (!dec || !dec->mmu_base) |
---|
| 1639 | + goto out; |
---|
| 1640 | + |
---|
| 1641 | + /* there are two iommus */ |
---|
| 1642 | + for (i = 0; i < 2; i++) { |
---|
| 1643 | + u32 status = readl(dec->mmu_base + mmu[i] + 0x4); |
---|
| 1644 | + u32 iova = readl(dec->mmu_base + mmu[i] + 0xc); |
---|
| 1645 | + u32 is_write = (status & BIT(5)) ? 1 : 0; |
---|
| 1646 | + |
---|
| 1647 | + if (status && iova) |
---|
| 1648 | + dev_err(iommu_dev, "core %d pagfault at iova %#08x type %s status %#x\n", |
---|
| 1649 | + mpp->core_id, iova, is_write ? "write" : "read", status); |
---|
| 1650 | + } |
---|
| 1651 | +out: |
---|
| 1652 | + return mpp; |
---|
1671 | 1653 | } |
---|
1672 | 1654 | |
---|
1673 | | -int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu, |
---|
1674 | | - struct device *iommu_dev, |
---|
1675 | | - unsigned long iova, int status, void *arg) |
---|
| 1655 | +int rkvdec2_soft_ccu_iommu_fault_handle(struct iommu_domain *iommu, |
---|
| 1656 | + struct device *iommu_dev, |
---|
| 1657 | + unsigned long iova, int status, void *arg) |
---|
1676 | 1658 | { |
---|
1677 | 1659 | struct mpp_dev *mpp = (struct mpp_dev *)arg; |
---|
| 1660 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
| 1661 | + struct mpp_task *mpp_task; |
---|
1678 | 1662 | |
---|
1679 | 1663 | mpp_debug_enter(); |
---|
1680 | 1664 | |
---|
1681 | | - rkvdec2_ccu_check_pagefault_info(mpp); |
---|
| 1665 | + mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev); |
---|
| 1666 | + if (!mpp) { |
---|
| 1667 | + dev_err(iommu_dev, "iommu fault, but no dev match\n"); |
---|
| 1668 | + return 0; |
---|
| 1669 | + } |
---|
| 1670 | + mpp_task = mpp->cur_task; |
---|
| 1671 | + if (mpp_task) |
---|
| 1672 | + mpp_task_dump_mem_region(mpp, mpp_task); |
---|
1682 | 1673 | |
---|
1683 | | - mpp->queue->iommu_fault = 1; |
---|
| 1674 | + /* |
---|
| 1675 | + * Mask the iommu irq so that the iommu does not repeatedly trigger pagefaults |
---|
| 1676 | + * until the faulting task is finished by the hw timeout. |
---|
| 1677 | + */ |
---|
| 1678 | + rockchip_iommu_mask_irq(mpp->dev); |
---|
| 1679 | + atomic_inc(&mpp->queue->reset_request); |
---|
| 1680 | + kthread_queue_work(&mpp->queue->worker, &mpp->work); |
---|
| 1681 | + |
---|
| 1682 | + mpp_debug_leave(); |
---|
| 1683 | + |
---|
| 1684 | + return 0; |
---|
| 1685 | +} |
---|
| 1686 | + |
---|
| 1687 | +int rkvdec2_hard_ccu_iommu_fault_handle(struct iommu_domain *iommu, |
---|
| 1688 | + struct device *iommu_dev, |
---|
| 1689 | + unsigned long iova, int status, void *arg) |
---|
| 1690 | +{ |
---|
| 1691 | + struct mpp_dev *mpp = (struct mpp_dev *)arg; |
---|
| 1692 | + struct mpp_taskqueue *queue = mpp->queue; |
---|
| 1693 | + struct mpp_task *mpp_task = NULL, *n; |
---|
| 1694 | + struct rkvdec2_dev *dec; |
---|
| 1695 | + u32 err_task_iova; |
---|
| 1696 | + |
---|
| 1697 | + mpp_debug_enter(); |
---|
| 1698 | + |
---|
| 1699 | + mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev); |
---|
| 1700 | + if (!mpp) { |
---|
| 1701 | + dev_err(iommu_dev, "iommu fault, but no dev match\n"); |
---|
| 1702 | + return 0; |
---|
| 1703 | + } |
---|
| 1704 | + |
---|
| 1705 | + dec = to_rkvdec2_dev(mpp); |
---|
| 1706 | + err_task_iova = readl(dec->link_dec->reg_base + 0x4); |
---|
| 1707 | + dev_err(mpp->dev, "core %d err task iova %#08x\n", mpp->core_id, err_task_iova); |
---|
| 1708 | + rockchip_iommu_mask_irq(mpp->dev); |
---|
| 1709 | + |
---|
| 1710 | + list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) { |
---|
| 1711 | + struct rkvdec2_task *task = to_rkvdec2_task(mpp_task); |
---|
| 1712 | + |
---|
| 1713 | + if ((u32)task->table->iova == err_task_iova) { |
---|
| 1714 | + mpp_task_dump_mem_region(mpp, mpp_task); |
---|
| 1715 | + set_bit(TASK_STATE_ABORT, &mpp_task->state); |
---|
| 1716 | + break; |
---|
| 1717 | + } |
---|
| 1718 | + } |
---|
1684 | 1719 | atomic_inc(&mpp->queue->reset_request); |
---|
1685 | 1720 | kthread_queue_work(&mpp->queue->worker, &mpp->work); |
---|
1686 | 1721 | |
---|
.. | .. |
---|
1839 | 1874 | return flag; |
---|
1840 | 1875 | } |
---|
1841 | 1876 | |
---|
1842 | | -static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp, |
---|
1843 | | - struct mpp_taskqueue *queue) |
---|
1844 | | -{ |
---|
1845 | | - mutex_lock(&queue->session_lock); |
---|
1846 | | - while (atomic_read(&queue->detach_count)) { |
---|
1847 | | - struct mpp_session *session = NULL; |
---|
1848 | | - |
---|
1849 | | - session = list_first_entry_or_null(&queue->session_detach, |
---|
1850 | | - struct mpp_session, |
---|
1851 | | - session_link); |
---|
1852 | | - if (session) { |
---|
1853 | | - list_del_init(&session->session_link); |
---|
1854 | | - atomic_dec(&queue->detach_count); |
---|
1855 | | - } |
---|
1856 | | - |
---|
1857 | | - mutex_unlock(&queue->session_lock); |
---|
1858 | | - |
---|
1859 | | - if (session) { |
---|
1860 | | - mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev), |
---|
1861 | | - atomic_read(&queue->detach_count)); |
---|
1862 | | - mpp_session_deinit(session); |
---|
1863 | | - } |
---|
1864 | | - |
---|
1865 | | - mutex_lock(&queue->session_lock); |
---|
1866 | | - } |
---|
1867 | | - mutex_unlock(&queue->session_lock); |
---|
1868 | | - |
---|
1869 | | - return 0; |
---|
1870 | | -} |
---|
1871 | | - |
---|
1872 | 1877 | void rkvdec2_soft_ccu_worker(struct kthread_work *work_s) |
---|
1873 | 1878 | { |
---|
1874 | 1879 | struct mpp_task *mpp_task; |
---|
.. | .. |
---|
1943 | 1948 | rkvdec2_ccu_power_off(queue, dec->ccu); |
---|
1944 | 1949 | |
---|
1945 | 1950 | /* 5. check session detach out of queue */ |
---|
1946 | | - rkvdec2_ccu_link_session_detach(mpp, queue); |
---|
| 1951 | + mpp_session_cleanup_detach(queue, work_s); |
---|
1947 | 1952 | |
---|
1948 | 1953 | mpp_debug_leave(); |
---|
1949 | 1954 | } |
---|
.. | .. |
---|
2079 | 2084 | ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE); |
---|
2080 | 2085 | ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE); |
---|
2081 | 2086 | mpp_debug(DEBUG_IRQ_CHECK, |
---|
2082 | | - "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n", |
---|
| 2087 | + "session %d task %d w:h[%d %d] err %d irq_status %#x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n", |
---|
2083 | 2088 | mpp_task->session->index, mpp_task->task_index, task->width, |
---|
2084 | 2089 | task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status, |
---|
2085 | 2090 | timeout_flag, abort_flag, (u32)task->table->iova, |
---|
.. | .. |
---|
2093 | 2098 | cancel_delayed_work(&mpp_task->timeout_work); |
---|
2094 | 2099 | mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle]; |
---|
2095 | 2100 | mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz); |
---|
2096 | | - task->irq_status = irq_status; |
---|
| 2101 | + task->irq_status = irq_status ? irq_status : RKVDEC_ERROR_STA; |
---|
2097 | 2102 | |
---|
2098 | 2103 | if (irq_status) |
---|
2099 | 2104 | rkvdec2_hard_ccu_finish(hw, task); |
---|
.. | .. |
---|
2119 | 2124 | /* Wake up the GET thread */ |
---|
2120 | 2125 | wake_up(&mpp_task->wait); |
---|
2121 | 2126 | if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) { |
---|
2122 | | - pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n", |
---|
| 2127 | + pr_err("session %d task %d irq_status %#x timeout=%u abort=%u\n", |
---|
2123 | 2128 | mpp_task->session->index, mpp_task->task_index, |
---|
2124 | 2129 | irq_status, timeout_flag, abort_flag); |
---|
2125 | 2130 | atomic_inc(&queue->reset_request); |
---|
.. | .. |
---|
2366 | 2371 | writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE); |
---|
2367 | 2372 | mpp_task_run_end(mpp_task, timing_en); |
---|
2368 | 2373 | |
---|
2369 | | - /* pending to running */ |
---|
2370 | 2374 | set_bit(TASK_STATE_RUNNING, &mpp_task->state); |
---|
2371 | | - mpp_taskqueue_pending_to_run(queue, mpp_task); |
---|
2372 | 2375 | mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n", |
---|
2373 | 2376 | mpp_task->session->index, mpp_task->task_index, |
---|
2374 | 2377 | (u32)task->table->iova, mpp_task->state, |
---|
.. | .. |
---|
2377 | 2380 | mpp_debug_leave(); |
---|
2378 | 2381 | |
---|
2379 | 2382 | return 0; |
---|
2380 | | -} |
---|
2381 | | - |
---|
2382 | | -static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec, |
---|
2383 | | - struct mpp_task *mpp_task) |
---|
2384 | | -{ |
---|
2385 | | - struct rkvdec2_task *task = to_rkvdec2_task(mpp_task); |
---|
2386 | | - |
---|
2387 | | - mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n", |
---|
2388 | | - mpp_task->session->index, mpp_task->task_index, |
---|
2389 | | - task->width, task->height, dec->mmu0_st, dec->mmu0_pta, |
---|
2390 | | - dec->mmu1_st, dec->mmu1_pta, dec->fault_iova); |
---|
2391 | | - |
---|
2392 | | - set_bit(TASK_STATE_HANDLE, &mpp_task->state); |
---|
2393 | | - task->irq_status |= BIT(4); |
---|
2394 | | - cancel_delayed_work(&mpp_task->timeout_work); |
---|
2395 | | - rkvdec2_hard_ccu_finish(dec->link_dec->info, task); |
---|
2396 | | - set_bit(TASK_STATE_FINISH, &mpp_task->state); |
---|
2397 | | - set_bit(TASK_STATE_DONE, &mpp_task->state); |
---|
2398 | | - list_move_tail(&task->table->link, &dec->ccu->unused_list); |
---|
2399 | | - list_del_init(&mpp_task->queue_link); |
---|
2400 | | - /* Wake up the GET thread */ |
---|
2401 | | - wake_up(&mpp_task->wait); |
---|
2402 | | - kref_put(&mpp_task->ref, mpp_free_task); |
---|
2403 | | - dec->mmu_fault = 0; |
---|
2404 | | - dec->fault_iova = 0; |
---|
2405 | | -} |
---|
2406 | | - |
---|
2407 | | -static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue) |
---|
2408 | | -{ |
---|
2409 | | - struct mpp_task *loop = NULL, *n; |
---|
2410 | | - |
---|
2411 | | - list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) { |
---|
2412 | | - struct rkvdec2_task *task = to_rkvdec2_task(loop); |
---|
2413 | | - u32 iova = (u32)task->table->iova; |
---|
2414 | | - u32 i; |
---|
2415 | | - |
---|
2416 | | - for (i = 0; i < queue->core_count; i++) { |
---|
2417 | | - struct mpp_dev *core = queue->cores[i]; |
---|
2418 | | - struct rkvdec2_dev *dec = to_rkvdec2_dev(core); |
---|
2419 | | - |
---|
2420 | | - if (!dec->mmu_fault || dec->fault_iova != iova) |
---|
2421 | | - continue; |
---|
2422 | | - rkvdec2_hard_ccu_handle_pagefault_task(dec, loop); |
---|
2423 | | - } |
---|
2424 | | - } |
---|
2425 | 2383 | } |
---|
2426 | 2384 | |
---|
2427 | 2385 | static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue) |
---|
.. | .. |
---|
2503 | 2461 | /* reset process */ |
---|
2504 | 2462 | rkvdec2_hard_ccu_reset(queue, dec->ccu); |
---|
2505 | 2463 | atomic_set(&queue->reset_request, 0); |
---|
2506 | | - /* if iommu pagefault, find the fault task and drop it */ |
---|
2507 | | - if (queue->iommu_fault) { |
---|
2508 | | - rkvdec2_hard_ccu_pagefault_proc(queue); |
---|
2509 | | - queue->iommu_fault = 0; |
---|
2510 | | - } |
---|
2511 | 2464 | |
---|
2512 | 2465 | /* relink running task iova in list, and resend them to hw */ |
---|
2513 | 2466 | if (!list_empty(&queue->running_list)) |
---|
.. | .. |
---|
2541 | 2494 | |
---|
2542 | 2495 | rkvdec2_ccu_power_on(queue, dec->ccu); |
---|
2543 | 2496 | rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp); |
---|
| 2497 | + mpp_taskqueue_pending_to_run(queue, mpp_task); |
---|
2544 | 2498 | } |
---|
2545 | 2499 | |
---|
2546 | 2500 | /* 4. poweroff when running and pending list are empty */ |
---|