.. | ..
1555 | 1555 | if (mpp->disable)
1556 | 1556 | continue;
1557 | 1557 |
1558 | | - dev_info(mpp->dev, "resetting...\n");
| 1558 | + dev_info(mpp->dev, "resetting for err %#x\n", mpp->irq_status);
1559 | 1559 | disable_hardirq(mpp->irq);
1560 | 1560 |
1561 | 1561 | /* force idle, disconnect core and ccu */
.. | ..
1618 | 1618 | return &task->mpp_task;
1619 | 1619 | }
1620 | 1620 |
1621 | | -static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
| 1621 | +static struct mpp_dev *rkvdec2_ccu_dev_match_by_iommu(struct mpp_taskqueue *queue,
| 1622 | + struct device *iommu_dev)
1622 | 1623 | {
1623 | | - u32 i = 0;
| 1624 | + struct mpp_dev *mpp = NULL;
| 1625 | + struct rkvdec2_dev *dec = NULL;
| 1626 | + u32 mmu[2] = {0, 0x40};
| 1627 | + u32 i;
1624 | 1628 |
1625 | | - for (i = 0; i < mpp->queue->core_count; i++) {
1626 | | - struct mpp_dev *core = mpp->queue->cores[i];
1627 | | - struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
1628 | | - void __iomem *mmu_base = dec->mmu_base;
1629 | | - u32 mmu0_st;
1630 | | - u32 mmu1_st;
1631 | | - u32 mmu0_pta;
1632 | | - u32 mmu1_pta;
| 1629 | + for (i = 0; i < queue->core_count; i++) {
| 1630 | + struct mpp_dev *core = queue->cores[i];
1633 | 1631 |
1634 | | - if (!mmu_base)
1635 | | - return;
1636 | | -
1637 | | - #define FAULT_STATUS 0x7e2
1638 | | - rkvdec2_ccu_power_on(mpp->queue, dec->ccu);
1639 | | -
1640 | | - mmu0_st = readl(mmu_base + 0x4);
1641 | | - mmu1_st = readl(mmu_base + 0x44);
1642 | | - mmu0_pta = readl(mmu_base + 0xc);
1643 | | - mmu1_pta = readl(mmu_base + 0x4c);
1644 | | -
1645 | | - dec->mmu0_st = mmu0_st;
1646 | | - dec->mmu1_st = mmu1_st;
1647 | | - dec->mmu0_pta = mmu0_pta;
1648 | | - dec->mmu1_pta = mmu1_pta;
1649 | | -
1650 | | - pr_err("core %d mmu0 %08x %08x mm1 %08x %08x\n",
1651 | | - core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
1652 | | - if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
1653 | | - mmu0_pta || mmu1_pta) {
1654 | | - dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
1655 | | - dec->mmu_fault = 1;
1656 | | - pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
1657 | | - rockchip_iommu_mask_irq(core->dev);
1658 | | - } else {
1659 | | - dec->mmu_fault = 0;
1660 | | - dec->fault_iova = 0;
| 1632 | + if (&core->iommu_info->pdev->dev == iommu_dev) {
| 1633 | + mpp = core;
| 1634 | + dec = to_rkvdec2_dev(mpp);
1661 | 1635 | }
1662 | 1636 | }
| 1637 | +
| 1638 | + if (!dec || !dec->mmu_base)
| 1639 | + goto out;
| 1640 | +
| 1641 | + /* there are two iommus */
| 1642 | + for (i = 0; i < 2; i++) {
| 1643 | + u32 status = readl(dec->mmu_base + mmu[i] + 0x4);
| 1644 | + u32 iova = readl(dec->mmu_base + mmu[i] + 0xc);
| 1645 | + u32 is_write = (status & BIT(5)) ? 1 : 0;
| 1646 | +
| 1647 | + if (status && iova)
| 1648 | + dev_err(iommu_dev, "core %d pagefault at iova %#08x type %s status %#x\n",
| 1649 | + mpp->core_id, iova, is_write ? "write" : "read", status);
| 1650 | + }
| 1651 | +out:
| 1652 | + return mpp;
1663 | 1653 | }
1664 | 1654 |
1665 | | -int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
1666 | | - struct device *iommu_dev,
1667 | | - unsigned long iova, int status, void *arg)
| 1655 | +int rkvdec2_soft_ccu_iommu_fault_handle(struct iommu_domain *iommu,
| 1656 | + struct device *iommu_dev,
| 1657 | + unsigned long iova, int status, void *arg)
1668 | 1658 | {
1669 | 1659 | struct mpp_dev *mpp = (struct mpp_dev *)arg;
| 1660 | + struct mpp_taskqueue *queue = mpp->queue;
| 1661 | + struct mpp_task *mpp_task;
1670 | 1662 |
1671 | 1663 | mpp_debug_enter();
1672 | 1664 |
1673 | | - rkvdec2_ccu_check_pagefault_info(mpp);
| 1665 | + mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
| 1666 | + if (!mpp) {
| 1667 | + dev_err(iommu_dev, "iommu fault, but no dev match\n");
| 1668 | + return 0;
| 1669 | + }
| 1670 | + mpp_task = mpp->cur_task;
| 1671 | + if (mpp_task)
| 1672 | + mpp_task_dump_mem_region(mpp, mpp_task);
1674 | 1673 |
1675 | | - mpp->queue->iommu_fault = 1;
| 1674 | + /*
| 1675 | + * Mask the iommu irq so the iommu does not repeatedly trigger pagefaults
| 1676 | + * until the faulting task is finished by hw timeout.
| 1677 | + */
| 1678 | + rockchip_iommu_mask_irq(mpp->dev);
| 1679 | + atomic_inc(&mpp->queue->reset_request);
| 1680 | + kthread_queue_work(&mpp->queue->worker, &mpp->work);
| 1681 | +
| 1682 | + mpp_debug_leave();
| 1683 | +
| 1684 | + return 0;
| 1685 | +}
| 1686 | +
| 1687 | +int rkvdec2_hard_ccu_iommu_fault_handle(struct iommu_domain *iommu,
| 1688 | + struct device *iommu_dev,
| 1689 | + unsigned long iova, int status, void *arg)
| 1690 | +{
| 1691 | + struct mpp_dev *mpp = (struct mpp_dev *)arg;
| 1692 | + struct mpp_taskqueue *queue = mpp->queue;
| 1693 | + struct mpp_task *mpp_task = NULL, *n;
| 1694 | + struct rkvdec2_dev *dec;
| 1695 | + u32 err_task_iova;
| 1696 | +
| 1697 | + mpp_debug_enter();
| 1698 | +
| 1699 | + mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
| 1700 | + if (!mpp) {
| 1701 | + dev_err(iommu_dev, "iommu fault, but no dev match\n");
| 1702 | + return 0;
| 1703 | + }
| 1704 | +
| 1705 | + dec = to_rkvdec2_dev(mpp);
| 1706 | + err_task_iova = readl(dec->link_dec->reg_base + 0x4);
| 1707 | + dev_err(mpp->dev, "core %d err task iova %#08x\n", mpp->core_id, err_task_iova);
| 1708 | + rockchip_iommu_mask_irq(mpp->dev);
| 1709 | +
| 1710 | + list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
| 1711 | + struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
| 1712 | +
| 1713 | + if ((u32)task->table->iova == err_task_iova) {
| 1714 | + mpp_task_dump_mem_region(mpp, mpp_task);
| 1715 | + set_bit(TASK_STATE_ABORT, &mpp_task->state);
| 1716 | + break;
| 1717 | + }
| 1718 | + }
1676 | 1719 | atomic_inc(&mpp->queue->reset_request);
1677 | 1720 | kthread_queue_work(&mpp->queue->worker, &mpp->work);
1678 | 1721 |
.. | ..
2041 | 2084 | ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
2042 | 2085 | ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
2043 | 2086 | mpp_debug(DEBUG_IRQ_CHECK,
2044 | | - "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
| 2087 | + "session %d task %d w:h[%d %d] err %d irq_status %#x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
2045 | 2088 | mpp_task->session->index, mpp_task->task_index, task->width,
2046 | 2089 | task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
2047 | 2090 | timeout_flag, abort_flag, (u32)task->table->iova,
.. | ..
2055 | 2098 | cancel_delayed_work(&mpp_task->timeout_work);
2056 | 2099 | mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
2057 | 2100 | mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
2058 | | - task->irq_status = irq_status;
| 2101 | + task->irq_status = irq_status ? irq_status : RKVDEC_ERROR_STA;
2059 | 2102 |
2060 | 2103 | if (irq_status)
2061 | 2104 | rkvdec2_hard_ccu_finish(hw, task);
.. | ..
2081 | 2124 | /* Wake up the GET thread */
2082 | 2125 | wake_up(&mpp_task->wait);
2083 | 2126 | if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
2084 | | - pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
| 2127 | + pr_err("session %d task %d irq_status %#x timeout=%u abort=%u\n",
2085 | 2128 | mpp_task->session->index, mpp_task->task_index,
2086 | 2129 | irq_status, timeout_flag, abort_flag);
2087 | 2130 | atomic_inc(&queue->reset_request);
.. | ..
2339 | 2382 | return 0;
2340 | 2383 | }
2341 | 2384 |
2342 | | -static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
2343 | | - struct mpp_task *mpp_task)
2344 | | -{
2345 | | - struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
2346 | | -
2347 | | - mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
2348 | | - mpp_task->session->index, mpp_task->task_index,
2349 | | - task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
2350 | | - dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);
2351 | | -
2352 | | - set_bit(TASK_STATE_HANDLE, &mpp_task->state);
2353 | | - task->irq_status |= BIT(4);
2354 | | - cancel_delayed_work(&mpp_task->timeout_work);
2355 | | - rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
2356 | | - set_bit(TASK_STATE_FINISH, &mpp_task->state);
2357 | | - set_bit(TASK_STATE_DONE, &mpp_task->state);
2358 | | - list_move_tail(&task->table->link, &dec->ccu->unused_list);
2359 | | - list_del_init(&mpp_task->queue_link);
2360 | | - /* Wake up the GET thread */
2361 | | - wake_up(&mpp_task->wait);
2362 | | - kref_put(&mpp_task->ref, mpp_free_task);
2363 | | - dec->mmu_fault = 0;
2364 | | - dec->fault_iova = 0;
2365 | | -}
2366 | | -
2367 | | -static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
2368 | | -{
2369 | | - struct mpp_task *loop = NULL, *n;
2370 | | -
2371 | | - list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
2372 | | - struct rkvdec2_task *task = to_rkvdec2_task(loop);
2373 | | - u32 iova = (u32)task->table->iova;
2374 | | - u32 i;
2375 | | -
2376 | | - for (i = 0; i < queue->core_count; i++) {
2377 | | - struct mpp_dev *core = queue->cores[i];
2378 | | - struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
2379 | | -
2380 | | - if (!dec->mmu_fault || dec->fault_iova != iova)
2381 | | - continue;
2382 | | - rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
2383 | | - }
2384 | | - }
2385 | | -}
2386 | | -
2387 | 2385 | static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
2388 | 2386 | {
2389 | 2387 | struct rkvdec2_task *task_pre = NULL;
.. | ..
2463 | 2461 | /* reset process */
2464 | 2462 | rkvdec2_hard_ccu_reset(queue, dec->ccu);
2465 | 2463 | atomic_set(&queue->reset_request, 0);
2466 | | - /* if iommu pagefault, find the fault task and drop it */
2467 | | - if (queue->iommu_fault) {
2468 | | - rkvdec2_hard_ccu_pagefault_proc(queue);
2469 | | - queue->iommu_fault = 0;
2470 | | - }
2471 | 2464 |
2472 | 2465 | /* relink running task iova in list, and resend them to hw */
2473 | 2466 | if (!list_empty(&queue->running_list))
---|