2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
@@ -18,8 +18,6 @@
 
 #include "hack/mpp_rkvdec2_link_hack_rk3568.c"
 
-#define WORK_TIMEOUT_MS (500)
-#define WAIT_TIMEOUT_MS (2000)
 #define RKVDEC2_LINK_HACK_TASK_FLAG (0xff)
 
 /* vdpu381 link hw info for rk3588 */
@@ -519,11 +517,6 @@
 	struct rkvdec_link_dev *link_dec = dec->link_dec;
 	u32 irq_status = 0;
 
-	if (!atomic_read(&link_dec->power_enabled)) {
-		dev_info(link_dec->dev, "irq on power off\n");
-		return -1;
-	}
-
 	irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
 
 	if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
@@ -980,6 +973,7 @@
 
 	list_move_tail(&task->table->link, &link_dec->unused_list);
 	list_del_init(&mpp_task->queue_link);
+	link_dec->task_running--;
 
 	set_bit(TASK_STATE_HANDLE, &mpp_task->state);
 	set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
@@ -988,13 +982,10 @@
 	if (test_bit(TASK_STATE_ABORT, &mpp_task->state))
 		set_bit(TASK_STATE_ABORT_READY, &mpp_task->state);
 
-	wake_up(&mpp_task->wait);
-	kref_put(&mpp_task->ref, rkvdec2_link_free_task);
-	link_dec->task_running--;
-
 	mpp_dbg_link("session %d task %d irq_status %#08x timeout %d abort %d\n",
 		     mpp_task->session->index, mpp_task->task_index,
 		     irq_status, timeout_flag, abort_flag);
+
 	if (irq_status & RKVDEC_INT_ERROR_MASK) {
 		dev_err(mpp->dev,
 			"session %d task %d irq_status %#08x timeout %u abort %u\n",
@@ -1003,6 +994,9 @@
 		if (!reset_flag)
 			atomic_inc(&mpp->reset_request);
 	}
+
+	wake_up(&mpp_task->wait);
+	kref_put(&mpp_task->ref, rkvdec2_link_free_task);
 }
 
 /* resend running task after reset */
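Note: the two hunks above are a lifetime fix as much as a cleanup. wake_up() lets the blocked ioctl thread resume, and the paired kref_put() may drop the last reference, so every access to mpp_task (state bits, debug logging, reset accounting) has to happen before the wakeup. A minimal sketch of the resulting ordering, using the names from this diff (simplified, not the full handler):

	/* 1. all bookkeeping while our reference is still valid */
	link_dec->task_running--;
	set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
	if (irq_status & RKVDEC_INT_ERROR_MASK)
		atomic_inc(&mpp->reset_request);

	/* 2. publish completion last; the waiter may free the task now */
	wake_up(&mpp_task->wait);
	kref_put(&mpp_task->ref, rkvdec2_link_free_task);
	/* no further dereference of mpp_task past this point */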
@@ -1192,19 +1186,16 @@
 		return -EIO;
 	}
 
-	ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
-				 msecs_to_jiffies(WAIT_TIMEOUT_MS));
-	if (ret) {
-		ret = rkvdec2_result(mpp, mpp_task, msgs);
+	ret = wait_event_interruptible(mpp_task->wait, task_is_done(mpp_task));
+	if (ret == -ERESTARTSYS)
+		mpp_err("wait task break by signal\n");
 
-		mpp_session_pop_done(session, mpp_task);
-	} else {
-		mpp_err("task %d:%d state %lx timeout -> abort\n",
-			session->index, mpp_task->task_id, mpp_task->state);
+	ret = rkvdec2_result(mpp, mpp_task, msgs);
 
-		atomic_inc(&mpp_task->abort_request);
-		set_bit(TASK_STATE_ABORT, &mpp_task->state);
-	}
+	mpp_session_pop_done(session, mpp_task);
+	mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n",
+		       session->device_type, session->index, atomic_read(&session->task_count),
+		       mpp_task->task_index, mpp_task->state);
 
 	mpp_session_pop_pending(session, mpp_task);
 	return ret;
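The return conventions of the two wait primitives explain the restructured branches: wait_event_timeout() returns 0 on timeout and a positive remaining-jiffies count on success, so the old code used ret as a success flag and aborted the task on timeout; wait_event_interruptible() returns 0 once the condition is true and -ERESTARTSYS if a signal arrives first, leaving timeout recovery to the hardware watchdog path. A condensed sketch of the two call shapes:

	/* old shape: bounded wait; 0 means the 2 s timeout fired */
	ret = wait_event_timeout(mpp_task->wait, task_is_done(mpp_task),
				 msecs_to_jiffies(2000));
	if (!ret)
		set_bit(TASK_STATE_ABORT, &mpp_task->state);

	/* new shape: unbounded wait; only a signal can break it early */
	ret = wait_event_interruptible(mpp_task->wait, task_is_done(mpp_task));
	if (ret == -ERESTARTSYS)
		mpp_err("wait task break by signal\n");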
@@ -1356,7 +1347,8 @@
 	/* set the ccu-domain for current device */
 	ccu_info = queue->cores[0]->iommu_info;
 	cur_info = dec->mpp.iommu_info;
-	cur_info->domain = ccu_info->domain;
+	if (cur_info)
+		cur_info->domain = ccu_info->domain;
 	mpp_iommu_attach(cur_info);
 }
 
@@ -1563,7 +1555,7 @@
 		if (mpp->disable)
 			continue;
 
-		dev_info(mpp->dev, "resetting...\n");
+		dev_info(mpp->dev, "resetting for err %#x\n", mpp->irq_status);
 		disable_hardirq(mpp->irq);
 
 		/* foce idle, disconnect core and ccu */
@@ -1626,61 +1618,104 @@
 	return &task->mpp_task;
 }
 
-static void rkvdec2_ccu_check_pagefault_info(struct mpp_dev *mpp)
+static struct mpp_dev *rkvdec2_ccu_dev_match_by_iommu(struct mpp_taskqueue *queue,
+						      struct device *iommu_dev)
 {
-	u32 i = 0;
+	struct mpp_dev *mpp = NULL;
+	struct rkvdec2_dev *dec = NULL;
+	u32 mmu[2] = {0, 0x40};
+	u32 i;
 
-	for (i = 0; i < mpp->queue->core_count; i++) {
-		struct mpp_dev *core = mpp->queue->cores[i];
-		struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
-		void __iomem *mmu_base = dec->mmu_base;
-		u32 mmu0_st;
-		u32 mmu1_st;
-		u32 mmu0_pta;
-		u32 mmu1_pta;
+	for (i = 0; i < queue->core_count; i++) {
+		struct mpp_dev *core = queue->cores[i];
 
-		if (!mmu_base)
-			return;
-
-		#define FAULT_STATUS 0x7e2
-		rkvdec2_ccu_power_on(mpp->queue, dec->ccu);
-
-		mmu0_st = readl(mmu_base + 0x4);
-		mmu1_st = readl(mmu_base + 0x44);
-		mmu0_pta = readl(mmu_base + 0xc);
-		mmu1_pta = readl(mmu_base + 0x4c);
-
-		dec->mmu0_st = mmu0_st;
-		dec->mmu1_st = mmu1_st;
-		dec->mmu0_pta = mmu0_pta;
-		dec->mmu1_pta = mmu1_pta;
-
-		pr_err("core %d mmu0 %08x %08x mm1 %08x %08x\n",
-		       core->core_id, mmu0_st, mmu0_pta, mmu1_st, mmu1_pta);
-		if ((mmu0_st & FAULT_STATUS) || (mmu1_st & FAULT_STATUS) ||
-		    mmu0_pta || mmu1_pta) {
-			dec->fault_iova = readl(dec->link_dec->reg_base + 0x4);
-			dec->mmu_fault = 1;
-			pr_err("core %d fault iova %08x\n", core->core_id, dec->fault_iova);
-			rockchip_iommu_mask_irq(core->dev);
-		} else {
-			dec->mmu_fault = 0;
-			dec->fault_iova = 0;
+		if (&core->iommu_info->pdev->dev == iommu_dev) {
+			mpp = core;
+			dec = to_rkvdec2_dev(mpp);
 		}
 	}
+
+	if (!dec || !dec->mmu_base)
+		goto out;
+
+	/* there are two iommus */
+	for (i = 0; i < 2; i++) {
+		u32 status = readl(dec->mmu_base + mmu[i] + 0x4);
+		u32 iova = readl(dec->mmu_base + mmu[i] + 0xc);
+		u32 is_write = (status & BIT(5)) ? 1 : 0;
+
+		if (status && iova)
+			dev_err(iommu_dev, "core %d pagfault at iova %#08x type %s status %#x\n",
+				mpp->core_id, iova, is_write ? "write" : "read", status);
+	}
+out:
+	return mpp;
 }
 
-int rkvdec2_ccu_iommu_fault_handle(struct iommu_domain *iommu,
-				   struct device *iommu_dev,
-				   unsigned long iova, int status, void *arg)
+int rkvdec2_soft_ccu_iommu_fault_handle(struct iommu_domain *iommu,
					struct device *iommu_dev,
+					unsigned long iova, int status, void *arg)
 {
 	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+	struct mpp_taskqueue *queue = mpp->queue;
+	struct mpp_task *mpp_task;
 
 	mpp_debug_enter();
 
-	rkvdec2_ccu_check_pagefault_info(mpp);
+	mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
+	if (!mpp) {
+		dev_err(iommu_dev, "iommu fault, but no dev match\n");
+		return 0;
+	}
+	mpp_task = mpp->cur_task;
+	if (mpp_task)
+		mpp_task_dump_mem_region(mpp, mpp_task);
 
-	mpp->queue->iommu_fault = 1;
+	/*
+	 * Mask iommu irq, in order for iommu not repeatedly trigger pagefault.
+	 * Until the pagefault task finish by hw timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
+	atomic_inc(&mpp->queue->reset_request);
+	kthread_queue_work(&mpp->queue->worker, &mpp->work);
+
+	mpp_debug_leave();
+
+	return 0;
+}
+
+int rkvdec2_hard_ccu_iommu_fault_handle(struct iommu_domain *iommu,
+					struct device *iommu_dev,
+					unsigned long iova, int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+	struct mpp_taskqueue *queue = mpp->queue;
+	struct mpp_task *mpp_task = NULL, *n;
+	struct rkvdec2_dev *dec;
+	u32 err_task_iova;
+
+	mpp_debug_enter();
+
+	mpp = rkvdec2_ccu_dev_match_by_iommu(queue, iommu_dev);
+	if (!mpp) {
+		dev_err(iommu_dev, "iommu fault, but no dev match\n");
+		return 0;
+	}
+
+	dec = to_rkvdec2_dev(mpp);
+	err_task_iova = readl(dec->link_dec->reg_base + 0x4);
+	dev_err(mpp->dev, "core %d err task iova %#08x\n", mpp->core_id, err_task_iova);
+	rockchip_iommu_mask_irq(mpp->dev);
+
+	list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
+		struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
+
+		if ((u32)task->table->iova == err_task_iova) {
+			mpp_task_dump_mem_region(mpp, mpp_task);
+			set_bit(TASK_STATE_ABORT, &mpp_task->state);
+			break;
+		}
+	}
 	atomic_inc(&mpp->queue->reset_request);
 	kthread_queue_work(&mpp->queue->worker, &mpp->work);
 
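Both new handlers use the kernel's iommu_fault_handler_t prototype, so they plug into the report_iommu_fault() path. This diff does not show where they get installed; a minimal sketch of the usual registration, with the hook name and probe-time placement being assumptions (the mpp core may wire this up elsewhere):

	#include <linux/iommu.h>

	/* hypothetical probe-time hook, not part of this diff */
	static int rkvdec2_install_fault_handler(struct mpp_dev *mpp)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(mpp->dev);

		if (!domain)
			return -ENODEV;

		/* the last argument comes back to the handler as 'arg' */
		iommu_set_fault_handler(domain, rkvdec2_hard_ccu_iommu_fault_handle,
					mpp);
		return 0;
	}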
@@ -1839,36 +1874,6 @@
 	return flag;
 }
 
-static int rkvdec2_ccu_link_session_detach(struct mpp_dev *mpp,
-					   struct mpp_taskqueue *queue)
-{
-	mutex_lock(&queue->session_lock);
-	while (atomic_read(&queue->detach_count)) {
-		struct mpp_session *session = NULL;
-
-		session = list_first_entry_or_null(&queue->session_detach,
-						   struct mpp_session,
-						   session_link);
-		if (session) {
-			list_del_init(&session->session_link);
-			atomic_dec(&queue->detach_count);
-		}
-
-		mutex_unlock(&queue->session_lock);
-
-		if (session) {
-			mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
-					atomic_read(&queue->detach_count));
-			mpp_session_deinit(session);
-		}
-
-		mutex_lock(&queue->session_lock);
-	}
-	mutex_unlock(&queue->session_lock);
-
-	return 0;
-}
-
 void rkvdec2_soft_ccu_worker(struct kthread_work *work_s)
 {
 	struct mpp_task *mpp_task;
@@ -1943,7 +1948,7 @@
 	rkvdec2_ccu_power_off(queue, dec->ccu);
 
 	/* 5. check session detach out of queue */
-	rkvdec2_ccu_link_session_detach(mpp, queue);
+	mpp_session_cleanup_detach(queue, work_s);
 
 	mpp_debug_leave();
 }
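The deleted helper open-coded a drain loop: pop one detached session while holding queue->session_lock, drop the lock, call mpp_session_deinit() unlocked so teardown cannot deadlock against the queue lock, and repeat until detach_count reaches zero. The replacement mpp_session_cleanup_detach() presumably centralizes the same idiom in the shared mpp core; a condensed sketch of the pop-under-lock, free-outside-lock pattern, with names taken from the removed code:

	for (;;) {
		struct mpp_session *session;

		mutex_lock(&queue->session_lock);
		session = list_first_entry_or_null(&queue->session_detach,
						   struct mpp_session,
						   session_link);
		if (session) {
			list_del_init(&session->session_link);
			atomic_dec(&queue->detach_count);
		}
		mutex_unlock(&queue->session_lock);

		if (!session)
			break;
		mpp_session_deinit(session);	/* may sleep; no lock held */
	}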
@@ -2079,7 +2084,7 @@
 	ccu_decoded_num = readl(ccu->reg_base + RKVDEC_CCU_DEC_NUM_BASE);
 	ccu_total_dec_num = readl(ccu->reg_base + RKVDEC_CCU_TOTAL_NUM_BASE);
 	mpp_debug(DEBUG_IRQ_CHECK,
-		  "session %d task %d w:h[%d %d] err %d irq_status %08x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
+		  "session %d task %d w:h[%d %d] err %d irq_status %#x timeout=%u abort=%u iova %08x next %08x ccu[%d %d]\n",
 		  mpp_task->session->index, mpp_task->task_index, task->width,
 		  task->height, !!(irq_status & RKVDEC_INT_ERROR_MASK), irq_status,
 		  timeout_flag, abort_flag, (u32)task->table->iova,
@@ -2093,7 +2098,7 @@
 	cancel_delayed_work(&mpp_task->timeout_work);
 	mpp_task->hw_cycles = tb_reg[hw->tb_reg_cycle];
 	mpp_time_diff_with_hw_time(mpp_task, dec->cycle_clk->real_rate_hz);
-	task->irq_status = irq_status;
+	task->irq_status = irq_status ? irq_status : RKVDEC_ERROR_STA;
 
 	if (irq_status)
 		rkvdec2_hard_ccu_finish(hw, task);
@@ -2119,7 +2124,7 @@
 	/* Wake up the GET thread */
 	wake_up(&mpp_task->wait);
 	if ((irq_status & RKVDEC_INT_ERROR_MASK) || timeout_flag) {
-		pr_err("session %d task %d irq_status %08x timeout=%u abort=%u\n",
+		pr_err("session %d task %d irq_status %#x timeout=%u abort=%u\n",
 		       mpp_task->session->index, mpp_task->task_index,
 		       irq_status, timeout_flag, abort_flag);
 		atomic_inc(&queue->reset_request);
@@ -2366,9 +2371,7 @@
 	writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
 	mpp_task_run_end(mpp_task, timing_en);
 
-	/* pending to running */
 	set_bit(TASK_STATE_RUNNING, &mpp_task->state);
-	mpp_taskqueue_pending_to_run(queue, mpp_task);
 	mpp_dbg_ccu("session %d task %d iova=%08x task->state=%lx link_mode=%08x\n",
 		    mpp_task->session->index, mpp_task->task_index,
 		    (u32)task->table->iova, mpp_task->state,
@@ -2377,51 +2380,6 @@
 	mpp_debug_leave();
 
 	return 0;
-}
-
-static void rkvdec2_hard_ccu_handle_pagefault_task(struct rkvdec2_dev *dec,
-						   struct mpp_task *mpp_task)
-{
-	struct rkvdec2_task *task = to_rkvdec2_task(mpp_task);
-
-	mpp_dbg_ccu("session %d task %d w:h[%d %d] pagefault mmu0[%08x %08x] mmu1[%08x %08x] fault_iova %08x\n",
-		    mpp_task->session->index, mpp_task->task_index,
-		    task->width, task->height, dec->mmu0_st, dec->mmu0_pta,
-		    dec->mmu1_st, dec->mmu1_pta, dec->fault_iova);
-
-	set_bit(TASK_STATE_HANDLE, &mpp_task->state);
-	task->irq_status |= BIT(4);
-	cancel_delayed_work(&mpp_task->timeout_work);
-	rkvdec2_hard_ccu_finish(dec->link_dec->info, task);
-	set_bit(TASK_STATE_FINISH, &mpp_task->state);
-	set_bit(TASK_STATE_DONE, &mpp_task->state);
-	list_move_tail(&task->table->link, &dec->ccu->unused_list);
-	list_del_init(&mpp_task->queue_link);
-	/* Wake up the GET thread */
-	wake_up(&mpp_task->wait);
-	kref_put(&mpp_task->ref, mpp_free_task);
-	dec->mmu_fault = 0;
-	dec->fault_iova = 0;
-}
-
-static void rkvdec2_hard_ccu_pagefault_proc(struct mpp_taskqueue *queue)
-{
-	struct mpp_task *loop = NULL, *n;
-
-	list_for_each_entry_safe(loop, n, &queue->running_list, queue_link) {
-		struct rkvdec2_task *task = to_rkvdec2_task(loop);
-		u32 iova = (u32)task->table->iova;
-		u32 i;
-
-		for (i = 0; i < queue->core_count; i++) {
-			struct mpp_dev *core = queue->cores[i];
-			struct rkvdec2_dev *dec = to_rkvdec2_dev(core);
-
-			if (!dec->mmu_fault || dec->fault_iova != iova)
-				continue;
-			rkvdec2_hard_ccu_handle_pagefault_task(dec, loop);
-		}
-	}
 }
 
 static void rkvdec2_hard_ccu_resend_tasks(struct mpp_dev *mpp, struct mpp_taskqueue *queue)
@@ -2503,11 +2461,6 @@
 	/* reset process */
 	rkvdec2_hard_ccu_reset(queue, dec->ccu);
 	atomic_set(&queue->reset_request, 0);
-	/* if iommu pagefault, find the fault task and drop it */
-	if (queue->iommu_fault) {
-		rkvdec2_hard_ccu_pagefault_proc(queue);
-		queue->iommu_fault = 0;
-	}
 
 	/* relink running task iova in list, and resend them to hw */
 	if (!list_empty(&queue->running_list))
@@ -2541,6 +2494,7 @@
 
 	rkvdec2_ccu_power_on(queue, dec->ccu);
 	rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
+	mpp_taskqueue_pending_to_run(queue, mpp_task);
 }
 
 /* 4. poweroff when running and pending list are empty */
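Read together with the hunk at old line 2366, the pending-to-running list move leaves rkvdec2_hard_ccu_enqueue(), which still sets TASK_STATE_RUNNING while programming the link table, and becomes the worker's last step, so the task only changes queues after it has been handed to hardware. A condensed view of the resulting flow, assembled from the two hunks (simplified):

	/* inside rkvdec2_hard_ccu_enqueue(): program the link table, then */
	writel(RKVDEC_CCU_BIT_CFG_DONE, ccu->reg_base + RKVDEC_CCU_CFG_DONE_BASE);
	set_bit(TASK_STATE_RUNNING, &mpp_task->state);

	/* in the worker: power on, enqueue, then do the single list move */
	rkvdec2_ccu_power_on(queue, dec->ccu);
	rkvdec2_hard_ccu_enqueue(dec->ccu, mpp_task, queue, mpp);
	mpp_taskqueue_pending_to_run(queue, mpp_task);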