2023-11-06 15ade055295d13f95d49e3d99b09f3bbfb4a43e7
--- a/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_rkvdec2_link.c
@@ -60,6 +60,10 @@
         /* interrupt read back in table buffer */
         u32 tb_reg_int;
         bool hack_setup;
+        u32 tb_reg_cycle;
+        u32 tb_reg_out;
+        u32 tb_reg_ref_s;
+        u32 tb_reg_ref_e;
         struct rkvdec_link_status reg_status;
 };
 
@@ -113,6 +117,7 @@
         },
         .tb_reg_int = 164,
         .hack_setup = 1,
+        .tb_reg_cycle = 179,
         .reg_status = {
                 .dec_num_mask = 0x3fffffff,
                 .err_flag_base = 0x010,
@@ -122,7 +127,7 @@
 
 /* vdpu382 link hw info */
 struct rkvdec_link_info rkvdec_link_v2_hw_info = {
-        .tb_reg_num = 218,
+        .tb_reg_num = 222,
         .tb_reg_next = 0,
         .tb_reg_r = 1,
         .tb_reg_second_en = 8,
@@ -162,21 +167,27 @@
         .part_r[0] = {
                 .tb_reg_off = 180,
                 .reg_start = 224,
-                .reg_num = 10,
+                .reg_num = 12,
         },
         .part_r[1] = {
-                .tb_reg_off = 190,
+                .tb_reg_off = 192,
                 .reg_start = 258,
-                .reg_num = 28,
+                .reg_num = 30,
         },
-        .tb_reg_int = 180,
-        .hack_setup = 0,
+        .tb_reg_int = 180,
+        .hack_setup = 0,
+        .tb_reg_cycle = 197,
+        .tb_reg_out = 86,
+        .tb_reg_ref_s = 104,
+        .tb_reg_ref_e = 119,
         .reg_status = {
                 .dec_num_mask = 0x000fffff,
                 .err_flag_base = 0x024,
                 .err_flag_bit = BIT(8),
         },
 };
+
+static void rkvdec2_link_free_task(struct kref *ref);
 
 static void rkvdec_link_status_update(struct rkvdec_link_dev *dev)
 {
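
The new tb_reg_cycle/tb_reg_out/tb_reg_ref_s/tb_reg_ref_e fields let common code look up the per-hardware readback slots in the link table buffer instead of relying on the shared RKVDEC_LINK_REG_CYCLE_CNT constant. A minimal sketch of how one slot's registers are indexed through rkvdec_link_info; the helper names below are hypothetical, only the field names and values come from this patch:

    /* Hypothetical helpers, for illustration only (not part of this patch). */
    static u32 rkvdec_link_slot_reg(struct rkvdec_link_dev *link_dec, int idx, u32 reg)
    {
        u32 *table_base = (u32 *)link_dec->table->vaddr;

        /* each slot occupies link_reg_count 32-bit words in the table buffer */
        return table_base[idx * link_dec->link_reg_count + reg];
    }

    static u32 rkvdec_link_slot_cycles(struct rkvdec_link_dev *link_dec, int idx)
    {
        /* per-hardware index: 179 in the first table above, 197 on vdpu382 */
        return rkvdec_link_slot_reg(link_dec, idx, link_dec->info->tb_reg_cycle);
    }
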
@@ -426,8 +437,6 @@
                 memcpy(&tb_reg[off], &task->reg[s], n * sizeof(u32));
         }
 
-        /* setup error mode flag */
-        tb_reg[9] |= BIT(18) | BIT(9);
         tb_reg[info->tb_reg_second_en] |= RKVDEC_WAIT_RESET_EN;
 
         /* memset read registers */
@@ -514,6 +523,7 @@
         }
 
         if (!resend) {
+                u32 timing_en = dev->mpp->srv->timing_en;
                 u32 i;
 
                 for (i = 0; i < task_to_run; i++) {
@@ -523,10 +533,8 @@
                         if (!task_ddr)
                                 continue;
 
-                        set_bit(TASK_STATE_START, &task_ddr->state);
-                        schedule_delayed_work(&task_ddr->timeout_work,
-                                              msecs_to_jiffies(200));
-                        mpp_time_record(task_ddr);
+                        mpp_task_run_begin(task_ddr, timing_en, MPP_WORK_TIMEOUT_DELAY);
+                        mpp_task_run_end(task_ddr, timing_en);
                 }
         } else {
                 if (task_total)
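
mpp_task_run_begin()/mpp_task_run_end() fold the open-coded start sequence into the common mpp helpers, with MPP_WORK_TIMEOUT_DELAY presumably standing in for the old hard-coded 200 ms. Going only by the lines removed here, the pair is assumed to cover roughly the following; the bodies below are illustrative, not the real helpers, and the timing_en handling is an assumption:

    /* Illustrative only: assumed shape of the helpers, inferred from the
     * code they replace in this hunk. */
    static void example_task_run_begin(struct mpp_task *task, u32 timing_en,
                                       u32 timeout_ms)
    {
        set_bit(TASK_STATE_START, &task->state);
        schedule_delayed_work(&task->timeout_work, msecs_to_jiffies(timeout_ms));
        mpp_time_record(task);
        /* timing_en is assumed to additionally arm per-stage timing records */
    }

    static void example_task_run_end(struct mpp_task *task, u32 timing_en)
    {
        /* assumed: close the register-setup timing point when timing_en is set */
    }
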
@@ -538,6 +546,8 @@
 
         /* start config before all registers are set */
         wmb();
+
+        mpp_iommu_flush_tlb(dev->mpp->iommu_info);
 
         /* configure done */
         writel(RKVDEC_LINK_BIT_CFG_DONE, reg_base + RKVDEC_LINK_CFG_CTRL_BASE);
@@ -591,6 +601,7 @@
         struct rkvdec_link_info *info = link_dec->info;
         u32 *table_base = (u32 *)link_dec->table->vaddr;
         int i;
+        struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
 
         for (i = 0; i < count; i++) {
                 int idx = rkvdec_link_get_task_read(link_dec);
@@ -599,15 +610,15 @@
                 u32 *regs = NULL;
                 u32 irq_status = 0;
 
-                if (!mpp_task) {
+                if (!mpp_task && info->hack_setup) {
                         regs = table_base + idx * link_dec->link_reg_count;
                         mpp_dbg_link_flow("slot %d read task stuff\n", idx);
 
                         link_dec->stuff_total++;
                         if (link_dec->statistic_count &&
-                            regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
+                            regs[info->tb_reg_cycle]) {
                                 link_dec->stuff_cycle_sum +=
-                                        regs[RKVDEC_LINK_REG_CYCLE_CNT];
+                                        regs[info->tb_reg_cycle];
                                 link_dec->stuff_cnt++;
                                 if (link_dec->stuff_cnt >=
                                     link_dec->statistic_count) {
@@ -648,22 +659,27 @@
                         continue;
                 }
 
-                mpp_time_diff(mpp_task);
+                if (!mpp_task)
+                        return 0;
+
                 task = to_rkvdec2_task(mpp_task);
                 regs = table_base + idx * link_dec->link_reg_count;
+                link_dec->error_iova = regs[info->tb_reg_out];
                 irq_status = regs[info->tb_reg_int];
+                mpp_task->hw_cycles = regs[info->tb_reg_cycle];
+                mpp_time_diff_with_hw_time(mpp_task, dec->aclk_info.real_rate_hz);
                 mpp_dbg_link_flow("slot %d rd task %d\n", idx,
                                   mpp_task->task_index);
 
                 task->irq_status = irq_status ? irq_status : mpp->irq_status;
-
+                mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", task->irq_status);
                 cancel_delayed_work_sync(&mpp_task->timeout_work);
                 set_bit(TASK_STATE_HANDLE, &mpp_task->state);
 
                 if (link_dec->statistic_count &&
-                    regs[RKVDEC_LINK_REG_CYCLE_CNT]) {
+                    regs[info->tb_reg_cycle]) {
                         link_dec->task_cycle_sum +=
-                                regs[RKVDEC_LINK_REG_CYCLE_CNT];
+                                regs[info->tb_reg_cycle];
                         link_dec->task_cnt++;
                         if (link_dec->task_cnt >= link_dec->statistic_count) {
                                 dev_info(link_dec->dev, "hw cycle %u\n",
@@ -691,6 +707,8 @@
                 set_bit(TASK_STATE_PROC_DONE, &mpp_task->state);
                 /* Wake up the GET thread */
                 wake_up(&task->wait);
+                kref_put(&mpp_task->ref, rkvdec2_link_free_task);
+                link_dec->tasks_hw[idx] = NULL;
         }
 
         return 0;
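
Each finished slot now reports its hardware cycle count, and mpp_time_diff_with_hw_time() receives the real aclk rate alongside it. Assuming the helper simply scales cycles by the clock rate, the conversion it is expected to perform amounts to the sketch below (illustrative only; the real helper lives in the shared mpp core):

    /* Illustrative conversion only, not the actual mpp helper. */
    static u64 example_hw_time_us(u32 hw_cycles, unsigned long aclk_rate_hz)
    {
        if (!aclk_rate_hz)
            return 0;

        /* microseconds = cycles / (cycles per microsecond) */
        return div_u64((u64)hw_cycles * USEC_PER_SEC, (u32)aclk_rate_hz);
    }
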
@@ -729,7 +747,6 @@
 
 static int rkvdec2_link_reset(struct mpp_dev *mpp)
 {
-        struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
 
         dev_info(mpp->dev, "resetting...\n");
 
@@ -740,11 +757,8 @@
 
         rockchip_save_qos(mpp->dev);
 
-        mutex_lock(&dec->sip_reset_lock);
-        rockchip_dmcfreq_lock();
-        sip_smc_vpu_reset(0, 0, 0);
-        rockchip_dmcfreq_unlock();
-        mutex_unlock(&dec->sip_reset_lock);
+        if (mpp->hw_ops->reset)
+                mpp->hw_ops->reset(mpp);
 
         rockchip_restore_qos(mpp->dev);
 
@@ -762,6 +776,49 @@
         return 0;
 }
 
+static void rkvdec2_check_err_ref(struct mpp_dev *mpp)
+{
+        struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
+        struct rkvdec_link_dev *link_dec = dec->link_dec;
+        struct rkvdec_link_info *link_info = link_dec->info;
+        struct mpp_taskqueue *queue = mpp->queue;
+        struct mpp_task *mpp_task = NULL, *n;
+        struct rkvdec2_task *task;
+        int i;
+
+        if (!link_dec->error_iova || !dec->err_ref_hack)
+                return;
+
+        dev_err(mpp->dev, "err task iova %#08x\n", link_dec->error_iova);
+        list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
+                if (mpp_task) {
+                        u32 *regs = NULL;
+                        u32 *table_base = (u32 *)link_dec->table->vaddr;
+
+                        task = to_rkvdec2_task(mpp_task);
+                        regs = table_base + task->slot_idx * link_dec->link_reg_count;
+
+                        for (i = link_info->tb_reg_ref_s; i <= link_info->tb_reg_ref_e; i++) {
+                                if (regs[i] == link_dec->error_iova)
+                                        regs[i] = 0;
+                        }
+                }
+        }
+
+        mutex_lock(&queue->pending_lock);
+        list_for_each_entry_safe(mpp_task, n, &queue->pending_list, queue_link) {
+                task = to_rkvdec2_task(mpp_task);
+
+                /* ref frame reg index start - end */
+                for (i = 164; i <= 179; i++) {
+                        if (task->reg[i] == link_dec->error_iova)
+                                task->reg[i] = 0;
+                }
+        }
+        mutex_unlock(&queue->pending_lock);
+        link_dec->error_iova = 0;
+}
+
 static int rkvdec2_link_irq(struct mpp_dev *mpp)
 {
         struct rkvdec2_dev *dec = to_rkvdec2_dev(mpp);
@@ -774,9 +831,6 @@
         }
 
         irq_status = readl(link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
-
-        mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", irq_status);
-        mpp_dbg_link_flow("link irq %08x\n", irq_status);
 
         if (irq_status & RKVDEC_LINK_BIT_IRQ_RAW) {
                 u32 enabled = readl(link_dec->reg_base + RKVDEC_LINK_EN_BASE);
@@ -795,7 +849,8 @@
 
                 writel_relaxed(0, link_dec->reg_base + RKVDEC_LINK_IRQ_BASE);
         }
-
+        mpp_debug((DEBUG_IRQ_STATUS | DEBUG_LINK_TABLE), "irq_status: %08x : %08x\n",
+                  irq_status, mpp->irq_status);
         return 0;
 }
 
@@ -815,6 +870,7 @@
         mpp_debug_enter();
 
         disable_irq(mpp->irq);
+        mpp_iommu_disable_irq(mpp->iommu_info);
         rkvdec_link_status_update(link_dec);
         link_dec->irq_status = irq_status;
         prev_dec_num = link_dec->task_decoded;
@@ -822,8 +878,10 @@
         if (!link_dec->enabled || task_timeout) {
                 u32 val;
 
-                if (task_timeout)
+                if (task_timeout) {
                         rkvdec_link_reg_dump("timeout", link_dec);
+                        link_dec->decoded += task_timeout;
+                }
 
                 val = mpp_read(mpp, 224 * 4);
                 if (link_info->hack_setup && !(val & BIT(2))) {
@@ -838,6 +896,7 @@
         if (link_dec->enabled && !count && !need_reset) {
                 /* process extra isr when task is processed */
                 enable_irq(mpp->irq);
+                mpp_iommu_enable_irq(mpp->iommu_info);
                 goto done;
         }
 
@@ -851,15 +910,18 @@
                 goto do_reset;
 
         enable_irq(mpp->irq);
+        mpp_iommu_enable_irq(mpp->iommu_info);
         goto done;
 
 do_reset:
+        rkvdec2_check_err_ref(mpp);
         /* NOTE: irq may run with reset */
         atomic_inc(&mpp->reset_request);
         rkvdec2_link_reset(mpp);
         link_dec->task_decoded = 0;
         link_dec->task_total = 0;
         enable_irq(mpp->irq);
+        mpp_iommu_enable_irq(mpp->iommu_info);
 
         if (link_dec->total == link_dec->decoded)
                 goto done;
@@ -881,6 +943,26 @@
         mpp_debug_leave();
 
         return IRQ_HANDLED;
+}
+
+static int rkvdec2_link_iommu_handle(struct iommu_domain *iommu,
+                                     struct device *iommu_dev,
+                                     unsigned long iova,
+                                     int status, void *arg)
+{
+        struct mpp_dev *mpp = (struct mpp_dev *)arg;
+
+        dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+                iova, status, arg);
+
+        if (!mpp) {
+                dev_err(iommu_dev, "pagefault without device to handle\n");
+                return 0;
+        }
+
+        rk_iommu_mask_irq(mpp->dev);
+
+        return 0;
 }
 
 int rkvdec2_link_remove(struct mpp_dev *mpp, struct rkvdec_link_dev *link_dec)
@@ -1016,7 +1098,8 @@
 
         if (link_dec->info->hack_setup)
                 rkvdec2_link_hack_data_setup(dec->fix);
-
+        iommu_set_fault_handler(mpp->iommu_info->domain,
+                                rkvdec2_link_iommu_handle, mpp);
         link_dec->mpp = mpp;
         link_dec->dev = dev;
         atomic_set(&link_dec->task_timeout, 0);
@@ -1060,8 +1143,10 @@
         }
         session = task->session;
 
-        mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d state 0x%lx\n",
-                       session->index, task->task_index, task->state);
+        mpp_debug_func(DEBUG_TASK_INFO,
+                       "session %d:%d task %d state 0x%lx abort_request %d\n",
+                       session->device_type, session->index, task->task_index,
+                       task->state, atomic_read(&task->abort_request));
         if (!session->mpp) {
                 mpp_err("session %d session->mpp is null.\n", session->index);
                 return;
@@ -1112,6 +1197,7 @@
 
         if (!link_dec->irq_enabled) {
                 enable_irq(mpp->irq);
+                mpp_iommu_enable_irq(mpp->iommu_info);
                 link_dec->irq_enabled = 1;
         }
 
@@ -1146,6 +1232,7 @@
 
         if (atomic_xchg(&link_dec->power_enabled, 0)) {
                 disable_irq(mpp->irq);
+                mpp_iommu_disable_irq(mpp->iommu_info);
                 link_dec->irq_enabled = 0;
 
                 if (mpp->hw_ops->clk_off)
@@ -1287,6 +1374,7 @@
         u32 task_to_run = 0;
         int slot_idx = 0;
         int ret;
+        struct mpp_session *session = task->session;
 
         mpp_debug_enter();
 
@@ -1301,8 +1389,10 @@
         }
 
         rkvdec2_link_power_on(mpp);
-        mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
-                  task->session->pid, dev_name(mpp->dev));
+        mpp_debug_func(DEBUG_TASK_INFO,
+                       "%s session %d:%d task=%d state=0x%lx\n",
+                       dev_name(mpp->dev), session->device_type,
+                       session->index, task->task_index, task->state);
 
         /* prepare the task for running */
         if (test_and_set_bit(TASK_STATE_PREPARE, &task->state))
@@ -1372,7 +1462,6 @@
                                     struct mpp_task *task)
 {
         set_bit(TASK_STATE_DONE, &task->state);
-        kref_put(&task->ref, rkvdec2_link_free_task);
 
         return 0;
 }
@@ -1479,10 +1568,12 @@
                         goto done;
 
                 disable_irq(mpp->irq);
+                mpp_iommu_disable_irq(mpp->iommu_info);
                 rkvdec2_link_reset(mpp);
                 link_dec->task_decoded = 0;
                 link_dec->task_total = 0;
                 enable_irq(mpp->irq);
+                mpp_iommu_enable_irq(mpp->iommu_info);
         }
         /*
          * process pending queue to find the task to accept.
@@ -1500,7 +1591,6 @@
         mutex_lock(&queue->pending_lock);
         list_del_init(&task->queue_link);
 
-        kref_get(&task->ref);
         set_bit(TASK_STATE_ABORT_READY, &task->state);
         set_bit(TASK_STATE_PROC_DONE, &task->state);
 
@@ -1546,28 +1636,7 @@
                 rkvdec2_link_power_off(mpp);
         }
 
-        mutex_lock(&queue->session_lock);
-        while (queue->detach_count) {
-                struct mpp_session *session = NULL;
-
-                session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
-                                                   session_link);
-                if (session) {
-                        list_del_init(&session->session_link);
-                        queue->detach_count--;
-                }
-
-                mutex_unlock(&queue->session_lock);
-
-                if (session) {
-                        mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
-                                        queue->detach_count);
-                        mpp_session_deinit(session);
-                }
-
-                mutex_lock(&queue->session_lock);
-        }
-        mutex_unlock(&queue->session_lock);
+        mpp_session_cleanup_detach(queue, work_s);
 }
 
 void rkvdec2_link_session_deinit(struct mpp_session *session)
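
The open-coded session-detach loop becomes a call to mpp_session_cleanup_detach(), shared with the non-link worker. Reconstructed from the loop removed above, the helper is assumed to behave roughly as follows (sketch only; the debug logging is omitted and the role of the work_s argument is not visible in this hunk):

    /* Sketch of the assumed helper, reconstructed from the loop it replaces. */
    static void example_session_cleanup_detach(struct mpp_taskqueue *queue)
    {
        struct mpp_session *session;

        mutex_lock(&queue->session_lock);
        while (queue->detach_count) {
            session = list_first_entry_or_null(&queue->session_detach,
                                               struct mpp_session, session_link);
            if (session) {
                list_del_init(&session->session_link);
                queue->detach_count--;
            }

            /* drop the lock while the session is torn down */
            mutex_unlock(&queue->session_lock);

            if (session)
                mpp_session_deinit(session);

            mutex_lock(&queue->session_lock);
        }
        mutex_unlock(&queue->session_lock);
    }
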
@@ -1580,9 +1649,9 @@
 
         if (session->dma) {
                 mpp_dbg_session("session %d destroy dma\n", session->index);
-                mpp_iommu_down_read(mpp->iommu_info);
+                mpp_iommu_down_write(mpp->iommu_info);
                 mpp_dma_session_destroy(session->dma);
-                mpp_iommu_up_read(mpp->iommu_info);
+                mpp_iommu_up_write(mpp->iommu_info);
                 session->dma = NULL;
         }
         if (session->srv) {