forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/video/rockchip/mpp/mpp_vepu2.c
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/nospec.h>
 #include <soc/rockchip/pm_domains.h>
+#include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_common.h"
@@ -314,43 +315,40 @@
 
 static void *vepu_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task)
 {
-	struct mpp_taskqueue *queue = mpp->queue;
 	struct vepu_dev *enc = to_vepu_dev(mpp);
 	struct vepu_ccu *ccu = enc->ccu;
 	unsigned long core_idle;
 	unsigned long flags;
-	u32 core_id_max;
 	s32 core_id;
 	u32 i;
 
 	spin_lock_irqsave(&ccu->lock, flags);
 
-	core_idle = queue->core_idle;
-	core_id_max = queue->core_id_max;
+	core_idle = ccu->core_idle;
 
-	for (i = 0; i <= core_id_max; i++) {
-		struct mpp_dev *mpp = queue->cores[i];
+	for (i = 0; i < ccu->core_num; i++) {
+		struct mpp_dev *mpp = ccu->cores[i];
 
 		if (mpp && mpp->disable)
-			clear_bit(i, &core_idle);
+			clear_bit(mpp->core_id, &core_idle);
 	}
 
-	core_id = find_first_bit(&ccu->core_idle, ccu->core_num);
-	core_id = array_index_nospec(core_id, MPP_MAX_CORE_NUM);
-	if (core_id >= core_id_max + 1 || !queue->cores[core_id]) {
+	core_id = find_first_bit(&core_idle, ccu->core_num);
+	if (core_id >= ARRAY_SIZE(ccu->cores)) {
 		mpp_task = NULL;
 		mpp_dbg_core("core %d all busy %lx\n", core_id, ccu->core_idle);
-	} else {
-		unsigned long core_idle = ccu->core_idle;
-
-		clear_bit(core_id, &ccu->core_idle);
-		mpp_task->mpp = ccu->cores[core_id];
-		mpp_task->core_id = core_id;
-
-		mpp_dbg_core("core cnt %d core %d set idle %lx -> %lx\n",
-			     ccu->core_num, core_id, core_idle, ccu->core_idle);
+		goto done;
 	}
 
+	core_id = array_index_nospec(core_id, MPP_MAX_CORE_NUM);
+	clear_bit(core_id, &ccu->core_idle);
+	mpp_task->mpp = ccu->cores[core_id];
+	mpp_task->core_id = core_id;
+
+	mpp_dbg_core("core cnt %d core %d set idle %lx -> %lx\n",
+		     ccu->core_num, core_id, core_idle, ccu->core_idle);
+
+done:
	spin_unlock_irqrestore(&ccu->lock, flags);
 
 	return mpp_task;
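
Note: the reworked selection above reads the idle bitmap from the CCU rather than the taskqueue, masks out disabled cores, bounds-checks the result of find_first_bit() and only then clamps the index with array_index_nospec() before claiming the core (clamping after the bounds check is the documented usage order for the nospec helper). Below is a minimal user-space sketch of that flow, not driver code: struct ccu, struct core, MAX_CORES and pick_core() are simplified stand-ins for the driver's vepu_ccu/mpp_dev types, plain loops emulate find_first_bit(), and an explicit bound check stands in for array_index_nospec().

/*
 * Minimal user-space sketch (not driver code) of the core-selection flow
 * introduced by the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CORES 4

struct core {
	bool present;
	bool disabled;
};

struct ccu {
	unsigned long core_idle;	/* bit n set => core n is idle */
	unsigned int core_num;
	struct core *cores[MAX_CORES];
};

static int pick_core(struct ccu *ccu)
{
	unsigned long idle = ccu->core_idle;
	unsigned int i;

	/* never hand out a disabled core, even if its idle bit is set */
	for (i = 0; i < ccu->core_num; i++)
		if (ccu->cores[i] && ccu->cores[i]->disabled)
			idle &= ~(1UL << i);

	/* first remaining idle core, like find_first_bit(&core_idle, core_num) */
	for (i = 0; i < ccu->core_num; i++)
		if (idle & (1UL << i))
			break;

	/*
	 * Bounds check before indexing; the patch additionally clamps the
	 * index with array_index_nospec() so a mispredicted branch cannot
	 * index past cores[] under speculation.
	 */
	if (i >= ccu->core_num || i >= MAX_CORES)
		return -1;		/* all cores busy or disabled */

	ccu->core_idle &= ~(1UL << i);	/* claim the core */
	return (int)i;
}

int main(void)
{
	struct core c0 = { .present = true, .disabled = true };
	struct core c1 = { .present = true, .disabled = false };
	struct ccu ccu = {
		.core_idle = 0x3,
		.core_num = 2,
		.cores = { &c0, &c1 },
	};

	printf("picked core %d\n", pick_core(&ccu));	/* core 0 is disabled -> 1 */
	return 0;
}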
@@ -885,6 +883,48 @@
 	return 0;
 }
 
+static int vepu2_iommu_fault_handle(struct iommu_domain *iommu, struct device *iommu_dev,
+				    unsigned long iova, int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+	struct mpp_task *mpp_task;
+	struct vepu_dev *enc = to_vepu_dev(mpp);
+	struct vepu_ccu *ccu = enc->ccu;
+
+	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+		iova, status, arg);
+
+	if (ccu) {
+		int i;
+		struct mpp_dev *core;
+
+		for (i = 0; i < ccu->core_num; i++) {
+			core = ccu->cores[i];
+			if (core->iommu_info && (&core->iommu_info->pdev->dev == iommu_dev)) {
+				mpp = core;
+				break;
+			}
+		}
+	}
+
+	if (!mpp) {
+		dev_err(iommu_dev, "pagefault without device to handle\n");
+		return 0;
+	}
+	mpp_task = mpp->cur_task;
+	if (mpp_task)
+		mpp_task_dump_mem_region(mpp, mpp_task);
+
+	mpp_task_dump_hw_reg(mpp);
+	/*
+	 * Mask the iommu irq so the iommu does not keep re-triggering the
+	 * pagefault until the faulting task is finished by the hw timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
+
+	return 0;
+}
+
 static struct mpp_hw_ops vepu_v2_hw_ops = {
 	.init = vepu_init,
 	.clk_on = vepu_clk_on,
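
The new handler matches the iommu_fault_handler_t signature from linux/iommu.h (domain, device, iova, flags/status, token). This diff does not show where mpp->fault_handler is consumed; as a hedged sketch only, the wiring presumably looks roughly like the following, where mpp_dev_fault_dispatch() and mpp_attach_fault_handler() are illustrative names rather than functions from the mpp service code:

/*
 * Hedged sketch, not part of this patch: how a handler with the
 * iommu_fault_handler_t signature is attached to an IOMMU domain.
 * The real consumer of mpp->fault_handler lives in the mpp service /
 * mpp_iommu glue, which this diff does not show.
 */
#include <linux/iommu.h>

static int mpp_dev_fault_dispatch(struct iommu_domain *domain,
				  struct device *iommu_dev,
				  unsigned long iova, int status, void *arg)
{
	struct mpp_dev *mpp = arg;

	/* forward to the per-device handler installed at probe time, if any */
	if (mpp->fault_handler)
		return mpp->fault_handler(domain, iommu_dev, iova, status, arg);

	return 0;
}

static void mpp_attach_fault_handler(struct mpp_dev *mpp,
				     struct iommu_domain *domain)
{
	/* the iommu core will call the dispatcher with 'mpp' as the token */
	iommu_set_fault_handler(domain, mpp_dev_fault_dispatch, mpp);
}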
@@ -1050,7 +1090,8 @@
 		ccu_info = ccu->main_core->iommu_info;
 		cur_info = enc->mpp.iommu_info;
 
-		cur_info->domain = ccu_info->domain;
+		if (cur_info)
+			cur_info->domain = ccu_info->domain;
 		mpp_iommu_attach(cur_info);
 	}
 	enc->ccu = ccu;
@@ -1102,6 +1143,7 @@
 		return -EINVAL;
 	}
 
+	mpp->fault_handler = vepu2_iommu_fault_handle;
 	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
 	vepu_procfs_init(mpp);
 	vepu_procfs_ccu_init(mpp);
@@ -1151,6 +1193,7 @@
 		return -EINVAL;
 	}
 
+	mpp->fault_handler = vepu2_iommu_fault_handle;
 	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
 	vepu_procfs_init(mpp);
 	/* register current device to mpp service */