```diff
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/nospec.h>
 #include <soc/rockchip/pm_domains.h>
+#include <soc/rockchip/rockchip_iommu.h>
 
 #include "mpp_debug.h"
 #include "mpp_common.h"
```
```diff
@@ -314,43 +315,40 @@
 
 static void *vepu_prepare(struct mpp_dev *mpp, struct mpp_task *mpp_task)
 {
-	struct mpp_taskqueue *queue = mpp->queue;
 	struct vepu_dev *enc = to_vepu_dev(mpp);
 	struct vepu_ccu *ccu = enc->ccu;
 	unsigned long core_idle;
 	unsigned long flags;
-	u32 core_id_max;
 	s32 core_id;
 	u32 i;
 
 	spin_lock_irqsave(&ccu->lock, flags);
 
-	core_idle = queue->core_idle;
-	core_id_max = queue->core_id_max;
+	core_idle = ccu->core_idle;
 
-	for (i = 0; i <= core_id_max; i++) {
-		struct mpp_dev *mpp = queue->cores[i];
+	for (i = 0; i < ccu->core_num; i++) {
+		struct mpp_dev *mpp = ccu->cores[i];
 
 		if (mpp && mpp->disable)
-			clear_bit(i, &core_idle);
+			clear_bit(mpp->core_id, &core_idle);
 	}
 
-	core_id = find_first_bit(&ccu->core_idle, ccu->core_num);
-	core_id = array_index_nospec(core_id, MPP_MAX_CORE_NUM);
-	if (core_id >= core_id_max + 1 || !queue->cores[core_id]) {
+	core_id = find_first_bit(&core_idle, ccu->core_num);
+	if (core_id >= ARRAY_SIZE(ccu->cores)) {
 		mpp_task = NULL;
 		mpp_dbg_core("core %d all busy %lx\n", core_id, ccu->core_idle);
-	} else {
-		unsigned long core_idle = ccu->core_idle;
-
-		clear_bit(core_id, &ccu->core_idle);
-		mpp_task->mpp = ccu->cores[core_id];
-		mpp_task->core_id = core_id;
-
-		mpp_dbg_core("core cnt %d core %d set idle %lx -> %lx\n",
-			     ccu->core_num, core_id, core_idle, ccu->core_idle);
+		goto done;
 	}
 
+	core_id = array_index_nospec(core_id, MPP_MAX_CORE_NUM);
+	clear_bit(core_id, &ccu->core_idle);
+	mpp_task->mpp = ccu->cores[core_id];
+	mpp_task->core_id = core_id;
+
+	mpp_dbg_core("core cnt %d core %d set idle %lx -> %lx\n",
+		     ccu->core_num, core_id, core_idle, ccu->core_idle);
+
+done:
 	spin_unlock_irqrestore(&ccu->lock, flags);
 
 	return mpp_task;
```
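The rework above snapshots the idle bitmap under `ccu->lock`, bounds-checks the result of `find_first_bit()` first, and only then clamps the index with `array_index_nospec()`; the old code clamped before checking and compared against a different limit than it searched. A minimal sketch of the check-then-clamp pattern, with illustrative names (`pick_idle_core` and `MAX_CORES` are not from the driver):

```c
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/nospec.h>

#define MAX_CORES 4	/* illustrative stand-in for MPP_MAX_CORE_NUM */

static int pick_idle_core(unsigned long idle_mask)
{
	unsigned int id = find_first_bit(&idle_mask, MAX_CORES);

	/*
	 * find_first_bit() returns the search size when no bit is set,
	 * so this check doubles as the "all cores busy" test and must
	 * come before the index is used or clamped.
	 */
	if (id >= MAX_CORES)
		return -EBUSY;

	/*
	 * Clamp under speculation only after the architectural bounds
	 * check, so a mispredicted branch cannot index past the array.
	 */
	id = array_index_nospec(id, MAX_CORES);
	return id;
}
```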
```diff
@@ -885,6 +883,48 @@
 	return 0;
 }
 
+static int vepu2_iommu_fault_handle(struct iommu_domain *iommu, struct device *iommu_dev,
+				    unsigned long iova, int status, void *arg)
+{
+	struct mpp_dev *mpp = (struct mpp_dev *)arg;
+	struct mpp_task *mpp_task;
+	struct vepu_dev *enc = to_vepu_dev(mpp);
+	struct vepu_ccu *ccu = enc->ccu;
+
+	dev_err(iommu_dev, "fault addr 0x%08lx status %x arg %p\n",
+		iova, status, arg);
+
+	if (ccu) {
+		int i;
+		struct mpp_dev *core;
+
+		for (i = 0; i < ccu->core_num; i++) {
+			core = ccu->cores[i];
+			if (core->iommu_info && (&core->iommu_info->pdev->dev == iommu_dev)) {
+				mpp = core;
+				break;
+			}
+		}
+	}
+
+	if (!mpp) {
+		dev_err(iommu_dev, "pagefault without device to handle\n");
+		return 0;
+	}
+	mpp_task = mpp->cur_task;
+	if (mpp_task)
+		mpp_task_dump_mem_region(mpp, mpp_task);
+
+	mpp_task_dump_hw_reg(mpp);
+	/*
+	 * Mask the iommu irq so the iommu does not keep re-triggering the
+	 * pagefault, until the faulting task is finished by the hw timeout.
+	 */
+	rockchip_iommu_mask_irq(mpp->dev);
+
+	return 0;
+}
+
 static struct mpp_hw_ops vepu_v2_hw_ops = {
 	.init = vepu_init,
 	.clk_on = vepu_clk_on,
```
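`vepu2_iommu_fault_handle()` matches the kernel's `iommu_fault_handler_t` signature, and the probe paths below hand it to the mpp core through `mpp->fault_handler`. The diff does not show how the core attaches it to the IOMMU domain; a hedged sketch of the conventional registration, assuming the classic `iommu_set_fault_handler()` report path (the helper name is illustrative, not a driver function):

```c
#include <linux/iommu.h>

#include "mpp_common.h"	/* struct mpp_dev, as in the driver */

/* illustrative helper; the real registration lives in the mpp core */
static void vepu2_register_fault_handler(struct mpp_dev *mpp)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(mpp->dev);

	if (domain && mpp->fault_handler)
		/* the token passed here comes back as `arg` in the handler */
		iommu_set_fault_handler(domain, mpp->fault_handler, mpp);
}
```

Passing `mpp` as the token is what lets the handler recover a device from `arg` and, on a shared CCU domain, fall back to scanning `ccu->cores` for the core whose IOMMU actually faulted.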
```diff
@@ -1050,7 +1090,8 @@
 		ccu_info = ccu->main_core->iommu_info;
 		cur_info = enc->mpp.iommu_info;
 
-		cur_info->domain = ccu_info->domain;
+		if (cur_info)
+			cur_info->domain = ccu_info->domain;
 		mpp_iommu_attach(cur_info);
 	}
 	enc->ccu = ccu;
```
```diff
@@ -1102,6 +1143,7 @@
 		return -EINVAL;
 	}
 
+	mpp->fault_handler = vepu2_iommu_fault_handle;
 	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
 	vepu_procfs_init(mpp);
 	vepu_procfs_ccu_init(mpp);
```
```diff
@@ -1151,6 +1193,7 @@
 		return -EINVAL;
 	}
 
+	mpp->fault_handler = vepu2_iommu_fault_handle;
 	mpp->session_max_buffers = VEPU2_SESSION_MAX_BUFFERS;
 	vepu_procfs_init(mpp);
 	/* register current device to mpp service */
```
---|