 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2011-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
...
 
 #include <linux/dma-buf.h>
 #include <asm/cacheflush.h>
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
 #include <mali_kbase_sync.h>
 #endif
 #include <linux/dma-mapping.h>
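
The CONFIG_SYNC half of the old guard is dropped because the legacy Android sync framework is gone from current kernels; only sync_file remains. IS_ENABLED() is also a safer test than defined(): it evaluates true for both built-in (=y) and modular (=m) options. A minimal sketch of the distinction (CONFIG_SYNC_FILE itself is boolean, so here the two forms happen to agree):

    #include <linux/kconfig.h>

    /* defined(CONFIG_FOO) is only true when FOO=y; a tristate built as
     * a module defines CONFIG_FOO_MODULE instead. IS_ENABLED(CONFIG_FOO)
     * expands to 1 for both =y and =m, so it is the more robust guard.
     */
    #if IS_ENABLED(CONFIG_SYNC_FILE)
    /* explicit sync_file fence support is compiled in */
    #endif
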
...
     /* Record the start time of this atom so we could cancel it at
      * the right time.
      */
-    katom->start_timestamp = ktime_get();
+    katom->start_timestamp = ktime_get_raw();
 
     /* Add the atom to the waiting list before the timer is
      * (re)started to make sure that it gets processed.
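
ktime_get() reads CLOCK_MONOTONIC, which NTP may slew; ktime_get_raw() reads CLOCK_MONOTONIC_RAW, which is never adjusted, so intervals measured against the stored start_timestamp cannot be stretched or shrunk behind the driver's back. A small sketch of the interval check this enables (the helper name is illustrative; the driver's real check lives in the timeout worker further down):

    #include <linux/ktime.h>

    /* Illustrative helper: true once timeout_ms has elapsed since start.
     * Both samples come from the raw monotonic clock, so NTP adjustment
     * cannot distort the measured interval.
     */
    static bool soft_job_timed_out(ktime_t start, u32 timeout_ms)
    {
        s64 elapsed_ms = ktime_to_ms(ktime_sub(ktime_get_raw(), start));

        return elapsed_ms >= (s64)timeout_ms;
    }
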
...
     unsigned char *mapped_evt;
     struct kbase_vmap_struct map;
 
-    mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+    mapped_evt = kbase_vmap_prot(kctx, evt, sizeof(*mapped_evt),
+                     KBASE_REG_CPU_RD, &map);
     if (!mapped_evt)
         return -EFAULT;
 
...
         (new_status != BASE_JD_SOFT_EVENT_RESET))
         return -EINVAL;
 
-    mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
+    mapped_evt = kbase_vmap_prot(kctx, evt, sizeof(*mapped_evt),
+                     KBASE_REG_CPU_WR, &map);
     if (!mapped_evt)
         return -EFAULT;
 
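
Both soft-event helpers now go through kbase_vmap_prot(), which takes the required CPU access flag explicitly (KBASE_REG_CPU_RD for the read path, KBASE_REG_CPU_WR for the update) and, as used here, fails the mapping when the region lacks that permission rather than mapping unconditionally as kbase_vmap() did. A sketch of the read side in isolation (pairing the mapping with kbase_vunmap() is an assumption based on the driver's usual pattern):

    unsigned char *mapped_evt;
    unsigned char status;
    struct kbase_vmap_struct map;

    /* NULL if 'evt' does not lie in a region with CPU read permission */
    mapped_evt = kbase_vmap_prot(kctx, evt, sizeof(*mapped_evt),
                                 KBASE_REG_CPU_RD, &map);
    if (!mapped_evt)
        return -EFAULT;

    status = *mapped_evt;          /* read the soft-event byte */
    kbase_vunmap(kctx, &map);      /* release the temporary CPU mapping */
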
...
     return 0;
 }
 
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
 /* Called by the explicit fence mechanism when a fence wait has completed */
 void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
 {
...
     mutex_lock(&kctx->jctx.lock);
     kbasep_remove_waiting_soft_job(katom);
     kbase_finish_soft_job(katom);
-    if (jd_done_nolock(katom, NULL))
+    if (kbase_jd_done_nolock(katom, true))
         kbase_js_sched_all(kctx->kbdev);
     mutex_unlock(&kctx->jctx.lock);
 }
...
     int resched;
 
     mutex_lock(&kctx->jctx.lock);
-    resched = jd_done_nolock(katom, NULL);
+    resched = kbase_jd_done_nolock(katom, true);
     mutex_unlock(&kctx->jctx.lock);
 
     if (resched)
...
                 info.fence,
                 info.name,
                 kbase_sync_status_string(info.status));
-            }
+        }
     }
 
     kbase_fence_debug_check_atom(dep);
...
                          soft_job_timeout);
     u32 timeout_ms = (u32)atomic_read(
         &kctx->kbdev->js_data.soft_job_timeout_ms);
-    ktime_t cur_time = ktime_get();
+    ktime_t cur_time = ktime_get_raw();
     bool restarting = false;
     unsigned long lflags;
     struct list_head *entry, *tmp;
...
 static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
 {
     katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-    if (jd_done_nolock(katom, NULL))
+    if (kbase_jd_done_nolock(katom, true))
         kbase_js_sched_all(katom->kctx->kbdev);
 }
 
+#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
 static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
 {
     struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
...
     case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
     {
         struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
-        unsigned long nr_pages =
-            alloc->imported.user_buf.nr_pages;
+        const unsigned long nr_pages = alloc->imported.user_buf.nr_pages;
+        const unsigned long start = alloc->imported.user_buf.address;
 
         if (alloc->imported.user_buf.mm != current->mm) {
             ret = -EINVAL;
...
             ret = -ENOMEM;
             goto out_unlock;
         }
-
-        ret = get_user_pages_fast(
-            alloc->imported.user_buf.address,
-            nr_pages, 0,
-            buffers[i].extres_pages);
+        kbase_gpu_vm_unlock(katom->kctx);
+        ret = get_user_pages_fast(start, nr_pages, 0, buffers[i].extres_pages);
+        kbase_gpu_vm_lock(katom->kctx);
         if (ret != nr_pages) {
             /* Adjust number of pages, so that we only
              * attempt to release pages in the array that we
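
get_user_pages_fast() may fall back to the slow path, take mmap_lock and fault pages in, so calling it under the driver's GPU VM lock risks sleeping and lock-ordering trouble. The patch therefore snapshots the address and page count into locals while still locked, drops the lock around the pin, and re-takes it afterwards. The shape of the pattern (a sketch; surrounding declarations and error handling abbreviated):

    /* Snapshot everything needed from the region while still locked. */
    const unsigned long start = alloc->imported.user_buf.address;
    const unsigned long nr_pages = alloc->imported.user_buf.nr_pages;

    kbase_gpu_vm_unlock(kctx);      /* gup may sleep and take mmap_lock */
    ret = get_user_pages_fast(start, nr_pages, 0, pages);
    kbase_gpu_vm_lock(kctx);        /* re-take before touching GPU state */

    if (ret != nr_pages) {
        /* release only the pages that were actually pinned */
    }
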
...
 
     return ret;
 }
-#endif /* !MALI_USE_CSF */
 
 #if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
 static void *dma_buf_kmap_page(struct kbase_mem_phy_alloc *gpu_alloc,
...
 }
 #endif
 
-int kbase_mem_copy_from_extres(struct kbase_context *kctx,
-        struct kbase_debug_copy_buffer *buf_data)
+/**
+ * kbase_mem_copy_from_extres() - Copy from external resources.
+ *
+ * @kctx:     kbase context within which the copying is to take place.
+ * @buf_data: Pointer to the information about external resources:
+ *            pages pertaining to the external resource, number of
+ *            pages to copy.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+                      struct kbase_debug_copy_buffer *buf_data)
 {
     unsigned int i;
     unsigned int target_page_nr = 0;
...
 
     dma_to_copy = min(dma_buf->size,
         (size_t)(buf_data->nr_extres_pages * PAGE_SIZE));
-    ret = dma_buf_begin_cpu_access(dma_buf,
-#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
-        0, dma_to_copy,
-#endif
-        DMA_FROM_DEVICE);
+    ret = dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE);
     if (ret)
         goto out_unlock;
 
...
             break;
         }
     }
-    dma_buf_end_cpu_access(dma_buf,
-#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
-        0, dma_to_copy,
-#endif
-        DMA_FROM_DEVICE);
+    dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE);
     break;
 }
 default:
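
Since kernel 4.6, dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() take just the buffer and a dma_data_direction; the offset/length arguments were removed, so once pre-4.6 kernels are out of scope the version-gated argument lists collapse to the plain bracketing pattern:

    #include <linux/dma-buf.h>

    /* Bracket CPU reads of the dma-buf so the exporter can manage
     * cache coherency before and after the access.
     */
    ret = dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE);
    if (ret)
        return ret;

    /* ... read the buffer contents here ... */

    dma_buf_end_cpu_access(dma_buf, DMA_FROM_DEVICE);
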
...
     return ret;
 }
 
-#if !MALI_USE_CSF
 static int kbase_debug_copy(struct kbase_jd_atom *katom)
 {
     struct kbase_debug_copy_buffer *buffers = katom->softjob_data;
...
 
     return 0;
 }
+#endif /* IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST */
 #endif /* !MALI_USE_CSF */
 
 #define KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT ((u32)0x7)
...
 
 #if !MALI_USE_CSF
 
-/*
- * Sizes of user data to copy for each just-in-time memory interface version
- *
- * In interface version 2 onwards this is the same as the struct size, allowing
- * copying of arrays of structures from userspace.
- *
- * In interface version 1 the structure size was variable, and hence arrays of
- * structures cannot be supported easily, and were not a feature present in
- * version 1 anyway.
- */
-static const size_t jit_info_copy_size_for_jit_version[] = {
-    /* in jit_version 1, the structure did not have any end padding, hence
-     * it could be a different size on 32 and 64-bit clients. We therefore
-     * do not copy past the last member
-     */
-    [1] = offsetofend(struct base_jit_alloc_info_10_2, id),
-    [2] = sizeof(struct base_jit_alloc_info_11_5),
-    [3] = sizeof(struct base_jit_alloc_info)
-};
-
 static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
 {
     __user u8 *data = (__user u8 *)(uintptr_t) katom->jc;
...
     u32 count;
     int ret;
     u32 i;
-    size_t jit_info_user_copy_size;
 
-    WARN_ON(kctx->jit_version >=
-        ARRAY_SIZE(jit_info_copy_size_for_jit_version));
-    jit_info_user_copy_size =
-        jit_info_copy_size_for_jit_version[kctx->jit_version];
-    WARN_ON(jit_info_user_copy_size > sizeof(*info));
+    if (!kbase_mem_allow_alloc(kctx)) {
+        dev_dbg(kbdev->dev, "Invalid attempt to allocate JIT memory by %s/%d for ctx %d_%d",
+                current->comm, current->pid, kctx->tgid, kctx->id);
+        ret = -EINVAL;
+        goto fail;
+    }
 
     /* For backwards compatibility, and to prevent reading more than 1 jit
      * info struct on jit version 1
      */
-    if (katom->nr_extres == 0 || kctx->jit_version == 1)
+    if (katom->nr_extres == 0)
         katom->nr_extres = 1;
     count = katom->nr_extres;
...
 
     katom->softjob_data = info;
 
-    for (i = 0; i < count; i++, info++, data += jit_info_user_copy_size) {
-        if (copy_from_user(info, data, jit_info_user_copy_size) != 0) {
+    for (i = 0; i < count; i++, info++, data += sizeof(*info)) {
+        if (copy_from_user(info, data, sizeof(*info)) != 0) {
             ret = -EINVAL;
             goto free_info;
         }
-        /* Clear any remaining bytes when user struct is smaller than
-         * kernel struct. For jit version 1, this also clears the
-         * padding bytes
-         */
-        memset(((u8 *)info) + jit_info_user_copy_size, 0,
-            sizeof(*info) - jit_info_user_copy_size);
 
         ret = kbasep_jit_alloc_validate(kctx, info);
         if (ret)
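
With JIT interface version 1 gone, every element has the fixed size sizeof(struct base_jit_alloc_info), so the copy loop no longer needs a per-version stride or the trailing memset() that zeroed the bytes a shorter user struct left behind. Reduced to its essentials, the loop is the standard fixed-stride copy_from_user() pattern (sketch, local declarations abbreviated):

    for (i = 0; i < count; i++, info++, data += sizeof(*info)) {
        /* Elements are fixed-size, so a short copy is simply an error;
         * no zero-filling of tail bytes is needed any more.
         */
        if (copy_from_user(info, data, sizeof(*info)) != 0) {
            ret = -EINVAL;
            goto free_info;
        }
    }
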
...
      * Write the address of the JIT allocation to the user provided
      * GPU allocation.
      */
-    ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
-            &mapping);
+    ptr = kbase_vmap_prot(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+            KBASE_REG_CPU_WR, &mapping);
     if (!ptr) {
         /*
          * Leave the allocations "live" as the JIT free atom
...
 
     mutex_lock(&kctx->jctx.lock);
     kbase_finish_soft_job(katom);
-    resched = jd_done_nolock(katom, NULL);
+    resched = kbase_jd_done_nolock(katom, true);
     mutex_unlock(&kctx->jctx.lock);
 
     if (resched)
...
     lockdep_assert_held(&kctx->jctx.lock);
 
     ids = kbase_jit_free_get_ids(katom);
-    if (WARN_ON(ids == NULL)) {
+    if (WARN_ON(ids == NULL))
         return;
-    }
 
     /* Remove this atom from the jit_atoms_head list */
     list_del(&katom->jit_node);
...
         if (!kbase_sticky_resource_acquire(katom->kctx,
                 gpu_addr))
             goto failed_loop;
-    } else
+    } else {
         if (!kbase_sticky_resource_release_force(katom->kctx, NULL,
                 gpu_addr))
             failed = true;
+    }
 }
 
 /*
...
         ret = kbase_dump_cpu_gpu_time(katom);
         break;
 
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
     case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
         katom->event_code = kbase_sync_fence_out_trigger(katom,
             katom->event_code == BASE_JD_EVENT_DONE ?
...
     case BASE_JD_REQ_SOFT_EVENT_RESET:
         kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
         break;
+#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
     case BASE_JD_REQ_SOFT_DEBUG_COPY:
     {
         int res = kbase_debug_copy(katom);
...
             katom->event_code = BASE_JD_EVENT_JOB_INVALID;
         break;
     }
+#endif /* IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST */
     case BASE_JD_REQ_SOFT_JIT_ALLOC:
         ret = kbase_jit_allocate_process(katom);
         break;
...
 void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
 {
     switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
     case BASE_JD_REQ_SOFT_FENCE_WAIT:
         kbase_sync_fence_in_cancel_wait(katom);
         break;
...
             return -EINVAL;
         }
         break;
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
     case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
     {
         struct base_fence fence;
...
         if (copy_to_user((__user void *)(uintptr_t)katom->jc,
                 &fence, sizeof(fence)) != 0) {
             kbase_sync_fence_out_remove(katom);
-            kbase_sync_fence_close_fd(fd);
+            /* fd should have been closed here, but there's
+             * no good way of doing that. Since
+             * copy_to_user() very rarely fails, and the fd
+             * will get closed on process termination this
+             * won't be a problem.
+             */
             fence.basep.fd = -EINVAL;
             return -EINVAL;
         }
...
             fence.basep.fd);
         if (ret < 0)
             return ret;
-
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-        /*
-         * Set KCTX_NO_IMPLICIT_FENCE in the context the first
-         * time a soft fence wait job is observed. This will
-         * prevent the implicit dma-buf fence to conflict with
-         * the Android native sync fences.
-         */
-        if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC))
-            kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC);
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
     }
     break;
-#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#endif /* CONFIG_SYNC_FILE */
     case BASE_JD_REQ_SOFT_JIT_ALLOC:
         return kbase_jit_allocate_prepare(katom);
     case BASE_JD_REQ_SOFT_JIT_FREE:
...
         if (katom->jc == 0)
             return -EINVAL;
         break;
+#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
     case BASE_JD_REQ_SOFT_DEBUG_COPY:
         return kbase_debug_copy_prepare(katom);
+#endif /* IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST */
     case BASE_JD_REQ_SOFT_EXT_RES_MAP:
         return kbase_ext_res_prepare(katom);
     case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
...
     case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
         /* Nothing to do */
         break;
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
     case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
         /* If fence has not yet been signaled, do it now */
         kbase_sync_fence_out_trigger(katom, katom->event_code ==
...
         /* Release katom's reference to fence object */
         kbase_sync_fence_in_remove(katom);
         break;
-#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#endif /* CONFIG_SYNC_FILE */
+#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
     case BASE_JD_REQ_SOFT_DEBUG_COPY:
         kbase_debug_copy_finish(katom);
         break;
+#endif /* IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST */
     case BASE_JD_REQ_SOFT_JIT_ALLOC:
         kbase_jit_allocate_finish(katom);
         break;
...
 
         if (kbase_process_soft_job(katom_iter) == 0) {
             kbase_finish_soft_job(katom_iter);
-            resched |= jd_done_nolock(katom_iter, NULL);
+            resched |= kbase_jd_done_nolock(katom_iter, true);
 #ifdef CONFIG_MALI_ARBITER_SUPPORT
             atomic_dec(&kbdev->pm.gpu_users_waiting);
 #endif /* CONFIG_MALI_ARBITER_SUPPORT */