@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -28,6 +28,11 @@
 #include <linux/version.h>
 #include <linux/ratelimit.h>
 #include <linux/priority_control_manager.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
 
 #include <mali_kbase_jm.h>
 #include <mali_kbase_kinstr_jm.h>
@@ -35,12 +40,9 @@
 #include <tl/mali_kbase_tracepoints.h>
 #include <mali_linux_trace.h>
 
-#include "mali_kbase_dma_fence.h"
 #include <mali_kbase_cs_experimental.h>
 
 #include <mali_kbase_caps.h>
-
-#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
 
 /* Return whether katom will run on the GPU or not. Currently only soft jobs and
  * dependency-only atoms do not run on the GPU
@@ -76,6 +78,7 @@
 	kbase_kinstr_jm_atom_complete(katom);
 	dev_dbg(katom->kctx->kbdev->dev, "Atom %pK status to completed\n",
 		(void *)katom);
+	KBASE_TLSTREAM_TL_JD_ATOM_COMPLETE(katom->kctx->kbdev, katom);
 }
 
 /* Runs an atom, either by handing to the JS or by immediately running it in the case of soft-jobs
@@ -83,7 +86,7 @@
  * Returns whether the JS needs a reschedule.
  *
  * Note that the caller must also check the atom status and
- * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock
+ * if it is KBASE_JD_ATOM_STATE_COMPLETED must call kbase_jd_done_nolock
  */
 static bool jd_run_atom(struct kbase_jd_atom *katom)
 {
@@ -139,11 +142,17 @@
 		/* katom dep complete, attempt to run it */
 		bool resched = false;
 
+		KBASE_TLSTREAM_TL_RUN_ATOM_START(
+			katom->kctx->kbdev, katom,
+			kbase_jd_atom_id(katom->kctx, katom));
 		resched = jd_run_atom(katom);
+		KBASE_TLSTREAM_TL_RUN_ATOM_END(katom->kctx->kbdev, katom,
+					       kbase_jd_atom_id(katom->kctx,
+								katom));
 
 		if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
 			/* The atom has already finished */
-			resched |= jd_done_nolock(katom, NULL);
+			resched |= kbase_jd_done_nolock(katom, true);
 		}
 
 		if (resched)
@@ -153,25 +162,12 @@
 
 void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
 {
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	/* Flush dma-fence workqueue to ensure that any callbacks that may have
-	 * been queued are done before continuing.
-	 * Any successfully completed atom would have had all it's callbacks
-	 * completed before the atom was run, so only flush for failed atoms.
-	 */
-	if (katom->event_code != BASE_JD_EVENT_DONE)
-		flush_workqueue(katom->kctx->dma_fence.wq);
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
 }
 
 static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
 {
 	KBASE_DEBUG_ASSERT(katom);
 	KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
-
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	kbase_dma_fence_signal(katom);
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
 
 	kbase_gpu_vm_lock(katom->kctx);
 	/* only roll back if extres is non-NULL */
@@ -180,13 +176,7 @@
 
 	res_no = katom->nr_extres;
 	while (res_no-- > 0) {
-		struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
-		struct kbase_va_region *reg;
-
-		reg = kbase_region_tracker_find_region_base_address(
-				katom->kctx,
-				katom->extres[res_no].gpu_address);
-		kbase_unmap_external_resource(katom->kctx, reg, alloc);
+		kbase_unmap_external_resource(katom->kctx, katom->extres[res_no]);
 	}
 	kfree(katom->extres);
 	katom->extres = NULL;
@@ -202,26 +192,8 @@
 
 static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom *user_atom)
 {
-	int err_ret_val = -EINVAL;
+	int err = -EINVAL;
 	u32 res_no;
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	struct kbase_dma_fence_resv_info info = {
-		.resv_objs = NULL,
-		.dma_fence_resv_count = 0,
-		.dma_fence_excl_bitmap = NULL
-	};
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
-	/*
-	 * When both dma-buf fence and Android native sync is enabled, we
-	 * disable dma-buf fence for contexts that are using Android native
-	 * fences.
-	 */
-	const bool implicit_sync = !kbase_ctx_flag(katom->kctx,
-						   KCTX_NO_IMPLICIT_SYNC);
-#else /* CONFIG_SYNC || CONFIG_SYNC_FILE*/
-	const bool implicit_sync = true;
-#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
 	struct base_external_resource *input_extres;
 
 	KBASE_DEBUG_ASSERT(katom);
@@ -235,47 +207,18 @@
 	if (!katom->extres)
 		return -ENOMEM;
 
-	/* copy user buffer to the end of our real buffer.
-	 * Make sure the struct sizes haven't changed in a way
-	 * we don't support
-	 */
-	BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
-	input_extres = (struct base_external_resource *)
-			(((unsigned char *)katom->extres) +
-			(sizeof(*katom->extres) - sizeof(*input_extres)) *
-			katom->nr_extres);
+	input_extres = kmalloc_array(katom->nr_extres, sizeof(*input_extres), GFP_KERNEL);
+	if (!input_extres) {
+		err = -ENOMEM;
+		goto failed_input_alloc;
+	}
 
 	if (copy_from_user(input_extres,
 			get_compat_pointer(katom->kctx, user_atom->extres_list),
 			sizeof(*input_extres) * katom->nr_extres) != 0) {
-		err_ret_val = -EINVAL;
-		goto early_err_out;
+		err = -EINVAL;
+		goto failed_input_copy;
 	}
-
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	if (implicit_sync) {
-		info.resv_objs =
-			kmalloc_array(katom->nr_extres,
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
-				      sizeof(struct reservation_object *),
-#else
-				      sizeof(struct dma_resv *),
-#endif
-				      GFP_KERNEL);
-		if (!info.resv_objs) {
-			err_ret_val = -ENOMEM;
-			goto early_err_out;
-		}
-
-		info.dma_fence_excl_bitmap =
-			kcalloc(BITS_TO_LONGS(katom->nr_extres),
-				sizeof(unsigned long), GFP_KERNEL);
-		if (!info.dma_fence_excl_bitmap) {
-			err_ret_val = -ENOMEM;
-			goto early_err_out;
-		}
-	}
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
 
 	/* Take the processes mmap lock */
 	down_read(kbase_mem_get_process_mmap_lock());
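The hunk above drops the old aliasing trick, where the user's `base_external_resource` array was copied into the tail of the `katom->extres` allocation and expanded in place, in favour of a separate scratch buffer from `kmalloc_array()` that every exit path frees. A minimal userspace sketch of the same shape, with `malloc`/`memcpy` standing in for `kmalloc_array`/`copy_from_user`, and every name below invented for illustration:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct user_res { uint64_t ext_resource; };   /* analog of base_external_resource */
struct mapped_res { uint64_t base_address; }; /* analog of the expanded per-entry state */

/* Expand n user descriptors into richer entries via a separate scratch
 * buffer, so writes to 'expanded' can never clobber unread input.
 */
static int expand_resources(const struct user_res *user_buf, size_t n,
			    struct mapped_res **out)
{
	struct mapped_res *expanded = malloc(n * sizeof(*expanded));
	struct user_res *scratch = malloc(n * sizeof(*scratch));
	size_t i;

	if (!expanded || !scratch)
		goto fail;

	memcpy(scratch, user_buf, n * sizeof(*scratch)); /* copy_from_user() analog */
	for (i = 0; i < n; i++)
		expanded[i].base_address = scratch[i].ext_resource & ~1ull;

	free(scratch); /* scratch is freed on the success path too */
	*out = expanded;
	return 0;
fail:
	free(scratch);
	free(expanded);
	return -1;
}
```

The cost is one extra transient allocation per submission; the gain is that the loop no longer has to honour a strict "no write before the last read of each element" ordering, which is exactly the NOTE comment that disappears in this hunk.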
@@ -283,19 +226,13 @@
 	/* need to keep the GPU VM locked while we set up UMM buffers */
 	kbase_gpu_vm_lock(katom->kctx);
 	for (res_no = 0; res_no < katom->nr_extres; res_no++) {
-		struct base_external_resource *res = &input_extres[res_no];
+		struct base_external_resource *user_res = &input_extres[res_no];
 		struct kbase_va_region *reg;
-		struct kbase_mem_phy_alloc *alloc;
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-		bool exclusive;
-		exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
-				? true : false;
-#endif
+
 		reg = kbase_region_tracker_find_region_enclosing_address(
-				katom->kctx,
-				res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+			katom->kctx, user_res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
 		/* did we find a matching region object? */
-		if (kbase_is_region_invalid_or_free(reg)) {
+		if (unlikely(kbase_is_region_invalid_or_free(reg))) {
 			/* roll back */
 			goto failed_loop;
 		}
@@ -305,36 +242,11 @@
 			katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
 		}
 
-		alloc = kbase_map_external_resource(katom->kctx, reg,
-				current->mm);
-		if (!alloc) {
-			err_ret_val = -EINVAL;
+		err = kbase_map_external_resource(katom->kctx, reg, current->mm);
+		if (err)
 			goto failed_loop;
-		}
 
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-		if (implicit_sync &&
-		    reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
-			struct reservation_object *resv;
-#else
-			struct dma_resv *resv;
-#endif
-			resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
-			if (resv)
-				kbase_dma_fence_add_reservation(resv, &info,
-								exclusive);
-		}
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
-
-		/* finish with updating out array with the data we found */
-		/* NOTE: It is important that this is the last thing we do (or
-		 * at least not before the first write) as we overwrite elements
-		 * as we loop and could be overwriting ourself, so no writes
-		 * until the last read for an element.
-		 */
-		katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */
-		katom->extres[res_no].alloc = alloc;
+		katom->extres[res_no] = reg;
 	}
 	/* successfully parsed the extres array */
 	/* drop the vm lock now */
@@ -343,57 +255,33 @@
 	/* Release the processes mmap lock */
 	up_read(kbase_mem_get_process_mmap_lock());
 
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	if (implicit_sync) {
-		if (info.dma_fence_resv_count) {
-			int ret;
-
-			ret = kbase_dma_fence_wait(katom, &info);
-			if (ret < 0)
-				goto failed_dma_fence_setup;
-		}
-
-		kfree(info.resv_objs);
-		kfree(info.dma_fence_excl_bitmap);
-	}
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
+	/* Free the buffer holding data from userspace */
+	kfree(input_extres);
 
 	/* all done OK */
 	return 0;
 
 	/* error handling section */
-
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-failed_dma_fence_setup:
-	/* Lock the processes mmap lock */
-	down_read(kbase_mem_get_process_mmap_lock());
-
-	/* lock before we unmap */
-	kbase_gpu_vm_lock(katom->kctx);
-#endif
-
- failed_loop:
-	/* undo the loop work */
+failed_loop:
+	/* undo the loop work. We are guaranteed to have access to the VA region
+	 * as we hold a reference to it until it's unmapped
	 */
 	while (res_no-- > 0) {
-		struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+		struct kbase_va_region *reg = katom->extres[res_no];
 
-		kbase_unmap_external_resource(katom->kctx, NULL, alloc);
+		kbase_unmap_external_resource(katom->kctx, reg);
 	}
 	kbase_gpu_vm_unlock(katom->kctx);
 
 	/* Release the processes mmap lock */
 	up_read(kbase_mem_get_process_mmap_lock());
 
-	early_err_out:
+failed_input_copy:
+	kfree(input_extres);
+failed_input_alloc:
 	kfree(katom->extres);
 	katom->extres = NULL;
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	if (implicit_sync) {
-		kfree(info.resv_objs);
-		kfree(info.dma_fence_excl_bitmap);
-	}
-#endif
-	return err_ret_val;
+	return err;
 }
 
 static inline void jd_resolve_dep(struct list_head *out_list,
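With the dma-fence branches gone, the unwind collapses to three labels in strict reverse order of setup: `failed_loop` releases the iterations that completed, `failed_input_copy` frees the scratch buffer, and `failed_input_alloc` frees `katom->extres`. A hedged standalone sketch of this staged `goto` pattern, with a hypothetical `acquire`/`release` pair standing in for the map/unmap calls:

```c
#include <stdlib.h>

static int acquire(size_t i, void **slot) { (void)i; *slot = malloc(16); return *slot ? 0 : -1; }
static void release(void *res) { free(res); }

/* On success the caller owns *out and its n entries; on failure everything
 * acquired so far has been released, in reverse order of acquisition.
 */
static int setup_all(size_t n, void ***out)
{
	void **slots = malloc(n * sizeof(*slots));
	size_t i;

	if (!slots)
		goto failed_alloc;

	for (i = 0; i < n; i++) {
		if (acquire(i, &slots[i]))
			goto failed_loop;
	}
	*out = slots;
	return 0;

failed_loop:
	/* i indexes the entry that failed; undo only the completed ones,
	 * mirroring the driver's 'while (res_no-- > 0)' rollback
	 */
	while (i-- > 0)
		release(slots[i]);
	free(slots);
failed_alloc:
	return -1;
}
```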
@@ -416,10 +304,6 @@
 
 	if (katom->event_code != BASE_JD_EVENT_DONE &&
 	    (dep_type != BASE_JD_DEP_TYPE_ORDER)) {
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-		kbase_dma_fence_cancel_callbacks(dep_atom);
-#endif
-
 		dep_atom->event_code = katom->event_code;
 		KBASE_DEBUG_ASSERT(dep_atom->status !=
 				KBASE_JD_ATOM_STATE_UNUSED);
@@ -433,35 +317,8 @@
 		    (IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
 		    !dep_atom->will_fail_event_code &&
 		    !other_dep_atom->will_fail_event_code))) {
-			bool dep_satisfied = true;
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-			int dep_count;
-
-			dep_count = kbase_fence_dep_count_read(dep_atom);
-			if (likely(dep_count == -1)) {
-				dep_satisfied = true;
-			} else {
-				/*
-				 * There are either still active callbacks, or
-				 * all fences for this @dep_atom has signaled,
-				 * but the worker that will queue the atom has
-				 * not yet run.
-				 *
-				 * Wait for the fences to signal and the fence
-				 * worker to run and handle @dep_atom. If
-				 * @dep_atom was completed due to error on
-				 * @katom, then the fence worker will pick up
-				 * the complete status and error code set on
-				 * @dep_atom above.
-				 */
-				dep_satisfied = false;
-			}
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
-
-			if (dep_satisfied) {
-				dep_atom->in_jd_list = true;
-				list_add_tail(&dep_atom->jd_item, out_list);
-			}
+			dep_atom->in_jd_list = true;
+			list_add_tail(&dep_atom->jd_item, out_list);
 		}
 	}
 }
@@ -520,33 +377,8 @@
 					dep_atom->dep[0].atom);
 				bool dep1_valid = is_dep_valid(
 					dep_atom->dep[1].atom);
-				bool dep_satisfied = true;
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-				int dep_count;
 
-				dep_count = kbase_fence_dep_count_read(
-					dep_atom);
-				if (likely(dep_count == -1)) {
-					dep_satisfied = true;
-				} else {
-					/*
-					 * There are either still active callbacks, or
-					 * all fences for this @dep_atom has signaled,
-					 * but the worker that will queue the atom has
-					 * not yet run.
-					 *
-					 * Wait for the fences to signal and the fence
-					 * worker to run and handle @dep_atom. If
-					 * @dep_atom was completed due to error on
-					 * @katom, then the fence worker will pick up
-					 * the complete status and error code set on
-					 * @dep_atom above.
-					 */
-					dep_satisfied = false;
-				}
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
-
-				if (dep0_valid && dep1_valid && dep_satisfied) {
+				if (dep0_valid && dep1_valid) {
 					dep_atom->in_jd_list = true;
 					list_add(&dep_atom->jd_item, out_list);
 				}
@@ -612,8 +444,8 @@
 	else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP)
 		size_to_read = sizeof(u64[COUNT]);
 
-	ptr = kbase_vmap(kctx, reg->heap_info_gpu_addr, size_to_read,
-			&mapping);
+	ptr = kbase_vmap_prot(kctx, reg->heap_info_gpu_addr, size_to_read,
+			KBASE_REG_CPU_RD, &mapping);
 
 	if (!ptr) {
 		dev_warn(kctx->kbdev->dev,
@@ -697,23 +529,17 @@
 }
 #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
 
-/*
- * Perform the necessary handling of an atom that has finished running
- * on the GPU.
- *
- * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
- * is responsible for calling kbase_finish_soft_job *before* calling this function.
- *
- * The caller must hold the kbase_jd_context.lock.
- */
-bool jd_done_nolock(struct kbase_jd_atom *katom,
-		struct list_head *completed_jobs_ctx)
+bool kbase_jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately)
 {
 	struct kbase_context *kctx = katom->kctx;
 	struct list_head completed_jobs;
 	struct list_head runnable_jobs;
 	bool need_to_try_schedule_context = false;
 	int i;
+
+	lockdep_assert_held(&kctx->jctx.lock);
+
+	KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START(kctx->kbdev, katom);
 
 	INIT_LIST_HEAD(&completed_jobs);
 	INIT_LIST_HEAD(&runnable_jobs);
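Alongside the rename to `kbase_jd_done_nolock()`, the deleted block comment's locking rule ("The caller must hold the kbase_jd_context.lock") is now enforced at runtime by `lockdep_assert_held()`, which splats under CONFIG_LOCKDEP if a caller enters unlocked. A userspace analog of checking such a `_nolock` precondition with pthreads (a sketch with illustrative names, not the driver's types):

```c
#include <assert.h>
#include <pthread.h>

struct jd_ctx {
	pthread_mutex_t lock;
	int locked;      /* written only while lock is held */
	pthread_t owner; /* valid only while locked != 0 */
	int job_nr;
};

static void ctx_lock(struct jd_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->owner = pthread_self();
	c->locked = 1;
}

static void ctx_unlock(struct jd_ctx *c)
{
	c->locked = 0;
	pthread_mutex_unlock(&c->lock);
}

/* _nolock convention: the caller must already hold c->lock, which is
 * what lockdep_assert_held() documents in the kernel version
 */
static void job_count_dec_nolock(struct jd_ctx *c)
{
	assert(c->locked && pthread_equal(c->owner, pthread_self()));
	c->job_nr--;
}
```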
@@ -736,6 +562,7 @@
 	}
 
 	jd_mark_atom_complete(katom);
+
 	list_add_tail(&katom->jd_item, &completed_jobs);
 
 	while (!list_empty(&completed_jobs)) {
@@ -767,7 +594,13 @@
 
 			if (node->status != KBASE_JD_ATOM_STATE_COMPLETED &&
 			    !kbase_ctx_flag(kctx, KCTX_DYING)) {
+				KBASE_TLSTREAM_TL_RUN_ATOM_START(
+					kctx->kbdev, node,
+					kbase_jd_atom_id(kctx, node));
 				need_to_try_schedule_context |= jd_run_atom(node);
+				KBASE_TLSTREAM_TL_RUN_ATOM_END(
+					kctx->kbdev, node,
+					kbase_jd_atom_id(kctx, node));
 			} else {
 				node->event_code = katom->event_code;
 
@@ -795,10 +628,10 @@
 		 * is in a disjoint state (ie. being reset).
 		 */
 		kbase_disjoint_event_potential(kctx->kbdev);
-		if (completed_jobs_ctx)
-			list_add_tail(&katom->jd_item, completed_jobs_ctx);
-		else
+		if (post_immediately && list_empty(&kctx->completed_jobs))
 			kbase_event_post(kctx, katom);
+		else
+			list_add_tail(&katom->jd_item, &kctx->completed_jobs);
 
 		/* Decrement and check the TOTAL number of jobs. This includes
 		 * those not tracked by the scheduler: 'not ready to run' and
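The reworked posting test is subtle: even when the caller asks for immediate posting, the event may only bypass `kctx->completed_jobs` if that list is empty, otherwise it would overtake events the worker has already deferred and userspace would observe completions out of order. A small sketch of that fast-path rule (illustrative types, not the driver's):

```c
#include <stdbool.h>
#include <stddef.h>

struct ev { struct ev *next; int id; };
struct evq { struct ev *head, *tail; };

static bool evq_empty(const struct evq *q) { return q->head == NULL; }

static void evq_push(struct evq *q, struct ev *e)
{
	e->next = NULL;
	if (q->tail)
		q->tail->next = e;
	else
		q->head = e;
	q->tail = e;
}

/* Deliver immediately only when nothing older is still queued, so
 * observers always receive events in completion order.
 */
static void post_event(struct evq *q, struct ev *e, bool post_immediately,
		       void (*deliver)(struct ev *))
{
	if (post_immediately && evq_empty(q))
		deliver(e);
	else
		evq_push(q, e);
}
```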
@@ -811,11 +644,11 @@
 		 */
 		wake_up(&kctx->jctx.zero_jobs_wait);
 	}
-
+	KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_END(kctx->kbdev, katom);
 	return need_to_try_schedule_context;
 }
 
-KBASE_EXPORT_TEST_API(jd_done_nolock);
+KBASE_EXPORT_TEST_API(kbase_jd_done_nolock);
 
 #if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
 enum {
@@ -923,7 +756,6 @@
 	katom->jobslot = user_atom->jobslot;
 	katom->seq_nr = user_atom->seq_nr;
 	katom->atom_flags = 0;
-	katom->retry_count = 0;
 	katom->need_cache_flush_cores_retained = 0;
 	katom->pre_dep = NULL;
 	katom->post_dep = NULL;
@@ -957,14 +789,11 @@
 
 	INIT_LIST_HEAD(&katom->queue);
 	INIT_LIST_HEAD(&katom->jd_item);
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	kbase_fence_dep_count_set(katom, -1);
-#endif
 
 	/* Don't do anything if there is a mess up with dependencies.
-	   This is done in a separate cycle to check both the dependencies at ones, otherwise
-	   it will be extra complexity to deal with 1st dependency ( just added to the list )
-	   if only the 2nd one has invalid config.
+	 * This is done in a separate cycle to check both the dependencies at ones, otherwise
+	 * it will be extra complexity to deal with 1st dependency ( just added to the list )
+	 * if only the 2nd one has invalid config.
 	 */
 	for (i = 0; i < 2; i++) {
 		int dep_atom_number = user_atom->pre_dep[i].atom_id;
@@ -984,8 +813,7 @@
 				 * dependencies.
 				 */
 				jd_trace_atom_submit(kctx, katom, NULL);
-
-				return jd_done_nolock(katom, NULL);
+				return kbase_jd_done_nolock(katom, true);
 			}
 		}
 	}
@@ -1049,8 +877,7 @@
 			if (err >= 0)
 				kbase_finish_soft_job(katom);
 		}
-
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
 
 	katom->will_fail_event_code = katom->event_code;
@@ -1076,7 +903,7 @@
 	/* Create a new atom. */
 	jd_trace_atom_submit(kctx, katom, &katom->sched_priority);
 
-#if !MALI_INCREMENTAL_RENDERING
+#if !MALI_INCREMENTAL_RENDERING_JM
 	/* Reject atoms for incremental rendering if not supported */
 	if (katom->core_req &
 	    (BASE_JD_REQ_START_RENDERPASS|BASE_JD_REQ_END_RENDERPASS)) {
@@ -1084,9 +911,9 @@
 			"Rejecting atom with unsupported core_req 0x%x\n",
 			katom->core_req);
 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
-#endif /* !MALI_INCREMENTAL_RENDERING */
+#endif /* !MALI_INCREMENTAL_RENDERING_JM */
 
 	if (katom->core_req & BASE_JD_REQ_END_RENDERPASS) {
 		WARN_ON(katom->jc != 0);
@@ -1098,7 +925,7 @@
 		 */
 		dev_err(kctx->kbdev->dev, "Rejecting atom with jc = NULL\n");
 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
 
 	/* Reject atoms with an invalid device_nr */
@@ -1108,7 +935,7 @@
 			"Rejecting atom with invalid device_nr %d\n",
 			katom->device_nr);
 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
 
 	/* Reject atoms with invalid core requirements */
@@ -1118,7 +945,7 @@
 			"Rejecting atom with invalid core requirements\n");
 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
 		katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
 
 	/* Reject soft-job atom of certain types from accessing external resources */
@@ -1129,7 +956,7 @@
 		dev_err(kctx->kbdev->dev,
 			"Rejecting soft-job atom accessing external resources\n");
 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
 
 	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
@@ -1137,7 +964,7 @@
 		if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
 			/* setup failed (no access, bad resource, unknown resource types, etc.) */
 			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-			return jd_done_nolock(katom, NULL);
+			return kbase_jd_done_nolock(katom, true);
 		}
 	}
 
@@ -1148,7 +975,7 @@
 		 * JIT IDs - atom is invalid.
 		 */
 		katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-		return jd_done_nolock(katom, NULL);
+		return kbase_jd_done_nolock(katom, true);
 	}
 #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
 
@@ -1162,13 +989,13 @@
 	if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
 		if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
 			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-			return jd_done_nolock(katom, NULL);
+			return kbase_jd_done_nolock(katom, true);
 		}
 	} else {
 		/* Soft-job */
 		if (kbase_prepare_soft_job(katom) != 0) {
 			katom->event_code = BASE_JD_EVENT_JOB_INVALID;
-			return jd_done_nolock(katom, NULL);
+			return kbase_jd_done_nolock(katom, true);
 		}
 	}
 
@@ -1181,16 +1008,10 @@
 	if (queued && !IS_GPU_ATOM(katom))
 		return false;
 
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	if (kbase_fence_dep_count_read(katom) != -1)
-		return false;
-
-#endif /* CONFIG_MALI_BIFROST_DMA_FENCE */
-
 	if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
 		if (kbase_process_soft_job(katom) == 0) {
 			kbase_finish_soft_job(katom);
-			return jd_done_nolock(katom, NULL);
+			return kbase_jd_done_nolock(katom, true);
 		}
 		return false;
 	}
@@ -1220,7 +1041,7 @@
 	}
 
 	/* This is a pure dependency. Resolve it immediately */
-	return jd_done_nolock(katom, NULL);
+	return kbase_jd_done_nolock(katom, true);
 }
 
 int kbase_jd_submit(struct kbase_context *kctx,
@@ -1235,15 +1056,13 @@
 	u32 latest_flush;
 
 	bool jd_atom_is_v2 = (stride == sizeof(struct base_jd_atom_v2) ||
-		stride == offsetof(struct base_jd_atom_v2, renderpass_id));
+			      stride == offsetof(struct base_jd_atom_v2, renderpass_id));
 
 	/*
 	 * kbase_jd_submit isn't expected to fail and so all errors with the
 	 * jobs are reported by immediately failing them (through event system)
 	 */
 	kbdev = kctx->kbdev;
-
-	beenthere(kctx, "%s", "Enter");
 
 	if (kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
 		dev_err(kbdev->dev, "Attempt to submit to a context that has SUBMIT_DISABLED set on it\n");
@@ -1260,18 +1079,26 @@
 		return -EINVAL;
 	}
 
+	if (nr_atoms > BASE_JD_ATOM_COUNT) {
+		dev_dbg(kbdev->dev, "Invalid attempt to submit %u atoms at once for kctx %d_%d",
+			nr_atoms, kctx->tgid, kctx->id);
+		return -EINVAL;
+	}
+
 	/* All atoms submitted in this call have the same flush ID */
 	latest_flush = kbase_backend_get_current_flush_id(kbdev);
 
 	for (i = 0; i < nr_atoms; i++) {
-		struct base_jd_atom user_atom;
+		struct base_jd_atom user_atom = {
+			.seq_nr = 0,
+		};
 		struct base_jd_fragment user_jc_incr;
 		struct kbase_jd_atom *katom;
 
 		if (unlikely(jd_atom_is_v2)) {
 			if (copy_from_user(&user_atom.jc, user_addr, sizeof(struct base_jd_atom_v2)) != 0) {
 				dev_dbg(kbdev->dev,
-					"Invalid atom address %p passed to job_submit\n",
+					"Invalid atom address %pK passed to job_submit\n",
 					user_addr);
 				err = -EFAULT;
 				break;
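Two hardening changes land in this hunk: the batch size is validated against `BASE_JD_ATOM_COUNT` before any work is done, and `user_atom` gains a designated initializer so the shorter v2 `copy_from_user()` cannot leave trailing fields (such as `seq_nr`) holding stack garbage. The same two idioms in standalone C (placeholder struct and limit, not the driver's definitions):

```c
#include <stdint.h>
#include <string.h>

#define MAX_ATOMS 256u /* placeholder for BASE_JD_ATOM_COUNT */

struct atom { uint64_t jc; uint64_t seq_nr; }; /* newer, larger layout */

/* 'stride' is how much the caller actually supplied per entry; it is
 * assumed to be pre-validated as <= sizeof(struct atom).
 */
static int submit(const char *buf, uint32_t nr_atoms, size_t stride)
{
	uint32_t i;

	if (nr_atoms > MAX_ATOMS)
		return -1; /* reject oversized batches up front */

	for (i = 0; i < nr_atoms; i++) {
		/* a designated initializer zeroes every unnamed field, so a
		 * short copy leaves the remainder well-defined, not garbage
		 */
		struct atom a = { .seq_nr = 0 };

		memcpy(&a, buf + (size_t)i * stride, stride);
		/* ... validate and queue 'a' ... */
	}
	return 0;
}
```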
@@ -1282,7 +1109,7 @@
 		} else {
 			if (copy_from_user(&user_atom, user_addr, stride) != 0) {
 				dev_dbg(kbdev->dev,
-					"Invalid atom address %p passed to job_submit\n",
+					"Invalid atom address %pK passed to job_submit\n",
 					user_addr);
 				err = -EFAULT;
 				break;
@@ -1378,16 +1205,22 @@
 			}
 			mutex_lock(&jctx->lock);
 		}
-
+		KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START(kbdev, katom);
 		need_to_try_schedule_context |= jd_submit_atom(kctx, &user_atom,
 			&user_jc_incr, katom);
-
+		KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_END(kbdev, katom);
 		/* Register a completed job as a disjoint event when the GPU is in a disjoint state
 		 * (ie. being reset).
 		 */
 		kbase_disjoint_event_potential(kbdev);
 
 		mutex_unlock(&jctx->lock);
+		if (fatal_signal_pending(current)) {
+			dev_dbg(kbdev->dev, "Fatal signal pending for kctx %d_%d",
+				kctx->tgid, kctx->id);
+			/* We're being killed so the result code doesn't really matter */
+			return 0;
+		}
 	}
 
 	if (need_to_try_schedule_context)
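Checking `fatal_signal_pending(current)` after each atom stops a killed process from grinding through the remainder of a large batch; returning 0 is harmless because the dying task never reads the result. A userspace analog of bailing out of a long submission loop on a delivered signal (a sketch, not the kernel API):

```c
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_fatal_signal;

static void on_term(int sig)
{
	(void)sig;
	got_fatal_signal = 1;
}

int main(void)
{
	int i;

	signal(SIGTERM, on_term);
	for (i = 0; i < 1000000; i++) {
		/* ... submit one work item ... */
		if (got_fatal_signal) {
			fprintf(stderr, "stopping early at item %d\n", i);
			break; /* the result no longer matters to a dying caller */
		}
	}
	return 0;
}
```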
@@ -1463,11 +1296,13 @@
 	}
 
 	if ((katom->event_code != BASE_JD_EVENT_DONE) &&
-			(!kbase_ctx_flag(katom->kctx, KCTX_DYING)))
-		dev_err(kbdev->dev,
-			"t6xx: GPU fault 0x%02lx from job slot %d\n",
-			(unsigned long)katom->event_code,
-			katom->slot_nr);
+	    (!kbase_ctx_flag(katom->kctx, KCTX_DYING))) {
+		if (!kbase_is_quick_reset_enabled(kbdev))
+			dev_err(kbdev->dev,
+				"t6xx: GPU fault 0x%02lx from job slot %d\n",
+				(unsigned long)katom->event_code,
+				katom->slot_nr);
+	}
 
 	/* Retain state before the katom disappears */
 	kbasep_js_atom_retained_state_copy(&katom_retained_state, katom);
@@ -1479,9 +1314,8 @@
 	kbasep_js_remove_job(kbdev, kctx, katom);
 	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 	mutex_unlock(&js_devdata->queue_mutex);
-	katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_CTX_REF;
-	/* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
-	jd_done_nolock(katom, &kctx->completed_jobs);
+	/* kbase_jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+	kbase_jd_done_nolock(katom, false);
 
 	/* katom may have been freed now, do not use! */
 
@@ -1498,22 +1332,23 @@
 		 * drop our reference. But do not call kbase_jm_idle_ctx(), as
 		 * the context is active and fast-starting is allowed.
 		 *
-		 * If an atom has been fast-started then kctx->atoms_pulled will
-		 * be non-zero but KCTX_ACTIVE will still be false (as the
-		 * previous pm reference has been inherited). Do NOT drop our
-		 * reference, as it has been re-used, and leave the context as
-		 * active.
+		 * If an atom has been fast-started then
+		 * kbase_jsctx_atoms_pulled(kctx) will return non-zero but
+		 * KCTX_ACTIVE will still be false (as the previous pm
+		 * reference has been inherited). Do NOT drop our reference, as
+		 * it has been re-used, and leave the context as active.
 		 *
-		 * If no new atoms have been started then KCTX_ACTIVE will still
-		 * be false and atoms_pulled will be zero, so drop the reference
-		 * and call kbase_jm_idle_ctx().
+		 * If no new atoms have been started then KCTX_ACTIVE will
+		 * still be false and kbase_jsctx_atoms_pulled(kctx) will
+		 * return zero, so drop the reference and call
+		 * kbase_jm_idle_ctx().
 		 *
 		 * As the checks are done under both the queue_mutex and
 		 * hwaccess_lock is should be impossible for this to race
 		 * with the scheduler code.
 		 */
 		if (kbase_ctx_flag(kctx, KCTX_ACTIVE) ||
-				!atomic_read(&kctx->atoms_pulled)) {
+		    !kbase_jsctx_atoms_pulled(kctx)) {
 			/* Calling kbase_jm_idle_ctx() here will ensure that
 			 * atoms are not fast-started when we drop the
 			 * hwaccess_lock. This is not performed if
@@ -1546,7 +1381,7 @@
 	kbase_js_sched_all(kbdev);
 
 	if (!atomic_dec_return(&kctx->work_count)) {
-		/* If worker now idle then post all events that jd_done_nolock()
+		/* If worker now idle then post all events that kbase_jd_done_nolock()
 		 * has queued
 		 */
 		mutex_lock(&jctx->lock);
@@ -1595,6 +1430,7 @@
 	bool need_to_try_schedule_context;
 	bool attr_state_changed;
 	struct kbase_device *kbdev;
+	CSTD_UNUSED(need_to_try_schedule_context);
 
 	/* Soft jobs should never reach this function */
 	KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
@@ -1620,7 +1456,7 @@
 
 	mutex_lock(&jctx->lock);
 
-	need_to_try_schedule_context = jd_done_nolock(katom, NULL);
+	need_to_try_schedule_context = kbase_jd_done_nolock(katom, true);
 	/* Because we're zapping, we're not adding any more jobs to this ctx, so no need to
 	 * schedule the context. There's also no need for the jsctx_mutex to have been taken
 	 * around this too.
@@ -1663,6 +1499,8 @@
 	KBASE_DEBUG_ASSERT(kctx);
 	kbdev = kctx->kbdev;
 	KBASE_DEBUG_ASSERT(kbdev);
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
 
 	if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
 		katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
@@ -1740,19 +1578,7 @@
 		kbase_cancel_soft_job(katom);
 	}
 
-
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	kbase_dma_fence_cancel_all_atoms(kctx);
-#endif
-
 	mutex_unlock(&kctx->jctx.lock);
-
-#ifdef CONFIG_MALI_BIFROST_DMA_FENCE
-	/* Flush dma-fence workqueue to ensure that any callbacks that may have
-	 * been queued are done before continuing.
-	 */
-	flush_workqueue(kctx->dma_fence.wq);
-#endif
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	kbase_debug_job_fault_kctx_unblock(kctx);
@@ -1790,11 +1616,10 @@
 		kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
 		kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
 
-#if defined(CONFIG_MALI_BIFROST_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
 		kctx->jctx.atoms[i].dma_fence.context =
 			dma_fence_context_alloc(1);
 		atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
-		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
 #endif
 	}
 
---|