@@ -14,74 +14,28 @@
 #include "rga_iommu.h"
 #include "rga_debugger.h"
 
-struct rga_job *
-rga_scheduler_get_pending_job_list(struct rga_scheduler_t *scheduler)
-{
-	unsigned long flags;
-	struct rga_job *job;
-
-	spin_lock_irqsave(&scheduler->irq_lock, flags);
-
-	job = list_first_entry_or_null(&scheduler->todo_list,
-		struct rga_job, head);
-
-	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-	return job;
-}
-
-struct rga_job *
-rga_scheduler_get_running_job(struct rga_scheduler_t *scheduler)
-{
-	unsigned long flags;
-	struct rga_job *job;
-
-	spin_lock_irqsave(&scheduler->irq_lock, flags);
-
-	job = scheduler->running_job;
-
-	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-	return job;
-}
-
-struct rga_scheduler_t *rga_job_get_scheduler(struct rga_job *job)
-{
-	return job->scheduler;
-}
-
 static void rga_job_free(struct rga_job *job)
 {
 	free_page((unsigned long)job);
 }
 
-void rga_job_session_destroy(struct rga_session *session)
+static void rga_job_kref_release(struct kref *ref)
 {
-	struct rga_scheduler_t *scheduler = NULL;
-	struct rga_job *job_pos, *job_q;
-	int i;
+	struct rga_job *job;
 
-	unsigned long flags;
+	job = container_of(ref, struct rga_job, refcount);
 
-	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
-		scheduler = rga_drvdata->scheduler[i];
+	rga_job_free(job);
+}
 
-		spin_lock_irqsave(&scheduler->irq_lock, flags);
+static int rga_job_put(struct rga_job *job)
+{
+	return kref_put(&job->refcount, rga_job_kref_release);
+}
 
-		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
-			if (session == job_pos->session) {
-				list_del(&job_pos->head);
-
-				spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-				rga_job_free(job_pos);
-
-				spin_lock_irqsave(&scheduler->irq_lock, flags);
-			}
-		}
-
-		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-	}
+static void rga_job_get(struct rga_job *job)
+{
+	kref_get(&job->refcount);
 }
 
 static int rga_job_cleanup(struct rga_job *job)
@@ -90,7 +44,7 @@
 		pr_err("(pid:%d) job clean use time = %lld\n", job->pid,
 			ktime_us_delta(ktime_get(), job->timestamp));
 
-	rga_job_free(job);
+	rga_job_put(job);
 
 	return 0;
 }
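
Note on the hunks above: direct rga_job_free() calls are replaced by kref-based reference counting, so a job stays valid as long as either the scheduler path or the completion path still holds it. Below is a minimal sketch of that generic kref lifecycle; the struct demo_job type and demo_* names are illustrative, not part of the driver.

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_job {
	struct kref refcount;
	/* ... payload ... */
};

static void demo_job_release(struct kref *ref)
{
	struct demo_job *job = container_of(ref, struct demo_job, refcount);

	kfree(job);	/* runs only when the last reference is dropped */
}

static void demo_job_lifecycle(struct demo_job *job)
{
	kref_init(&job->refcount);			/* owner's initial reference */
	kref_get(&job->refcount);			/* a second path takes its own */
	kref_put(&job->refcount, demo_job_release);	/* first path done */
	kref_put(&job->refcount, demo_job_release);	/* last put frees the job */
}
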
@@ -165,6 +119,7 @@
 		return NULL;
 
 	INIT_LIST_HEAD(&job->head);
+	kref_init(&job->refcount);
 
 	job->timestamp = ktime_get();
 	job->pid = current->pid;
@@ -232,16 +187,18 @@
 		return ret;
 	}
 
+	set_bit(RGA_JOB_STATE_RUNNING, &job->state);
+
 	/* for debug */
 	if (DEBUGGER_EN(MSG))
 		rga_job_dump_info(job);
 
 	return ret;
-
 }
 
-static void rga_job_next(struct rga_scheduler_t *scheduler)
+void rga_job_next(struct rga_scheduler_t *scheduler)
 {
+	int ret;
 	struct rga_job *job = NULL;
 	unsigned long flags;
 
@@ -261,51 +218,33 @@
 	scheduler->job_count--;
 
 	scheduler->running_job = job;
+	set_bit(RGA_JOB_STATE_PREPARE, &job->state);
+	rga_job_get(job);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 
-	job->ret = rga_job_run(job, scheduler);
+	ret = rga_job_run(job, scheduler);
 	/* If some error before hw run */
-	if (job->ret < 0) {
-		pr_err("some error on rga_job_run before hw start, %s(%d)\n",
-			__func__, __LINE__);
+	if (ret < 0) {
+		pr_err("some error on rga_job_run before hw start, %s(%d)\n", __func__, __LINE__);
 
 		spin_lock_irqsave(&scheduler->irq_lock, flags);
 
 		scheduler->running_job = NULL;
+		rga_job_put(job);
 
 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 
+		job->ret = ret;
 		rga_request_release_signal(scheduler, job);
 
 		goto next_job;
 	}
+
+	rga_job_put(job);
 }
 
-static void rga_job_finish_and_next(struct rga_scheduler_t *scheduler,
-				    struct rga_job *job, int ret)
-{
-	ktime_t now;
-
-	job->ret = ret;
-
-	if (DEBUGGER_EN(TIME)) {
-		now = ktime_get();
-		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
-		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
-			ktime_us_delta(now, job->timestamp));
-	}
-
-	rga_mm_unmap_job_info(job);
-
-	rga_request_release_signal(scheduler, job);
-
-	rga_job_next(scheduler);
-
-	rga_power_disable(scheduler);
-}
-
-void rga_job_done(struct rga_scheduler_t *scheduler, int ret)
+struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler)
 {
 	struct rga_job *job;
 	unsigned long flags;
@@ -314,16 +253,34 @@
 	spin_lock_irqsave(&scheduler->irq_lock, flags);
 
 	job = scheduler->running_job;
+	if (job == NULL) {
+		pr_err("core[0x%x] running job has been cleanup.\n", scheduler->core);
+
+		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+		return NULL;
+	}
 	scheduler->running_job = NULL;
 
 	scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
+	set_bit(RGA_JOB_STATE_DONE, &job->state);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+
+	if (scheduler->ops->read_back_reg)
+		scheduler->ops->read_back_reg(job, scheduler);
 
 	if (DEBUGGER_EN(DUMP_IMAGE))
 		rga_dump_job_image(job);
 
-	rga_job_finish_and_next(scheduler, job, ret);
+	if (DEBUGGER_EN(TIME)) {
+		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
+		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
+			ktime_us_delta(now, job->timestamp));
+	}
+
+	rga_mm_unmap_job_info(job);
+
+	return job;
 }
 
 static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler)
@@ -391,13 +348,20 @@
 	}
 
 	scheduler->job_count++;
+	set_bit(RGA_JOB_STATE_PENDING, &job->state);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 }
 
 static struct rga_scheduler_t *rga_job_schedule(struct rga_job *job)
 {
+	int i;
 	struct rga_scheduler_t *scheduler = NULL;
+
+	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
+		scheduler = rga_drvdata->scheduler[i];
+		rga_job_scheduler_timeout_clean(scheduler);
+	}
 
 	if (rga_drvdata->num_of_scheduler > 1) {
 		job->core = rga_job_assign(job);
@@ -411,14 +375,12 @@
 		job->scheduler = rga_drvdata->scheduler[0];
 	}
 
-	scheduler = rga_job_get_scheduler(job);
+	scheduler = job->scheduler;
 	if (scheduler == NULL) {
 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
 		job->ret = -EFAULT;
 		return NULL;
 	}
-
-	rga_job_scheduler_timeout_clean(scheduler);
 
 	return scheduler;
 }
@@ -557,26 +519,13 @@
 	request->current_mm = NULL;
 }
 
-static int rga_request_alloc_release_fence(struct dma_fence **release_fence)
-{
-	struct dma_fence *fence;
-
-	fence = rga_dma_fence_alloc();
-	if (IS_ERR(fence)) {
-		pr_err("Can not alloc release fence!\n");
-		return IS_ERR(fence);
-	}
-
-	*release_fence = fence;
-
-	return rga_dma_fence_get_fd(fence);
-}
-
-static int rga_request_add_acquire_fence_callback(int acquire_fence_fd, void *private,
+static int rga_request_add_acquire_fence_callback(int acquire_fence_fd,
+						   struct rga_request *request,
 						   dma_fence_func_t cb_func)
 {
 	int ret;
 	struct dma_fence *acquire_fence = NULL;
+	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
 	if (DEBUGGER_EN(MSG))
 		pr_info("acquire_fence_fd = %d", acquire_fence_fd);
@@ -588,19 +537,38 @@
 		return -EINVAL;
 	}
 	/* close acquire fence fd */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+	close_fd(acquire_fence_fd);
+#else
 	ksys_close(acquire_fence_fd);
+#endif
 
 	ret = rga_dma_fence_get_status(acquire_fence);
-	if (ret == 0) {
-		ret = rga_dma_fence_add_callback(acquire_fence, cb_func, private);
-		if (ret < 0) {
-			if (ret == -ENOENT)
-				return 1;
+	if (ret < 0) {
+		pr_err("%s: Current acquire fence unexpectedly has error status before signal\n",
+			__func__);
+		return ret;
+	} else if (ret > 0) {
+		/* has been signaled */
+		return ret;
+	}
 
+	/*
+	 * Ensure that the request will not be free early when
+	 * the callback is called.
+	 */
+	mutex_lock(&request_manager->lock);
+	rga_request_get(request);
+	mutex_unlock(&request_manager->lock);
+
+	ret = rga_dma_fence_add_callback(acquire_fence, cb_func, (void *)request);
+	if (ret < 0) {
+		if (ret != -ENOENT)
 			pr_err("%s: failed to add fence callback\n", __func__);
-			return ret;
-		}
-	} else {
+
+		mutex_lock(&request_manager->lock);
+		rga_request_put(request);
+		mutex_unlock(&request_manager->lock);
 		return ret;
 	}
 
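
Note on the hunk above: dma_fence_add_callback() returns -ENOENT when the fence has already been signaled, which is why the rewritten code handles that case separately, and the request is pinned with rga_request_get() before the callback is armed so it cannot be freed before the callback fires. A minimal sketch of the underlying kernel pattern follows; the demo_waiter type and demo_* names are illustrative, and the driver itself goes through its own rga_dma_fence_* wrappers rather than the raw API shown here.

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct demo_waiter {
	struct dma_fence_cb cb;		/* must be embedded; handed back to the callback */
	void *private;
};

static void demo_fence_signaled(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct demo_waiter *waiter = container_of(cb, struct demo_waiter, cb);

	/* runs in fence-signalling context; kick off the deferred work here */
	kfree(waiter);
}

static int demo_wait_on_fence(struct dma_fence *fence, void *private)
{
	struct demo_waiter *waiter;
	int ret;

	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter)
		return -ENOMEM;

	waiter->private = private;
	ret = dma_fence_add_callback(fence, &waiter->cb, demo_fence_signaled);
	if (ret == -ENOENT) {
		/* fence already signaled: the callback will never run, proceed now */
		kfree(waiter);
		return 1;
	}

	return ret;	/* 0: callback armed and will run on signal */
}
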
@@ -742,6 +710,70 @@
 	mutex_unlock(&request_manager->lock);
 }
 
+void rga_request_session_destroy_abort(struct rga_session *session)
+{
+	int request_id;
+	struct rga_request *request;
+	struct rga_pending_request_manager *request_manager;
+
+	request_manager = rga_drvdata->pend_request_manager;
+	if (request_manager == NULL) {
+		pr_err("rga_pending_request_manager is null!\n");
+		return;
+	}
+
+	mutex_lock(&request_manager->lock);
+
+	idr_for_each_entry(&request_manager->request_idr, request, request_id) {
+		if (session == request->session) {
+			pr_err("[tgid:%d pid:%d] destroy request[%d] when the user exits",
+				session->tgid, current->pid, request->id);
+			rga_request_put(request);
+		}
+	}
+
+	mutex_unlock(&request_manager->lock);
+}
+
+static int rga_request_timeout_query_state(struct rga_request *request)
+{
+	int i;
+	unsigned long flags;
+	struct rga_scheduler_t *scheduler = NULL;
+	struct rga_job *job = NULL;
+
+	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
+		scheduler = rga_drvdata->scheduler[i];
+
+		spin_lock_irqsave(&scheduler->irq_lock, flags);
+
+		if (scheduler->running_job) {
+			job = scheduler->running_job;
+			if (request->id == job->request_id) {
+				if (test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+				    test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					return request->ret;
+				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+					   test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					pr_err("hardware has finished, but the software has timeout!\n");
+					return -EBUSY;
+				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+					   !test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					pr_err("hardware has timeout.\n");
+					return -EBUSY;
+				}
+			}
+		}
+
+		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+	}
+
+	return request->ret;
+}
+
 static int rga_request_wait(struct rga_request *request)
 {
 	int left_time;
@@ -752,8 +784,7 @@
 
 	switch (left_time) {
 	case 0:
-		pr_err("%s timeout", __func__);
-		ret = -EBUSY;
+		ret = rga_request_timeout_query_state(request);
 		goto err_request_abort;
 	case -ERESTARTSYS:
 		ret = -ERESTARTSYS;
@@ -800,9 +831,15 @@
 						   struct dma_fence_cb *_waiter)
 {
 	struct rga_fence_waiter *waiter = (struct rga_fence_waiter *)_waiter;
+	struct rga_request *request = (struct rga_request *)waiter->private;
+	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
-	if (rga_request_commit((struct rga_request *)waiter->private))
+	if (rga_request_commit(request))
 		pr_err("rga request commit failed!\n");
+
+	mutex_lock(&request_manager->lock);
+	rga_request_put(request);
+	mutex_unlock(&request_manager->lock);
 
 	kfree(waiter);
 }
@@ -832,8 +869,6 @@
 	rga_request_get(request);
 	mutex_unlock(&request_manager->lock);
 
-	rga_job_cleanup(job);
-
 	spin_lock_irqsave(&request->lock, flags);
 
 	if (job->ret < 0) {
@@ -847,6 +882,8 @@
 	finished_count = request->finished_task_count;
 
 	spin_unlock_irqrestore(&request->lock, flags);
+
+	rga_job_cleanup(job);
 
 	if ((failed_count + finished_count) >= request->task_count) {
 		spin_lock_irqsave(&request->lock, flags);
@@ -1003,18 +1040,21 @@
 {
 	int ret = 0;
 	unsigned long flags;
+	struct dma_fence *release_fence;
 
 	spin_lock_irqsave(&request->lock, flags);
 
 	if (request->is_running) {
-		pr_err("can not re-config when request is running");
 		spin_unlock_irqrestore(&request->lock, flags);
+
+		pr_err("can not re-config when request is running\n");
 		return -EFAULT;
 	}
 
 	if (request->task_list == NULL) {
-		pr_err("can not find task list from id[%d]", request->id);
 		spin_unlock_irqrestore(&request->lock, flags);
+
+		pr_err("can not find task list from id[%d]\n", request->id);
 		return -EINVAL;
 	}
 
@@ -1026,46 +1066,71 @@
 
 	rga_request_get_current_mm(request);
 
+	/* Unlock after ensuring that the current request will not be resubmitted. */
 	spin_unlock_irqrestore(&request->lock, flags);
 
 	if (request->sync_mode == RGA_BLIT_ASYNC) {
-		ret = rga_request_alloc_release_fence(&request->release_fence);
-		if (ret < 0) {
-			pr_err("Failed to alloc release fence fd!\n");
-			return ret;
+		release_fence = rga_dma_fence_alloc();
+		if (IS_ERR(release_fence)) {
+			pr_err("Can not alloc release fence!\n");
+			ret = IS_ERR(release_fence);
+			goto error_put_current_mm;
 		}
-		request->release_fence_fd = ret;
+		request->release_fence = release_fence;
 
 		if (request->acquire_fence_fd > 0) {
 			ret = rga_request_add_acquire_fence_callback(
-				request->acquire_fence_fd,
-				(void *)request,
+				request->acquire_fence_fd, request,
 				rga_request_acquire_fence_signaled_cb);
 			if (ret == 0) {
-				return ret;
-			} else if (ret == 1) {
+				/* acquire fence active */
+				goto export_release_fence_fd;
+			} else if (ret > 0) {
+				/* acquire fence has been signaled */
 				goto request_commit;
 			} else {
 				pr_err("Failed to add callback with acquire fence fd[%d]!\n",
 					request->acquire_fence_fd);
-				goto error_release_fence_put;
+				goto err_put_release_fence;
 			}
 		}
-
 	}
 
 request_commit:
 	ret = rga_request_commit(request);
 	if (ret < 0) {
 		pr_err("rga request commit failed!\n");
-		goto error_release_fence_put;
+		goto err_put_release_fence;
+	}
+
+export_release_fence_fd:
+	if (request->release_fence != NULL) {
+		ret = rga_dma_fence_get_fd(request->release_fence);
+		if (ret < 0) {
+			pr_err("Failed to alloc release fence fd!\n");
+			rga_request_release_abort(request, ret);
+			return ret;
+		}
+
+		request->release_fence_fd = ret;
 	}
 
 	return 0;
 
-error_release_fence_put:
-	rga_dma_fence_put(request->release_fence);
-	request->release_fence = NULL;
+err_put_release_fence:
+	if (request->release_fence != NULL) {
+		rga_dma_fence_put(request->release_fence);
+		request->release_fence = NULL;
+	}
+
+error_put_current_mm:
+	spin_lock_irqsave(&request->lock, flags);
+
+	rga_request_put_current_mm(request);
+	request->is_running = false;
+
+	spin_unlock_irqrestore(&request->lock, flags);
+
 	return ret;
 }
 
@@ -1159,10 +1224,11 @@
 	request = container_of(ref, struct rga_request, refcount);
 
 	if (rga_dma_fence_get_status(request->release_fence) == 0)
-		rga_dma_fence_signal(request->release_fence, -EEXIST);
+		rga_dma_fence_signal(request->release_fence, -EFAULT);
 
 	spin_lock_irqsave(&request->lock, flags);
 
+	rga_request_put_current_mm(request);
 	rga_dma_fence_put(request->release_fence);
 
 	if (!request->is_running || request->is_done) {
@@ -1188,6 +1254,7 @@
 
 int rga_request_alloc(uint32_t flags, struct rga_session *session)
 {
+	int new_id;
 	struct rga_pending_request_manager *request_manager;
 	struct rga_request *request;
 
@@ -1218,17 +1285,17 @@
 	mutex_lock(&request_manager->lock);
 
 	idr_preload(GFP_KERNEL);
-	request->id = idr_alloc(&request_manager->request_idr, request, 1, 0, GFP_KERNEL);
+	new_id = idr_alloc_cyclic(&request_manager->request_idr, request, 1, 0, GFP_NOWAIT);
 	idr_preload_end();
-
-	if (request->id <= 0) {
-		pr_err("alloc request_id failed!\n");
+	if (new_id < 0) {
+		pr_err("request alloc id failed!\n");
 
 		mutex_unlock(&request_manager->lock);
 		kfree(request);
-		return -EFAULT;
+		return new_id;
 	}
 
+	request->id = new_id;
 	request_manager->request_count++;
 
 	mutex_unlock(&request_manager->lock);
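
Note on the last hunk: idr_alloc_cyclic() hands out IDs in a cyclic fashion starting after the most recently allocated one, so a freshly released request ID is not reused immediately, and the idr_preload()/GFP_NOWAIT pairing lets the allocation itself avoid sleeping while the lock is held. The driver wraps these calls in its request-manager mutex as shown above; the sketch below uses the classic preload-plus-spinlock form from the IDR documentation, with demo_* names that are illustrative only.

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

static int demo_register(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while sleeping is still allowed */
	spin_lock(&demo_lock);

	/* cyclic allocation: IDs start at 1 and recently freed IDs are not reused at once */
	id = idr_alloc_cyclic(&demo_idr, object, 1, 0, GFP_NOWAIT);

	spin_unlock(&demo_lock);
	idr_preload_end();

	return id;	/* new ID (>= 1) on success, negative errno on failure */
}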
---|