hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/video/rockchip/rga3/rga_job.c
@@ -13,84 +13,40 @@
 #include "rga_mm.h"
 #include "rga_iommu.h"
 #include "rga_debugger.h"
-
-struct rga_job *
-rga_scheduler_get_pending_job_list(struct rga_scheduler_t *scheduler)
-{
-	unsigned long flags;
-	struct rga_job *job;
-
-	spin_lock_irqsave(&scheduler->irq_lock, flags);
-
-	job = list_first_entry_or_null(&scheduler->todo_list,
-		struct rga_job, head);
-
-	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-	return job;
-}
-
-struct rga_job *
-rga_scheduler_get_running_job(struct rga_scheduler_t *scheduler)
-{
-	unsigned long flags;
-	struct rga_job *job;
-
-	spin_lock_irqsave(&scheduler->irq_lock, flags);
-
-	job = scheduler->running_job;
-
-	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-	return job;
-}
-
-struct rga_scheduler_t *rga_job_get_scheduler(struct rga_job *job)
-{
-	return job->scheduler;
-}
+#include "rga_common.h"
 
 static void rga_job_free(struct rga_job *job)
 {
 	free_page((unsigned long)job);
 }
 
-void rga_job_session_destroy(struct rga_session *session)
+static void rga_job_kref_release(struct kref *ref)
 {
-	struct rga_scheduler_t *scheduler = NULL;
-	struct rga_job *job_pos, *job_q;
-	int i;
+	struct rga_job *job;
 
-	unsigned long flags;
+	job = container_of(ref, struct rga_job, refcount);
 
-	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
-		scheduler = rga_drvdata->scheduler[i];
+	rga_job_free(job);
+}
 
-		spin_lock_irqsave(&scheduler->irq_lock, flags);
+static int rga_job_put(struct rga_job *job)
+{
+	return kref_put(&job->refcount, rga_job_kref_release);
+}
 
-		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
-			if (session == job_pos->session) {
-				list_del(&job_pos->head);
-
-				spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-				rga_job_free(job_pos);
-
-				spin_lock_irqsave(&scheduler->irq_lock, flags);
-			}
-		}
-
-		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-	}
+static void rga_job_get(struct rga_job *job)
+{
+	kref_get(&job->refcount);
 }
 
 static int rga_job_cleanup(struct rga_job *job)
 {
-	if (DEBUGGER_EN(TIME))
-		pr_err("(pid:%d) job clean use time = %lld\n", job->pid,
-			ktime_us_delta(ktime_get(), job->timestamp));
+	rga_job_put(job);
 
-	rga_job_free(job);
+	if (DEBUGGER_EN(TIME))
		pr_info("request[%d], job cleanup total cost time %lld us\n",
			job->request_id,
			ktime_us_delta(ktime_get(), job->timestamp));
 
 	return 0;
 }
@@ -165,6 +121,7 @@
 		return NULL;
 
 	INIT_LIST_HEAD(&job->head);
+	kref_init(&job->refcount);
 
 	job->timestamp = ktime_get();
 	job->pid = current->pid;
@@ -232,16 +189,18 @@
 		return ret;
 	}
 
+	set_bit(RGA_JOB_STATE_RUNNING, &job->state);
+
 	/* for debug */
 	if (DEBUGGER_EN(MSG))
 		rga_job_dump_info(job);
 
 	return ret;
-
 }
 
-static void rga_job_next(struct rga_scheduler_t *scheduler)
+void rga_job_next(struct rga_scheduler_t *scheduler)
 {
+	int ret;
 	struct rga_job *job = NULL;
 	unsigned long flags;
 
@@ -261,51 +220,33 @@
 	scheduler->job_count--;
 
 	scheduler->running_job = job;
+	set_bit(RGA_JOB_STATE_PREPARE, &job->state);
+	rga_job_get(job);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 
-	job->ret = rga_job_run(job, scheduler);
+	ret = rga_job_run(job, scheduler);
 	/* If some error before hw run */
-	if (job->ret < 0) {
-		pr_err("some error on rga_job_run before hw start, %s(%d)\n",
-			__func__, __LINE__);
+	if (ret < 0) {
+		pr_err("some error on rga_job_run before hw start, %s(%d)\n", __func__, __LINE__);
 
 		spin_lock_irqsave(&scheduler->irq_lock, flags);
 
 		scheduler->running_job = NULL;
+		rga_job_put(job);
 
 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 
+		job->ret = ret;
 		rga_request_release_signal(scheduler, job);
 
 		goto next_job;
 	}
+
+	rga_job_put(job);
 }
 
-static void rga_job_finish_and_next(struct rga_scheduler_t *scheduler,
-				    struct rga_job *job, int ret)
-{
-	ktime_t now;
-
-	job->ret = ret;
-
-	if (DEBUGGER_EN(TIME)) {
-		now = ktime_get();
-		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
-		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
-			ktime_us_delta(now, job->timestamp));
-	}
-
-	rga_mm_unmap_job_info(job);
-
-	rga_request_release_signal(scheduler, job);
-
-	rga_job_next(scheduler);
-
-	rga_power_disable(scheduler);
-}
-
-void rga_job_done(struct rga_scheduler_t *scheduler, int ret)
+struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler)
 {
 	struct rga_job *job;
 	unsigned long flags;
@@ -314,16 +255,34 @@
 	spin_lock_irqsave(&scheduler->irq_lock, flags);
 
 	job = scheduler->running_job;
+	if (job == NULL) {
+		pr_err("core[0x%x] running job has been cleanup.\n", scheduler->core);
+
+		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+		return NULL;
+	}
 	scheduler->running_job = NULL;
 
 	scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
+	set_bit(RGA_JOB_STATE_DONE, &job->state);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+
+	if (scheduler->ops->read_back_reg)
+		scheduler->ops->read_back_reg(job, scheduler);
 
 	if (DEBUGGER_EN(DUMP_IMAGE))
 		rga_dump_job_image(job);
 
-	rga_job_finish_and_next(scheduler, job, ret);
+	if (DEBUGGER_EN(TIME))
+		pr_info("request[%d], hardware[%s] cost time %lld us\n",
+			job->request_id,
+			rga_get_core_name(scheduler->core),
+			ktime_us_delta(now, job->hw_running_time));
+
+	rga_mm_unmap_job_info(job);
+
+	return job;
 }
 
 static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler)
@@ -391,13 +350,20 @@
 	}
 
 	scheduler->job_count++;
+	set_bit(RGA_JOB_STATE_PENDING, &job->state);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 }
 
 static struct rga_scheduler_t *rga_job_schedule(struct rga_job *job)
 {
+	int i;
 	struct rga_scheduler_t *scheduler = NULL;
+
+	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
+		scheduler = rga_drvdata->scheduler[i];
+		rga_job_scheduler_timeout_clean(scheduler);
+	}
 
 	if (rga_drvdata->num_of_scheduler > 1) {
 		job->core = rga_job_assign(job);
@@ -411,14 +377,12 @@
 		job->scheduler = rga_drvdata->scheduler[0];
 	}
 
-	scheduler = rga_job_get_scheduler(job);
+	scheduler = job->scheduler;
 	if (scheduler == NULL) {
 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
 		job->ret = -EFAULT;
 		return NULL;
 	}
-
-	rga_job_scheduler_timeout_clean(scheduler);
 
 	return scheduler;
 }
@@ -530,7 +494,7 @@
 	return false;
 }
 
-static int rga_request_get_current_mm(struct rga_request *request)
+static struct mm_struct *rga_request_get_current_mm(struct rga_request *request)
 {
 	int i;
 
@@ -538,45 +502,30 @@
 		if (rga_is_need_current_mm(&(request->task_list[i]))) {
 			mmgrab(current->mm);
 			mmget(current->mm);
-			request->current_mm = current->mm;
 
-			break;
+			return current->mm;
 		}
 	}
 
-	return 0;
+	return NULL;
 }
 
-static void rga_request_put_current_mm(struct rga_request *request)
+static void rga_request_put_current_mm(struct mm_struct *mm)
 {
-	if (request->current_mm == NULL)
+	if (mm == NULL)
 		return;
 
-	mmput(request->current_mm);
-	mmdrop(request->current_mm);
-	request->current_mm = NULL;
+	mmput(mm);
+	mmdrop(mm);
 }
 
-static int rga_request_alloc_release_fence(struct dma_fence **release_fence)
-{
-	struct dma_fence *fence;
-
-	fence = rga_dma_fence_alloc();
-	if (IS_ERR(fence)) {
-		pr_err("Can not alloc release fence!\n");
-		return IS_ERR(fence);
-	}
-
-	*release_fence = fence;
-
-	return rga_dma_fence_get_fd(fence);
-}
-
-static int rga_request_add_acquire_fence_callback(int acquire_fence_fd, void *private,
+static int rga_request_add_acquire_fence_callback(int acquire_fence_fd,
+						   struct rga_request *request,
 						   dma_fence_func_t cb_func)
 {
 	int ret;
 	struct dma_fence *acquire_fence = NULL;
+	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
 	if (DEBUGGER_EN(MSG))
 		pr_info("acquire_fence_fd = %d", acquire_fence_fd);
@@ -587,20 +536,48 @@
 			__func__, acquire_fence_fd);
 		return -EINVAL;
 	}
-	/* close acquire fence fd */
-	ksys_close(acquire_fence_fd);
+
+	if (!request->feature.user_close_fence) {
+		/* close acquire fence fd */
+#ifdef CONFIG_NO_GKI
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+		close_fd(acquire_fence_fd);
+#else
+		ksys_close(acquire_fence_fd);
+#endif
+#else
+		pr_err("Please update the driver to v1.2.28 to prevent acquire_fence_fd leaks.");
+		return -EFAULT;
+#endif
+	}
+
 
 	ret = rga_dma_fence_get_status(acquire_fence);
-	if (ret == 0) {
-		ret = rga_dma_fence_add_callback(acquire_fence, cb_func, private);
-		if (ret < 0) {
-			if (ret == -ENOENT)
-				return 1;
+	if (ret < 0) {
+		pr_err("%s: Current acquire fence unexpectedly has error status before signal\n",
+		       __func__);
+		return ret;
+	} else if (ret > 0) {
+		/* has been signaled */
+		return ret;
+	}
 
+	/*
+	 * Ensure that the request will not be free early when
+	 * the callback is called.
+	 */
+	mutex_lock(&request_manager->lock);
+	rga_request_get(request);
+	mutex_unlock(&request_manager->lock);
+
+	ret = rga_dma_fence_add_callback(acquire_fence, cb_func, (void *)request);
+	if (ret < 0) {
+		if (ret != -ENOENT)
 			pr_err("%s: failed to add fence callback\n", __func__);
-			return ret;
-		}
-	} else {
+
+		mutex_lock(&request_manager->lock);
+		rga_request_put(request);
+		mutex_unlock(&request_manager->lock);
 		return ret;
 	}
 
@@ -682,7 +659,8 @@
 				scheduler->ops->soft_reset(scheduler);
 			}
 
-			pr_err("reset core[%d] by request abort", scheduler->core);
+			pr_err("reset core[%d] by request[%d] abort",
+			       scheduler->core, request->id);
 			running_abort_count++;
 		}
 	}
@@ -715,6 +693,7 @@
 static void rga_request_release_abort(struct rga_request *request, int err_code)
 {
 	unsigned long flags;
+	struct mm_struct *current_mm;
 	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
 	if (rga_request_scheduler_job_abort(request) > 0)
@@ -729,10 +708,12 @@
 
 	request->is_running = false;
 	request->is_done = false;
-
-	rga_request_put_current_mm(request);
+	current_mm = request->current_mm;
+	request->current_mm = NULL;
 
 	spin_unlock_irqrestore(&request->lock, flags);
+
+	rga_request_put_current_mm(current_mm);
 
 	rga_dma_fence_signal(request->release_fence, err_code);
 
@@ -740,6 +721,71 @@
 	/* current submit request put */
 	rga_request_put(request);
 	mutex_unlock(&request_manager->lock);
+}
+
+void rga_request_session_destroy_abort(struct rga_session *session)
+{
+	int request_id;
+	struct rga_request *request;
+	struct rga_pending_request_manager *request_manager;
+
+	request_manager = rga_drvdata->pend_request_manager;
+	if (request_manager == NULL) {
+		pr_err("rga_pending_request_manager is null!\n");
+		return;
+	}
+
+	mutex_lock(&request_manager->lock);
+
+	idr_for_each_entry(&request_manager->request_idr, request, request_id) {
+		if (session == request->session) {
+			pr_err("[tgid:%d pid:%d] destroy request[%d] when the user exits",
+			       session->tgid, current->pid, request->id);
+			rga_request_put(request);
+		}
+	}
+
+	mutex_unlock(&request_manager->lock);
+}
+
+static int rga_request_timeout_query_state(struct rga_request *request)
+{
+	int i;
+	unsigned long flags;
+	struct rga_scheduler_t *scheduler = NULL;
+	struct rga_job *job = NULL;
+
+	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
+		scheduler = rga_drvdata->scheduler[i];
+
+		spin_lock_irqsave(&scheduler->irq_lock, flags);
+
+		if (scheduler->running_job) {
+			job = scheduler->running_job;
+			if (request->id == job->request_id) {
+				if (test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+				    test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					return request->ret;
+				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+					   test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					pr_err("request[%d] hardware has finished, but the software has timeout!\n",
+					       request->id);
+					return -EBUSY;
+				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+					   !test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					pr_err("request[%d] hardware has timeout.\n", request->id);
+					return -EBUSY;
+				}
+			}
+		}
+
+		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+	}
+
+	return request->ret;
 }
 
 static int rga_request_wait(struct rga_request *request)
@@ -752,8 +798,7 @@
 
 	switch (left_time) {
 	case 0:
-		pr_err("%s timeout", __func__);
-		ret = -EBUSY;
+		ret = rga_request_timeout_query_state(request);
 		goto err_request_abort;
 	case -ERESTARTSYS:
 		ret = -ERESTARTSYS;
@@ -778,7 +823,14 @@
 	struct rga_job *job;
 
 	for (i = 0; i < request->task_count; i++) {
-		job = rga_job_commit(&(request->task_list[i]), request);
+		struct rga_req *req = &(request->task_list[i]);
+
+		if (DEBUGGER_EN(MSG)) {
+			pr_info("commit request[%d] task[%d]:\n", request->id, i);
+			rga_cmd_print_debug_info(req);
+		}
+
+		job = rga_job_commit(req, request);
 		if (IS_ERR(job)) {
 			pr_err("request[%d] task[%d] job_commit failed.\n", request->id, i);
 			rga_request_release_abort(request, PTR_ERR(job));
@@ -799,10 +851,38 @@
 static void rga_request_acquire_fence_signaled_cb(struct dma_fence *fence,
 						  struct dma_fence_cb *_waiter)
 {
+	int ret;
+	unsigned long flags;
+	struct mm_struct *current_mm;
 	struct rga_fence_waiter *waiter = (struct rga_fence_waiter *)_waiter;
+	struct rga_request *request = (struct rga_request *)waiter->private;
+	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
-	if (rga_request_commit((struct rga_request *)waiter->private))
-		pr_err("rga request commit failed!\n");
+	ret = rga_request_commit(request);
+	if (ret < 0) {
+		pr_err("acquire_fence callback: rga request[%d] commit failed!\n", request->id);
+
+		spin_lock_irqsave(&request->lock, flags);
+
+		request->is_running = false;
+		current_mm = request->current_mm;
+		request->current_mm = NULL;
+
+		spin_unlock_irqrestore(&request->lock, flags);
+
+		rga_request_put_current_mm(current_mm);
+
+		/*
+		 * Since the callback is called while holding &dma_fence.lock,
+		 * the _locked API is used here.
+		 */
+		if (dma_fence_get_status_locked(request->release_fence) == 0)
+			dma_fence_signal_locked(request->release_fence);
+	}
+
+	mutex_lock(&request_manager->lock);
+	rga_request_put(request);
+	mutex_unlock(&request_manager->lock);
 
 	kfree(waiter);
 }
@@ -811,7 +891,9 @@
 {
 	struct rga_pending_request_manager *request_manager;
 	struct rga_request *request;
+	struct mm_struct *current_mm;
 	int finished_count, failed_count;
+	bool is_finished = false;
 	unsigned long flags;
 
 	request_manager = rga_drvdata->pend_request_manager;
@@ -832,8 +914,6 @@
 	rga_request_get(request);
 	mutex_unlock(&request_manager->lock);
 
-	rga_job_cleanup(job);
-
 	spin_lock_irqsave(&request->lock, flags);
 
 	if (job->ret < 0) {
@@ -853,14 +933,16 @@
 
 	request->is_running = false;
 	request->is_done = true;
-
-	rga_request_put_current_mm(request);
+	current_mm = request->current_mm;
+	request->current_mm = NULL;
 
 	spin_unlock_irqrestore(&request->lock, flags);
 
+	rga_request_put_current_mm(current_mm);
+
 	rga_dma_fence_signal(request->release_fence, request->ret);
 
-	wake_up(&request->finished_wq);
+	is_finished = true;
 
 	if (DEBUGGER_EN(MSG))
 		pr_info("request[%d] finished %d failed %d\n",
@@ -873,8 +955,20 @@
 	}
 
 	mutex_lock(&request_manager->lock);
+
+	if (is_finished)
+		wake_up(&request->finished_wq);
+
 	rga_request_put(request);
+
 	mutex_unlock(&request_manager->lock);
+
+	if (DEBUGGER_EN(TIME))
+		pr_info("request[%d], job done total cost time %lld us\n",
+			job->request_id,
+			ktime_us_delta(ktime_get(), job->timestamp));
+
+	rga_job_cleanup(job);
 
 	return 0;
 }
@@ -927,6 +1021,7 @@
 	request->sync_mode = user_request->sync_mode;
 	request->mpi_config_flags = user_request->mpi_config_flags;
 	request->acquire_fence_fd = user_request->acquire_fence_fd;
+	request->feature = task_list[0].feature;
 
 	spin_unlock_irqrestore(&request->lock, flags);
 
@@ -1003,19 +1098,27 @@
 {
 	int ret = 0;
 	unsigned long flags;
+	struct dma_fence *release_fence;
+	struct mm_struct *current_mm;
+
+	current_mm = rga_request_get_current_mm(request);
 
 	spin_lock_irqsave(&request->lock, flags);
 
 	if (request->is_running) {
-		pr_err("can not re-config when request is running");
 		spin_unlock_irqrestore(&request->lock, flags);
-		return -EFAULT;
+
+		pr_err("can not re-config when request is running\n");
+		ret = -EFAULT;
+		goto err_put_current_mm;
 	}
 
 	if (request->task_list == NULL) {
-		pr_err("can not find task list from id[%d]", request->id);
 		spin_unlock_irqrestore(&request->lock, flags);
-		return -EINVAL;
+
+		pr_err("can not find task list from id[%d]\n", request->id);
+		ret = -EINVAL;
+		goto err_put_current_mm;
 	}
 
 	/* Reset */
@@ -1023,49 +1126,76 @@
 	request->is_done = false;
 	request->finished_task_count = 0;
 	request->failed_task_count = 0;
+	request->current_mm = current_mm;
 
-	rga_request_get_current_mm(request);
-
+	/* Unlock after ensuring that the current request will not be resubmitted. */
 	spin_unlock_irqrestore(&request->lock, flags);
 
 	if (request->sync_mode == RGA_BLIT_ASYNC) {
-		ret = rga_request_alloc_release_fence(&request->release_fence);
-		if (ret < 0) {
-			pr_err("Failed to alloc release fence fd!\n");
-			return ret;
+		release_fence = rga_dma_fence_alloc();
+		if (IS_ERR(release_fence)) {
+			pr_err("Can not alloc release fence!\n");
+			ret = IS_ERR(release_fence);
+			goto err_reset_request;
 		}
-		request->release_fence_fd = ret;
+		request->release_fence = release_fence;
 
 		if (request->acquire_fence_fd > 0) {
 			ret = rga_request_add_acquire_fence_callback(
-				request->acquire_fence_fd,
-				(void *)request,
+				request->acquire_fence_fd, request,
 				rga_request_acquire_fence_signaled_cb);
 			if (ret == 0) {
-				return ret;
-			} else if (ret == 1) {
+				/* acquire fence active */
+				goto export_release_fence_fd;
+			} else if (ret > 0) {
+				/* acquire fence has been signaled */
 				goto request_commit;
 			} else {
 				pr_err("Failed to add callback with acquire fence fd[%d]!\n",
 				       request->acquire_fence_fd);
-				goto error_release_fence_put;
+				goto err_put_release_fence;
 			}
 		}
-
 	}
 
 request_commit:
 	ret = rga_request_commit(request);
 	if (ret < 0) {
-		pr_err("rga request commit failed!\n");
-		goto error_release_fence_put;
+		pr_err("rga request[%d] commit failed!\n", request->id);
+		goto err_put_release_fence;
+	}
+
+export_release_fence_fd:
+	if (request->release_fence != NULL) {
+		ret = rga_dma_fence_get_fd(request->release_fence);
+		if (ret < 0) {
+			pr_err("Failed to alloc release fence fd!\n");
+			rga_request_release_abort(request, ret);
+			return ret;
+		}
+
+		request->release_fence_fd = ret;
 	}
 
 	return 0;
 
-error_release_fence_put:
-	rga_dma_fence_put(request->release_fence);
-	request->release_fence = NULL;
+err_put_release_fence:
+	if (request->release_fence != NULL) {
+		rga_dma_fence_put(request->release_fence);
+		request->release_fence = NULL;
+	}
+
+err_reset_request:
+	spin_lock_irqsave(&request->lock, flags);
+
+	request->current_mm = NULL;
+	request->is_running = false;
+
+	spin_unlock_irqrestore(&request->lock, flags);
+
+err_put_current_mm:
+	rga_request_put_current_mm(current_mm);
+
 	return ret;
 }
 
@@ -1154,23 +1284,31 @@
 static void rga_request_kref_release(struct kref *ref)
 {
 	struct rga_request *request;
+	struct mm_struct *current_mm;
 	unsigned long flags;
 
 	request = container_of(ref, struct rga_request, refcount);
 
 	if (rga_dma_fence_get_status(request->release_fence) == 0)
-		rga_dma_fence_signal(request->release_fence, -EEXIST);
+		rga_dma_fence_signal(request->release_fence, -EFAULT);
 
 	spin_lock_irqsave(&request->lock, flags);
 
 	rga_dma_fence_put(request->release_fence);
+	current_mm = request->current_mm;
+	request->current_mm = NULL;
 
 	if (!request->is_running || request->is_done) {
 		spin_unlock_irqrestore(&request->lock, flags);
+
+		rga_request_put_current_mm(current_mm);
+
 		goto free_request;
 	}
 
 	spin_unlock_irqrestore(&request->lock, flags);
+
+	rga_request_put_current_mm(current_mm);
 
 	rga_request_scheduler_job_abort(request);
 
@@ -1188,6 +1326,7 @@
 
 int rga_request_alloc(uint32_t flags, struct rga_session *session)
 {
+	int new_id;
 	struct rga_pending_request_manager *request_manager;
 	struct rga_request *request;
 
@@ -1218,17 +1357,17 @@
 	mutex_lock(&request_manager->lock);
 
 	idr_preload(GFP_KERNEL);
-	request->id = idr_alloc(&request_manager->request_idr, request, 1, 0, GFP_KERNEL);
+	new_id = idr_alloc_cyclic(&request_manager->request_idr, request, 1, 0, GFP_NOWAIT);
 	idr_preload_end();
-
-	if (request->id <= 0) {
-		pr_err("alloc request_id failed!\n");
+	if (new_id < 0) {
+		pr_err("request alloc id failed!\n");
 
 		mutex_unlock(&request_manager->lock);
 		kfree(request);
-		return -EFAULT;
+		return new_id;
 	}
 
+	request->id = new_id;
 	request_manager->request_count++;
 
 	mutex_unlock(&request_manager->lock);
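
The hunks above replace the direct free of struct rga_job with reference counting (kref_init(), rga_job_get(), rga_job_put()), so rga_job_next() can pin the job across the hardware submission while rga_job_cleanup() only drops the final reference. Below is a minimal userspace sketch of that lifetime pattern, using a C11 atomic counter in place of the kernel's kref; job_alloc, job_get and job_put are illustrative names for this sketch only, not driver functions.

/* Sketch of the get/put lifetime used by the patch: the object is freed only
 * when the last reference is dropped, so the submit path and the completion
 * path can both hold the same job without racing against its free.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
	atomic_int refcount;
	int id;
};

static struct job *job_alloc(int id)
{
	struct job *job = calloc(1, sizeof(*job));

	if (!job)
		return NULL;

	atomic_init(&job->refcount, 1);	/* creator's reference, like kref_init() */
	job->id = id;
	return job;
}

static void job_get(struct job *job)
{
	atomic_fetch_add(&job->refcount, 1);	/* like rga_job_get()/kref_get() */
}

static void job_put(struct job *job)
{
	/* like rga_job_put()/kref_put(): release only on the last put */
	if (atomic_fetch_sub(&job->refcount, 1) == 1) {
		printf("job[%d] released\n", job->id);
		free(job);
	}
}

int main(void)
{
	struct job *job = job_alloc(0);

	if (!job)
		return 1;

	job_get(job);	/* scheduler pins the job before starting the hardware */
	job_put(job);	/* scheduler drops its pin after the hardware run */
	job_put(job);	/* final put (the cleanup path) actually frees the job */
	return 0;
}

Freeing only on the last put is what allows rga_job_done() and rga_request_release_signal() to run concurrently with the submit path without either of them freeing a job the other still dereferences.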