forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/video/rockchip/mpp/mpp_common.c
--- a/kernel/drivers/video/rockchip/mpp/mpp_common.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_common.c
@@ -53,18 +53,29 @@
 	__u64 data_ptr;
 };
 
+#define MPP_BAT_MSG_DONE (0x00000001)
+
+struct mpp_bat_msg {
+	__u64 flag;
+	__u32 fd;
+	__s32 ret;
+};
+
 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
 const char *mpp_device_name[MPP_DEVICE_BUTT] = {
 	[MPP_DEVICE_VDPU1] = "VDPU1",
 	[MPP_DEVICE_VDPU2] = "VDPU2",
 	[MPP_DEVICE_VDPU1_PP] = "VDPU1_PP",
 	[MPP_DEVICE_VDPU2_PP] = "VDPU2_PP",
+	[MPP_DEVICE_AV1DEC] = "AV1DEC",
 	[MPP_DEVICE_HEVC_DEC] = "HEVC_DEC",
 	[MPP_DEVICE_RKVDEC] = "RKVDEC",
 	[MPP_DEVICE_AVSPLUS_DEC] = "AVSPLUS_DEC",
+	[MPP_DEVICE_RKJPEGD] = "RKJPEGD",
 	[MPP_DEVICE_RKVENC] = "RKVENC",
 	[MPP_DEVICE_VEPU1] = "VEPU1",
 	[MPP_DEVICE_VEPU2] = "VEPU2",
+	[MPP_DEVICE_VEPU2_JPEG] = "VEPU2",
 	[MPP_DEVICE_VEPU22] = "VEPU22",
 	[MPP_DEVICE_IEP2] = "IEP2",
 	[MPP_DEVICE_VDPP] = "VDPP",
@@ -86,25 +97,8 @@
 
 #endif
 
-static void mpp_free_task(struct kref *ref);
 static void mpp_attach_workqueue(struct mpp_dev *mpp,
 				 struct mpp_taskqueue *queue);
-
-/* task queue schedule */
-static int
-mpp_taskqueue_push_pending(struct mpp_taskqueue *queue,
-			   struct mpp_task *task)
-{
-	if (!task->session || !task->session->mpp)
-		return -EINVAL;
-
-	kref_get(&task->ref);
-	mutex_lock(&queue->pending_lock);
-	list_add_tail(&task->queue_link, &queue->pending_list);
-	mutex_unlock(&queue->pending_lock);
-
-	return 0;
-}
 
 static int
 mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue,
@@ -148,9 +142,7 @@
 	return flag;
 }
 
-static int
-mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue,
-			     struct mpp_task *task)
+int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task)
 {
 	unsigned long flags;
 
@@ -230,7 +222,104 @@
 	return 0;
 }
 
-static int mpp_session_clear_pending(struct mpp_session *session)
+static void task_msgs_reset(struct mpp_task_msgs *msgs)
+{
+	list_del_init(&msgs->list);
+
+	msgs->flags = 0;
+	msgs->req_cnt = 0;
+	msgs->set_cnt = 0;
+	msgs->poll_cnt = 0;
+}
+
+static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session)
+{
+	INIT_LIST_HEAD(&msgs->list);
+
+	msgs->session = session;
+	msgs->queue = NULL;
+	msgs->task = NULL;
+	msgs->mpp = NULL;
+
+	msgs->ext_fd = -1;
+
+	task_msgs_reset(msgs);
+}
+
+static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session)
+{
+	unsigned long flags;
+	struct mpp_task_msgs *msgs;
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+	msgs = list_first_entry_or_null(&session->list_msgs_idle,
+					struct mpp_task_msgs, list_session);
+	if (msgs) {
+		list_move_tail(&msgs->list_session, &session->list_msgs);
+		spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+		return msgs;
+	}
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+	msgs = kzalloc(sizeof(*msgs), GFP_KERNEL);
+	task_msgs_init(msgs, session);
+	INIT_LIST_HEAD(&msgs->list_session);
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+	list_move_tail(&msgs->list_session, &session->list_msgs);
+	session->msgs_cnt++;
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+	mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d msgs cnt %d\n",
+		       session->pid, session->index, session->msgs_cnt);
+
+	return msgs;
+}
+
+static void put_task_msgs(struct mpp_task_msgs *msgs)
+{
+	struct mpp_session *session = msgs->session;
+	unsigned long flags;
+
+	if (!session) {
+		pr_err("invalid msgs without session\n");
+		return;
+	}
+
+	if (msgs->ext_fd >= 0) {
+		fdput(msgs->f);
+		msgs->ext_fd = -1;
+	}
+
+	task_msgs_reset(msgs);
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+	list_move_tail(&msgs->list_session, &session->list_msgs_idle);
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+}
+
+static void clear_task_msgs(struct mpp_session *session)
+{
+	struct mpp_task_msgs *msgs, *n;
+	LIST_HEAD(list_to_free);
+	unsigned long flags;
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+
+	list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session)
+		list_move_tail(&msgs->list_session, &list_to_free);
+
+	list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session)
+		list_move_tail(&msgs->list_session, &list_to_free);
+
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+	list_for_each_entry_safe(msgs, n, &list_to_free, list_session)
+		kfree(msgs);
+}
+
+static void mpp_session_clear_pending(struct mpp_session *session)
 {
 	struct mpp_task *task = NULL, *n;
 
@@ -245,8 +334,6 @@
 		kref_put(&task->ref, mpp_free_task);
 	}
 	mutex_unlock(&session->pending_lock);
-
-	return 0;
 }
 
 void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work)
@@ -309,6 +396,10 @@
 	atomic_set(&session->task_count, 0);
 	atomic_set(&session->release_request, 0);
 
+	INIT_LIST_HEAD(&session->list_msgs);
+	INIT_LIST_HEAD(&session->list_msgs_idle);
+	spin_lock_init(&session->lock_msgs);
+
 	mpp_dbg_session("session %p init\n", session);
 	return session;
 }
@@ -352,7 +443,7 @@
 	else
 		pr_err("invalid NULL session deinit function\n");
 
-	mpp_dbg_session("session %p:%d deinit\n", session, session->index);
+	clear_task_msgs(session);
 
 	kfree(session);
 }
@@ -429,7 +520,7 @@
 	return task;
 }
 
-static void mpp_free_task(struct kref *ref)
+void mpp_free_task(struct kref *ref)
 {
 	struct mpp_dev *mpp;
 	struct mpp_session *session;
@@ -441,18 +532,14 @@
 	}
 	session = task->session;
 
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "session %d:%d task %d state 0x%lx abort_request %d\n",
-		       session->device_type, session->index, task->task_index,
-		       task->state, atomic_read(&task->abort_request));
-	if (!session->mpp) {
-		mpp_err("session %p, session->mpp is null.\n", session);
-		return;
-	}
-	mpp = session->mpp;
+	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n",
+		       session->index, task->task_id, task->state,
+		       atomic_read(&task->abort_request));
 
+	mpp = mpp_get_task_used_device(task, session);
 	if (mpp->dev_ops->free_task)
 		mpp->dev_ops->free_task(session, task);
+
 	/* Decrease reference count */
 	atomic_dec(&session->task_count);
 	atomic_dec(&mpp->task_count);
@@ -466,10 +553,8 @@
 					  struct mpp_task,
 					  timeout_work);
 
-	if (!test_bit(TASK_STATE_START, &task->state)) {
-		mpp_err("task has not start\n");
-		schedule_delayed_work(&task->timeout_work,
-				      msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY));
+	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
+		mpp_err("task has been handled\n");
 		return;
 	}
 
@@ -479,28 +564,24 @@
 	}
 
 	session = task->session;
+	mpp_err("task %d:%d:%d processing time out!\n", session->pid,
+		session->index, task->task_id);
 
 	if (!session->mpp) {
 		mpp_err("session %d:%d, session mpp is null.\n", session->pid,
 			session->index);
 		return;
 	}
-	mpp = session->mpp;
-	dev_err(mpp->dev, "session %d:%d task %d state %lx processing time out!\n",
-		session->device_type, session->index, task->task_index, task->state);
-	synchronize_hardirq(mpp->irq);
-
-	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
-		mpp_err("task has been handled\n");
-		return;
-	}
 
 	mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create));
+
+	mpp = mpp_get_task_used_device(task, session);
 
 	/* disable core irq */
 	disable_irq(mpp->irq);
 	/* disable mmu irq */
-	mpp_iommu_disable_irq(mpp->iommu_info);
+	if (mpp->iommu_info && mpp->iommu_info->got_irq)
+		disable_irq(mpp->iommu_info->irq);
 
 	/* hardware maybe dead, reset it */
 	mpp_reset_up_read(mpp->reset_group);
@@ -518,13 +599,14 @@
 	/* enable core irq */
 	enable_irq(mpp->irq);
 	/* enable mmu irq */
-	mpp_iommu_enable_irq(mpp->iommu_info);
+	if (mpp->iommu_info && mpp->iommu_info->got_irq)
+		enable_irq(mpp->iommu_info->irq);
 
 	mpp_taskqueue_trigger_work(mpp);
 }
 
 static int mpp_process_task_default(struct mpp_session *session,
-				struct mpp_task_msgs *msgs)
+				    struct mpp_task_msgs *msgs)
 {
 	struct mpp_task *task = NULL;
 	struct mpp_dev *mpp = session->mpp;
@@ -532,7 +614,7 @@
 	ktime_t on_create;
 
 	if (unlikely(!mpp)) {
-		mpp_err("pid %d clinet %d found invalid process function\n",
+		mpp_err("pid %d client %d found invalid process function\n",
 			session->pid, session->device_type);
 		return -EINVAL;
 	}
@@ -555,14 +637,22 @@
 		set_bit(TASK_TIMING_CREATE, &task->state);
 	}
 
+	/* ensure current device */
+	mpp = mpp_get_task_used_device(task, session);
+
 	kref_init(&task->ref);
 	init_waitqueue_head(&task->wait);
 	atomic_set(&task->abort_request, 0);
 	task->task_index = atomic_fetch_inc(&mpp->task_index);
+	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
 	INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work);
 
 	if (mpp->auto_freq_en && mpp->hw_ops->get_freq)
 		mpp->hw_ops->get_freq(mpp, task);
+
+	msgs->queue = mpp->queue;
+	msgs->task = task;
+	msgs->mpp = mpp;
 
 	/*
 	 * Push task to session should be in front of push task to queue.
@@ -572,17 +662,7 @@
 	 */
 	atomic_inc(&session->task_count);
 	mpp_session_push_pending(session, task);
-	/* push current task to queue */
-	atomic_inc(&mpp->task_count);
-	mpp_taskqueue_push_pending(mpp->queue, task);
-	set_bit(TASK_STATE_PENDING, &task->state);
-	/* trigger current queue to run task */
-	mpp_taskqueue_trigger_work(mpp);
-	kref_put(&task->ref, mpp_free_task);
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "session %d:%d task %d state 0x%lx\n",
-		       session->device_type, session->index,
-		       task->task_index, task->state);
+
 	return 0;
 }
 
@@ -637,6 +717,10 @@
 		group->resets[type] = rst;
 		group->queue = mpp->queue;
 	}
+	/* if reset not in the same queue, it means different device
+	 * may reset in the same time, then rw_sem_on should set true.
+	 */
+	group->rw_sem_on |= (group->queue != mpp->queue) ? true : false;
 	dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on);
 	up_write(&group->rw_sem);
 
@@ -662,10 +746,9 @@
 	mpp_iommu_down_write(mpp->iommu_info);
 	mpp_reset_down_write(mpp->reset_group);
 	atomic_set(&mpp->reset_request, 0);
-	rockchip_save_qos(mpp->dev);
+
 	if (mpp->hw_ops->reset)
 		mpp->hw_ops->reset(mpp);
-	rockchip_restore_qos(mpp->dev);
 
 	/* Note: if the domain does not change, iommu attach will be return
 	 * as an empty operation. Therefore, force to close and then open,
@@ -714,7 +797,6 @@
 			struct mpp_task *task)
 {
 	int ret;
-	struct mpp_session *session = task->session;
 	u32 timing_en;
 
 	mpp_debug_enter();
@@ -749,11 +831,8 @@
 	}
 
 	mpp_power_on(mpp);
-	mpp_time_record(task);
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "%s session %d:%d task=%d state=0x%lx\n",
-		       dev_name(mpp->dev), session->device_type,
-		       session->index, task->task_index, task->state);
+	mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n",
+		       task->session->pid, dev_name(mpp->dev));
 
 	if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
 		mpp->hw_ops->set_freq(mpp, task);
@@ -763,9 +842,9 @@
 	 */
 	mpp_reset_down_read(mpp->reset_group);
 
+	mpp_iommu_dev_activate(mpp->iommu_info, mpp);
 	if (mpp->dev_ops->run)
 		mpp->dev_ops->run(mpp, task);
-	set_bit(TASK_STATE_START, &task->state);
 
 	mpp_debug_leave();
 
@@ -780,7 +859,7 @@
 
 	mpp_debug_enter();
 
-get_task:
+again:
 	task = mpp_taskqueue_get_pending_task(queue);
 	if (!task)
 		goto done;
@@ -788,7 +867,7 @@
 	/* if task timeout and aborted, remove it */
 	if (atomic_read(&task->abort_request) > 0) {
 		mpp_taskqueue_pop_pending(queue, task);
-		goto get_task;
+		goto again;
 	}
 
 	/* get device for current task */
@@ -813,10 +892,15 @@
 	 */
 	/* Push a pending task to running queue */
 	if (task) {
+		struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session);
+
+		atomic_inc(&task_mpp->task_count);
 		mpp_taskqueue_pending_to_run(queue, task);
 		set_bit(TASK_STATE_RUNNING, &task->state);
-		if (mpp_task_run(mpp, task))
-			mpp_taskqueue_pop_running(mpp->queue, task);
+		if (mpp_task_run(task_mpp, task))
+			mpp_taskqueue_pop_running(queue, task);
+		else
+			goto again;
 	}
 
 done:
@@ -824,17 +908,11 @@
 }
 
 static int mpp_wait_result_default(struct mpp_session *session,
-				struct mpp_task_msgs *msgs)
+				   struct mpp_task_msgs *msgs)
 {
 	int ret;
 	struct mpp_task *task;
-	struct mpp_dev *mpp = session->mpp;
-
-	if (unlikely(!mpp)) {
-		mpp_err("pid %d clinet %d found invalid wait result function\n",
-			session->pid, session->device_type);
-		return -EINVAL;
-	}
+	struct mpp_dev *mpp;
 
 	task = mpp_session_get_pending_task(session);
 	if (!task) {
@@ -842,6 +920,7 @@
 			session->pid, session->index);
 		return -EIO;
 	}
+	mpp = mpp_get_task_used_device(task, session);
 
 	ret = wait_event_timeout(task->wait,
 				 test_bit(TASK_STATE_DONE, &task->state),
@@ -852,16 +931,15 @@
 	} else {
 		atomic_inc(&task->abort_request);
 		set_bit(TASK_STATE_ABORT, &task->state);
-		mpp_err("timeout, pid %d session %d:%d count %d cur_task %d state %lx\n",
-			session->pid, session->device_type, session->index,
-			atomic_read(&session->task_count), task->task_index, task->state);
+		mpp_err("timeout, pid %d session %d:%d count %d cur_task %p id %d\n",
+			session->pid, session->pid, session->index,
+			atomic_read(&session->task_count), task,
+			task->task_id);
 	}
 
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "session %d:%d task %d state 0x%lx kref_read %d ret %d\n",
-		       session->device_type,
-		       session->index, task->task_index, task->state,
-		       kref_read(&task->ref), ret);
+	mpp_debug_func(DEBUG_TASK_INFO, "task %d kref_%d\n",
+		       task->task_id, kref_read(&task->ref));
+
 	mpp_session_pop_pending(session, task);
 
 	return ret;
@@ -896,36 +974,32 @@
 	of_node_put(np);
 	if (!pdev) {
 		dev_err(dev, "failed to get mpp service from node\n");
-		ret = -ENODEV;
-		goto err_put_pdev;
+		return -ENODEV;
 	}
 
-	mpp->pdev_srv = pdev;
 	mpp->srv = platform_get_drvdata(pdev);
+	platform_device_put(pdev);
 	if (!mpp->srv) {
-		dev_err(&pdev->dev, "failed attach service\n");
-		ret = -EINVAL;
-		goto err_put_pdev;
+		dev_err(dev, "failed attach service\n");
+		return -EINVAL;
 	}
 
 	ret = of_property_read_u32(dev->of_node,
 				   "rockchip,taskqueue-node", &taskqueue_node);
 	if (ret) {
 		dev_err(dev, "failed to get taskqueue-node\n");
-		goto err_put_pdev;
+		return ret;
 	} else if (taskqueue_node >= mpp->srv->taskqueue_cnt) {
 		dev_err(dev, "taskqueue-node %d must less than %d\n",
 			taskqueue_node, mpp->srv->taskqueue_cnt);
-		ret = -ENODEV;
-		goto err_put_pdev;
+		return -ENODEV;
 	}
 	/* set taskqueue according dtsi */
 	queue = mpp->srv->task_queues[taskqueue_node];
 	if (!queue) {
 		dev_err(dev, "taskqueue attach to invalid node %d\n",
 			taskqueue_node);
-		ret = -ENODEV;
-		goto err_put_pdev;
+		return -ENODEV;
 	}
 	mpp_attach_workqueue(mpp, queue);
 
@@ -936,23 +1010,13 @@
 		if (reset_group_node >= mpp->srv->reset_group_cnt) {
 			dev_err(dev, "resetgroup-node %d must less than %d\n",
 				reset_group_node, mpp->srv->reset_group_cnt);
-			ret = -ENODEV;
-			goto err_put_pdev;
+			return -ENODEV;
 		} else {
 			mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
-			if (!mpp->reset_group->queue)
-				mpp->reset_group->queue = queue;
-			if (mpp->reset_group->queue != mpp->queue)
-				mpp->reset_group->rw_sem_on = true;
 		}
 	}
 
 	return 0;
-
-err_put_pdev:
-	platform_device_put(pdev);
-
-	return ret;
 }
 
 struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev)
@@ -976,10 +1040,10 @@
 
 	/* default taskqueue has max 16 task capacity */
 	queue->task_capacity = MPP_MAX_TASK_CAPACITY;
-
-	mutex_init(&queue->ref_lock);
-	atomic_set(&queue->runtime_cnt, 0);
+	atomic_set(&queue->reset_request, 0);
 	atomic_set(&queue->detach_count, 0);
+	atomic_set(&queue->task_id, 0);
+	queue->dev_active_flags = 0;
 
 	return queue;
 }
@@ -987,12 +1051,51 @@
 static void mpp_attach_workqueue(struct mpp_dev *mpp,
 				 struct mpp_taskqueue *queue)
 {
-	mpp->queue = queue;
+	s32 core_id;
+
 	INIT_LIST_HEAD(&mpp->queue_link);
+
 	mutex_lock(&queue->dev_lock);
+
+	if (mpp->core_id >= 0)
+		core_id = mpp->core_id;
+	else
+		core_id = queue->core_count;
+
+	if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) {
+		dev_err(mpp->dev, "invalid core id %d\n", core_id);
+		goto done;
+	}
+
+	/*
+	 * multi devices with no multicores share one queue,
+	 * the core_id is default value 0.
+	 */
+	if (queue->cores[core_id]) {
+		if (queue->cores[core_id] == mpp)
+			goto done;
+
+		core_id = queue->core_count;
+	}
+
+	queue->cores[core_id] = mpp;
+	queue->core_count++;
+
+	set_bit(core_id, &queue->core_idle);
 	list_add_tail(&mpp->queue_link, &queue->dev_list);
+	if (queue->core_id_max < (u32)core_id)
+		queue->core_id_max = (u32)core_id;
+
+	mpp->core_id = core_id;
+	mpp->queue = queue;
+
+	mpp_dbg_core("%s attach queue as core %d\n",
+		     dev_name(mpp->dev), mpp->core_id);
+
 	if (queue->task_capacity > mpp->task_capacity)
 		queue->task_capacity = mpp->task_capacity;
+
+done:
 	mutex_unlock(&queue->dev_lock);
 }
 
@@ -1002,7 +1105,15 @@
 
 	if (queue) {
 		mutex_lock(&queue->dev_lock);
+
+		queue->cores[mpp->core_id] = NULL;
+		queue->core_count--;
+
+		clear_bit(mpp->core_id, &queue->core_idle);
 		list_del_init(&mpp->queue_link);
+
+		mpp->queue = NULL;
+
 		mutex_unlock(&queue->dev_lock);
 	}
 }
@@ -1018,27 +1129,6 @@
 	found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found;
 
 	return found ? 0 : -EINVAL;
-}
-
-static int mpp_parse_msg_v1(struct mpp_msg_v1 *msg,
-			    struct mpp_request *req)
-{
-	int ret = 0;
-
-	req->cmd = msg->cmd;
-	req->flags = msg->flags;
-	req->size = msg->size;
-	req->offset = msg->offset;
-	req->data = (void __user *)(unsigned long)msg->data_ptr;
-
-	mpp_debug(DEBUG_IOCTL, "cmd %x, flags %08x, size %d, offset %x\n",
-		  req->cmd, req->flags, req->size, req->offset);
-
-	ret = mpp_check_cmd_v1(req->cmd);
-	if (ret)
-		mpp_err("mpp cmd %x is not supproted.\n", req->cmd);
-
-	return ret;
 }
 
 static inline int mpp_msg_is_last(struct mpp_request *req)
@@ -1090,7 +1180,8 @@
 	int ret;
 	struct mpp_dev *mpp;
 
-	mpp_debug(DEBUG_IOCTL, "req->cmd %x\n", req->cmd);
+	mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd);
+
 	switch (req->cmd) {
 	case MPP_CMD_QUERY_HW_SUPPORT: {
 		u32 hw_support = srv->hw_support;
@@ -1116,8 +1207,10 @@
 			if (test_bit(client_type, &srv->hw_support))
 				mpp = srv->sub_devices[client_type];
 		}
+
 		if (!mpp)
 			return -EINVAL;
+
 		hw_info = mpp->var->hw_info;
 		mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id);
 		if (put_user(hw_info->hw_id, (u32 __user *)req->data))
@@ -1148,6 +1241,7 @@
 		mpp = srv->sub_devices[client_type];
 		if (!mpp)
 			return -EINVAL;
+
 		session->device_type = (enum MPP_DEVICE_TYPE)client_type;
 		session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers);
 		session->mpp = mpp;
@@ -1169,6 +1263,7 @@
 			if (ret)
 				return ret;
 		}
+
 		mpp_session_attach_workqueue(session, mpp->queue);
 	} break;
 	case MPP_CMD_INIT_DRIVER_DATA: {
@@ -1211,6 +1306,21 @@
 	case MPP_CMD_POLL_HW_FINISH: {
 		msgs->flags |= req->flags;
 		msgs->poll_cnt++;
+		msgs->poll_req = NULL;
+	} break;
+	case MPP_CMD_POLL_HW_IRQ: {
+		if (msgs->poll_cnt || msgs->poll_req)
+			mpp_err("Do NOT poll hw irq when previous call not return\n");
+
+		msgs->flags |= req->flags;
+		msgs->poll_cnt++;
+
+		if (req->size && req->data) {
+			if (!msgs->poll_req)
+				msgs->poll_req = req;
+		} else {
+			msgs->poll_req = NULL;
+		}
 	} break;
 	case MPP_CMD_RESET_SESSION: {
 		int ret;
@@ -1300,7 +1410,7 @@
 	default: {
 		mpp = session->mpp;
 		if (!mpp) {
-			mpp_err("pid %d not find clinet %d\n",
+			mpp_err("pid %d not find client %d\n",
 				session->pid, session->device_type);
 			return -EINVAL;
 		}
@@ -1314,17 +1424,228 @@
 	return 0;
 }
 
-static long mpp_dev_ioctl(struct file *filp,
-			  unsigned int cmd,
-			  unsigned long arg)
+static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head)
 {
+	struct mpp_session *session = msgs->session;
 	int ret = 0;
-	struct mpp_service *srv;
-	void __user *msg;
+
+	/* process each task */
+	if (msgs->set_cnt) {
+		/* NOTE: update msg_flags for fd over 1024 */
+		session->msg_flags = msgs->flags;
+		ret = mpp_process_task(session, msgs);
+	}
+
+	if (!ret) {
+		INIT_LIST_HEAD(&msgs->list);
+		list_add_tail(&msgs->list, head);
+	} else {
+		put_task_msgs(msgs);
+	}
+}
+
+static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session,
+			    unsigned int cmd, void __user *msg)
+{
+	struct mpp_msg_v1 msg_v1;
 	struct mpp_request *req;
-	struct mpp_task_msgs task_msgs;
-	struct mpp_session *session =
-		(struct mpp_session *)filp->private_data;
+	struct mpp_task_msgs *msgs = NULL;
+	int last = 1;
+	int ret;
+
+	if (cmd != MPP_IOC_CFG_V1) {
+		mpp_err("unknown ioctl cmd %x\n", cmd);
+		return -EINVAL;
+	}
+
+next:
+	/* first, parse to fixed struct */
+	if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
+		return -EFAULT;
+
+	msg += sizeof(msg_v1);
+
+	mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n",
+		  msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset);
+
+	if (mpp_check_cmd_v1(msg_v1.cmd)) {
+		mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd);
+		return -EFAULT;
+	}
+
+	if (msg_v1.flags & MPP_FLAGS_MULTI_MSG)
+		last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
+	else
+		last = 1;
+
+	/* check cmd for change msgs session */
+	if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) {
+		struct mpp_bat_msg bat_msg;
+		struct mpp_bat_msg __user *usr_cmd;
+		struct fd f;
+
+		/* try session switch here */
+		usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr;
+
+		if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg)))
+			return -EFAULT;
+
+		/* skip finished message */
+		if (bat_msg.flag & MPP_BAT_MSG_DONE)
+			goto session_switch_done;
+
+		f = fdget(bat_msg.fd);
+		if (!f.file) {
+			int ret = -EBADF;
+
+			mpp_err("fd %d get session failed\n", bat_msg.fd);
+
+			if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret)))
+				mpp_err("copy_to_user failed.\n");
+			goto session_switch_done;
+		}
+
+		/* NOTE: add previous ready task to queue and drop empty task */
+		if (msgs) {
+			if (msgs->req_cnt)
+				task_msgs_add(msgs, head);
+			else
+				put_task_msgs(msgs);
+
+			msgs = NULL;
+		}
+
+		/* switch session */
+		session = f.file->private_data;
+		msgs = get_task_msgs(session);
+
+		if (f.file->private_data == session)
+			msgs->ext_fd = bat_msg.fd;
+
+		msgs->f = f;
+
+		mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n",
+			  bat_msg.fd, session->index, session->msgs_cnt);
+
+session_switch_done:
+		/* session id should NOT be the last message */
+		if (last)
+			return 0;
+
+		goto next;
+	}
+
+	if (!msgs)
+		msgs = get_task_msgs(session);
+
+	if (!msgs) {
+		pr_err("session %d:%d failed to get task msgs",
+		       session->pid, session->index);
+		return -EINVAL;
+	}
+
+	if (msgs->req_cnt >= MPP_MAX_MSG_NUM) {
+		mpp_err("session %d message count %d more than %d.\n",
+			session->index, msgs->req_cnt, MPP_MAX_MSG_NUM);
+		return -EINVAL;
+	}
+
+	req = &msgs->reqs[msgs->req_cnt++];
+	req->cmd = msg_v1.cmd;
+	req->flags = msg_v1.flags;
+	req->size = msg_v1.size;
+	req->offset = msg_v1.offset;
+	req->data = (void __user *)(unsigned long)msg_v1.data_ptr;
+
+	ret = mpp_process_request(session, session->srv, req, msgs);
+	if (ret) {
+		mpp_err("session %d process cmd %x ret %d\n",
+			session->index, req->cmd, ret);
+		return ret;
+	}
+
+	if (!last)
+		goto next;
+
+	task_msgs_add(msgs, head);
+	msgs = NULL;
+
+	return 0;
+}
+
+static void mpp_msgs_trigger(struct list_head *msgs_list)
+{
+	struct mpp_task_msgs *msgs, *n;
+	struct mpp_dev *mpp_prev = NULL;
+	struct mpp_taskqueue *queue_prev = NULL;
+
+	/* push task to queue */
+	list_for_each_entry_safe(msgs, n, msgs_list, list) {
+		struct mpp_dev *mpp;
+		struct mpp_task *task;
+		struct mpp_taskqueue *queue;
+
+		if (!msgs->set_cnt || !msgs->queue)
+			continue;
+
+		mpp = msgs->mpp;
+		task = msgs->task;
+		queue = msgs->queue;
+
+		if (queue_prev != queue) {
+			if (queue_prev && mpp_prev) {
+				mutex_unlock(&queue_prev->pending_lock);
+				mpp_taskqueue_trigger_work(mpp_prev);
+			}
+
+			if (queue)
+				mutex_lock(&queue->pending_lock);
+
+			mpp_prev = mpp;
+			queue_prev = queue;
+		}
+
+		if (test_bit(TASK_STATE_ABORT, &task->state))
+			pr_info("try to trigger abort task %d\n", task->task_id);
+
+		set_bit(TASK_STATE_PENDING, &task->state);
+		list_add_tail(&task->queue_link, &queue->pending_list);
+	}
+
+	if (mpp_prev && queue_prev) {
+		mutex_unlock(&queue_prev->pending_lock);
+		mpp_taskqueue_trigger_work(mpp_prev);
+	}
+}
+
+static void mpp_msgs_wait(struct list_head *msgs_list)
+{
+	struct mpp_task_msgs *msgs, *n;
+
+	/* poll and release each task */
+	list_for_each_entry_safe(msgs, n, msgs_list, list) {
+		struct mpp_session *session = msgs->session;
+
+		if (msgs->poll_cnt) {
+			int ret = mpp_wait_result(session, msgs);
+
+			if (ret) {
+				mpp_err("session %d wait result ret %d\n",
+					session->index, ret);
+			}
+		}
+
+		put_task_msgs(msgs);
+
+	}
+}
+
+static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct mpp_service *srv;
+	struct mpp_session *session = (struct mpp_session *)filp->private_data;
+	struct list_head msgs_list;
+	int ret = 0;
 
 	mpp_debug_enter();
 
@@ -1332,7 +1653,9 @@
 		mpp_err("session %p\n", session);
 		return -EINVAL;
 	}
+
 	srv = session->srv;
+
 	if (atomic_read(&session->release_request) > 0) {
 		mpp_debug(DEBUG_IOCTL, "release session had request\n");
 		return -EBUSY;
@@ -1342,54 +1665,15 @@
 		return -EBUSY;
 	}
 
-	msg = (void __user *)arg;
-	memset(&task_msgs, 0, sizeof(task_msgs));
-	do {
-		req = &task_msgs.reqs[task_msgs.req_cnt];
-		/* first, parse to fixed struct */
-		switch (cmd) {
-		case MPP_IOC_CFG_V1: {
-			struct mpp_msg_v1 msg_v1;
+	INIT_LIST_HEAD(&msgs_list);
 
-			memset(&msg_v1, 0, sizeof(msg_v1));
-			if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
-				return -EFAULT;
-			ret = mpp_parse_msg_v1(&msg_v1, req);
-			if (ret)
-				return -EFAULT;
+	ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg);
+	if (ret)
+		mpp_err("collect msgs failed %d\n", ret);

-			msg += sizeof(msg_v1);
-		} break;
-		default:
-			mpp_err("unknown ioctl cmd %x\n", cmd);
-			return -EINVAL;
-		}
-		task_msgs.req_cnt++;
-		/* check loop times */
-		if (task_msgs.req_cnt > MPP_MAX_MSG_NUM) {
-			mpp_err("fail, message count %d more than %d.\n",
-				task_msgs.req_cnt, MPP_MAX_MSG_NUM);
-			return -EINVAL;
-		}
-		/* second, process request */
-		ret = mpp_process_request(session, srv, req, &task_msgs);
-		if (ret)
-			return -EFAULT;
-		/* last, process task message */
-		if (mpp_msg_is_last(req)) {
-			session->msg_flags = task_msgs.flags;
-			if (task_msgs.set_cnt > 0) {
-				ret = mpp_process_task(session, &task_msgs);
-				if (ret)
-					return ret;
-			}
-			if (task_msgs.poll_cnt > 0) {
-				ret = mpp_wait_result(session, &task_msgs);
-				if (ret)
-					return ret;
-			}
-		}
-	} while (!mpp_msg_is_last(req));
+	mpp_msgs_trigger(&msgs_list);
+
+	mpp_msgs_wait(&msgs_list);
 
 	mpp_debug_leave();
 
@@ -1493,9 +1777,9 @@
 	mpp_iommu_down_read(mpp->iommu_info);
 	buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd);
 	mpp_iommu_up_read(mpp->iommu_info);
-	if (IS_ERR_OR_NULL(buffer)) {
+	if (IS_ERR(buffer)) {
 		mpp_err("can't import dma-buf %d\n", fd);
-		return ERR_PTR(-ENOMEM);
+		return ERR_CAST(buffer);
 	}
 
 	mem_region->hdl = buffer;
@@ -1525,7 +1809,7 @@
 		cnt = session->trans_count;
 		tbl = session->trans_table;
 	} else {
-		struct mpp_dev *mpp = session->mpp;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
 		struct mpp_trans_info *trans_info = mpp->var->trans_info;
 
 		cnt = trans_info[fmt].count;
@@ -1661,8 +1945,7 @@
 	return 0;
 }
 
-int mpp_task_init(struct mpp_session *session,
-		  struct mpp_task *task)
+int mpp_task_init(struct mpp_session *session, struct mpp_task *task)
 {
 	INIT_LIST_HEAD(&task->pending_link);
 	INIT_LIST_HEAD(&task->queue_link);
@@ -1677,7 +1960,7 @@
 int mpp_task_finish(struct mpp_session *session,
 		    struct mpp_task *task)
 {
-	struct mpp_dev *mpp = session->mpp;
+	struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
 
 	if (mpp->dev_ops->finish)
 		mpp->dev_ops->finish(mpp, task);
@@ -1713,7 +1996,7 @@
 		      struct mpp_task *task)
 {
 	struct mpp_mem_region *mem_region = NULL, *n;
-	struct mpp_dev *mpp = session->mpp;
+	struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
 
 	/* release memory region attach to this registers table. */
 	list_for_each_entry_safe(mem_region, n,
@@ -1738,7 +2021,7 @@
 	if (!task)
 		return -EIO;
 
-	mpp_err("--- dump mem region ---\n");
+	mpp_err("--- dump task %d mem region ---\n", task->task_index);
 	if (!list_empty(&task->mem_region_list)) {
 		list_for_each_entry_safe(mem, n,
 					 &task->mem_region_list,
@@ -1778,54 +2061,41 @@
 	return 0;
 }
 
-int mpp_task_dump_hw_reg(struct mpp_dev *mpp, struct mpp_task *task)
+int mpp_task_dump_hw_reg(struct mpp_dev *mpp)
 {
-	if (!task)
-		return -EIO;
+	u32 i;
+	u32 s = mpp->var->hw_info->reg_start;
+	u32 e = mpp->var->hw_info->reg_end;
 
-	if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
-		u32 i;
-		u32 s = task->hw_info->reg_start;
-		u32 e = task->hw_info->reg_end;
+	mpp_err("--- dump hardware register ---\n");
+	for (i = s; i <= e; i++) {
+		u32 reg = i * sizeof(u32);
 
-		mpp_err("--- dump hardware register ---\n");
-		for (i = s; i <= e; i++) {
-			u32 reg = i * sizeof(u32);
-
-			mpp_err("reg[%03d]: %04x: 0x%08x\n",
+		mpp_err("reg[%03d]: %04x: 0x%08x\n",
 			i, reg, readl_relaxed(mpp->reg_base + reg));
-		}
 	}
 
 	return 0;
 }
 
-static int mpp_iommu_handle(struct iommu_domain *iommu,
-			    struct device *iommu_dev,
-			    unsigned long iova,
-			    int status, void *arg)
+void mpp_reg_show(struct mpp_dev *mpp, u32 offset)
 {
-	struct mpp_dev *mpp = (struct mpp_dev *)arg;
-	struct mpp_taskqueue *queue = mpp->queue;
-	struct mpp_task *task = mpp_taskqueue_get_running_task(queue);
+	if (!mpp)
+		return;
 
-	/*
-	 * NOTE: In link mode, this task may not be the task of the current
-	 * hardware processing error
-	 */
-	if (!task || !task->session)
-		return -EIO;
-	/* get mpp from cur task */
-	mpp = task->session->mpp;
-	dev_err(mpp->dev, "fault addr 0x%08lx status %x\n", iova, status);
+	dev_err(mpp->dev, "reg[%03d]: %04x: 0x%08x\n",
		offset >> 2, offset, mpp_read_relaxed(mpp, offset));
+}
 
-	mpp_task_dump_mem_region(mpp, task);
-	mpp_task_dump_hw_reg(mpp, task);
+void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end)
+{
+	u32 offset;
 
-	if (mpp->iommu_info->hdl)
-		mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, mpp);
+	if (!mpp)
+		return;
 
-	return 0;
+	for (offset = start; offset < end; offset += sizeof(u32))
+		mpp_reg_show(mpp, offset);
 }
 
 /* The device will do more probing work after this */
@@ -1843,6 +2113,16 @@
 	/* read flag for pum idle request */
 	mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request");
 
+	/* read link table capacity */
+	ret = of_property_read_u32(np, "rockchip,task-capacity",
				   &mpp->task_capacity);
+	if (ret)
+		mpp->task_capacity = 1;
+
+	mpp->dev = dev;
+	mpp->hw_ops = mpp->var->hw_ops;
+	mpp->dev_ops = mpp->var->dev_ops;
+
 	/* Get and attach to service */
 	ret = mpp_attach_service(mpp, dev);
 	if (ret) {
@@ -1850,24 +2130,9 @@
 		return -ENODEV;
 	}
 
-	mpp->dev = dev;
-	mpp->hw_ops = mpp->var->hw_ops;
-	mpp->dev_ops = mpp->var->dev_ops;
-
-	/* read link table capacity */
-	ret = of_property_read_u32(np, "rockchip,task-capacity",
-				   &mpp->task_capacity);
-	if (ret) {
-		mpp->task_capacity = 1;
-
-		/* power domain autosuspend delay 2s */
-		pm_runtime_set_autosuspend_delay(dev, 2000);
-		pm_runtime_use_autosuspend(dev);
-	} else {
-		dev_info(dev, "%d task capacity link mode detected\n",
-			 mpp->task_capacity);
-		/* do not setup autosuspend on multi task device */
-	}
+	/* power domain autosuspend delay 2s */
+	pm_runtime_set_autosuspend_delay(dev, 2000);
+	pm_runtime_use_autosuspend(dev);
 
 	kthread_init_work(&mpp->work, mpp_task_worker_default);
 
@@ -1878,7 +2143,6 @@
 
 	device_init_wakeup(dev, true);
 	pm_runtime_enable(dev);
-
 	mpp->irq = platform_get_irq(pdev, 0);
 	if (mpp->irq < 0) {
 		dev_err(dev, "No interrupt resource found\n");
@@ -1905,42 +2169,36 @@
 		ret = -ENOMEM;
 		goto failed;
 	}
+	mpp->io_base = res->start;
 
-	pm_runtime_get_sync(dev);
 	/*
 	 * TODO: here or at the device itself, some device does not
 	 * have the iommu, maybe in the device is better.
 	 */
 	mpp->iommu_info = mpp_iommu_probe(dev);
 	if (IS_ERR(mpp->iommu_info)) {
-		dev_err(dev, "failed to attach iommu: %ld\n",
-			PTR_ERR(mpp->iommu_info));
+		dev_err(dev, "failed to attach iommu\n");
+		mpp->iommu_info = NULL;
 	}
 	if (mpp->hw_ops->init) {
 		ret = mpp->hw_ops->init(mpp);
 		if (ret)
-			goto failed_init;
+			goto failed;
 	}
-	/* set iommu fault handler */
-	if (!IS_ERR(mpp->iommu_info))
-		iommu_set_fault_handler(mpp->iommu_info->domain,
-					mpp_iommu_handle, mpp);
 
 	/* read hardware id */
 	if (hw_info->reg_id >= 0) {
+		pm_runtime_get_sync(dev);
 		if (mpp->hw_ops->clk_on)
 			mpp->hw_ops->clk_on(mpp);
 
 		hw_info->hw_id = mpp_read(mpp, hw_info->reg_id * sizeof(u32));
 		if (mpp->hw_ops->clk_off)
			mpp->hw_ops->clk_off(mpp);
+		pm_runtime_put_sync(dev);
 	}
 
-	pm_runtime_put_sync(dev);
-
 	return ret;
-failed_init:
-	pm_runtime_put_sync(dev);
 failed:
 	mpp_detach_workqueue(mpp);
 	device_init_wakeup(dev, false);
@@ -1955,12 +2213,31 @@
 		mpp->hw_ops->exit(mpp);
 
 	mpp_iommu_remove(mpp->iommu_info);
-	platform_device_put(mpp->pdev_srv);
 	mpp_detach_workqueue(mpp);
 	device_init_wakeup(mpp->dev, false);
 	pm_runtime_disable(mpp->dev);
 
 	return 0;
+}
+
+void mpp_dev_shutdown(struct platform_device *pdev)
+{
+	int ret;
+	int val;
+	struct device *dev = &pdev->dev;
+	struct mpp_dev *mpp = dev_get_drvdata(dev);
+
+	dev_info(dev, "shutdown device\n");
+
+	atomic_inc(&mpp->srv->shutdown_request);
+	ret = readx_poll_timeout(atomic_read,
				 &mpp->task_count,
				 val, val == 0, 20000, 200000);
+	if (ret == -ETIMEDOUT)
+		dev_err(dev, "wait total %d running time out\n",
			atomic_read(&mpp->task_count));
+	else
+		dev_info(dev, "shutdown success\n");
 }
 
 int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv)
@@ -2007,6 +2284,9 @@
 			/* normal condition, set state and wake up isr thread */
 			set_bit(TASK_STATE_IRQ, &task->state);
 		}
+
+		if (irq_ret == IRQ_WAKE_THREAD)
+			mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
 	} else {
 		mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n");
 	}
@@ -2083,27 +2363,31 @@
 
 int mpp_time_part_diff(struct mpp_task *task)
 {
-	ktime_t end;
-	struct mpp_dev *mpp = task->session->mpp;
+	if (mpp_debug_unlikely(DEBUG_TIMING)) {
+		ktime_t end;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
 
-	end = ktime_get();
-	mpp_debug(DEBUG_PART_TIMING, "%s: session %d:%d part time: %lld us\n",
-		  dev_name(mpp->dev), task->session->pid, task->session->index,
-		  ktime_us_delta(end, task->part));
-	task->part = end;
+		end = ktime_get();
+		mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n",
			  dev_name(mpp->dev), task->core_id, task->session->pid,
			  task->session->index, ktime_us_delta(end, task->part));
+		task->part = end;
+	}
 
 	return 0;
 }
 
 int mpp_time_diff(struct mpp_task *task)
 {
-	ktime_t end;
-	struct mpp_dev *mpp = task->session->mpp;
+	if (mpp_debug_unlikely(DEBUG_TIMING)) {
+		ktime_t end;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
 
-	end = ktime_get();
-	mpp_debug(DEBUG_TIMING, "%s: session %d:%d task time: %lld us\n",
-		  dev_name(mpp->dev), task->session->pid, task->session->index,
-		  ktime_us_delta(end, task->start));
+		end = ktime_get();
+		mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
			  dev_name(mpp->dev), task->core_id, task->session->pid,
			  task->session->index, ktime_us_delta(end, task->start));
+	}
 
 	return 0;
 }
@@ -2112,19 +2396,19 @@
 {
 	if (mpp_debug_unlikely(DEBUG_TIMING)) {
 		ktime_t end;
-		struct mpp_dev *mpp = task->session->mpp;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
 
 		end = ktime_get();
 
 		if (clk_hz)
-			mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us hw %d us\n",
-				  dev_name(mpp->dev), task->session->index,
-				  ktime_us_delta(end, task->start),
+			mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us hw %d us\n",
				  dev_name(mpp->dev), task->core_id, task->session->pid,
				  task->session->index, ktime_us_delta(end, task->start),
				  task->hw_cycles / (clk_hz / 1000000));
		else
-			mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us\n",
-				  dev_name(mpp->dev), task->session->index,
-				  ktime_us_delta(end, task->start));
+			mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
				  dev_name(mpp->dev), task->core_id, task->session->pid,
				  task->session->index, ktime_us_delta(end, task->start));
 	}
 
 	return 0;
@@ -2143,7 +2427,7 @@
 	ktime_t s = task->on_create;
 	unsigned long state = task->state;
 
-	pr_info("task %d dump timing at %lld us:", task->task_index, time_diff);
+	pr_info("task %d dump timing at %lld us:", task->task_id, time_diff);
 
 	pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s));
 	LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end", task->on_create_end, s);
@@ -2325,11 +2609,11 @@
 	return count;
 }
 
-static const struct file_operations procfs_fops_u32 = {
-	.open = fops_open_u32,
-	.read = seq_read,
-	.release = single_release,
-	.write = fops_write_u32,
+static const struct proc_ops procfs_fops_u32 = {
+	.proc_open = fops_open_u32,
+	.proc_read = seq_read,
+	.proc_release = single_release,
+	.proc_write = fops_write_u32,
 };
 
 struct proc_dir_entry *
@@ -2341,6 +2625,7 @@
 
 void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp)
 {
+	mpp_procfs_create_u32("disable_work", 0644, parent, &mpp->disable);
 	mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check);
 }
 #endif
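
The core of this change is the batched ioctl path: mpp_collect_msgs() now walks a chain of mpp_msg_v1 entries, switching the target session whenever it meets MPP_CMD_SET_SESSION_FD (described by the new struct mpp_bat_msg), and only then mpp_msgs_trigger()/mpp_msgs_wait() queue and reap the collected tasks. The sketch below is purely illustrative and not part of the patch: it shows how a userspace caller might lay out such a chain. The command/flag macro values, the uapi header, and the use of MPP_CMD_SET_REG_WRITE as the payload command are assumptions taken from the wider vendor driver and are not confirmed by this diff; the struct layouts mirror the driver source above.

/*
 * Illustrative userspace sketch (not part of the patch) for the batched
 * ioctl path handled by mpp_collect_msgs(). The MPP_* macros are assumed
 * to come from the vendor uapi header; their values are not reproduced
 * here, so this is a sketch rather than a drop-in program.
 */
#include <stdint.h>
#include <string.h>

struct mpp_bat_msg {              /* matches the struct added by this patch */
	uint64_t flag;
	uint32_t fd;
	int32_t  ret;
};

struct mpp_msg_v1 {               /* field set used by mpp_collect_msgs() */
	uint32_t cmd;
	uint32_t flags;
	uint32_t size;
	uint32_t offset;
	uint64_t data_ptr;
};

/*
 * Fill one message pair: first switch to the session behind 'session_fd'
 * via MPP_CMD_SET_SESSION_FD (which must not be the last message of the
 * batch), then queue that session's register payload. Only the final
 * message of the whole batch carries MPP_FLAGS_LAST_MSG.
 */
static void fill_batch_pair(struct mpp_msg_v1 msg[2], struct mpp_bat_msg *bat,
			    int session_fd, void *regs, uint32_t reg_size,
			    int is_last)
{
	memset(msg, 0, 2 * sizeof(msg[0]));
	memset(bat, 0, sizeof(*bat));
	bat->fd = session_fd;

	msg[0].cmd      = MPP_CMD_SET_SESSION_FD;            /* session switch */
	msg[0].flags    = MPP_FLAGS_MULTI_MSG;
	msg[0].data_ptr = (uintptr_t)bat;

	msg[1].cmd      = MPP_CMD_SET_REG_WRITE;             /* payload (assumed cmd) */
	msg[1].flags    = MPP_FLAGS_MULTI_MSG |
			  (is_last ? MPP_FLAGS_LAST_MSG : 0);
	msg[1].size     = reg_size;
	msg[1].data_ptr = (uintptr_t)regs;
}

Usage: several such pairs are placed back to back and submitted with a single ioctl(MPP_IOC_CFG_V1) on an open mpp service session; the driver keeps reading one struct mpp_msg_v1 at a time until it sees MPP_FLAGS_LAST_MSG, then triggers and waits for every collected task in one pass.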