@@ ... @@
 #include "mpp_common.h"
 #include "mpp_iommu.h"
 
-#define MPP_WAIT_TIMEOUT_DELAY (2000)
-
-/* Use 'v' as magic number */
-#define MPP_IOC_MAGIC 'v'
-
-#define MPP_IOC_CFG_V1 _IOW(MPP_IOC_MAGIC, 1, unsigned int)
-#define MPP_IOC_CFG_V2 _IOW(MPP_IOC_MAGIC, 2, unsigned int)
-
 /* input parmater structure for version 1 */
 struct mpp_msg_v1 {
 	__u32 cmd;
@@ ... @@
 	__u32 size;
 	__u32 offset;
 	__u64 data_ptr;
-};
-
-#define MPP_BAT_MSG_DONE (0x00000001)
-
-struct mpp_bat_msg {
-	__u64 flag;
-	__u32 fd;
-	__s32 ret;
 };
 
 #ifdef CONFIG_ROCKCHIP_MPP_PROC_FS
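
For readers unfamiliar with the command numbers dropped above: `_IOW()` packs a magic character, a command index, and the size of the argument type into a single ioctl number that a driver's ioctl handler can switch on. A minimal, generic sketch of that pattern (the `DEMO_*` and `demo_*` names are placeholders, not part of the mpp driver, and the handler signature is simplified):

```c
#include <linux/ioctl.h>
#include <linux/errno.h>

#define DEMO_IOC_MAGIC		'v'
#define DEMO_IOC_CFG_V1		_IOW(DEMO_IOC_MAGIC, 1, unsigned int)

/* Simplified dispatch, as it might appear inside an unlocked_ioctl handler. */
static long demo_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DEMO_IOC_CFG_V1:
		/* copy_from_user() the unsigned int argument and apply it here */
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
```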
@@ ... @@
 	mutex_unlock(&queue->session_lock);
 
 	if (task_count) {
-		mpp_dbg_session("session %d:%d task not finished %d\n",
-				session->pid, session->index,
-				atomic_read(&queue->detach_count));
+		mpp_dbg_session("session %d:%d not finished %d task cnt %d\n",
+				session->device_type, session->index,
+				atomic_read(&queue->detach_count), task_count);
 
 		mpp_session_clear_pending(session);
 	} else {
@@ ... @@
 	mpp_dev_reset(mpp);
 	mpp_power_off(mpp);
 
+	mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
 	set_bit(TASK_STATE_TIMEOUT, &task->state);
 	set_bit(TASK_STATE_DONE, &task->state);
 	/* Wake up the GET thread */
@@ ... @@
 		group->resets[type] = rst;
 		group->queue = mpp->queue;
 	}
-	/* if reset not in the same queue, it means different device
-	 * may reset in the same time, then rw_sem_on should set true.
-	 */
-	group->rw_sem_on |= (group->queue != mpp->queue) ? true : false;
 	dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on);
 	up_write(&group->rw_sem);
 
@@ ... @@
 		mpp_set_grf(mpp->grf_info);
 	}
 	/*
+	 * Lock the reader locker of the device resource lock here,
+	 * release at the finish operation
+	 */
+	mpp_reset_down_read(mpp->reset_group);
+
+	/*
 	 * for iommu share hardware, should attach to ensure
 	 * working in current device
 	 */
 	ret = mpp_iommu_attach(mpp->iommu_info);
 	if (ret) {
 		dev_err(mpp->dev, "mpp_iommu_attach failed\n");
+		mpp_reset_up_read(mpp->reset_group);
 		return -ENODATA;
 	}
 
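
The hunk above moves the reset read-lock acquisition ahead of the IOMMU attach and releases the lock on the attach error path. A minimal sketch of that acquire-then-unwind ordering, using a plain `rw_semaphore` and a hypothetical `attach()` callback rather than the driver's own helpers:

```c
#include <linux/rwsem.h>

/*
 * Take the shared reset lock before touching hardware state; make sure
 * every early return releases it, so a later device reset (which takes
 * the same semaphore for writing) is never blocked by a failed start.
 */
static int example_start_task(struct rw_semaphore *reset_sem, int (*attach)(void))
{
	int ret;

	down_read(reset_sem);		/* block concurrent device resets */

	ret = attach();			/* e.g. IOMMU attach for shared hardware */
	if (ret) {
		up_read(reset_sem);	/* unwind the lock on failure */
		return ret;
	}

	/* ... run the task; the matching up_read() happens at task finish ... */
	return 0;
}
```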
@@ ... @@
 
 	if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
 		mpp->hw_ops->set_freq(mpp, task);
-	/*
-	 * TODO: Lock the reader locker of the device resource lock here,
-	 * release at the finish operation
-	 */
-	mpp_reset_down_read(mpp->reset_group);
 
 	mpp_iommu_dev_activate(mpp->iommu_info, mpp);
 	if (mpp->dev_ops->run)
@@ ... @@
 	}
 	mpp = mpp_get_task_used_device(task, session);
 
-	ret = wait_event_timeout(task->wait,
-				 test_bit(TASK_STATE_DONE, &task->state),
-				 msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
-	if (ret > 0) {
-		if (mpp->dev_ops->result)
-			ret = mpp->dev_ops->result(mpp, task, msgs);
-	} else {
-		atomic_inc(&task->abort_request);
-		set_bit(TASK_STATE_ABORT, &task->state);
-		mpp_err("timeout, pid %d session %d:%d count %d cur_task %p id %d\n",
-			session->pid, session->pid, session->index,
-			atomic_read(&session->task_count), task,
-			task->task_id);
-	}
+	ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state));
+	if (ret == -ERESTARTSYS)
+		mpp_err("wait task break by signal\n");
 
-	mpp_debug_func(DEBUG_TASK_INFO, "task %d kref_%d\n",
-		       task->task_id, kref_read(&task->ref));
+	if (mpp->dev_ops->result)
+		ret = mpp->dev_ops->result(mpp, task, msgs);
+	mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n",
+		       session->device_type, session->index, atomic_read(&session->task_count),
+		       task->task_index, task->state);
 
 	mpp_session_pop_pending(session, task);
 
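
The change above replaces a bounded `wait_event_timeout()` with `wait_event_interruptible()`: the waiter now sleeps until the task completes or a signal is delivered, instead of giving up after a fixed delay. `wait_event_interruptible()` returns 0 once the condition holds and -ERESTARTSYS if a signal interrupts the sleep, while `wait_event_timeout()` returns the remaining jiffies, or 0 on timeout. A small sketch contrasting the two conventions (the `demo_*` names and the bit-0 condition are illustrative only, not driver code):

```c
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Old style: give up after a fixed delay; 0 from the macro means timeout. */
static int demo_wait_bounded(wait_queue_head_t *wq, unsigned long *done, unsigned int ms)
{
	long left = wait_event_timeout(*wq, test_bit(0, done), msecs_to_jiffies(ms));

	return left ? 0 : -ETIMEDOUT;
}

/* New style: sleep until the condition is true or a signal is delivered. */
static int demo_wait_interruptible(wait_queue_head_t *wq, unsigned long *done)
{
	int ret = wait_event_interruptible(*wq, test_bit(0, done));

	if (ret == -ERESTARTSYS)
		pr_err("wait interrupted by signal\n");
	return ret;
}
```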
@@ ... @@
 			return -ENODEV;
 		} else {
 			mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
+			if (!mpp->reset_group->queue)
+				mpp->reset_group->queue = queue;
+			if (mpp->reset_group->queue != mpp->queue)
+				mpp->reset_group->rw_sem_on = true;
 		}
 	}
 
@@ ... @@
 	irq_ret = mpp->dev_ops->irq(mpp);
 
 	if (task) {
-		if (irq_ret != IRQ_NONE) {
+		if (irq_ret == IRQ_WAKE_THREAD) {
 			/* if wait or delayed work timeout, abort request will turn on,
 			 * isr should not to response, and handle it in delayed work
 			 */
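
The condition above now fires only when the device-specific handler explicitly asks for the threaded half, rather than on any non-`IRQ_NONE` return. As background, a generic sketch of the threaded-IRQ convention (the `demo_*` handlers are placeholders, assumed to be registered with `request_threaded_irq()`):

```c
#include <linux/interrupt.h>

static irqreturn_t demo_hard_irq(int irq, void *dev_id)
{
	/* read and acknowledge hardware status here ... */
	bool ours = true;		/* assume the status register matched */
	bool need_thread = true;	/* assume bottom-half work is pending */

	if (!ours)
		return IRQ_NONE;	/* not our interrupt */

	/* IRQ_HANDLED: fully handled here; IRQ_WAKE_THREAD: run the thread fn */
	return need_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t demo_thread_irq(int irq, void *dev_id)
{
	/* heavier completion work runs here in process context */
	return IRQ_HANDLED;
}

/* registered with: request_threaded_irq(irq, demo_hard_irq, demo_thread_irq, 0, "demo", dev); */
```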