From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] video: rockchip: mpp: rework task message and taskqueue handling
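
Rework task handling in the Rockchip MPP common driver
(kernel/drivers/video/rockchip/mpp/mpp_common.c):

- Collect ioctl requests into per-session mpp_task_msgs objects
  (list_msgs/list_msgs_idle), so one MPP_IOC_CFG_V1 call can carry a
  batch of messages, switch sessions with MPP_CMD_SET_SESSION_FD and
  poll with the new MPP_CMD_POLL_HW_IRQ command.
- Split the old parse loop into mpp_collect_msgs(), mpp_msgs_trigger()
  and mpp_msgs_wait(); collected tasks are pushed to their taskqueue's
  pending list in one pending_lock section per queue.
- Track taskqueue cores (queue->cores[], core_idle, task_id) when
  attaching devices and resolve the device a task actually ran on via
  mpp_get_task_used_device().
- Rework timeout and abort handling: the result path now uses
  wait_event_interruptible(), the timeout worker takes the
  TASK_STATE_HANDLE guard, and the iommu irq is toggled directly
  together with mpp_iommu_dev_activate()/deactivate().
- Add AV1DEC, RKJPEGD and VEPU2_JPEG device names, mpp_dev_shutdown(),
  the disable_work procfs node, convert procfs file_operations to
  proc_ops, and drop the local iommu fault handler, the pdev_srv
  reference and the qos save/restore around reset.

For illustration only, a userspace caller is expected to drive the
batched path roughly as in the sketch below. The struct mpp_msg_v1
layout, MPP_IOC_CFG_V1 and the MPP_FLAGS_* values are assumed to come
from the userspace copy of these definitions; mpp_fd is an already
opened mpp device fd.

    #include <sys/ioctl.h>

    /* Sketch: submit cnt messages in one MPP_IOC_CFG_V1 ioctl. */
    static int mpp_submit_batch(int mpp_fd, struct mpp_msg_v1 *msg, int cnt)
    {
            int i;

            for (i = 0; i < cnt; i++) {
                    msg[i].flags |= MPP_FLAGS_MULTI_MSG;
                    if (i == cnt - 1)
                            msg[i].flags |= MPP_FLAGS_LAST_MSG;
            }

            /*
             * Kernel side: mpp_collect_msgs() parses until
             * MPP_FLAGS_LAST_MSG, mpp_msgs_trigger() queues the
             * collected tasks and mpp_msgs_wait() blocks for the
             * poll requests before the ioctl returns.
             */
            return ioctl(mpp_fd, MPP_IOC_CFG_V1, msg);
    }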

---
 kernel/drivers/video/rockchip/mpp/mpp_common.c |  898 ++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 581 insertions(+), 317 deletions(-)

diff --git a/kernel/drivers/video/rockchip/mpp/mpp_common.c b/kernel/drivers/video/rockchip/mpp/mpp_common.c
index 3b3d02a..d43ebf4 100644
--- a/kernel/drivers/video/rockchip/mpp/mpp_common.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_common.c
@@ -36,14 +36,6 @@
 #include "mpp_common.h"
 #include "mpp_iommu.h"
 
-#define MPP_WAIT_TIMEOUT_DELAY		(2000)
-
-/* Use 'v' as magic number */
-#define MPP_IOC_MAGIC		'v'
-
-#define MPP_IOC_CFG_V1	_IOW(MPP_IOC_MAGIC, 1, unsigned int)
-#define MPP_IOC_CFG_V2	_IOW(MPP_IOC_MAGIC, 2, unsigned int)
-
 /* input parmater structure for version 1 */
 struct mpp_msg_v1 {
 	__u32 cmd;
@@ -59,12 +51,15 @@
 	[MPP_DEVICE_VDPU2]		= "VDPU2",
 	[MPP_DEVICE_VDPU1_PP]		= "VDPU1_PP",
 	[MPP_DEVICE_VDPU2_PP]		= "VDPU2_PP",
+	[MPP_DEVICE_AV1DEC]		= "AV1DEC",
 	[MPP_DEVICE_HEVC_DEC]		= "HEVC_DEC",
 	[MPP_DEVICE_RKVDEC]		= "RKVDEC",
 	[MPP_DEVICE_AVSPLUS_DEC]	= "AVSPLUS_DEC",
+	[MPP_DEVICE_RKJPEGD]		= "RKJPEGD",
 	[MPP_DEVICE_RKVENC]		= "RKVENC",
 	[MPP_DEVICE_VEPU1]		= "VEPU1",
 	[MPP_DEVICE_VEPU2]		= "VEPU2",
+	[MPP_DEVICE_VEPU2_JPEG]		= "VEPU2",
 	[MPP_DEVICE_VEPU22]		= "VEPU22",
 	[MPP_DEVICE_IEP2]		= "IEP2",
 	[MPP_DEVICE_VDPP]		= "VDPP",
@@ -86,25 +81,8 @@
 
 #endif
 
-static void mpp_free_task(struct kref *ref);
 static void mpp_attach_workqueue(struct mpp_dev *mpp,
 				 struct mpp_taskqueue *queue);
-
-/* task queue schedule */
-static int
-mpp_taskqueue_push_pending(struct mpp_taskqueue *queue,
-			   struct mpp_task *task)
-{
-	if (!task->session || !task->session->mpp)
-		return -EINVAL;
-
-	kref_get(&task->ref);
-	mutex_lock(&queue->pending_lock);
-	list_add_tail(&task->queue_link, &queue->pending_list);
-	mutex_unlock(&queue->pending_lock);
-
-	return 0;
-}
 
 static int
 mpp_taskqueue_pop_pending(struct mpp_taskqueue *queue,
@@ -148,9 +126,7 @@
 	return flag;
 }
 
-static int
-mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue,
-			     struct mpp_task *task)
+int mpp_taskqueue_pending_to_run(struct mpp_taskqueue *queue, struct mpp_task *task)
 {
 	unsigned long flags;
 
@@ -230,7 +206,104 @@
 	return 0;
 }
 
-static int mpp_session_clear_pending(struct mpp_session *session)
+static void task_msgs_reset(struct mpp_task_msgs *msgs)
+{
+	list_del_init(&msgs->list);
+
+	msgs->flags = 0;
+	msgs->req_cnt = 0;
+	msgs->set_cnt = 0;
+	msgs->poll_cnt = 0;
+}
+
+static void task_msgs_init(struct mpp_task_msgs *msgs, struct mpp_session *session)
+{
+	INIT_LIST_HEAD(&msgs->list);
+
+	msgs->session = session;
+	msgs->queue = NULL;
+	msgs->task = NULL;
+	msgs->mpp = NULL;
+
+	msgs->ext_fd = -1;
+
+	task_msgs_reset(msgs);
+}
+
+static struct mpp_task_msgs *get_task_msgs(struct mpp_session *session)
+{
+	unsigned long flags;
+	struct mpp_task_msgs *msgs;
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+	msgs = list_first_entry_or_null(&session->list_msgs_idle,
+					struct mpp_task_msgs, list_session);
+	if (msgs) {
+		list_move_tail(&msgs->list_session, &session->list_msgs);
+		spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+		return msgs;
+	}
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+	msgs = kzalloc(sizeof(*msgs), GFP_KERNEL);
+	if (!msgs)
+		return NULL;
+
+	task_msgs_init(msgs, session);
+	INIT_LIST_HEAD(&msgs->list_session);
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+	list_move_tail(&msgs->list_session, &session->list_msgs);
+	session->msgs_cnt++;
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+	mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d msgs cnt %d\n",
+		       session->pid, session->index, session->msgs_cnt);
+
+	return msgs;
+}
+
+static void put_task_msgs(struct mpp_task_msgs *msgs)
+{
+	struct mpp_session *session = msgs->session;
+	unsigned long flags;
+
+	if (!session) {
+		pr_err("invalid msgs without session\n");
+		return;
+	}
+
+	if (msgs->ext_fd >= 0) {
+		fdput(msgs->f);
+		msgs->ext_fd = -1;
+	}
+
+	task_msgs_reset(msgs);
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+	list_move_tail(&msgs->list_session, &session->list_msgs_idle);
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+}
+
+static void clear_task_msgs(struct mpp_session *session)
+{
+	struct mpp_task_msgs *msgs, *n;
+	LIST_HEAD(list_to_free);
+	unsigned long flags;
+
+	spin_lock_irqsave(&session->lock_msgs, flags);
+
+	list_for_each_entry_safe(msgs, n, &session->list_msgs, list_session)
+		list_move_tail(&msgs->list_session, &list_to_free);
+
+	list_for_each_entry_safe(msgs, n, &session->list_msgs_idle, list_session)
+		list_move_tail(&msgs->list_session, &list_to_free);
+
+	spin_unlock_irqrestore(&session->lock_msgs, flags);
+
+	list_for_each_entry_safe(msgs, n, &list_to_free, list_session)
+		kfree(msgs);
+}
+
+static void mpp_session_clear_pending(struct mpp_session *session)
 {
 	struct mpp_task *task = NULL, *n;
 
@@ -245,8 +318,6 @@
 		kref_put(&task->ref, mpp_free_task);
 	}
 	mutex_unlock(&session->pending_lock);
-
-	return 0;
 }
 
 void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work)
@@ -268,9 +339,9 @@
 		mutex_unlock(&queue->session_lock);
 
 		if (task_count) {
-			mpp_dbg_session("session %d:%d task not finished %d\n",
-					session->pid, session->index,
-					atomic_read(&queue->detach_count));
+			mpp_dbg_session("session %d:%d not finished %d task cnt %d\n",
+					session->device_type, session->index,
+					atomic_read(&queue->detach_count), task_count);
 
 			mpp_session_clear_pending(session);
 		} else {
@@ -308,6 +379,10 @@
 
 	atomic_set(&session->task_count, 0);
 	atomic_set(&session->release_request, 0);
+
+	INIT_LIST_HEAD(&session->list_msgs);
+	INIT_LIST_HEAD(&session->list_msgs_idle);
+	spin_lock_init(&session->lock_msgs);
 
 	mpp_dbg_session("session %p init\n", session);
 	return session;
@@ -352,7 +427,7 @@
 	else
 		pr_err("invalid NULL session deinit function\n");
 
-	mpp_dbg_session("session %p:%d deinit\n", session, session->index);
+	clear_task_msgs(session);
 
 	kfree(session);
 }
@@ -429,7 +504,7 @@
 	return task;
 }
 
-static void mpp_free_task(struct kref *ref)
+void mpp_free_task(struct kref *ref)
 {
 	struct mpp_dev *mpp;
 	struct mpp_session *session;
@@ -441,18 +516,14 @@
 	}
 	session = task->session;
 
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "session %d:%d task %d state 0x%lx abort_request %d\n",
-		       session->device_type, session->index, task->task_index,
-		       task->state, atomic_read(&task->abort_request));
-	if (!session->mpp) {
-		mpp_err("session %p, session->mpp is null.\n", session);
-		return;
-	}
-	mpp = session->mpp;
+	mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n",
+		       session->index, task->task_id, task->state,
+		       atomic_read(&task->abort_request));
 
+	mpp = mpp_get_task_used_device(task, session);
 	if (mpp->dev_ops->free_task)
 		mpp->dev_ops->free_task(session, task);
+
 	/* Decrease reference count */
 	atomic_dec(&session->task_count);
 	atomic_dec(&mpp->task_count);
@@ -466,10 +537,8 @@
 					     struct mpp_task,
 					     timeout_work);
 
-	if (!test_bit(TASK_STATE_START, &task->state)) {
-		mpp_err("task has not start\n");
-		schedule_delayed_work(&task->timeout_work,
-					msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY));
+	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
+		mpp_err("task has been handled\n");
 		return;
 	}
 
@@ -479,34 +548,31 @@
 	}
 
 	session = task->session;
+	mpp_err("task %d:%d:%d processing time out!\n", session->pid,
+		session->index, task->task_id);
 
 	if (!session->mpp) {
 		mpp_err("session %d:%d, session mpp is null.\n", session->pid,
 			session->index);
 		return;
 	}
-	mpp = session->mpp;
-	dev_err(mpp->dev, "session %d:%d task %d state %lx processing time out!\n",
-		session->device_type, session->index, task->task_index, task->state);
-	synchronize_hardirq(mpp->irq);
-
-	if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
-		mpp_err("task has been handled\n");
-		return;
-	}
 
 	mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create));
+
+	mpp = mpp_get_task_used_device(task, session);
 
 	/* disable core irq */
 	disable_irq(mpp->irq);
 	/* disable mmu irq */
-	mpp_iommu_disable_irq(mpp->iommu_info);
+	if (mpp->iommu_info && mpp->iommu_info->got_irq)
+		disable_irq(mpp->iommu_info->irq);
 
 	/* hardware maybe dead, reset it */
 	mpp_reset_up_read(mpp->reset_group);
 	mpp_dev_reset(mpp);
 	mpp_power_off(mpp);
 
+	mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
 	set_bit(TASK_STATE_TIMEOUT, &task->state);
 	set_bit(TASK_STATE_DONE, &task->state);
 	/* Wake up the GET thread */
@@ -518,13 +584,14 @@
 	/* enable core irq */
 	enable_irq(mpp->irq);
 	/* enable mmu irq */
-	mpp_iommu_enable_irq(mpp->iommu_info);
+	if (mpp->iommu_info && mpp->iommu_info->got_irq)
+		enable_irq(mpp->iommu_info->irq);
 
 	mpp_taskqueue_trigger_work(mpp);
 }
 
 static int mpp_process_task_default(struct mpp_session *session,
-				struct mpp_task_msgs *msgs)
+				    struct mpp_task_msgs *msgs)
 {
 	struct mpp_task *task = NULL;
 	struct mpp_dev *mpp = session->mpp;
@@ -532,7 +599,7 @@
 	ktime_t on_create;
 
 	if (unlikely(!mpp)) {
-		mpp_err("pid %d clinet %d found invalid process function\n",
+		mpp_err("pid %d client %d found invalid process function\n",
 			session->pid, session->device_type);
 		return -EINVAL;
 	}
@@ -555,14 +622,22 @@
 		set_bit(TASK_TIMING_CREATE, &task->state);
 	}
 
+	/* ensure current device */
+	mpp = mpp_get_task_used_device(task, session);
+
 	kref_init(&task->ref);
 	init_waitqueue_head(&task->wait);
 	atomic_set(&task->abort_request, 0);
 	task->task_index = atomic_fetch_inc(&mpp->task_index);
+	task->task_id = atomic_fetch_inc(&mpp->queue->task_id);
 	INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work);
 
 	if (mpp->auto_freq_en && mpp->hw_ops->get_freq)
 		mpp->hw_ops->get_freq(mpp, task);
+
+	msgs->queue = mpp->queue;
+	msgs->task = task;
+	msgs->mpp = mpp;
 
 	/*
 	 * Push task to session should be in front of push task to queue.
@@ -572,17 +647,7 @@
 	 */
 	atomic_inc(&session->task_count);
 	mpp_session_push_pending(session, task);
-	/* push current task to queue */
-	atomic_inc(&mpp->task_count);
-	mpp_taskqueue_push_pending(mpp->queue, task);
-	set_bit(TASK_STATE_PENDING, &task->state);
-	/* trigger current queue to run task */
-	mpp_taskqueue_trigger_work(mpp);
-	kref_put(&task->ref, mpp_free_task);
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "session %d:%d task %d state 0x%lx\n",
-		       session->device_type, session->index,
-		       task->task_index, task->state);
+
 	return 0;
 }
 
@@ -662,10 +727,9 @@
 	mpp_iommu_down_write(mpp->iommu_info);
 	mpp_reset_down_write(mpp->reset_group);
 	atomic_set(&mpp->reset_request, 0);
-	rockchip_save_qos(mpp->dev);
+
 	if (mpp->hw_ops->reset)
 		mpp->hw_ops->reset(mpp);
-	rockchip_restore_qos(mpp->dev);
 
 	/* Note: if the domain does not change, iommu attach will be return
 	 * as an empty operation. Therefore, force to close and then open,
@@ -714,7 +778,6 @@
 			struct mpp_task *task)
 {
 	int ret;
-	struct mpp_session *session = task->session;
 	u32 timing_en;
 
 	mpp_debug_enter();
@@ -739,33 +802,32 @@
 		mpp_set_grf(mpp->grf_info);
 	}
 	/*
+	 * Take the device resource lock for reading here;
+	 * it is released in the finish operation.
+	 */
+	mpp_reset_down_read(mpp->reset_group);
+
+	/*
 	 * for iommu share hardware, should attach to ensure
 	 * working in current device
 	 */
 	ret = mpp_iommu_attach(mpp->iommu_info);
 	if (ret) {
 		dev_err(mpp->dev, "mpp_iommu_attach failed\n");
+		mpp_reset_up_read(mpp->reset_group);
 		return -ENODATA;
 	}
 
 	mpp_power_on(mpp);
-	mpp_time_record(task);
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "%s session %d:%d task=%d state=0x%lx\n",
-		       dev_name(mpp->dev), session->device_type,
-		       session->index, task->task_index, task->state);
+	mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n",
+		       task->session->pid, dev_name(mpp->dev));
 
 	if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
 		mpp->hw_ops->set_freq(mpp, task);
-	/*
-	 * TODO: Lock the reader locker of the device resource lock here,
-	 * release at the finish operation
-	 */
-	mpp_reset_down_read(mpp->reset_group);
 
+	mpp_iommu_dev_activate(mpp->iommu_info, mpp);
 	if (mpp->dev_ops->run)
 		mpp->dev_ops->run(mpp, task);
-	set_bit(TASK_STATE_START, &task->state);
 
 	mpp_debug_leave();
 
@@ -780,7 +842,7 @@
 
 	mpp_debug_enter();
 
-get_task:
+again:
 	task = mpp_taskqueue_get_pending_task(queue);
 	if (!task)
 		goto done;
@@ -788,7 +850,7 @@
 	/* if task timeout and aborted, remove it */
 	if (atomic_read(&task->abort_request) > 0) {
 		mpp_taskqueue_pop_pending(queue, task);
-		goto get_task;
+		goto again;
 	}
 
 	/* get device for current task */
@@ -813,10 +875,15 @@
 	 */
 	/* Push a pending task to running queue */
 	if (task) {
+		struct mpp_dev *task_mpp = mpp_get_task_used_device(task, task->session);
+
+		atomic_inc(&task_mpp->task_count);
 		mpp_taskqueue_pending_to_run(queue, task);
 		set_bit(TASK_STATE_RUNNING, &task->state);
-		if (mpp_task_run(mpp, task))
-			mpp_taskqueue_pop_running(mpp->queue, task);
+		if (mpp_task_run(task_mpp, task))
+			mpp_taskqueue_pop_running(queue, task);
+		else
+			goto again;
 	}
 
 done:
@@ -824,17 +891,11 @@
 }
 
 static int mpp_wait_result_default(struct mpp_session *session,
-			       struct mpp_task_msgs *msgs)
+				   struct mpp_task_msgs *msgs)
 {
 	int ret;
 	struct mpp_task *task;
-	struct mpp_dev *mpp = session->mpp;
-
-	if (unlikely(!mpp)) {
-		mpp_err("pid %d clinet %d found invalid wait result function\n",
-			session->pid, session->device_type);
-		return -EINVAL;
-	}
+	struct mpp_dev *mpp;
 
 	task = mpp_session_get_pending_task(session);
 	if (!task) {
@@ -842,26 +903,18 @@
 			session->pid, session->index);
 		return -EIO;
 	}
+	mpp = mpp_get_task_used_device(task, session);
 
-	ret = wait_event_timeout(task->wait,
-				 test_bit(TASK_STATE_DONE, &task->state),
-				 msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
-	if (ret > 0) {
-		if (mpp->dev_ops->result)
-			ret = mpp->dev_ops->result(mpp, task, msgs);
-	} else {
-		atomic_inc(&task->abort_request);
-		set_bit(TASK_STATE_ABORT, &task->state);
-		mpp_err("timeout, pid %d session %d:%d count %d cur_task %d state %lx\n",
-			session->pid, session->device_type, session->index,
-			atomic_read(&session->task_count), task->task_index, task->state);
-	}
+	ret = wait_event_interruptible(task->wait, test_bit(TASK_STATE_DONE, &task->state));
+	if (ret == -ERESTARTSYS)
+		mpp_err("wait task break by signal\n");
 
-	mpp_debug_func(DEBUG_TASK_INFO,
-		       "session %d:%d task %d state 0x%lx kref_read %d ret %d\n",
-		       session->device_type,
-		       session->index, task->task_index, task->state,
-		       kref_read(&task->ref), ret);
+	if (mpp->dev_ops->result)
+		ret = mpp->dev_ops->result(mpp, task, msgs);
+	mpp_debug_func(DEBUG_TASK_INFO, "wait done session %d:%d count %d task %d state %lx\n",
+		       session->device_type, session->index, atomic_read(&session->task_count),
+		       task->task_index, task->state);
+
 	mpp_session_pop_pending(session, task);
 
 	return ret;
@@ -896,36 +949,32 @@
 	of_node_put(np);
 	if (!pdev) {
 		dev_err(dev, "failed to get mpp service from node\n");
-		ret = -ENODEV;
-		goto err_put_pdev;
+		return -ENODEV;
 	}
 
-	mpp->pdev_srv = pdev;
 	mpp->srv = platform_get_drvdata(pdev);
+	platform_device_put(pdev);
 	if (!mpp->srv) {
-		dev_err(&pdev->dev, "failed attach service\n");
-		ret = -EINVAL;
-		goto err_put_pdev;
+		dev_err(dev, "failed attach service\n");
+		return -EINVAL;
 	}
 
 	ret = of_property_read_u32(dev->of_node,
 				   "rockchip,taskqueue-node", &taskqueue_node);
 	if (ret) {
 		dev_err(dev, "failed to get taskqueue-node\n");
-		goto err_put_pdev;
+		return ret;
 	} else if (taskqueue_node >= mpp->srv->taskqueue_cnt) {
 		dev_err(dev, "taskqueue-node %d must less than %d\n",
 			taskqueue_node, mpp->srv->taskqueue_cnt);
-		ret = -ENODEV;
-		goto err_put_pdev;
+		return -ENODEV;
 	}
 	/* set taskqueue according dtsi */
 	queue = mpp->srv->task_queues[taskqueue_node];
 	if (!queue) {
 		dev_err(dev, "taskqueue attach to invalid node %d\n",
 			taskqueue_node);
-		ret = -ENODEV;
-		goto err_put_pdev;
+		return -ENODEV;
 	}
 	mpp_attach_workqueue(mpp, queue);
 
@@ -936,8 +985,7 @@
 		if (reset_group_node >= mpp->srv->reset_group_cnt) {
 			dev_err(dev, "resetgroup-node %d must less than %d\n",
 				reset_group_node, mpp->srv->reset_group_cnt);
-			ret = -ENODEV;
-			goto err_put_pdev;
+			return -ENODEV;
 		} else {
 			mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
 			if (!mpp->reset_group->queue)
@@ -948,11 +996,6 @@
 	}
 
 	return 0;
-
-err_put_pdev:
-	platform_device_put(pdev);
-
-	return ret;
 }
 
 struct mpp_taskqueue *mpp_taskqueue_init(struct device *dev)
@@ -976,10 +1019,10 @@
 
 	/* default taskqueue has max 16 task capacity */
 	queue->task_capacity = MPP_MAX_TASK_CAPACITY;
-
-	mutex_init(&queue->ref_lock);
-	atomic_set(&queue->runtime_cnt, 0);
+	atomic_set(&queue->reset_request, 0);
 	atomic_set(&queue->detach_count, 0);
+	atomic_set(&queue->task_id, 0);
+	queue->dev_active_flags = 0;
 
 	return queue;
 }
@@ -987,12 +1030,51 @@
 static void mpp_attach_workqueue(struct mpp_dev *mpp,
 				 struct mpp_taskqueue *queue)
 {
-	mpp->queue = queue;
+	s32 core_id;
+
 	INIT_LIST_HEAD(&mpp->queue_link);
+
 	mutex_lock(&queue->dev_lock);
+
+	if (mpp->core_id >= 0)
+		core_id = mpp->core_id;
+	else
+		core_id = queue->core_count;
+
+	if (core_id < 0 || core_id >= MPP_MAX_CORE_NUM) {
+		dev_err(mpp->dev, "invalid core id %d\n", core_id);
+		goto done;
+	}
+
+	/*
+	 * Multiple devices without multi-core support may share one queue;
+	 * in that case core_id keeps its default value 0.
+	 */
+	if (queue->cores[core_id]) {
+		if (queue->cores[core_id] == mpp)
+			goto done;
+
+		core_id = queue->core_count;
+	}
+
+	queue->cores[core_id] = mpp;
+	queue->core_count++;
+
+	set_bit(core_id, &queue->core_idle);
 	list_add_tail(&mpp->queue_link, &queue->dev_list);
+	if (queue->core_id_max < (u32)core_id)
+		queue->core_id_max = (u32)core_id;
+
+	mpp->core_id = core_id;
+	mpp->queue = queue;
+
+	mpp_dbg_core("%s attach queue as core %d\n",
+			dev_name(mpp->dev), mpp->core_id);
+
 	if (queue->task_capacity > mpp->task_capacity)
 		queue->task_capacity = mpp->task_capacity;
+
+done:
 	mutex_unlock(&queue->dev_lock);
 }
 
@@ -1002,7 +1084,15 @@
 
 	if (queue) {
 		mutex_lock(&queue->dev_lock);
+
+		queue->cores[mpp->core_id] = NULL;
+		queue->core_count--;
+
+		clear_bit(mpp->core_id, &queue->core_idle);
 		list_del_init(&mpp->queue_link);
+
+		mpp->queue = NULL;
+
 		mutex_unlock(&queue->dev_lock);
 	}
 }
@@ -1018,27 +1108,6 @@
 	found = (cmd >= MPP_CMD_CONTROL_BASE && cmd < MPP_CMD_CONTROL_BUTT) ? true : found;
 
 	return found ? 0 : -EINVAL;
-}
-
-static int mpp_parse_msg_v1(struct mpp_msg_v1 *msg,
-			    struct mpp_request *req)
-{
-	int ret = 0;
-
-	req->cmd = msg->cmd;
-	req->flags = msg->flags;
-	req->size = msg->size;
-	req->offset = msg->offset;
-	req->data = (void __user *)(unsigned long)msg->data_ptr;
-
-	mpp_debug(DEBUG_IOCTL, "cmd %x, flags %08x, size %d, offset %x\n",
-		  req->cmd, req->flags, req->size, req->offset);
-
-	ret = mpp_check_cmd_v1(req->cmd);
-	if (ret)
-		mpp_err("mpp cmd %x is not supproted.\n", req->cmd);
-
-	return ret;
 }
 
 static inline int mpp_msg_is_last(struct mpp_request *req)
@@ -1090,7 +1159,8 @@
 	int ret;
 	struct mpp_dev *mpp;
 
-	mpp_debug(DEBUG_IOCTL, "req->cmd %x\n", req->cmd);
+	mpp_debug(DEBUG_IOCTL, "cmd %x process\n", req->cmd);
+
 	switch (req->cmd) {
 	case MPP_CMD_QUERY_HW_SUPPORT: {
 		u32 hw_support = srv->hw_support;
@@ -1116,8 +1186,10 @@
 			if (test_bit(client_type, &srv->hw_support))
 				mpp = srv->sub_devices[client_type];
 		}
+
 		if (!mpp)
 			return -EINVAL;
+
 		hw_info = mpp->var->hw_info;
 		mpp_debug(DEBUG_IOCTL, "hw_id %08x\n", hw_info->hw_id);
 		if (put_user(hw_info->hw_id, (u32 __user *)req->data))
@@ -1148,6 +1220,7 @@
 		mpp = srv->sub_devices[client_type];
 		if (!mpp)
 			return -EINVAL;
+
 		session->device_type = (enum MPP_DEVICE_TYPE)client_type;
 		session->dma = mpp_dma_session_create(mpp->dev, mpp->session_max_buffers);
 		session->mpp = mpp;
@@ -1169,6 +1242,7 @@
 			if (ret)
 				return ret;
 		}
+
 		mpp_session_attach_workqueue(session, mpp->queue);
 	} break;
 	case MPP_CMD_INIT_DRIVER_DATA: {
@@ -1211,6 +1285,21 @@
 	case MPP_CMD_POLL_HW_FINISH: {
 		msgs->flags |= req->flags;
 		msgs->poll_cnt++;
+		msgs->poll_req = NULL;
+	} break;
+	case MPP_CMD_POLL_HW_IRQ: {
+		if (msgs->poll_cnt || msgs->poll_req)
+			mpp_err("Do NOT poll hw irq when previous call not return\n");
+
+		msgs->flags |= req->flags;
+		msgs->poll_cnt++;
+
+		if (req->size && req->data) {
+			if (!msgs->poll_req)
+				msgs->poll_req = req;
+		} else {
+			msgs->poll_req = NULL;
+		}
 	} break;
 	case MPP_CMD_RESET_SESSION: {
 		int ret;
@@ -1300,7 +1389,7 @@
 	default: {
 		mpp = session->mpp;
 		if (!mpp) {
-			mpp_err("pid %d not find clinet %d\n",
+			mpp_err("pid %d not find client %d\n",
 				session->pid, session->device_type);
 			return -EINVAL;
 		}
@@ -1314,17 +1403,228 @@
 	return 0;
 }
 
-static long mpp_dev_ioctl(struct file *filp,
-			  unsigned int cmd,
-			  unsigned long arg)
+static void task_msgs_add(struct mpp_task_msgs *msgs, struct list_head *head)
 {
+	struct mpp_session *session = msgs->session;
 	int ret = 0;
-	struct mpp_service *srv;
-	void __user *msg;
+
+	/* process each task */
+	if (msgs->set_cnt) {
+		/* NOTE: update msg_flags for fd over 1024 */
+		session->msg_flags = msgs->flags;
+		ret = mpp_process_task(session, msgs);
+	}
+
+	if (!ret) {
+		INIT_LIST_HEAD(&msgs->list);
+		list_add_tail(&msgs->list, head);
+	} else {
+		put_task_msgs(msgs);
+	}
+}
+
+static int mpp_collect_msgs(struct list_head *head, struct mpp_session *session,
+			    unsigned int cmd, void __user *msg)
+{
+	struct mpp_msg_v1 msg_v1;
 	struct mpp_request *req;
-	struct mpp_task_msgs task_msgs;
-	struct mpp_session *session =
-		(struct mpp_session *)filp->private_data;
+	struct mpp_task_msgs *msgs = NULL;
+	int last = 1;
+	int ret;
+
+	if (cmd != MPP_IOC_CFG_V1) {
+		mpp_err("unknown ioctl cmd %x\n", cmd);
+		return -EINVAL;
+	}
+
+next:
+	/* first, parse to fixed struct */
+	if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
+		return -EFAULT;
+
+	msg += sizeof(msg_v1);
+
+	mpp_debug(DEBUG_IOCTL, "cmd %x collect flags %08x, size %d, offset %x\n",
+		  msg_v1.cmd, msg_v1.flags, msg_v1.size, msg_v1.offset);
+
+	if (mpp_check_cmd_v1(msg_v1.cmd)) {
+		mpp_err("mpp cmd %x is not supported.\n", msg_v1.cmd);
+		return -EFAULT;
+	}
+
+	if (msg_v1.flags & MPP_FLAGS_MULTI_MSG)
+		last = (msg_v1.flags & MPP_FLAGS_LAST_MSG) ? 1 : 0;
+	else
+		last = 1;
+
+	/* check cmd for change msgs session */
+	if (msg_v1.cmd == MPP_CMD_SET_SESSION_FD) {
+		struct mpp_bat_msg bat_msg;
+		struct mpp_bat_msg __user *usr_cmd;
+		struct fd f;
+
+		/* try session switch here */
+		usr_cmd = (struct mpp_bat_msg __user *)(unsigned long)msg_v1.data_ptr;
+
+		if (copy_from_user(&bat_msg, usr_cmd, sizeof(bat_msg)))
+			return -EFAULT;
+
+		/* skip finished message */
+		if (bat_msg.flag & MPP_BAT_MSG_DONE)
+			goto session_switch_done;
+
+		f = fdget(bat_msg.fd);
+		if (!f.file) {
+			int ret = -EBADF;
+
+			mpp_err("fd %d get session failed\n", bat_msg.fd);
+
+			if (copy_to_user(&usr_cmd->ret, &ret, sizeof(usr_cmd->ret)))
+				mpp_err("copy_to_user failed.\n");
+			goto session_switch_done;
+		}
+
+		/* NOTE: add previous ready task to queue and drop empty task */
+		if (msgs) {
+			if (msgs->req_cnt)
+				task_msgs_add(msgs, head);
+			else
+				put_task_msgs(msgs);
+
+			msgs = NULL;
+		}
+
+		/* switch session */
+		session = f.file->private_data;
+		msgs = get_task_msgs(session);
+
+		if (f.file->private_data == session)
+			msgs->ext_fd = bat_msg.fd;
+
+		msgs->f = f;
+
+		mpp_debug(DEBUG_IOCTL, "fd %d, session %d msg_cnt %d\n",
+				bat_msg.fd, session->index, session->msgs_cnt);
+
+session_switch_done:
+		/* session id should NOT be the last message */
+		if (last)
+			return 0;
+
+		goto next;
+	}
+
+	if (!msgs)
+		msgs = get_task_msgs(session);
+
+	if (!msgs) {
+		pr_err("session %d:%d failed to get task msgs",
+		       session->pid, session->index);
+		return -EINVAL;
+	}
+
+	if (msgs->req_cnt >= MPP_MAX_MSG_NUM) {
+		mpp_err("session %d message count %d more than %d.\n",
+			session->index, msgs->req_cnt, MPP_MAX_MSG_NUM);
+		return -EINVAL;
+	}
+
+	req = &msgs->reqs[msgs->req_cnt++];
+	req->cmd = msg_v1.cmd;
+	req->flags = msg_v1.flags;
+	req->size = msg_v1.size;
+	req->offset = msg_v1.offset;
+	req->data = (void __user *)(unsigned long)msg_v1.data_ptr;
+
+	ret = mpp_process_request(session, session->srv, req, msgs);
+	if (ret) {
+		mpp_err("session %d process cmd %x ret %d\n",
+			session->index, req->cmd, ret);
+		return ret;
+	}
+
+	if (!last)
+		goto next;
+
+	task_msgs_add(msgs, head);
+	msgs = NULL;
+
+	return 0;
+}
+
+static void mpp_msgs_trigger(struct list_head *msgs_list)
+{
+	struct mpp_task_msgs *msgs, *n;
+	struct mpp_dev *mpp_prev = NULL;
+	struct mpp_taskqueue *queue_prev = NULL;
+
+	/* push task to queue */
+	list_for_each_entry_safe(msgs, n, msgs_list, list) {
+		struct mpp_dev *mpp;
+		struct mpp_task *task;
+		struct mpp_taskqueue *queue;
+
+		if (!msgs->set_cnt || !msgs->queue)
+			continue;
+
+		mpp = msgs->mpp;
+		task = msgs->task;
+		queue = msgs->queue;
+
+		if (queue_prev != queue) {
+			if (queue_prev && mpp_prev) {
+				mutex_unlock(&queue_prev->pending_lock);
+				mpp_taskqueue_trigger_work(mpp_prev);
+			}
+
+			if (queue)
+				mutex_lock(&queue->pending_lock);
+
+			mpp_prev = mpp;
+			queue_prev = queue;
+		}
+
+		if (test_bit(TASK_STATE_ABORT, &task->state))
+			pr_info("try to trigger abort task %d\n", task->task_id);
+
+		set_bit(TASK_STATE_PENDING, &task->state);
+		list_add_tail(&task->queue_link, &queue->pending_list);
+	}
+
+	if (mpp_prev && queue_prev) {
+		mutex_unlock(&queue_prev->pending_lock);
+		mpp_taskqueue_trigger_work(mpp_prev);
+	}
+}
+
+static void mpp_msgs_wait(struct list_head *msgs_list)
+{
+	struct mpp_task_msgs *msgs, *n;
+
+	/* poll and release each task */
+	list_for_each_entry_safe(msgs, n, msgs_list, list) {
+		struct mpp_session *session = msgs->session;
+
+		if (msgs->poll_cnt) {
+			int ret = mpp_wait_result(session, msgs);
+
+			if (ret) {
+				mpp_err("session %d wait result ret %d\n",
+					session->index, ret);
+			}
+		}
+
+		put_task_msgs(msgs);
+	}
+}
+
+static long mpp_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct mpp_service *srv;
+	struct mpp_session *session = (struct mpp_session *)filp->private_data;
+	struct list_head msgs_list;
+	int ret = 0;
 
 	mpp_debug_enter();
 
@@ -1332,7 +1632,9 @@
 		mpp_err("session %p\n", session);
 		return -EINVAL;
 	}
+
 	srv = session->srv;
+
 	if (atomic_read(&session->release_request) > 0) {
 		mpp_debug(DEBUG_IOCTL, "release session had request\n");
 		return -EBUSY;
@@ -1342,54 +1644,15 @@
 		return -EBUSY;
 	}
 
-	msg = (void __user *)arg;
-	memset(&task_msgs, 0, sizeof(task_msgs));
-	do {
-		req = &task_msgs.reqs[task_msgs.req_cnt];
-		/* first, parse to fixed struct */
-		switch (cmd) {
-		case MPP_IOC_CFG_V1: {
-			struct mpp_msg_v1 msg_v1;
+	INIT_LIST_HEAD(&msgs_list);
 
-			memset(&msg_v1, 0, sizeof(msg_v1));
-			if (copy_from_user(&msg_v1, msg, sizeof(msg_v1)))
-				return -EFAULT;
-			ret = mpp_parse_msg_v1(&msg_v1, req);
-			if (ret)
-				return -EFAULT;
+	ret = mpp_collect_msgs(&msgs_list, session, cmd, (void __user *)arg);
+	if (ret)
+		mpp_err("collect msgs failed %d\n", ret);
 
-			msg += sizeof(msg_v1);
-		} break;
-		default:
-			mpp_err("unknown ioctl cmd %x\n", cmd);
-			return -EINVAL;
-		}
-		task_msgs.req_cnt++;
-		/* check loop times */
-		if (task_msgs.req_cnt > MPP_MAX_MSG_NUM) {
-			mpp_err("fail, message count %d more than %d.\n",
-				task_msgs.req_cnt, MPP_MAX_MSG_NUM);
-			return -EINVAL;
-		}
-		/* second, process request */
-		ret = mpp_process_request(session, srv, req, &task_msgs);
-		if (ret)
-			return -EFAULT;
-		/* last, process task message */
-		if (mpp_msg_is_last(req)) {
-			session->msg_flags = task_msgs.flags;
-			if (task_msgs.set_cnt > 0) {
-				ret = mpp_process_task(session, &task_msgs);
-				if (ret)
-					return ret;
-			}
-			if (task_msgs.poll_cnt > 0) {
-				ret = mpp_wait_result(session, &task_msgs);
-				if (ret)
-					return ret;
-			}
-		}
-	} while (!mpp_msg_is_last(req));
+	mpp_msgs_trigger(&msgs_list);
+
+	mpp_msgs_wait(&msgs_list);
 
 	mpp_debug_leave();
 
@@ -1493,9 +1756,9 @@
 		mpp_iommu_down_read(mpp->iommu_info);
 		buffer = mpp_dma_import_fd(mpp->iommu_info, dma, fd);
 		mpp_iommu_up_read(mpp->iommu_info);
-		if (IS_ERR_OR_NULL(buffer)) {
+		if (IS_ERR(buffer)) {
 			mpp_err("can't import dma-buf %d\n", fd);
-			return ERR_PTR(-ENOMEM);
+			return ERR_CAST(buffer);
 		}
 
 		mem_region->hdl = buffer;
@@ -1525,7 +1788,7 @@
 		cnt = session->trans_count;
 		tbl = session->trans_table;
 	} else {
-		struct mpp_dev *mpp = session->mpp;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
 		struct mpp_trans_info *trans_info = mpp->var->trans_info;
 
 		cnt = trans_info[fmt].count;
@@ -1661,8 +1924,7 @@
 	return 0;
 }
 
-int mpp_task_init(struct mpp_session *session,
-		  struct mpp_task *task)
+int mpp_task_init(struct mpp_session *session, struct mpp_task *task)
 {
 	INIT_LIST_HEAD(&task->pending_link);
 	INIT_LIST_HEAD(&task->queue_link);
@@ -1677,7 +1939,7 @@
 int mpp_task_finish(struct mpp_session *session,
 		    struct mpp_task *task)
 {
-	struct mpp_dev *mpp = session->mpp;
+	struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
 
 	if (mpp->dev_ops->finish)
 		mpp->dev_ops->finish(mpp, task);
@@ -1713,7 +1975,7 @@
 		      struct mpp_task *task)
 {
 	struct mpp_mem_region *mem_region = NULL, *n;
-	struct mpp_dev *mpp = session->mpp;
+	struct mpp_dev *mpp = mpp_get_task_used_device(task, session);
 
 	/* release memory region attach to this registers table. */
 	list_for_each_entry_safe(mem_region, n,
@@ -1738,7 +2000,7 @@
 	if (!task)
 		return -EIO;
 
-	mpp_err("--- dump mem region ---\n");
+	mpp_err("--- dump task %d mem region ---\n", task->task_index);
 	if (!list_empty(&task->mem_region_list)) {
 		list_for_each_entry_safe(mem, n,
 					 &task->mem_region_list,
@@ -1778,54 +2040,41 @@
 	return 0;
 }
 
-int mpp_task_dump_hw_reg(struct mpp_dev *mpp, struct mpp_task *task)
+int mpp_task_dump_hw_reg(struct mpp_dev *mpp)
 {
-	if (!task)
-		return -EIO;
+	u32 i;
+	u32 s = mpp->var->hw_info->reg_start;
+	u32 e = mpp->var->hw_info->reg_end;
 
-	if (mpp_debug_unlikely(DEBUG_DUMP_ERR_REG)) {
-		u32 i;
-		u32 s = task->hw_info->reg_start;
-		u32 e = task->hw_info->reg_end;
+	mpp_err("--- dump hardware register ---\n");
+	for (i = s; i <= e; i++) {
+		u32 reg = i * sizeof(u32);
 
-		mpp_err("--- dump hardware register ---\n");
-		for (i = s; i <= e; i++) {
-			u32 reg = i * sizeof(u32);
-
-			mpp_err("reg[%03d]: %04x: 0x%08x\n",
+		mpp_err("reg[%03d]: %04x: 0x%08x\n",
 				i, reg, readl_relaxed(mpp->reg_base + reg));
-		}
 	}
 
 	return 0;
 }
 
-static int mpp_iommu_handle(struct iommu_domain *iommu,
-			    struct device *iommu_dev,
-			    unsigned long iova,
-			    int status, void *arg)
+void mpp_reg_show(struct mpp_dev *mpp, u32 offset)
 {
-	struct mpp_dev *mpp = (struct mpp_dev *)arg;
-	struct mpp_taskqueue *queue = mpp->queue;
-	struct mpp_task *task = mpp_taskqueue_get_running_task(queue);
+	if (!mpp)
+		return;
 
-	/*
-	 * NOTE: In link mode, this task may not be the task of the current
-	 * hardware processing error
-	 */
-	if (!task || !task->session)
-		return -EIO;
-	/* get mpp from cur task */
-	mpp = task->session->mpp;
-	dev_err(mpp->dev, "fault addr 0x%08lx status %x\n", iova, status);
+	dev_err(mpp->dev, "reg[%03d]: %04x: 0x%08x\n",
+		offset >> 2, offset, mpp_read_relaxed(mpp, offset));
+}
 
-	mpp_task_dump_mem_region(mpp, task);
-	mpp_task_dump_hw_reg(mpp, task);
+void mpp_reg_show_range(struct mpp_dev *mpp, u32 start, u32 end)
+{
+	u32 offset;
 
-	if (mpp->iommu_info->hdl)
-		mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, mpp);
+	if (!mpp)
+		return;
 
-	return 0;
+	for (offset = start; offset < end; offset += sizeof(u32))
+		mpp_reg_show(mpp, offset);
 }
 
 /* The device will do more probing work after this */
@@ -1843,6 +2092,16 @@
 	/* read flag for pum idle request */
 	mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request");
 
+	/* read link table capacity */
+	ret = of_property_read_u32(np, "rockchip,task-capacity",
+				   &mpp->task_capacity);
+	if (ret)
+		mpp->task_capacity = 1;
+
+	mpp->dev = dev;
+	mpp->hw_ops = mpp->var->hw_ops;
+	mpp->dev_ops = mpp->var->dev_ops;
+
 	/* Get and attach to service */
 	ret = mpp_attach_service(mpp, dev);
 	if (ret) {
@@ -1850,24 +2109,9 @@
 		return -ENODEV;
 	}
 
-	mpp->dev = dev;
-	mpp->hw_ops = mpp->var->hw_ops;
-	mpp->dev_ops = mpp->var->dev_ops;
-
-	/* read link table capacity */
-	ret = of_property_read_u32(np, "rockchip,task-capacity",
-				   &mpp->task_capacity);
-	if (ret) {
-		mpp->task_capacity = 1;
-
-		/* power domain autosuspend delay 2s */
-		pm_runtime_set_autosuspend_delay(dev, 2000);
-		pm_runtime_use_autosuspend(dev);
-	} else {
-		dev_info(dev, "%d task capacity link mode detected\n",
-			 mpp->task_capacity);
-		/* do not setup autosuspend on multi task device */
-	}
+	/* power domain autosuspend delay 2s */
+	pm_runtime_set_autosuspend_delay(dev, 2000);
+	pm_runtime_use_autosuspend(dev);
 
 	kthread_init_work(&mpp->work, mpp_task_worker_default);
 
@@ -1878,7 +2122,6 @@
 
 	device_init_wakeup(dev, true);
 	pm_runtime_enable(dev);
-
 	mpp->irq = platform_get_irq(pdev, 0);
 	if (mpp->irq < 0) {
 		dev_err(dev, "No interrupt resource found\n");
@@ -1905,42 +2148,36 @@
 		ret = -ENOMEM;
 		goto failed;
 	}
+	mpp->io_base = res->start;
 
-	pm_runtime_get_sync(dev);
 	/*
 	 * TODO: here or at the device itself, some device does not
 	 * have the iommu, maybe in the device is better.
 	 */
 	mpp->iommu_info = mpp_iommu_probe(dev);
 	if (IS_ERR(mpp->iommu_info)) {
-		dev_err(dev, "failed to attach iommu: %ld\n",
-			PTR_ERR(mpp->iommu_info));
+		dev_err(dev, "failed to attach iommu\n");
+		mpp->iommu_info = NULL;
 	}
 	if (mpp->hw_ops->init) {
 		ret = mpp->hw_ops->init(mpp);
 		if (ret)
-			goto failed_init;
+			goto failed;
 	}
-	/* set iommu fault handler */
-	if (!IS_ERR(mpp->iommu_info))
-		iommu_set_fault_handler(mpp->iommu_info->domain,
-					mpp_iommu_handle, mpp);
 
 	/* read hardware id */
 	if (hw_info->reg_id >= 0) {
+		pm_runtime_get_sync(dev);
 		if (mpp->hw_ops->clk_on)
 			mpp->hw_ops->clk_on(mpp);
 
 		hw_info->hw_id = mpp_read(mpp, hw_info->reg_id * sizeof(u32));
 		if (mpp->hw_ops->clk_off)
 			mpp->hw_ops->clk_off(mpp);
+		pm_runtime_put_sync(dev);
 	}
 
-	pm_runtime_put_sync(dev);
-
 	return ret;
-failed_init:
-	pm_runtime_put_sync(dev);
 failed:
 	mpp_detach_workqueue(mpp);
 	device_init_wakeup(dev, false);
@@ -1955,12 +2192,31 @@
 		mpp->hw_ops->exit(mpp);
 
 	mpp_iommu_remove(mpp->iommu_info);
-	platform_device_put(mpp->pdev_srv);
 	mpp_detach_workqueue(mpp);
 	device_init_wakeup(mpp->dev, false);
 	pm_runtime_disable(mpp->dev);
 
 	return 0;
+}
+
+void mpp_dev_shutdown(struct platform_device *pdev)
+{
+	int ret;
+	int val;
+	struct device *dev = &pdev->dev;
+	struct mpp_dev *mpp = dev_get_drvdata(dev);
+
+	dev_info(dev, "shutdown device\n");
+
+	atomic_inc(&mpp->srv->shutdown_request);
+	ret = readx_poll_timeout(atomic_read,
+				 &mpp->task_count,
+				 val, val == 0, 20000, 200000);
+	if (ret == -ETIMEDOUT)
+		dev_err(dev, "wait total %d running time out\n",
+			atomic_read(&mpp->task_count));
+	else
+		dev_info(dev, "shutdown success\n");
 }
 
 int mpp_dev_register_srv(struct mpp_dev *mpp, struct mpp_service *srv)
@@ -1989,7 +2245,7 @@
 		irq_ret = mpp->dev_ops->irq(mpp);
 
 	if (task) {
-		if (irq_ret != IRQ_NONE) {
+		if (irq_ret == IRQ_WAKE_THREAD) {
 			/* if wait or delayed work timeout, abort request will turn on,
 			 * isr should not to response, and handle it in delayed work
 			 */
@@ -2007,6 +2263,9 @@
 			/* normal condition, set state and wake up isr thread */
 			set_bit(TASK_STATE_IRQ, &task->state);
 		}
+
+		if (irq_ret == IRQ_WAKE_THREAD)
+			mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
 	} else {
 		mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n");
 	}
@@ -2083,27 +2342,31 @@
 
 int mpp_time_part_diff(struct mpp_task *task)
 {
-	ktime_t end;
-	struct mpp_dev *mpp = task->session->mpp;
+	if (mpp_debug_unlikely(DEBUG_TIMING)) {
+		ktime_t end;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
 
-	end = ktime_get();
-	mpp_debug(DEBUG_PART_TIMING, "%s: session %d:%d part time: %lld us\n",
-		  dev_name(mpp->dev), task->session->pid, task->session->index,
-		  ktime_us_delta(end, task->part));
-	task->part = end;
+		end = ktime_get();
+		mpp_debug(DEBUG_PART_TIMING, "%s:%d session %d:%d part time: %lld us\n",
+			dev_name(mpp->dev), task->core_id, task->session->pid,
+			task->session->index, ktime_us_delta(end, task->part));
+		task->part = end;
+	}
 
 	return 0;
 }
 
 int mpp_time_diff(struct mpp_task *task)
 {
-	ktime_t end;
-	struct mpp_dev *mpp = task->session->mpp;
+	if (mpp_debug_unlikely(DEBUG_TIMING)) {
+		ktime_t end;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
 
-	end = ktime_get();
-	mpp_debug(DEBUG_TIMING, "%s: session %d:%d task time: %lld us\n",
-		  dev_name(mpp->dev), task->session->pid, task->session->index,
-		  ktime_us_delta(end, task->start));
+		end = ktime_get();
+		mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
+			dev_name(mpp->dev), task->core_id, task->session->pid,
+			task->session->index, ktime_us_delta(end, task->start));
+	}
 
 	return 0;
 }
@@ -2112,19 +2375,19 @@
 {
 	if (mpp_debug_unlikely(DEBUG_TIMING)) {
 		ktime_t end;
-		struct mpp_dev *mpp = task->session->mpp;
+		struct mpp_dev *mpp = mpp_get_task_used_device(task, task->session);
 
 		end = ktime_get();
 
 		if (clk_hz)
-			mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us hw %d us\n",
-				dev_name(mpp->dev), task->session->index,
-				ktime_us_delta(end, task->start),
+			mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us hw %d us\n",
+				dev_name(mpp->dev), task->core_id, task->session->pid,
+				task->session->index, ktime_us_delta(end, task->start),
 				task->hw_cycles / (clk_hz / 1000000));
 		else
-			mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us\n",
-				  dev_name(mpp->dev), task->session->index,
-				  ktime_us_delta(end, task->start));
+			mpp_debug(DEBUG_TIMING, "%s:%d session %d:%d time: %lld us\n",
+				dev_name(mpp->dev), task->core_id, task->session->pid,
+				task->session->index, ktime_us_delta(end, task->start));
 	}
 
 	return 0;
@@ -2143,7 +2406,7 @@
 	ktime_t s = task->on_create;
 	unsigned long state = task->state;
 
-	pr_info("task %d dump timing at %lld us:", task->task_index, time_diff);
+	pr_info("task %d dump timing at %lld us:", task->task_id, time_diff);
 
 	pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s));
 	LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end",     task->on_create_end, s);
@@ -2325,11 +2588,11 @@
 	return count;
 }
 
-static const struct file_operations procfs_fops_u32 = {
-	.open = fops_open_u32,
-	.read = seq_read,
-	.release = single_release,
-	.write = fops_write_u32,
+static const struct proc_ops procfs_fops_u32 = {
+	.proc_open = fops_open_u32,
+	.proc_read = seq_read,
+	.proc_release = single_release,
+	.proc_write = fops_write_u32,
 };
 
 struct proc_dir_entry *
@@ -2341,6 +2604,7 @@
 
 void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp)
 {
+	mpp_procfs_create_u32("disable_work", 0644, parent, &mpp->disable);
 	mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check);
 }
 #endif

--
Gitblit v1.6.2