From 0d8657dd3056063fb115946b10157477b5c70451 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 20 Nov 2023 09:09:45 +0000
Subject: [PATCH] mpp: rework task wait/completion handling and add per-task timing statistics
---
kernel/drivers/video/rockchip/mpp/mpp_common.c | 455 +++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 284 insertions(+), 171 deletions(-)
diff --git a/kernel/drivers/video/rockchip/mpp/mpp_common.c b/kernel/drivers/video/rockchip/mpp/mpp_common.c
index 728704a..3b3d02a 100644
--- a/kernel/drivers/video/rockchip/mpp/mpp_common.c
+++ b/kernel/drivers/video/rockchip/mpp/mpp_common.c
@@ -36,7 +36,6 @@
#include "mpp_common.h"
#include "mpp_iommu.h"
-#define MPP_WORK_TIMEOUT_DELAY (200)
#define MPP_WAIT_TIMEOUT_DELAY (2000)
/* Use 'v' as magic number */
@@ -231,20 +230,9 @@
return 0;
}
-static int mpp_session_clear(struct mpp_dev *mpp,
- struct mpp_session *session)
+static int mpp_session_clear_pending(struct mpp_session *session)
{
struct mpp_task *task = NULL, *n;
-
- /* clear session done list */
- mutex_lock(&session->done_lock);
- list_for_each_entry_safe(task, n,
- &session->done_list,
- done_link) {
- list_del_init(&task->done_link);
- kref_put(&task->ref, mpp_free_task);
- }
- mutex_unlock(&session->done_lock);
/* clear session pending list */
mutex_lock(&session->pending_lock);
@@ -261,6 +249,49 @@
return 0;
}
+void mpp_session_cleanup_detach(struct mpp_taskqueue *queue, struct kthread_work *work)
+{
+ struct mpp_session *session, *n;
+
+ if (!atomic_read(&queue->detach_count))
+ return;
+
+ mutex_lock(&queue->session_lock);
+ list_for_each_entry_safe(session, n, &queue->session_detach, session_link) {
+ s32 task_count = atomic_read(&session->task_count);
+
+ if (!task_count) {
+ list_del_init(&session->session_link);
+ atomic_dec(&queue->detach_count);
+ }
+
+ mutex_unlock(&queue->session_lock);
+
+ if (task_count) {
+ mpp_dbg_session("session %d:%d task not finished %d\n",
+ session->pid, session->index,
+ atomic_read(&queue->detach_count));
+
+ mpp_session_clear_pending(session);
+ } else {
+ mpp_dbg_session("queue detach %d\n",
+ atomic_read(&queue->detach_count));
+
+ mpp_session_deinit(session);
+ }
+
+ mutex_lock(&queue->session_lock);
+ }
+ mutex_unlock(&queue->session_lock);
+
+ if (atomic_read(&queue->detach_count)) {
+ mpp_dbg_session("queue detach %d again\n",
+ atomic_read(&queue->detach_count));
+
+ kthread_queue_work(&queue->worker, work);
+ }
+}
+
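
The helper above deliberately drops queue->session_lock around each session's teardown (deinit can block), re-takes it to continue the list walk, and re-queues the work if busy sessions had to be skipped. A minimal userspace sketch of that drop-the-lock-while-processing idiom, using pthreads and hypothetical node/cleanup names rather than the driver's types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {                       /* hypothetical stand-in for a detached session */
        struct node *next;
        int busy;                       /* analogue of session->task_count != 0 */
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *detach_head;

    static void node_deinit(struct node *n)   /* may block, so never call it locked */
    {
        printf("deinit %p\n", (void *)n);
        free(n);
    }

    static void cleanup_detached(void)
    {
        pthread_mutex_lock(&list_lock);
        while (detach_head) {
            struct node *n = detach_head;
            int busy = n->busy;               /* sample state while still locked */

            if (!busy)
                detach_head = n->next;        /* unlink under the lock */

            pthread_mutex_unlock(&list_lock); /* drop the lock for the slow part */
            if (!busy)
                node_deinit(n);
            pthread_mutex_lock(&list_lock);   /* re-take it and continue the walk */

            if (busy)
                break;                        /* leave busy entries for a later pass */
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = calloc(1, sizeof(*n));
            n->next = detach_head;
            detach_head = n;
        }
        cleanup_detached();
        return 0;
    }
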
static struct mpp_session *mpp_session_init(void)
{
struct mpp_session *session = kzalloc(sizeof(*session), GFP_KERNEL);
@@ -271,13 +302,10 @@
session->pid = current->pid;
mutex_init(&session->pending_lock);
- mutex_init(&session->done_lock);
INIT_LIST_HEAD(&session->pending_list);
- INIT_LIST_HEAD(&session->done_list);
INIT_LIST_HEAD(&session->service_link);
INIT_LIST_HEAD(&session->session_link);
- init_waitqueue_head(&session->wait);
atomic_set(&session->task_count, 0);
atomic_set(&session->release_request, 0);
@@ -293,7 +321,7 @@
if (mpp->dev_ops->free_session)
mpp->dev_ops->free_session(session);
- mpp_session_clear(mpp, session);
+ mpp_session_clear_pending(session);
if (session->dma) {
mpp_iommu_down_read(mpp->iommu_info);
@@ -314,14 +342,10 @@
list_del_init(&session->session_link);
}
-int mpp_session_deinit(struct mpp_session *session)
+void mpp_session_deinit(struct mpp_session *session)
{
- u32 task_count = atomic_read(&session->task_count);
-
- mpp_dbg_session("session %p:%d task %d release\n",
- session, session->index, task_count);
- if (task_count)
- return -1;
+ mpp_dbg_session("session %d:%d task %d deinit\n", session->pid,
+ session->index, atomic_read(&session->task_count));
if (likely(session->deinit))
session->deinit(session);
@@ -331,13 +355,12 @@
mpp_dbg_session("session %p:%d deinit\n", session, session->index);
kfree(session);
- return 0;
}
static void mpp_session_attach_workqueue(struct mpp_session *session,
struct mpp_taskqueue *queue)
{
- mpp_dbg_session("session %p:%d attach\n", session, session->index);
+ mpp_dbg_session("session %d:%d attach\n", session->pid, session->index);
mutex_lock(&queue->session_lock);
list_add_tail(&session->session_link, &queue->session_attach);
mutex_unlock(&queue->session_lock);
@@ -351,14 +374,14 @@
if (!session->mpp || !session->mpp->queue)
return;
- mpp_dbg_session("session %p:%d detach\n", session, session->index);
+ mpp_dbg_session("session %d:%d detach\n", session->pid, session->index);
mpp = session->mpp;
queue = mpp->queue;
mutex_lock(&queue->session_lock);
list_del_init(&session->session_link);
list_add_tail(&session->session_link, &queue->session_detach);
- queue->detach_count++;
+ atomic_inc(&queue->detach_count);
mutex_unlock(&queue->session_lock);
mpp_taskqueue_trigger_work(mpp);
@@ -370,6 +393,10 @@
{
kref_get(&task->ref);
mutex_lock(&session->pending_lock);
+ if (session->srv->timing_en) {
+ task->on_pending = ktime_get();
+ set_bit(TASK_TIMING_PENDING, &task->state);
+ }
list_add_tail(&task->pending_link, &session->pending_list);
mutex_unlock(&session->pending_lock);
@@ -402,29 +429,6 @@
return task;
}
-static int mpp_session_push_done(struct mpp_session *session,
- struct mpp_task *task)
-{
- kref_get(&task->ref);
- mutex_lock(&session->done_lock);
- list_add_tail(&task->done_link, &session->done_list);
- mutex_unlock(&session->done_lock);
-
- return 0;
-}
-
-static int mpp_session_pop_done(struct mpp_session *session,
- struct mpp_task *task)
-{
- mutex_lock(&session->done_lock);
- list_del_init(&task->done_link);
- mutex_unlock(&session->done_lock);
- set_bit(TASK_STATE_DONE, &task->state);
- kref_put(&task->ref, mpp_free_task);
-
- return 0;
-}
-
static void mpp_free_task(struct kref *ref)
{
struct mpp_dev *mpp;
@@ -438,9 +442,9 @@
session = task->session;
mpp_debug_func(DEBUG_TASK_INFO,
- "session=%p, task=%p, state=0x%lx, abort_request=%d\n",
- session, task, task->state,
- atomic_read(&task->abort_request));
+ "session %d:%d task %d state 0x%lx abort_request %d\n",
+ session->device_type, session->index, task->task_index,
+ task->state, atomic_read(&task->abort_request));
if (!session->mpp) {
mpp_err("session %p, session->mpp is null.\n", session);
return;
@@ -469,19 +473,21 @@
return;
}
- mpp_err("task %p processing time out!\n", task);
if (!task->session) {
mpp_err("task %p, task->session is null.\n", task);
return;
}
+
session = task->session;
if (!session->mpp) {
- mpp_err("session %p, session->mpp is null.\n", session);
+ mpp_err("session %d:%d, session mpp is null.\n", session->pid,
+ session->index);
return;
}
mpp = session->mpp;
-
+ dev_err(mpp->dev, "session %d:%d task %d state %lx processing time out!\n",
+ session->device_type, session->index, task->task_index, task->state);
synchronize_hardirq(mpp->irq);
if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
@@ -489,18 +495,32 @@
return;
}
+ mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create));
+
+ /* disable core irq */
+ disable_irq(mpp->irq);
+ /* disable mmu irq */
+ mpp_iommu_disable_irq(mpp->iommu_info);
+
/* hardware maybe dead, reset it */
mpp_reset_up_read(mpp->reset_group);
mpp_dev_reset(mpp);
mpp_power_off(mpp);
- mpp_session_push_done(session, task);
+ set_bit(TASK_STATE_TIMEOUT, &task->state);
+ set_bit(TASK_STATE_DONE, &task->state);
/* Wake up the GET thread */
- wake_up(&session->wait);
+ wake_up(&task->wait);
/* remove task from taskqueue running list */
- set_bit(TASK_STATE_TIMEOUT, &task->state);
mpp_taskqueue_pop_running(mpp->queue, task);
+
+ /* enable core irq */
+ enable_irq(mpp->irq);
+ /* enable mmu irq */
+ mpp_iommu_enable_irq(mpp->iommu_info);
+
+ mpp_taskqueue_trigger_work(mpp);
}
static int mpp_process_task_default(struct mpp_session *session,
@@ -508,6 +528,8 @@
{
struct mpp_task *task = NULL;
struct mpp_dev *mpp = session->mpp;
+ u32 timing_en;
+ ktime_t on_create;
if (unlikely(!mpp)) {
mpp_err("pid %d clinet %d found invalid process function\n",
@@ -515,13 +537,26 @@
return -EINVAL;
}
+ timing_en = session->srv->timing_en;
+ if (timing_en)
+ on_create = ktime_get();
+
if (mpp->dev_ops->alloc_task)
task = mpp->dev_ops->alloc_task(session, msgs);
if (!task) {
mpp_err("alloc_task failed.\n");
return -ENOMEM;
}
+
+ if (timing_en) {
+ task->on_create_end = ktime_get();
+ task->on_create = on_create;
+ set_bit(TASK_TIMING_CREATE_END, &task->state);
+ set_bit(TASK_TIMING_CREATE, &task->state);
+ }
+
kref_init(&task->ref);
+ init_waitqueue_head(&task->wait);
atomic_set(&task->abort_request, 0);
task->task_index = atomic_fetch_inc(&mpp->task_index);
INIT_DELAYED_WORK(&task->timeout_work, mpp_task_timeout_work);
@@ -544,7 +579,10 @@
/* trigger current queue to run task */
mpp_taskqueue_trigger_work(mpp);
kref_put(&task->ref, mpp_free_task);
-
+ mpp_debug_func(DEBUG_TASK_INFO,
+ "session %d:%d task %d state 0x%lx\n",
+ session->device_type, session->index,
+ task->task_index, task->state);
return 0;
}
@@ -599,10 +637,6 @@
group->resets[type] = rst;
group->queue = mpp->queue;
}
- /* if reset not in the same queue, it means different device
- * may reset in the same time, then rw_sem_on should set true.
- */
- group->rw_sem_on |= (group->queue != mpp->queue) ? true : false;
dev_info(mpp->dev, "reset_group->rw_sem_on=%d\n", group->rw_sem_on);
up_write(&group->rw_sem);
@@ -647,12 +681,49 @@
return 0;
}
+void mpp_task_run_begin(struct mpp_task *task, u32 timing_en, u32 timeout)
+{
+ preempt_disable();
+
+ set_bit(TASK_STATE_START, &task->state);
+
+ mpp_time_record(task);
+ schedule_delayed_work(&task->timeout_work, msecs_to_jiffies(timeout));
+
+ if (timing_en) {
+ task->on_sched_timeout = ktime_get();
+ set_bit(TASK_TIMING_TO_SCHED, &task->state);
+ }
+}
+
+void mpp_task_run_end(struct mpp_task *task, u32 timing_en)
+{
+ if (timing_en) {
+ task->on_run_end = ktime_get();
+ set_bit(TASK_TIMING_RUN_END, &task->state);
+ }
+
+#ifdef MODULE
+ preempt_enable();
+#else
+ preempt_enable_no_resched();
+#endif
+}
+
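
mpp_task_run_begin()/mpp_task_run_end() bracket the hardware kick-off: begin disables preemption, records the start time and arms the per-task timeout work (replacing the fixed MPP_WORK_TIMEOUT_DELAY scheduling that a later hunk removes from mpp_task_run), while end re-enables preemption, falling back to plain preempt_enable() when built as a module, presumably because preempt_enable_no_resched() is off-limits there. The arm/cancel pairing is the classic watchdog pattern; a standalone sketch with a POSIX timer standing in for the delayed work (all names here are illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Arm a one-shot watchdog before kicking the "hardware" and disarm it when
     * the job completes in time: a userspace analogue of the driver's
     * schedule_delayed_work()/cancel_delayed_work() pairing. */

    static void watchdog_fired(union sigval sv)
    {
        (void)sv;
        printf("watchdog: job timed out, a real driver would reset here\n");
    }

    int main(void)
    {
        timer_t wd;
        struct sigevent sev = {
            .sigev_notify = SIGEV_THREAD,
            .sigev_notify_function = watchdog_fired,
        };
        struct itimerspec arm = { .it_value = { .tv_sec = 2 } };  /* 2 s timeout */
        struct itimerspec disarm = { 0 };

        timer_create(CLOCK_MONOTONIC, &sev, &wd);
        timer_settime(wd, 0, &arm, NULL);    /* "run_begin": start the watchdog */

        sleep(1);                            /* pretend the hardware finished in 1 s */

        timer_settime(wd, 0, &disarm, NULL); /* "irq handler": cancel the watchdog */
        timer_delete(wd);
        printf("job completed before the watchdog fired\n");
        return 0;
    }

(Older glibc may need -lrt for the POSIX timer calls.)
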
static int mpp_task_run(struct mpp_dev *mpp,
struct mpp_task *task)
{
int ret;
+ struct mpp_session *session = task->session;
+ u32 timing_en;
mpp_debug_enter();
+
+ timing_en = mpp->srv->timing_en;
+ if (timing_en) {
+ task->on_run = ktime_get();
+ set_bit(TASK_TIMING_RUN, &task->state);
+ }
/*
* before running, we have to switch grf ctrl bit to ensure
@@ -679,8 +750,10 @@
mpp_power_on(mpp);
mpp_time_record(task);
- mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
- task->session->pid, dev_name(mpp->dev));
+ mpp_debug_func(DEBUG_TASK_INFO,
+ "%s session %d:%d task=%d state=0x%lx\n",
+ dev_name(mpp->dev), session->device_type,
+ session->index, task->task_index, task->state);
if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
mpp->hw_ops->set_freq(mpp, task);
@@ -690,8 +763,6 @@
*/
mpp_reset_down_read(mpp->reset_group);
- schedule_delayed_work(&task->timeout_work,
- msecs_to_jiffies(MPP_WORK_TIMEOUT_DELAY));
if (mpp->dev_ops->run)
mpp->dev_ops->run(mpp, task);
set_bit(TASK_STATE_START, &task->state);
@@ -709,6 +780,7 @@
mpp_debug_enter();
+get_task:
task = mpp_taskqueue_get_pending_task(queue);
if (!task)
goto done;
@@ -716,7 +788,7 @@
/* if task timeout and aborted, remove it */
if (atomic_read(&task->abort_request) > 0) {
mpp_taskqueue_pop_pending(queue, task);
- goto done;
+ goto get_task;
}
/* get device for current task */
@@ -748,28 +820,7 @@
}
done:
- mutex_lock(&queue->session_lock);
- while (queue->detach_count) {
- struct mpp_session *session = NULL;
-
- session = list_first_entry_or_null(&queue->session_detach, struct mpp_session,
- session_link);
- if (session) {
- list_del_init(&session->session_link);
- queue->detach_count--;
- }
-
- mutex_unlock(&queue->session_lock);
-
- if (session) {
- mpp_dbg_session("%s detach count %d\n", dev_name(mpp->dev),
- queue->detach_count);
- mpp_session_deinit(session);
- }
-
- mutex_lock(&queue->session_lock);
- }
- mutex_unlock(&queue->session_lock);
+ mpp_session_cleanup_detach(queue, work_s);
}
static int mpp_wait_result_default(struct mpp_session *session,
@@ -785,62 +836,32 @@
return -EINVAL;
}
- ret = wait_event_timeout(session->wait,
- !list_empty(&session->done_list),
- msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
-
task = mpp_session_get_pending_task(session);
if (!task) {
- mpp_err("session %p pending list is empty!\n", session);
+ mpp_err("session %d:%d pending list is empty!\n",
+ session->pid, session->index);
return -EIO;
}
+ ret = wait_event_timeout(task->wait,
+ test_bit(TASK_STATE_DONE, &task->state),
+ msecs_to_jiffies(MPP_WAIT_TIMEOUT_DELAY));
if (ret > 0) {
- u32 task_found = 0;
- struct mpp_task *loop = NULL, *n;
-
- /* find task in session done list */
- mutex_lock(&session->done_lock);
- list_for_each_entry_safe(loop, n,
- &session->done_list,
- done_link) {
- if (loop == task) {
- task_found = 1;
- break;
- }
- }
- mutex_unlock(&session->done_lock);
- if (task_found) {
- if (mpp->dev_ops->result)
- ret = mpp->dev_ops->result(mpp, task, msgs);
- mpp_session_pop_done(session, task);
-
- if (test_bit(TASK_STATE_TIMEOUT, &task->state))
- ret = -ETIMEDOUT;
- } else {
- mpp_err("session %p task %p, not found in done list!\n",
- session, task);
- ret = -EIO;
- }
+ if (mpp->dev_ops->result)
+ ret = mpp->dev_ops->result(mpp, task, msgs);
} else {
atomic_inc(&task->abort_request);
- mpp_err("timeout, pid %d session %p:%d count %d cur_task %p index %d.\n",
- session->pid, session, session->index,
- atomic_read(&session->task_count), task,
- task->task_index);
- /* if twice and return timeout, otherwise, re-wait */
- if (atomic_read(&task->abort_request) > 1) {
- mpp_err("session %p:%d, task %p index %d abort wait twice!\n",
- session, session->index,
- task, task->task_index);
- ret = -ETIMEDOUT;
- } else {
- return mpp_wait_result_default(session, msgs);
- }
+ set_bit(TASK_STATE_ABORT, &task->state);
+ mpp_err("timeout, pid %d session %d:%d count %d cur_task %d state %lx\n",
+ session->pid, session->device_type, session->index,
+ atomic_read(&session->task_count), task->task_index, task->state);
}
mpp_debug_func(DEBUG_TASK_INFO,
- "kref_read=%d, ret=%d\n", kref_read(&task->ref), ret);
+ "session %d:%d task %d state 0x%lx kref_read %d ret %d\n",
+ session->device_type,
+ session->index, task->task_index, task->state,
+ kref_read(&task->ref), ret);
mpp_session_pop_pending(session, task);
return ret;
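
The wait path now blocks on the task's own wait queue for its TASK_STATE_DONE bit instead of scanning a session-wide done list, so a completion, timeout or abort only ever wakes the one waiter that owns that task. A userspace sketch of the same per-task completion idea, using a condition variable plus flag (hypothetical names, not the driver's API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    struct fake_task {                   /* hypothetical, not the driver's struct */
        pthread_mutex_t lock;
        pthread_cond_t  wait;            /* plays the role of task->wait */
        int             done;            /* plays the role of TASK_STATE_DONE */
    };

    static void task_mark_done(struct fake_task *t)      /* irq/timeout side */
    {
        pthread_mutex_lock(&t->lock);
        t->done = 1;
        pthread_cond_signal(&t->wait);   /* wake only this task's waiter */
        pthread_mutex_unlock(&t->lock);
    }

    static int task_wait_done(struct fake_task *t, int timeout_ms)  /* ioctl side */
    {
        struct timespec ts;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec  += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&t->lock);
        while (!t->done && err != ETIMEDOUT)
            err = pthread_cond_timedwait(&t->wait, &t->lock, &ts);
        pthread_mutex_unlock(&t->lock);

        return t->done ? 0 : -ETIMEDOUT;
    }

    static void *completer(void *arg)
    {
        struct timespec d = { .tv_sec = 0, .tv_nsec = 100 * 1000000L };

        nanosleep(&d, NULL);             /* pretend the hardware took 100 ms */
        task_mark_done(arg);
        return NULL;
    }

    int main(void)
    {
        struct fake_task t = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .wait = PTHREAD_COND_INITIALIZER,
        };
        pthread_t th;

        pthread_create(&th, NULL, completer, &t);
        printf("wait returned %d\n", task_wait_done(&t, 2000));
        pthread_join(th, NULL);
        return 0;
    }
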
@@ -919,6 +940,10 @@
goto err_put_pdev;
} else {
mpp->reset_group = mpp->srv->reset_groups[reset_group_node];
+ if (!mpp->reset_group->queue)
+ mpp->reset_group->queue = queue;
+ if (mpp->reset_group->queue != mpp->queue)
+ mpp->reset_group->rw_sem_on = true;
}
}
@@ -951,6 +976,10 @@
/* default taskqueue has max 16 task capacity */
queue->task_capacity = MPP_MAX_TASK_CAPACITY;
+
+ mutex_init(&queue->ref_lock);
+ atomic_set(&queue->runtime_cnt, 0);
+ atomic_set(&queue->detach_count, 0);
return queue;
}
@@ -1197,7 +1226,7 @@
if (!mpp)
return -EINVAL;
- mpp_session_clear(mpp, session);
+ mpp_session_clear_pending(session);
mpp_iommu_down_write(mpp->iommu_info);
ret = mpp_dma_session_destroy(session->dma);
mpp_iommu_up_write(mpp->iommu_info);
@@ -1410,7 +1439,7 @@
/* wait for task all done */
atomic_inc(&session->release_request);
- if (session->mpp)
+ if (session->mpp || atomic_read(&session->task_count))
mpp_session_detach_workqueue(session);
else
mpp_session_deinit(session);
@@ -1421,24 +1450,9 @@
return 0;
}
-static unsigned int
-mpp_dev_poll(struct file *filp, poll_table *wait)
-{
- unsigned int mask = 0;
- struct mpp_session *session =
- (struct mpp_session *)filp->private_data;
-
- poll_wait(filp, &session->wait, wait);
- if (!list_empty(&session->done_list))
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-
const struct file_operations rockchip_mpp_fops = {
.open = mpp_dev_open,
.release = mpp_dev_release,
- .poll = mpp_dev_poll,
.unlocked_ioctl = mpp_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mpp_dev_ioctl,
@@ -1673,12 +1687,23 @@
mpp_dev_reset(mpp);
mpp_power_off(mpp);
- if (!atomic_read(&task->abort_request)) {
- mpp_session_push_done(session, task);
- /* Wake up the GET thread */
- wake_up(&session->wait);
- }
set_bit(TASK_STATE_FINISH, &task->state);
+ set_bit(TASK_STATE_DONE, &task->state);
+
+ if (session->srv->timing_en) {
+ s64 time_diff;
+
+ task->on_finish = ktime_get();
+ set_bit(TASK_TIMING_FINISH, &task->state);
+
+ time_diff = ktime_us_delta(task->on_finish, task->on_create);
+
+ if (mpp->timing_check && time_diff > (s64)mpp->timing_check)
+ mpp_task_dump_timing(task, time_diff);
+ }
+
+ /* Wake up the GET thread */
+ wake_up(&task->wait);
mpp_taskqueue_pop_running(mpp->queue, task);
return 0;
@@ -1780,9 +1805,9 @@
unsigned long iova,
int status, void *arg)
{
- struct mpp_taskqueue *queue = (struct mpp_taskqueue *)arg;
+ struct mpp_dev *mpp = (struct mpp_dev *)arg;
+ struct mpp_taskqueue *queue = mpp->queue;
struct mpp_task *task = mpp_taskqueue_get_running_task(queue);
- struct mpp_dev *mpp;
/*
* NOTE: In link mode, this task may not be the task of the current
@@ -1798,7 +1823,7 @@
mpp_task_dump_hw_reg(mpp, task);
if (mpp->iommu_info->hdl)
- mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, arg);
+ mpp->iommu_info->hdl(iommu, iommu_dev, iova, status, mpp);
return 0;
}
@@ -1815,6 +1840,8 @@
/* Get disable auto frequent flag from dtsi */
mpp->auto_freq_en = !device_property_read_bool(dev, "rockchip,disable-auto-freq");
+ /* read flag for pmu idle request */
+ mpp->skip_idle = device_property_read_bool(dev, "rockchip,skip-pmu-idle-request");
/* Get and attach to service */
ret = mpp_attach_service(mpp, dev);
@@ -1897,7 +1924,7 @@
/* set iommu fault handler */
if (!IS_ERR(mpp->iommu_info))
iommu_set_fault_handler(mpp->iommu_info->domain,
- mpp_iommu_handle, mpp->queue);
+ mpp_iommu_handle, mpp);
/* read hardware id */
if (hw_info->reg_id >= 0) {
@@ -1951,6 +1978,12 @@
struct mpp_dev *mpp = param;
struct mpp_task *task = mpp->cur_task;
irqreturn_t irq_ret = IRQ_NONE;
+ u32 timing_en = mpp->srv->timing_en;
+
+ if (task && timing_en) {
+ task->on_irq = ktime_get();
+ set_bit(TASK_TIMING_IRQ, &task->state);
+ }
if (mpp->dev_ops->irq)
irq_ret = mpp->dev_ops->irq(mpp);
@@ -1965,6 +1998,10 @@
mpp->irq_status);
irq_ret = IRQ_HANDLED;
goto done;
+ }
+ if (timing_en) {
+ task->on_cancel_timeout = ktime_get();
+ set_bit(TASK_TIMING_TO_CANCEL, &task->state);
}
cancel_delayed_work(&task->timeout_work);
/* normal condition, set state and wake up isr thread */
@@ -1981,6 +2018,12 @@
{
irqreturn_t ret = IRQ_NONE;
struct mpp_dev *mpp = param;
+ struct mpp_task *task = mpp->cur_task;
+
+ if (task && mpp->srv->timing_en) {
+ task->on_isr = ktime_get();
+ set_bit(TASK_TIMING_ISR, &task->state);
+ }
if (mpp->auto_freq_en &&
mpp->hw_ops->reduce_freq &&
@@ -2030,24 +2073,88 @@
int mpp_time_record(struct mpp_task *task)
{
- if (mpp_debug_unlikely(DEBUG_TIMING) && task)
- do_gettimeofday(&task->start);
+ if (mpp_debug_unlikely(DEBUG_TIMING) && task) {
+ task->start = ktime_get();
+ task->part = task->start;
+ }
+
+ return 0;
+}
+
+int mpp_time_part_diff(struct mpp_task *task)
+{
+ ktime_t end;
+ struct mpp_dev *mpp = task->session->mpp;
+
+ end = ktime_get();
+ mpp_debug(DEBUG_PART_TIMING, "%s: session %d:%d part time: %lld us\n",
+ dev_name(mpp->dev), task->session->pid, task->session->index,
+ ktime_us_delta(end, task->part));
+ task->part = end;
return 0;
}
int mpp_time_diff(struct mpp_task *task)
{
- struct timeval end;
+ ktime_t end;
struct mpp_dev *mpp = task->session->mpp;
- do_gettimeofday(&end);
- mpp_debug(DEBUG_TIMING, "%s: pid: %d, session: %p, time: %ld us\n",
- dev_name(mpp->dev), task->session->pid, task->session,
- (end.tv_sec - task->start.tv_sec) * 1000000 +
- (end.tv_usec - task->start.tv_usec));
+ end = ktime_get();
+ mpp_debug(DEBUG_TIMING, "%s: session %d:%d task time: %lld us\n",
+ dev_name(mpp->dev), task->session->pid, task->session->index,
+ ktime_us_delta(end, task->start));
return 0;
+}
+
+int mpp_time_diff_with_hw_time(struct mpp_task *task, u32 clk_hz)
+{
+ if (mpp_debug_unlikely(DEBUG_TIMING)) {
+ ktime_t end;
+ struct mpp_dev *mpp = task->session->mpp;
+
+ end = ktime_get();
+
+ if (clk_hz)
+ mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us hw %d us\n",
+ dev_name(mpp->dev), task->session->index,
+ ktime_us_delta(end, task->start),
+ task->hw_cycles / (clk_hz / 1000000));
+ else
+ mpp_debug(DEBUG_TIMING, "%s: session %d time: %lld us\n",
+ dev_name(mpp->dev), task->session->index,
+ ktime_us_delta(end, task->start));
+ }
+
+ return 0;
+}
+
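
mpp_time_diff_with_hw_time() reports a task both in wall-clock time and in hardware time, the latter derived from a cycle counter: hw_cycles / (clk_hz / 1000000) converts cycles at clk_hz into microseconds. The same arithmetic in a standalone userspace form, with clock_gettime() standing in for ktime_get() and made-up cycle and clock numbers:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Userspace analogue of ktime_us_delta() plus the cycles-to-microseconds
     * conversion used above; the cycle/clock values are illustrative only. */

    static int64_t us_delta(const struct timespec *end, const struct timespec *start)
    {
        return (int64_t)(end->tv_sec - start->tv_sec) * 1000000 +
               (end->tv_nsec - start->tv_nsec) / 1000;
    }

    int main(void)
    {
        struct timespec start, end;
        uint32_t hw_cycles = 297000;           /* hypothetical cycle counter value */
        uint32_t clk_hz = 297000000;           /* hypothetical 297 MHz core clock */

        clock_gettime(CLOCK_MONOTONIC, &start);
        /* ... work being timed ... */
        clock_gettime(CLOCK_MONOTONIC, &end);

        printf("wall time: %lld us, hw time: %u us\n",
               (long long)us_delta(&end, &start),
               hw_cycles / (clk_hz / 1000000)); /* 297000 / 297 = 1000 us */
        return 0;
    }
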
+#define LOG_TIMING(state, id, stage, time, base) \
+ do { \
+ if (test_bit(id, &state)) \
+ pr_info("timing: %-14s : %lld us\n", stage, ktime_us_delta(time, base)); \
+ else \
+ pr_info("timing: %-14s : invalid\n", stage); \
+ } while (0)
+
+void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff)
+{
+ ktime_t s = task->on_create;
+ unsigned long state = task->state;
+
+ pr_info("task %d dump timing at %lld us:", task->task_index, time_diff);
+
+ pr_info("timing: %-14s : %lld us\n", "create", ktime_to_us(s));
+ LOG_TIMING(state, TASK_TIMING_CREATE_END, "create end", task->on_create_end, s);
+ LOG_TIMING(state, TASK_TIMING_PENDING, "pending", task->on_pending, s);
+ LOG_TIMING(state, TASK_TIMING_RUN, "run", task->on_run, s);
+ LOG_TIMING(state, TASK_TIMING_TO_SCHED, "timeout start", task->on_sched_timeout, s);
+ LOG_TIMING(state, TASK_TIMING_RUN_END, "run end", task->on_run_end, s);
+ LOG_TIMING(state, TASK_TIMING_IRQ, "irq", task->on_irq, s);
+ LOG_TIMING(state, TASK_TIMING_TO_CANCEL, "timeout cancel", task->on_cancel_timeout, s);
+ LOG_TIMING(state, TASK_TIMING_ISR, "isr", task->on_isr, s);
+ LOG_TIMING(state, TASK_TIMING_FINISH, "finish", task->on_finish, s);
}
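
mpp_task_dump_timing() only prints a stage whose TASK_TIMING_* bit was set, so stages a task never reached show up as "invalid" rather than as a bogus delta. A compact standalone version of that record-and-dump pattern (stage names and the struct are illustrative, not the driver's):

    #include <stdio.h>
    #include <time.h>

    enum { STAGE_CREATE, STAGE_RUN, STAGE_IRQ, STAGE_FINISH, STAGE_MAX };

    struct trace {
        unsigned long   valid;              /* one bit per recorded stage */
        struct timespec ts[STAGE_MAX];
    };

    static void stage_mark(struct trace *t, int stage)
    {
        clock_gettime(CLOCK_MONOTONIC, &t->ts[stage]);
        t->valid |= 1UL << stage;
    }

    static long long us_since(const struct timespec *a, const struct timespec *b)
    {
        return (long long)(b->tv_sec - a->tv_sec) * 1000000 +
               (b->tv_nsec - a->tv_nsec) / 1000;
    }

    static void stage_dump(const struct trace *t)
    {
        static const char *name[STAGE_MAX] = { "create", "run", "irq", "finish" };

        for (int i = 1; i < STAGE_MAX; i++) {   /* deltas are relative to create */
            if (t->valid & (1UL << i))
                printf("timing: %-8s : %lld us\n", name[i],
                       us_since(&t->ts[STAGE_CREATE], &t->ts[i]));
            else
                printf("timing: %-8s : invalid\n", name[i]);
        }
    }

    int main(void)
    {
        struct trace t = { 0 };

        stage_mark(&t, STAGE_CREATE);
        stage_mark(&t, STAGE_RUN);
        /* STAGE_IRQ deliberately skipped to show the "invalid" case */
        stage_mark(&t, STAGE_FINISH);
        stage_dump(&t);
        return 0;
    }
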
int mpp_write_req(struct mpp_dev *mpp, u32 *regs,
@@ -2184,6 +2291,7 @@
if (clk_rate_hz) {
clk_info->used_rate_hz = clk_rate_hz;
clk_set_rate(clk_info->clk, clk_rate_hz);
+ clk_info->real_rate_hz = clk_get_rate(clk_info->clk);
}
return 0;
@@ -2230,4 +2338,9 @@
{
return proc_create_data(name, mode, parent, &procfs_fops_u32, data);
}
+
+void mpp_procfs_create_common(struct proc_dir_entry *parent, struct mpp_dev *mpp)
+{
+ mpp_procfs_create_u32("timing_check", 0644, parent, &mpp->timing_check);
+}
#endif
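
The new common procfs hook exposes a per-device timing_check node; judging by the finish path above (time_diff is a ktime_us_delta), the value is a threshold in microseconds above which a completed task dumps its per-stage timing to the kernel log. A hedged usage sketch; the procfs path below is a guess for illustration, so check the actual mpp_service procfs layout on the target:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical path, adjust to the platform's mpp procfs layout */
        const char *node = "/proc/mpp_service/rkvdec/timing_check";
        FILE *f = fopen(node, "w");

        if (!f) {
            perror("open timing_check");
            return 1;
        }
        fprintf(f, "%d\n", 20000);   /* flag tasks slower than 20 ms end to end */
        fclose(f);
        return 0;
    }
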
--
Gitblit v1.6.2