From 15ade055295d13f95d49e3d99b09f3bbfb4a43e7 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 06 Nov 2023 07:25:24 +0000
Subject: [PATCH] video: rockchip: rga3: add job refcounting and rework fence handling

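Add kref-based reference counting to struct rga_job so a job stays valid
while both the scheduler and the IRQ completion path hold it, and track
job progress with RGA_JOB_STATE_* bits. rga_job_done() now returns the
finished job instead of calling rga_job_finish_and_next() directly, and a
NULL running_job is reported rather than dereferenced.

Rework async fence handling: allocate the release fence first and export
its fd only after the request has been committed, take a reference on the
request before registering the acquire-fence callback so the request
cannot be freed before the callback runs, and use close_fd() on kernels
v6.1 and later. Also query the job state on wait timeout to distinguish
hardware from software timeouts, destroy pending requests when a session
exits, and allocate request IDs with idr_alloc_cyclic().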
---
 kernel/drivers/video/rockchip/rga3/rga_job.c |  357 +++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 212 insertions(+), 145 deletions(-)

diff --git a/kernel/drivers/video/rockchip/rga3/rga_job.c b/kernel/drivers/video/rockchip/rga3/rga_job.c
index 47bb908..bbe6fa8 100644
--- a/kernel/drivers/video/rockchip/rga3/rga_job.c
+++ b/kernel/drivers/video/rockchip/rga3/rga_job.c
@@ -14,74 +14,28 @@
 #include "rga_iommu.h"
 #include "rga_debugger.h"
 
-struct rga_job *
-rga_scheduler_get_pending_job_list(struct rga_scheduler_t *scheduler)
-{
-	unsigned long flags;
-	struct rga_job *job;
-
-	spin_lock_irqsave(&scheduler->irq_lock, flags);
-
-	job = list_first_entry_or_null(&scheduler->todo_list,
-		struct rga_job, head);
-
-	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-	return job;
-}
-
-struct rga_job *
-rga_scheduler_get_running_job(struct rga_scheduler_t *scheduler)
-{
-	unsigned long flags;
-	struct rga_job *job;
-
-	spin_lock_irqsave(&scheduler->irq_lock, flags);
-
-	job = scheduler->running_job;
-
-	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-	return job;
-}
-
-struct rga_scheduler_t *rga_job_get_scheduler(struct rga_job *job)
-{
-	return job->scheduler;
-}
-
 static void rga_job_free(struct rga_job *job)
 {
 	free_page((unsigned long)job);
 }
 
-void rga_job_session_destroy(struct rga_session *session)
+static void rga_job_kref_release(struct kref *ref)
 {
-	struct rga_scheduler_t *scheduler = NULL;
-	struct rga_job *job_pos, *job_q;
-	int i;
+	struct rga_job *job;
 
-	unsigned long flags;
+	job = container_of(ref, struct rga_job, refcount);
 
-	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
-		scheduler = rga_drvdata->scheduler[i];
+	rga_job_free(job);
+}
 
-		spin_lock_irqsave(&scheduler->irq_lock, flags);
+static int rga_job_put(struct rga_job *job)
+{
+	return kref_put(&job->refcount, rga_job_kref_release);
+}
 
-		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
-			if (session == job_pos->session) {
-				list_del(&job_pos->head);
-
-				spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-
-				rga_job_free(job_pos);
-
-				spin_lock_irqsave(&scheduler->irq_lock, flags);
-			}
-		}
-
-		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
-	}
+static void rga_job_get(struct rga_job *job)
+{
+	kref_get(&job->refcount);
 }
 
 static int rga_job_cleanup(struct rga_job *job)
@@ -90,7 +44,7 @@
 		pr_err("(pid:%d) job clean use time = %lld\n", job->pid,
 			ktime_us_delta(ktime_get(), job->timestamp));
 
-	rga_job_free(job);
+	rga_job_put(job);
 
 	return 0;
 }
@@ -165,6 +119,7 @@
 		return NULL;
 
 	INIT_LIST_HEAD(&job->head);
+	kref_init(&job->refcount);
 
 	job->timestamp = ktime_get();
 	job->pid = current->pid;
@@ -232,16 +187,18 @@
 		return ret;
 	}
 
+	set_bit(RGA_JOB_STATE_RUNNING, &job->state);
+
 	/* for debug */
 	if (DEBUGGER_EN(MSG))
 		rga_job_dump_info(job);
 
 	return ret;
-
 }
 
-static void rga_job_next(struct rga_scheduler_t *scheduler)
+void rga_job_next(struct rga_scheduler_t *scheduler)
 {
+	int ret;
 	struct rga_job *job = NULL;
 	unsigned long flags;
 
@@ -261,51 +218,33 @@
 	scheduler->job_count--;
 
 	scheduler->running_job = job;
+	set_bit(RGA_JOB_STATE_PREPARE, &job->state);
+	rga_job_get(job);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 
-	job->ret = rga_job_run(job, scheduler);
+	ret = rga_job_run(job, scheduler);
 	/* If some error before hw run */
-	if (job->ret < 0) {
-		pr_err("some error on rga_job_run before hw start, %s(%d)\n",
-			__func__, __LINE__);
+	if (ret < 0) {
+		pr_err("some error on rga_job_run before hw start, %s(%d)\n", __func__, __LINE__);
 
 		spin_lock_irqsave(&scheduler->irq_lock, flags);
 
 		scheduler->running_job = NULL;
+		rga_job_put(job);
 
 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 
+		job->ret = ret;
 		rga_request_release_signal(scheduler, job);
 
 		goto next_job;
 	}
+
+	rga_job_put(job);
 }
 
-static void rga_job_finish_and_next(struct rga_scheduler_t *scheduler,
-		struct rga_job *job, int ret)
-{
-	ktime_t now;
-
-	job->ret = ret;
-
-	if (DEBUGGER_EN(TIME)) {
-		now = ktime_get();
-		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
-		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
-			ktime_us_delta(now, job->timestamp));
-	}
-
-	rga_mm_unmap_job_info(job);
-
-	rga_request_release_signal(scheduler, job);
-
-	rga_job_next(scheduler);
-
-	rga_power_disable(scheduler);
-}
-
-void rga_job_done(struct rga_scheduler_t *scheduler, int ret)
+struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler)
 {
 	struct rga_job *job;
 	unsigned long flags;
@@ -314,16 +253,34 @@
 	spin_lock_irqsave(&scheduler->irq_lock, flags);
 
 	job = scheduler->running_job;
+	if (job == NULL) {
+		pr_err("core[0x%x] running job has been cleaned up.\n", scheduler->core);
+
+		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+		return NULL;
+	}
 	scheduler->running_job = NULL;
 
 	scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
+	set_bit(RGA_JOB_STATE_DONE, &job->state);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+
+	if (scheduler->ops->read_back_reg)
+		scheduler->ops->read_back_reg(job, scheduler);
 
 	if (DEBUGGER_EN(DUMP_IMAGE))
 		rga_dump_job_image(job);
 
-	rga_job_finish_and_next(scheduler, job, ret);
+	if (DEBUGGER_EN(TIME)) {
+		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
+		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
+			ktime_us_delta(now, job->timestamp));
+	}
+
+	rga_mm_unmap_job_info(job);
+
+	return job;
 }
 
 static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler)
@@ -391,13 +348,20 @@
 	}
 
 	scheduler->job_count++;
+	set_bit(RGA_JOB_STATE_PENDING, &job->state);
 
 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
 }
 
 static struct rga_scheduler_t *rga_job_schedule(struct rga_job *job)
 {
+	int i;
 	struct rga_scheduler_t *scheduler = NULL;
+
+	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
+		scheduler = rga_drvdata->scheduler[i];
+		rga_job_scheduler_timeout_clean(scheduler);
+	}
 
 	if (rga_drvdata->num_of_scheduler > 1) {
 		job->core = rga_job_assign(job);
@@ -411,14 +375,12 @@
 		job->scheduler = rga_drvdata->scheduler[0];
 	}
 
-	scheduler = rga_job_get_scheduler(job);
+	scheduler = job->scheduler;
 	if (scheduler == NULL) {
 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
 		job->ret = -EFAULT;
 		return NULL;
 	}
-
-	rga_job_scheduler_timeout_clean(scheduler);
 
 	return scheduler;
 }
@@ -557,26 +519,13 @@
 	request->current_mm = NULL;
 }
 
-static int rga_request_alloc_release_fence(struct dma_fence **release_fence)
-{
-	struct dma_fence *fence;
-
-	fence = rga_dma_fence_alloc();
-	if (IS_ERR(fence)) {
-		pr_err("Can not alloc release fence!\n");
-		return IS_ERR(fence);
-	}
-
-	*release_fence = fence;
-
-	return rga_dma_fence_get_fd(fence);
-}
-
-static int rga_request_add_acquire_fence_callback(int acquire_fence_fd, void *private,
+static int rga_request_add_acquire_fence_callback(int acquire_fence_fd,
+						  struct rga_request *request,
 						  dma_fence_func_t cb_func)
 {
 	int ret;
 	struct dma_fence *acquire_fence = NULL;
+	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
 	if (DEBUGGER_EN(MSG))
 		pr_info("acquire_fence_fd = %d", acquire_fence_fd);
@@ -588,19 +537,38 @@
 		return -EINVAL;
 	}
 	/* close acquire fence fd */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+	close_fd(acquire_fence_fd);
+#else
 	ksys_close(acquire_fence_fd);
+#endif
 
 	ret = rga_dma_fence_get_status(acquire_fence);
-	if (ret == 0) {
-		ret = rga_dma_fence_add_callback(acquire_fence, cb_func, private);
-		if (ret < 0) {
-			if (ret == -ENOENT)
-				return 1;
+	if (ret < 0) {
+		pr_err("%s: Current acquire fence unexpectedly has error status before signal\n",
+		       __func__);
+		return ret;
+	} else if (ret > 0) {
+		/* has been signaled */
+		return ret;
+	}
 
+	/*
+	 * Ensure that the request will not be freed early when
+	 * the callback is called.
+	 */
+	mutex_lock(&request_manager->lock);
+	rga_request_get(request);
+	mutex_unlock(&request_manager->lock);
+
+	ret = rga_dma_fence_add_callback(acquire_fence, cb_func, (void *)request);
+	if (ret < 0) {
+		if (ret != -ENOENT)
 			pr_err("%s: failed to add fence callback\n", __func__);
-			return ret;
-		}
-	} else {
+
+		mutex_lock(&request_manager->lock);
+		rga_request_put(request);
+		mutex_unlock(&request_manager->lock);
 		return ret;
 	}
 
@@ -742,6 +710,70 @@
 	mutex_unlock(&request_manager->lock);
 }
 
+void rga_request_session_destroy_abort(struct rga_session *session)
+{
+	int request_id;
+	struct rga_request *request;
+	struct rga_pending_request_manager *request_manager;
+
+	request_manager = rga_drvdata->pend_request_manager;
+	if (request_manager == NULL) {
+		pr_err("rga_pending_request_manager is null!\n");
+		return;
+	}
+
+	mutex_lock(&request_manager->lock);
+
+	idr_for_each_entry(&request_manager->request_idr, request, request_id) {
+		if (session == request->session) {
+			pr_err("[tgid:%d pid:%d] destroy request[%d] when the user exits\n",
+			       session->tgid, current->pid, request->id);
+			rga_request_put(request);
+		}
+	}
+
+	mutex_unlock(&request_manager->lock);
+}
+
+static int rga_request_timeout_query_state(struct rga_request *request)
+{
+	int i;
+	unsigned long flags;
+	struct rga_scheduler_t *scheduler = NULL;
+	struct rga_job *job = NULL;
+
+	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
+		scheduler = rga_drvdata->scheduler[i];
+
+		spin_lock_irqsave(&scheduler->irq_lock, flags);
+
+		if (scheduler->running_job) {
+			job = scheduler->running_job;
+			if (request->id == job->request_id) {
+				if (test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+				    test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					return request->ret;
+				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+					   test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					pr_err("hardware has finished, but the software has timed out!\n");
+					return -EBUSY;
+				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
+					   !test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
+					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+					pr_err("hardware has timed out.\n");
+					return -EBUSY;
+				}
+			}
+		}
+
+		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
+	}
+
+	return request->ret;
+}
+
 static int rga_request_wait(struct rga_request *request)
 {
 	int left_time;
@@ -752,8 +784,7 @@
 
 	switch (left_time) {
 	case 0:
-		pr_err("%s timeout", __func__);
-		ret = -EBUSY;
+		ret = rga_request_timeout_query_state(request);
 		goto err_request_abort;
 	case -ERESTARTSYS:
 		ret = -ERESTARTSYS;
@@ -800,9 +831,15 @@
 						  struct dma_fence_cb *_waiter)
 {
 	struct rga_fence_waiter *waiter = (struct rga_fence_waiter *)_waiter;
+	struct rga_request *request = (struct rga_request *)waiter->private;
+	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
 
-	if (rga_request_commit((struct rga_request *)waiter->private))
+	if (rga_request_commit(request))
 		pr_err("rga request commit failed!\n");
+
+	mutex_lock(&request_manager->lock);
+	rga_request_put(request);
+	mutex_unlock(&request_manager->lock);
 
 	kfree(waiter);
 }
@@ -832,8 +869,6 @@
 	rga_request_get(request);
 	mutex_unlock(&request_manager->lock);
 
-	rga_job_cleanup(job);
-
 	spin_lock_irqsave(&request->lock, flags);
 
 	if (job->ret < 0) {
@@ -847,6 +882,8 @@
 	finished_count = request->finished_task_count;
 
 	spin_unlock_irqrestore(&request->lock, flags);
+
+	rga_job_cleanup(job);
 
 	if ((failed_count + finished_count) >= request->task_count) {
 		spin_lock_irqsave(&request->lock, flags);
@@ -1003,18 +1040,21 @@
 {
 	int ret = 0;
 	unsigned long flags;
+	struct dma_fence *release_fence;
 
 	spin_lock_irqsave(&request->lock, flags);
 
 	if (request->is_running) {
-		pr_err("can not re-config when request is running");
 		spin_unlock_irqrestore(&request->lock, flags);
+
+		pr_err("can not re-config when request is running\n");
 		return -EFAULT;
 	}
 
 	if (request->task_list == NULL) {
-		pr_err("can not find task list from id[%d]", request->id);
 		spin_unlock_irqrestore(&request->lock, flags);
+
+		pr_err("can not find task list from id[%d]\n", request->id);
 		return -EINVAL;
 	}
 
@@ -1026,46 +1066,71 @@
 
 	rga_request_get_current_mm(request);
 
+	/* Unlock after ensuring that the current request will not be resubmitted. */
 	spin_unlock_irqrestore(&request->lock, flags);
 
 	if (request->sync_mode == RGA_BLIT_ASYNC) {
-		ret = rga_request_alloc_release_fence(&request->release_fence);
-		if (ret < 0) {
-			pr_err("Failed to alloc release fence fd!\n");
-			return ret;
+		release_fence = rga_dma_fence_alloc();
+		if (IS_ERR(release_fence)) {
+			pr_err("Can not alloc release fence!\n");
+			ret = PTR_ERR(release_fence);
+			goto error_put_current_mm;
 		}
-		request->release_fence_fd = ret;
+		request->release_fence = release_fence;
 
 		if (request->acquire_fence_fd > 0) {
 			ret = rga_request_add_acquire_fence_callback(
-				request->acquire_fence_fd,
-				(void *)request,
+				request->acquire_fence_fd, request,
 				rga_request_acquire_fence_signaled_cb);
 			if (ret == 0) {
-				return ret;
-			} else if (ret == 1) {
+				/* acquire fence active */
+				goto export_release_fence_fd;
+			} else if (ret > 0) {
+				/* acquire fence has been signaled */
 				goto request_commit;
 			} else {
 				pr_err("Failed to add callback with acquire fence fd[%d]!\n",
 				       request->acquire_fence_fd);
-				goto error_release_fence_put;
+				goto err_put_release_fence;
 			}
 		}
-
 	}
 
 request_commit:
 	ret = rga_request_commit(request);
 	if (ret < 0) {
 		pr_err("rga request commit failed!\n");
-		goto error_release_fence_put;
+		goto err_put_release_fence;
+	}
+
+export_release_fence_fd:
+	if (request->release_fence != NULL) {
+		ret = rga_dma_fence_get_fd(request->release_fence);
+		if (ret < 0) {
+			pr_err("Failed to alloc release fence fd!\n");
+			rga_request_release_abort(request, ret);
+			return ret;
+		}
+
+		request->release_fence_fd = ret;
 	}
 
 	return 0;
 
-error_release_fence_put:
-	rga_dma_fence_put(request->release_fence);
-	request->release_fence = NULL;
+err_put_release_fence:
+	if (request->release_fence != NULL) {
+		rga_dma_fence_put(request->release_fence);
+		request->release_fence = NULL;
+	}
+
+error_put_current_mm:
+	spin_lock_irqsave(&request->lock, flags);
+
+	rga_request_put_current_mm(request);
+	request->is_running = false;
+
+	spin_unlock_irqrestore(&request->lock, flags);
+
 	return ret;
 }
 
@@ -1159,10 +1224,11 @@
 	request = container_of(ref, struct rga_request, refcount);
 
 	if (rga_dma_fence_get_status(request->release_fence) == 0)
-		rga_dma_fence_signal(request->release_fence, -EEXIST);
+		rga_dma_fence_signal(request->release_fence, -EFAULT);
 
 	spin_lock_irqsave(&request->lock, flags);
 
+	rga_request_put_current_mm(request);
 	rga_dma_fence_put(request->release_fence);
 
 	if (!request->is_running || request->is_done) {
@@ -1188,6 +1254,7 @@
 
 int rga_request_alloc(uint32_t flags, struct rga_session *session)
 {
+	int new_id;
 	struct rga_pending_request_manager *request_manager;
 	struct rga_request *request;
 
@@ -1218,17 +1285,17 @@
 	mutex_lock(&request_manager->lock);
 
 	idr_preload(GFP_KERNEL);
-	request->id = idr_alloc(&request_manager->request_idr, request, 1, 0, GFP_KERNEL);
+	new_id = idr_alloc_cyclic(&request_manager->request_idr, request, 1, 0, GFP_NOWAIT);
 	idr_preload_end();
-
-	if (request->id <= 0) {
-		pr_err("alloc request_id failed!\n");
+	if (new_id < 0) {
+		pr_err("request alloc id failed!\n");
 
 		mutex_unlock(&request_manager->lock);
 		kfree(request);
-		return -EFAULT;
+		return new_id;
 	}
 
+	request->id = new_id;
 	request_manager->request_count++;
 
 	mutex_unlock(&request_manager->lock);

--
Gitblit v1.6.2