From e3e12f52b214121840b44c91de5b3e5af5d3eb84 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 06 Nov 2023 03:04:41 +0000
Subject: [PATCH] rga3: guard page-table allocation order against MAX_ORDER and add IOMMU fault handler

---
 kernel/drivers/video/rockchip/rga3/rga_iommu.c |   49 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 48 insertions(+), 1 deletion(-)

diff --git a/kernel/drivers/video/rockchip/rga3/rga_iommu.c b/kernel/drivers/video/rockchip/rga3/rga_iommu.c
index 3b7a4ef..6ef9cbc 100644
--- a/kernel/drivers/video/rockchip/rga3/rga_iommu.c
+++ b/kernel/drivers/video/rockchip/rga3/rga_iommu.c
@@ -5,7 +5,7 @@
  * Author: Huang Lee <Putin.li@rock-chips.com>
  */
 
-#define pr_fmt(fmt) "rga2_mmu: " fmt
+#define pr_fmt(fmt) "rga_iommu: " fmt
 
 #include "rga_iommu.h"
 #include "rga_dma_buf.h"
@@ -170,6 +170,12 @@
 	 * size * channel_num * address_size
 	 */
 	order = get_order(size * 3 * sizeof(*mmu_base->buf_virtual));
+	if (order >= MAX_ORDER) {
+		pr_err("Can not alloc pages with order[%d] for mmu_page_table, max_order = %d\n",
+		       order, MAX_ORDER);
+		goto err_free_mmu_base;
+	}
+
 	mmu_base->buf_virtual = (uint32_t *) __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 	if (mmu_base->buf_virtual == NULL) {
 		pr_err("Can not alloc pages for mmu_page_table\n");
@@ -178,6 +184,12 @@
 	mmu_base->buf_order = order;
 
 	order = get_order(size * sizeof(*mmu_base->pages));
+	if (order >= MAX_ORDER) {
+		pr_err("Can not alloc pages with order[%d] for mmu_base->pages, max_order = %d\n",
+		       order, MAX_ORDER);
+		goto err_free_buf_virtual;
+	}
+
 	mmu_base->pages = (struct page **)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
 	if (mmu_base->pages == NULL) {
 		pr_err("Can not alloc pages for mmu_base->pages\n");
@@ -219,6 +231,38 @@
 
 	kfree(base);
 	*mmu_base = NULL;
+}
+
+static int rga_iommu_intr_fault_handler(struct iommu_domain *iommu, struct device *iommu_dev,
+					unsigned long iova, int status, void *arg)
+{
+	struct rga_scheduler_t *scheduler = (struct rga_scheduler_t *)arg;
+	struct rga_job *job = scheduler->running_job;
+
+	if (job == NULL)
+		return 0;
+
+	pr_err("IOMMU intr fault, IOVA[0x%lx], STATUS[0x%x]\n", iova, status);
+	if (scheduler->ops->irq)
+		scheduler->ops->irq(scheduler);
+
+	/* An IOMMU fault does not halt the RGA core itself, so soft-reset it once on the first fault. */
+	if (!test_bit(RGA_JOB_STATE_INTR_ERR, &job->state)) {
+		set_bit(RGA_JOB_STATE_INTR_ERR, &job->state);
+		scheduler->ops->soft_reset(scheduler);
+	}
+
+	if (status & RGA_IOMMU_IRQ_PAGE_FAULT) {
+		pr_err("RGA IOMMU: page fault! Please check the memory size.\n");
+		job->ret = -EACCES;
+	} else if (status & RGA_IOMMU_IRQ_BUS_ERROR) {
+		pr_err("RGA IOMMU: bus error! Please check if the memory is invalid or has been freed.\n");
+		job->ret = -EACCES;
+	} else {
+		pr_err("RGA IOMMU: Wrong IOMMU interrupt signal!\n");
+	}
+
+	return 0;
 }
 
 int rga_iommu_detach(struct rga_iommu_info *info)
@@ -306,6 +350,9 @@
 			if (main_iommu == NULL) {
 				main_iommu = scheduler->iommu_info;
 				main_iommu_index = i;
+				iommu_set_fault_handler(main_iommu->domain,
+							rga_iommu_intr_fault_handler,
+							(void *)scheduler);
 			} else {
 				scheduler->iommu_info->domain = main_iommu->domain;
 				scheduler->iommu_info->default_dev = main_iommu->default_dev;

--
Gitblit v1.6.2