From 072de836f53be56a70cecf70b43ae43b7ce17376 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 10:08:36 +0000
Subject: [PATCH] video: rockchip: rga3: fix cache maintenance and validate channel handles
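
Some userspace virtual addresses provide no interface for flushing the
cache, so tag such buffers with RGA_MEM_FORCE_FLUSH_CACHE and key all
cache maintenance off that flag instead of the buffer type. Sync
physically contiguous buffers with dma_sync_single_for_device()/
dma_sync_single_for_cpu() instead of walking a scatter-gather table,
and reject jobs whose render mode is missing a required channel buffer
handle.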
---
kernel/drivers/video/rockchip/rga3/rga_mm.c | 110 +++++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 85 insertions(+), 25 deletions(-)
diff --git a/kernel/drivers/video/rockchip/rga3/rga_mm.c b/kernel/drivers/video/rockchip/rga3/rga_mm.c
index cd461b5..d261833 100644
--- a/kernel/drivers/video/rockchip/rga3/rga_mm.c
+++ b/kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -577,6 +577,13 @@
mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
}
+ /*
+ * Some userspace virtual addresses do not have an
+ * interface for flushing the cache, so it is mandatory
+ * to flush the cache when the virtual address is used.
+ */
+ mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
+
if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
scheduler->core, mm_flag);
@@ -1295,13 +1302,6 @@
struct sg_table *sgt;
struct rga_scheduler_t *scheduler;
- sgt = rga_mm_lookup_sgt(buffer);
- if (sgt == NULL) {
- pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
- __func__, __LINE__, job->core);
- return -EINVAL;
- }
-
scheduler = buffer->dma_buffer->scheduler;
if (scheduler == NULL) {
pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1309,7 +1309,19 @@
__func__, __LINE__, job->core);
return -EFAULT;
}
- dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
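+ /* Physically contiguous buffers are synced in one range via their physical address. */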
+ if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+ dma_sync_single_for_device(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+ } else {
+ sgt = rga_mm_lookup_sgt(buffer);
+ if (sgt == NULL) {
+ pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+ __func__, __LINE__, job->core);
+ return -EINVAL;
+ }
+
+ dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+ }
return 0;
}
@@ -1321,13 +1333,6 @@
struct sg_table *sgt;
struct rga_scheduler_t *scheduler;
- sgt = rga_mm_lookup_sgt(buffer);
- if (sgt == NULL) {
- pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
- __func__, __LINE__, job->core);
- return -EINVAL;
- }
-
scheduler = buffer->dma_buffer->scheduler;
if (scheduler == NULL) {
pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
@@ -1335,7 +1340,19 @@
__func__, __LINE__, job->core);
return -EFAULT;
}
- dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
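+ /* As in the device-sync path, contiguous buffers are synced via their physical address. */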
+ if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
+ dma_sync_single_for_cpu(scheduler->dev, buffer->phys_addr, buffer->size, dir);
+ } else {
+ sgt = rga_mm_lookup_sgt(buffer);
+ if (sgt == NULL) {
+ pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
+ __func__, __LINE__, job->core);
+ return -EINVAL;
+ }
+
+ dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
+ }
return 0;
}
@@ -1434,7 +1451,7 @@
goto put_internal_buffer;
}
- if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
+ if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
/*
* Some userspace virtual addresses do not have an
* interface for flushing the cache, so it is mandatory
@@ -1463,7 +1480,7 @@
struct rga_internal_buffer *internal_buffer,
enum dma_data_direction dir)
{
- if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+ if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
pr_err("sync sgt for cpu error!\n");
@@ -1574,6 +1591,54 @@
req = &job->rga_command_base;
mm = rga_drvdata->mm;
+
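+ /* Validate that each render mode carries the channel buffer handles it requires. */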
+ switch (req->render_mode) {
+ case BITBLT_MODE:
+ case COLOR_PALETTE_MODE:
+ if (unlikely(req->src.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] src0 channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->src.yrgb_addr);
+ return -EINVAL;
+ }
+
+ if (unlikely(req->dst.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->dst.yrgb_addr);
+ return -EINVAL;
+ }
+
+ if (req->bsfilter_flag) {
+ if (unlikely(req->pat.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] src1/pat channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->pat.yrgb_addr);
+ return -EINVAL;
+ }
+ }
+
+ break;
+ case COLOR_FILL_MODE:
+ if (unlikely(req->dst.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] dst channel handle[%ld] must is valid!",
+ req->render_mode, (unsigned long)req->dst.yrgb_addr);
+ return -EINVAL;
+ }
+
+ break;
+
+ case UPDATE_PALETTE_TABLE_MODE:
+ case UPDATE_PATTEN_BUF_MODE:
+ if (unlikely(req->pat.yrgb_addr <= 0)) {
+ pr_err("render_mode[0x%x] lut/pat channel handle[%ld] must is valid!, req->render_mode",
+ req->render_mode, (unsigned long)req->pat.yrgb_addr);
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ pr_err("%s, unknown render mode!\n", __func__);
+ break;
+ }
if (likely(req->src.yrgb_addr > 0)) {
ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
@@ -1765,7 +1830,7 @@
struct rga_job_buffer *job_buffer,
enum dma_data_direction dir)
{
- if (job_buffer->addr->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE)
+ if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
pr_err("sync sgt for cpu error!\n");
@@ -1802,12 +1867,7 @@
goto error_unmap_buffer;
}
- if (buffer->type == RGA_VIRTUAL_ADDRESS) {
- /*
- * Some userspace virtual addresses do not have an
- * interface for flushing the cache, so it is mandatory
- * to flush the cache when the virtual address is used.
- */
+ if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
if (ret < 0) {
pr_err("sync sgt for device error!\n");
--
Gitblit v1.6.2