From 7d07b3ae8ddad407913c5301877e694430a3263f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 23 Nov 2023 08:24:31 +0000
Subject: [PATCH] rga_mm: improve error reporting and buffer size checks
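
Improve error handling in rga_mm.c:

- report the failing page index and the number of bytes actually mapped
  when page-table walks or user-page lookups fail
- reject __get_free_pages() requests whose order reaches MAX_ORDER
- verify that imported dma buffers and handle-referenced buffers are
  large enough for the image they must hold
- unwind correctly when iommu mapping or MMU-base setup fails
- allocate buffer handles with idr_alloc_cyclic() to avoid immediate
  handle reuse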
---
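Notes:

The MAX_ORDER guards added below follow the buddy-allocator contract:
__get_free_pages() can only satisfy orders strictly below MAX_ORDER
(the exclusive limit on kernels of this vintage). A minimal sketch of
the pattern, using a hypothetical helper name that is not part of this
patch:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Allocate an array of page pointers, refusing over-order requests. */
	static struct page **rga_alloc_page_array(int count)
	{
		int order = get_order(count * sizeof(struct page *));

		/* The buddy allocator cannot serve order >= MAX_ORDER. */
		if (order >= MAX_ORDER)
			return NULL;

		return (struct page **)__get_free_pages(GFP_KERNEL, order);
	}

kvmalloc_array() could serve arbitrarily large tables by falling back
to vmalloc(), but that would change the free path from free_pages() to
kvfree(), so the patch keeps __get_free_pages() and rejects oversized
requests instead.

The handle allocator likewise moves from idr_alloc() to
idr_alloc_cyclic() so that freed handles are not immediately reused,
and to GFP_NOWAIT because idr_preload() disables preemption, so the
allocation inside the preload section must not sleep.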
kernel/drivers/video/rockchip/rga3/rga_mm.c | 279 ++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 184 insertions(+), 95 deletions(-)
diff --git a/kernel/drivers/video/rockchip/rga3/rga_mm.c b/kernel/drivers/video/rockchip/rga3/rga_mm.c
index 7d59472..cd461b5 100644
--- a/kernel/drivers/video/rockchip/rga3/rga_mm.c
+++ b/kernel/drivers/video/rockchip/rga3/rga_mm.c
@@ -53,14 +53,14 @@
for (i = 0; i < pageCount; i++) {
vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT);
if (!vma) {
- pr_err("failed to get vma\n");
+ pr_err("page[%d] failed to get vma\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
- pr_err("failed to get pgd\n");
+ pr_err("page[%d] failed to get pgd\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
@@ -71,7 +71,7 @@
*/
p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
- pr_err("failed to get p4d\n");
+ pr_err("page[%d] failed to get p4d\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
@@ -82,20 +82,20 @@
#endif
if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
- pr_err("failed to get pud\n");
+ pr_err("page[%d] failed to get pud\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
- pr_err("failed to get pmd\n");
+ pr_err("page[%d] failed to get pmd\n", i);
ret = RGA_OUT_OF_RESOURCES;
break;
}
pte = pte_offset_map_lock(current_mm, pmd,
(Memory + i) << PAGE_SHIFT, &ptl);
if (pte_none(*pte)) {
- pr_err("failed to get pte\n");
+ pr_err("page[%d] failed to get pte\n", i);
pte_unmap_unlock(pte, ptl);
ret = RGA_OUT_OF_RESOURCES;
break;
@@ -105,6 +105,10 @@
pages[i] = pfn_to_page(pfn);
pte_unmap_unlock(pte, ptl);
}
+
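+ /* On a partial failure, report how much was mapped before the walk stopped. */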
+ if (ret == RGA_OUT_OF_RESOURCES && i > 0)
+ pr_err("Only get buffer %d byte from vma, but current image required %d byte",
+ (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
return ret;
}
@@ -144,9 +148,9 @@
put_page(pages[i]);
ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm);
- if (ret < 0) {
- pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n",
- result, pageCount);
+ if (ret < 0 && result > 0) {
+ pr_err("Only get buffer %d byte from user pages, but current image required %d byte\n",
+ (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
}
}
@@ -177,9 +181,12 @@
}
/* get sg form pages. */
- ret = sg_alloc_table_from_pages(sgt, virt_addr->pages,
+ /* The iova requires at least page alignment, so the sgt cannot carry an offset. */
+ ret = sg_alloc_table_from_pages(sgt,
+ virt_addr->pages,
virt_addr->page_count,
- 0, virt_addr->size,
+ 0,
+ virt_addr->size,
GFP_KERNEL);
if (ret) {
pr_err("sg_alloc_table_from_pages failed");
@@ -245,21 +252,29 @@
if (!size) {
pr_err("failed to calculating buffer size! size = %ld, count = %d, offset = %ld\n",
size, count, (unsigned long)offset);
+ rga_dump_memory_parm(memory_parm);
return -EFAULT;
}
/* alloc pages and page_table */
order = get_order(count * sizeof(struct page *));
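+ /* __get_free_pages() can only satisfy orders below MAX_ORDER. */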
+ if (order >= MAX_ORDER) {
+ pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n",
+ order, MAX_ORDER);
+ return -ENOMEM;
+ }
+
pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
if (pages == NULL) {
- pr_err("%s can not alloc pages for pages\n", __func__);
+ pr_err("%s can not alloc pages for viraddr pages\n", __func__);
return -ENOMEM;
}
/* get pages from virtual address. */
ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm);
if (ret < 0) {
- pr_err("failed to get pages");
+ pr_err("failed to get pages from virtual adrees: 0x%lx\n",
+ (unsigned long)viraddr);
ret = -EINVAL;
goto out_free_pages;
} else if (ret > 0) {
@@ -301,7 +316,7 @@
if (scheduler->data->mmu == RGA_MMU &&
!(mm_flag & RGA_MEM_UNDER_4G)) {
- pr_err("%s unsupported Memory larger than 4G!\n",
+ pr_err("%s unsupported memory larger than 4G!\n",
rga_get_mmu_type_str(scheduler->data->mmu));
return false;
}
@@ -358,6 +373,7 @@
struct rga_job *job)
{
int ret;
+ int ex_buffer_size;
uint32_t mm_flag = 0;
phys_addr_t phys_addr = 0;
struct rga_dma_buffer *buffer;
@@ -369,6 +385,19 @@
if (scheduler == NULL) {
pr_err("Invalid scheduler device!\n");
return -EINVAL;
+ }
+
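+ /* Expected size: the explicit size if given, else derived from the image geometry. */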
+ if (external_buffer->memory_parm.size)
+ ex_buffer_size = external_buffer->memory_parm.size;
+ else
+ ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width,
+ external_buffer->memory_parm.height,
+ external_buffer->memory_parm.format,
+ NULL, NULL, NULL);
+ if (ex_buffer_size <= 0) {
+ pr_err("failed to calculating buffer size!\n");
+ rga_dump_memory_parm(&external_buffer->memory_parm);
+ return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size;
}
/*
@@ -402,6 +431,15 @@
pr_err("%s core[%d] map dma buffer error!\n",
__func__, scheduler->core);
goto free_buffer;
+ }
+
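+ /* Reject dma buffers smaller than the image they are expected to hold. */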
+ if (buffer->size < ex_buffer_size) {
+ pr_err("Only get buffer %ld byte from %s = 0x%lx, but current image required %d byte\n",
+ buffer->size, rga_get_memory_type_str(external_buffer->type),
+ (unsigned long)external_buffer->memory, ex_buffer_size);
+ rga_dump_memory_parm(&external_buffer->memory_parm);
+ ret = -EINVAL;
+ goto unmap_buffer;
}
buffer->scheduler = scheduler;
@@ -576,8 +614,8 @@
if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
break;
- pr_err("Current RGA mmu[%d] cannot support virtual address!\n",
- scheduler->data->mmu);
+ pr_err("Current %s[%d] cannot support virtual address!\n",
+ rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
goto free_dma_buffer;
}
@@ -589,7 +627,7 @@
internal_buffer->virt_addr = virt_addr;
internal_buffer->dma_buffer = buffer;
internal_buffer->mm_flag = mm_flag;
- internal_buffer->phys_addr = phys_addr ? phys_addr : 0;
+ internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0;
return 0;
@@ -649,7 +687,8 @@
internal_buffer->memory_parm.format,
NULL, NULL, NULL);
if (buffer_size <= 0) {
- pr_err("Fault to get phys addr size!\n");
+ pr_err("Failed to get phys addr size!\n");
+ rga_dump_memory_parm(&internal_buffer->memory_parm);
return buffer_size == 0 ? -EINVAL : buffer_size;
}
@@ -674,7 +713,7 @@
ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev);
if (ret < 0) {
pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core);
- return ret;
+ goto free_dma_buffer;
}
}
@@ -686,6 +725,11 @@
internal_buffer->dma_buffer = buffer;
return 0;
+
+free_dma_buffer:
+ kfree(buffer);
+
+ return ret;
}
static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer)
@@ -738,7 +782,7 @@
ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag);
if (ret < 0) {
- pr_err("%s iommu_map virtual address error!\n", __func__);
+ pr_err("%s map virtual address error!\n", __func__);
return ret;
}
@@ -751,7 +795,7 @@
ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job);
if (ret < 0) {
- pr_err("%s iommu_map physical address error!\n", __func__);
+ pr_err("%s map physical address error!\n", __func__);
return ret;
}
@@ -1130,9 +1174,15 @@
if (job->flags & RGA_JOB_USE_HANDLE) {
order = get_order(page_count * sizeof(uint32_t *));
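+ /* Guard the page_table allocation against orders at or above MAX_ORDER. */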
+ if (order >= MAX_ORDER) {
+ pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+ order, MAX_ORDER);
+ return -ENOMEM;
+ }
+
page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (page_table == NULL) {
- pr_err("%s can not alloc pages for pages, order = %d\n",
+ pr_err("%s can not alloc pages for page_table, order = %d\n",
__func__, order);
return -ENOMEM;
}
@@ -1189,9 +1239,15 @@
if (job->flags & RGA_JOB_USE_HANDLE) {
order = get_order(page_count * sizeof(uint32_t *));
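+ /* Guard the page_table allocation against orders at or above MAX_ORDER. */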
+ if (order >= MAX_ORDER) {
+ pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
+ order, MAX_ORDER);
+ return -ENOMEM;
+ }
+
page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (page_table == NULL) {
- pr_err("%s can not alloc pages for pages, order = %d\n",
+ pr_err("%s can not alloc pages for page_table, order = %d\n",
__func__, order);
return -ENOMEM;
}
@@ -1334,6 +1390,7 @@
uint64_t handle,
uint64_t *channel_addr,
struct rga_internal_buffer **buf,
+ int require_size,
enum dma_data_direction dir)
{
int ret = 0;
@@ -1369,6 +1426,14 @@
return ret;
}
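+ /* The buffer behind the handle must cover the size required by this channel. */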
+ if (internal_buffer->size < require_size) {
+ ret = -EINVAL;
+ pr_err("Only get buffer %ld byte from handle[%ld], but current required %d byte\n",
+ internal_buffer->size, (unsigned long)handle, require_size);
+
+ goto put_internal_buffer;
+ }
+
if (internal_buffer->type == RGA_VIRTUAL_ADDRESS) {
/*
* Some userspace virtual addresses do not have an
@@ -1378,11 +1443,19 @@
ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir);
if (ret < 0) {
pr_err("sync sgt for device error!\n");
- return ret;
+ goto put_internal_buffer;
}
}
return 0;
+
+put_internal_buffer:
+ mutex_lock(&mm->lock);
+ kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
+ mutex_unlock(&mm->lock);
+
+ return ret;
}
static void rga_mm_put_buffer(struct rga_mm *mm,
@@ -1397,72 +1470,6 @@
mutex_lock(&mm->lock);
kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
mutex_unlock(&mm->lock);
-}
-
-static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
- struct rga_job *job,
- struct rga_img_info_t *img,
- struct rga_job_buffer *job_buf,
- enum dma_data_direction dir)
-{
- int ret = 0;
- int handle = 0;
-
- /* using third-address */
- if (img->uv_addr > 0) {
- handle = img->yrgb_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
- &job_buf->y_addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
- return ret;
- }
- }
-
- handle = img->uv_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
- &job_buf->uv_addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src uv address info!\n", handle);
- return ret;
- }
- }
-
- handle = img->v_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
- &job_buf->v_addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src uv address info!\n", handle);
- return ret;
- }
- }
- } else {
- handle = img->yrgb_addr;
- if (handle > 0) {
- ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
- &job_buf->addr, dir);
- if (ret < 0) {
- pr_err("handle[%d] Can't get src y/rgb address info!\n", handle);
- return ret;
- }
- }
-
- rga_convert_addr(img, false);
- }
-
- if (job->scheduler->data->mmu == RGA_MMU &&
- rga_mm_is_need_mmu(job, job_buf->addr)) {
- ret = rga_mm_set_mmu_base(job, img, job_buf);
- if (ret < 0) {
- pr_err("Can't set RGA2 MMU_BASE from handle!\n");
- return ret;
- }
- }
-
- return 0;
}
static void rga_mm_put_channel_handle_info(struct rga_mm *mm,
@@ -1481,6 +1488,83 @@
free_pages((unsigned long)job_buf->page_table, job_buf->order);
}
+static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
+ struct rga_job *job,
+ struct rga_img_info_t *img,
+ struct rga_job_buffer *job_buf,
+ enum dma_data_direction dir)
+{
+ int ret = 0;
+ int handle = 0;
+ int img_size, yrgb_size, uv_size, v_size;
+
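+ /* Pre-compute per-plane sizes so each handle can be validated against its plane. */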
+ img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format,
+ &yrgb_size, &uv_size, &v_size);
+ if (img_size <= 0) {
+ pr_err("Image size cal error! width = %d, height = %d, format = %s\n",
+ img->vir_w, img->vir_h, rga_get_format_name(img->format));
+ return -EINVAL;
+ }
+
+ /* using third-address */
+ if (img->uv_addr > 0) {
+ handle = img->yrgb_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+ &job_buf->y_addr, yrgb_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+ return ret;
+ }
+ }
+
+ handle = img->uv_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
+ &job_buf->uv_addr, uv_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get uv address info!\n", handle);
+ return ret;
+ }
+ }
+
+ handle = img->v_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
+ &job_buf->v_addr, v_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get uv address info!\n", handle);
+ return ret;
+ }
+ }
+ } else {
+ handle = img->yrgb_addr;
+ if (handle > 0) {
+ ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
+ &job_buf->addr, img_size, dir);
+ if (ret < 0) {
+ pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
+ return ret;
+ }
+ }
+
+ rga_convert_addr(img, false);
+ }
+
+ if (job->scheduler->data->mmu == RGA_MMU &&
+ rga_mm_is_need_mmu(job, job_buf->addr)) {
+ ret = rga_mm_set_mmu_base(job, img, job_buf);
+ if (ret < 0) {
+ pr_err("Can't set RGA2 MMU_BASE from handle!\n");
+
+ rga_mm_put_channel_handle_info(mm, job, job_buf, dir);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int rga_mm_get_handle_info(struct rga_job *job)
{
int ret = 0;
@@ -1496,7 +1580,7 @@
&job->src_buffer,
DMA_TO_DEVICE);
if (ret < 0) {
- pr_err("Can't get src buffer third info!\n");
+ pr_err("Can't get src buffer info from handle!\n");
return ret;
}
}
@@ -1506,7 +1590,7 @@
&job->dst_buffer,
DMA_TO_DEVICE);
if (ret < 0) {
- pr_err("Can't get dst buffer third info!\n");
+ pr_err("Can't get dst buffer info from handle!\n");
return ret;
}
}
@@ -1528,7 +1612,7 @@
DMA_BIDIRECTIONAL);
}
if (ret < 0) {
- pr_err("Can't get pat buffer third info!\n");
+ pr_err("Can't get pat buffer info from handle!\n");
return ret;
}
}
@@ -1869,7 +1953,7 @@
uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
struct rga_session *session)
{
- int ret = 0;
+ int ret = 0, new_id;
struct rga_mm *mm;
struct rga_internal_buffer *internal_buffer;
@@ -1911,9 +1995,14 @@
* allocation under our spinlock.
*/
idr_preload(GFP_KERNEL);
- internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL);
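+ /* idr_preload() disables preemption, so the allocation itself must use GFP_NOWAIT. */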
+ new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT);
idr_preload_end();
+ if (new_id < 0) {
+ pr_err("internal_buffer alloc id failed!\n");
+ goto FREE_INTERNAL_BUFFER;
+ }
+ internal_buffer->handle = new_id;
mm->buffer_count++;
if (DEBUGGER_EN(MM)) {
--
Gitblit v1.6.2