From 7d07b3ae8ddad407913c5301877e694430a3263f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 23 Nov 2023 08:24:31 +0000
Subject: [PATCH] add build kerneldeb
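Adapt rga_dma_buf.c to dma-buf and IOVA API changes in newer kernels:

- cast domain->iova_cookie when assigning it to struct rga_iommu_dma_cookie *
- round the IOVA length up to a power of two only on kernels older than 6.1
- clamp the alloc_iova_fast() limit to iovad->end_pfn only on kernels between
  4.19.111 and 5.4, and pass dma_limit >> shift directly otherwise
- use the iosys_map based dma_buf_vmap()/dma_buf_vunmap() interface on
  kernels 6.1 and newer
- propagate the PTR_ERR() codes from dma_buf_get()/dma_buf_attach()/
  dma_buf_map_attachment() and include them in the error messages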
---
kernel/drivers/video/rockchip/rga3/rga_dma_buf.c | 60 ++++++++++++++++++++++++++++++++++++------------------------
1 file changed, 36 insertions(+), 24 deletions(-)
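For reference, a minimal sketch (assuming a >= 6.1 kernel tree) of the
iosys_map-based vmap interface that the hunks below switch to; the
rga_example_* helpers are illustrative only and not part of the driver:

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>

/* Map a dma-buf into the kernel and return its vaddr, or NULL on failure. */
static void *rga_example_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	/* On >= 6.1, dma_buf_vmap() fills *map and returns 0 on success. */
	if (dma_buf_vmap(buf, map))
		return NULL;

	/* map->vaddr holds the mapping for system-memory buffers. */
	return map->vaddr;
}

static void rga_example_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	/* The matching unmap takes the same iosys_map descriptor. */
	dma_buf_vunmap(buf, map);
}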
diff --git a/kernel/drivers/video/rockchip/rga3/rga_dma_buf.c b/kernel/drivers/video/rockchip/rga3/rga_dma_buf.c
index b5b0dd0..db34db5 100644
--- a/kernel/drivers/video/rockchip/rga3/rga_dma_buf.c
+++ b/kernel/drivers/video/rockchip/rga3/rga_dma_buf.c
@@ -203,15 +203,14 @@
size_t size, u64 dma_limit,
struct device *dev)
{
- struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long shift, iova_len, iova = 0;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
- dma_addr_t limit;
-#endif
shift = iova_shift(iovad);
iova_len = size >> shift;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
/*
* Freeing non-power-of-two-sized allocations back into the IOVA caches
* will come back to bite us badly, so we have to waste a bit of space
@@ -220,6 +219,7 @@
*/
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
+#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
@@ -231,12 +231,13 @@
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
- iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 19, 111) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
+ iova = alloc_iova_fast(iovad, iova_len,
+ min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn),
+ true);
#else
- limit = min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn);
-
- iova = alloc_iova_fast(iovad, iova_len, limit, true);
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
#endif
return (dma_addr_t)iova << shift;
@@ -245,7 +246,7 @@
static void rga_iommu_dma_free_iova(struct iommu_domain *domain,
dma_addr_t iova, size_t size)
{
- struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct rga_iommu_dma_cookie *cookie = (void *)domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
@@ -284,7 +285,7 @@
}
domain = rga_iommu_get_dma_domain(rga_dev);
- cookie = domain->iova_cookie;
+ cookie = (void *)domain->iova_cookie;
iovad = &cookie->iovad;
align_size = iova_align(iovad, size);
@@ -329,7 +330,7 @@
}
domain = rga_iommu_get_dma_domain(rga_dev);
- cookie = domain->iova_cookie;
+ cookie = (void *)domain->iova_cookie;
iovad = &cookie->iovad;
align_size = iova_align(iovad, size);
@@ -393,12 +394,20 @@
{
int ret = 0;
void *vaddr;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+ struct iosys_map map;
+#endif
struct dma_buf *dma_buf;
dma_buf = rga_dma_buffer->dma_buf;
if (!IS_ERR_OR_NULL(dma_buf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+ ret = dma_buf_vmap(dma_buf, &map);
+ vaddr = ret ? NULL : map.vaddr;
+#else
vaddr = dma_buf_vmap(dma_buf);
+#endif
if (vaddr) {
ret = rga_virtual_memory_check(vaddr, img->vir_w,
img->vir_h, img->format, img->yrgb_addr);
@@ -406,8 +415,11 @@
pr_err("can't vmap the dma buffer!\n");
return -EINVAL;
}
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+ dma_buf_vunmap(dma_buf, &map);
+#else
dma_buf_vunmap(dma_buf, vaddr);
+#endif
}
return ret;
@@ -424,21 +436,21 @@
if (dma_buf != NULL) {
get_dma_buf(dma_buf);
} else {
- pr_err("dma_buf is Invalid[%p]\n", dma_buf);
+ pr_err("dma_buf is invalid[%p]\n", dma_buf);
return -EINVAL;
}
attach = dma_buf_attach(dma_buf, rga_dev);
if (IS_ERR(attach)) {
- pr_err("Failed to attach dma_buf\n");
- ret = -EINVAL;
+ ret = PTR_ERR(attach);
+ pr_err("Failed to attach dma_buf, ret[%d]\n", ret);
goto err_get_attach;
}
sgt = dma_buf_map_attachment(attach, dir);
if (IS_ERR(sgt)) {
- pr_err("Failed to map src attachment\n");
- ret = -EINVAL;
+ ret = PTR_ERR(sgt);
+ pr_err("Failed to map attachment, ret[%d]\n", ret);
goto err_get_sgt;
}
@@ -474,22 +486,22 @@
dma_buf = dma_buf_get(fd);
if (IS_ERR(dma_buf)) {
- pr_err("dma_buf_get fail fd[%d]\n", fd);
- ret = -EINVAL;
+ ret = PTR_ERR(dma_buf);
+ pr_err("Fail to get dma_buf from fd[%d], ret[%d]\n", fd, ret);
return ret;
}
attach = dma_buf_attach(dma_buf, rga_dev);
if (IS_ERR(attach)) {
- pr_err("Failed to attach dma_buf\n");
- ret = -EINVAL;
+ ret = PTR_ERR(attach);
+ pr_err("Failed to attach dma_buf, ret[%d]\n", ret);
goto err_get_attach;
}
sgt = dma_buf_map_attachment(attach, dir);
if (IS_ERR(sgt)) {
- pr_err("Failed to map src attachment\n");
- ret = -EINVAL;
+ ret = PTR_ERR(sgt);
+ pr_err("Failed to map attachment, ret[%d]\n", ret);
goto err_get_sgt;
}
--
Gitblit v1.6.2