From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

Make rknpu_gem.c build and behave the same on the vendor 5.10 kernel
and on 6.1+:

- drop <linux/dma-iommu.h>, which is no longer a public header, and
  cast domain->iova_cookie where the cookie is still needed
- flush the IOTLB once a buffer has been mapped
- behind LINUX_VERSION_CODE checks, install a drm_gem_object_funcs
  table, switch to drm_prime_sg_to_page_array(), and move the prime
  vmap/vunmap hooks to struct iosys_map; rknpu_gem_mmap_obj() loses
  its static so it can back the new .mmap hook
- use __GFP_DMA32 (and clear __GFP_HIGHMEM) when the IOMMU is off, the
  NPU dma_mask is at most 32 bits, or the buffer sets RKNPU_MEM_DMA32,
  replacing the old inverted RKNPU_MEM_NON_DMA32 test
- mark user mappings VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO
- compile out the body of the cache-type sync helper on 6.1+
- sync only the requested byte range via
  dma_sync_single_range_for_{device,cpu}() on the fake DMA device
  instead of whole scatterlist entries, and bail out if fake_dev is
  missing
---
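Notes for reviewers (git am drops everything between the three dashes
and the first diff line):

The 32-bit allocation rule now appears verbatim in two hunks. A sketch
of a helper that would keep the two copies in sync (the helper name is
hypothetical; rknpu_device and its config are the driver's own types):

	/* 32-bit addressable memory is required when the NPU cannot
	 * remap (no IOMMU), cannot address past 4 GiB, or the caller
	 * explicitly asked for DMA32 memory. */
	static bool rknpu_needs_dma32(struct rknpu_device *rknpu_dev,
				      unsigned int flags)
	{
		return !rknpu_dev->iommu_en ||
		       rknpu_dev->config->dma_mask <= DMA_BIT_MASK(32) ||
		       (flags & RKNPU_MEM_DMA32);
	}

Both gfp-mask hunks would then reduce to:

	if (rknpu_needs_dma32(rknpu_dev, rknpu_obj->flags)) {
		gfp_mask &= ~__GFP_HIGHMEM;
		gfp_mask |= __GFP_DMA32;
	}
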
 kernel/drivers/rknpu/rknpu_gem.c |  114 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 96 insertions(+), 18 deletions(-)
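
The first hunk flushes the IOTLB on the result of
iommu_get_domain_for_dev() without checking it. That call returns NULL
when no translation domain is attached, and iommu_flush_iotlb_all()
dereferences its argument, so if this allocation path can run with
rknpu_dev->iommu_en false (an assumption worth verifying), a guard
along these lines is safer:

	struct iommu_domain *domain = iommu_get_domain_for_dev(drm->dev);

	/* flush only when the NPU actually sits behind an IOMMU */
	if (domain)
		iommu_flush_iotlb_all(domain);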

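The last hunk turns whole-entry dma_sync_sg_*() calls into byte-exact
dma_sync_single_range_for_*() windows. A self-contained sketch of that
arithmetic (the helper is hypothetical; DMA_TO_DEVICE stands in for the
direction picked from args->flags, and sg_phys() doubles as the DMA
address because fake_dev is assumed to be direct-mapped, as in the
patch):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Sync the byte window [offset, offset + length) of an sg list.
	 * Entries that end before the window are skipped; each sync is
	 * clamped to the bytes the entry contributes. With entry sizes
	 * {4K, 8K} and offset = 6K, the first entry is skipped and the
	 * second is synced starting at sg_offset = 2K. */
	static void rknpu_sync_window_sketch(struct device *dev,
					     struct sg_table *sgt,
					     unsigned long offset,
					     unsigned long length)
	{
		struct scatterlist *sg;
		unsigned long len = 0, sg_left, sg_offset, size;
		int i;

		for_each_sgtable_sg(sgt, sg, i) {
			len += sg->length;      /* bytes seen so far */
			if (len <= offset)
				continue;       /* ends before window */

			sg_left = len - offset; /* overlap in this sg */
			sg_offset = sg->length - sg_left;
			size = (length < sg_left) ? length : sg_left;

			dma_sync_single_range_for_device(dev, sg_phys(sg),
							 sg_offset, size,
							 DMA_TO_DEVICE);

			offset += size;
			length -= size;
			if (length == 0)
				break;
		}
	}
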
diff --git a/kernel/drivers/rknpu/rknpu_gem.c b/kernel/drivers/rknpu/rknpu_gem.c
index f97be2b..415d3a4 100644
--- a/kernel/drivers/rknpu/rknpu_gem.c
+++ b/kernel/drivers/rknpu/rknpu_gem.c
@@ -13,7 +13,6 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/iommu.h>
-#include <linux/dma-iommu.h>
 #include <linux/pfn_t.h>
 #include <linux/version.h>
 #include <asm/cacheflush.h>
@@ -68,6 +67,7 @@
 			      rknpu_obj->size);
 		goto free_sgt;
 	}
+	iommu_flush_iotlb_all(iommu_get_domain_for_dev(drm->dev));
 
 	if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING) {
 		rknpu_obj->cookie = vmap(rknpu_obj->pages, rknpu_obj->num_pages,
@@ -182,7 +182,9 @@
 	if (rknpu_obj->flags & RKNPU_MEM_ZEROING)
 		gfp_mask |= __GFP_ZERO;
 
-	if (!(rknpu_obj->flags & RKNPU_MEM_NON_DMA32)) {
+	if (!rknpu_dev->iommu_en ||
+	    rknpu_dev->config->dma_mask <= DMA_BIT_MASK(32) ||
+	    (rknpu_obj->flags & RKNPU_MEM_DMA32)) {
 		gfp_mask &= ~__GFP_HIGHMEM;
 		gfp_mask |= __GFP_DMA32;
 	}
@@ -253,10 +255,15 @@
 			  i, &s->dma_address, s->length);
 	}
 
-	if (drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL,
-					     nr_pages)) {
-		LOG_DEV_ERROR(drm->dev, "invalid sgtable.\n");
-		ret = -EINVAL;
+#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL,
+					       nr_pages);
+#else
+	ret = drm_prime_sg_to_page_array(sgt, rknpu_obj->pages, nr_pages);
+#endif
+
+	if (ret < 0) {
+		LOG_DEV_ERROR(drm->dev, "invalid sgtable, ret: %d\n", ret);
 		goto err_free_sg_table;
 	}
 
@@ -335,9 +342,28 @@
 	return drm_gem_handle_delete(file_priv, handle);
 }
 
+#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE
+static const struct vm_operations_struct vm_ops = {
+	.fault = rknpu_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs rknpu_gem_object_funcs = {
+	.free = rknpu_gem_free_object,
+	.export = drm_gem_prime_export,
+	.get_sg_table = rknpu_gem_prime_get_sg_table,
+	.vmap = rknpu_gem_prime_vmap,
+	.vunmap = rknpu_gem_prime_vunmap,
+	.mmap = rknpu_gem_mmap_obj,
+	.vm_ops = &vm_ops,
+};
+#endif
+
 static struct rknpu_gem_object *rknpu_gem_init(struct drm_device *drm,
 					       unsigned long size)
 {
+	struct rknpu_device *rknpu_dev = drm->dev_private;
 	struct rknpu_gem_object *rknpu_obj = NULL;
 	struct drm_gem_object *obj = NULL;
 	gfp_t gfp_mask;
@@ -348,6 +374,9 @@
 		return ERR_PTR(-ENOMEM);
 
 	obj = &rknpu_obj->base;
+#if KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE
+	obj->funcs = &rknpu_gem_object_funcs;
+#endif
 
 	ret = drm_gem_object_init(drm, obj, size);
 	if (ret < 0) {
@@ -363,7 +392,9 @@
 	if (rknpu_obj->flags & RKNPU_MEM_ZEROING)
 		gfp_mask |= __GFP_ZERO;
 
-	if (!(rknpu_obj->flags & RKNPU_MEM_NON_DMA32)) {
+	if (!rknpu_dev->iommu_en ||
+	    rknpu_dev->config->dma_mask <= DMA_BIT_MASK(32) ||
+	    (rknpu_obj->flags & RKNPU_MEM_DMA32)) {
 		gfp_mask &= ~__GFP_HIGHMEM;
 		gfp_mask |= __GFP_DMA32;
 	}
@@ -422,7 +453,7 @@
 		return -EINVAL;
 	}
 
-	cookie = domain->iova_cookie;
+	cookie = (void *)domain->iova_cookie;
 	iovad = &cookie->iovad;
 	rknpu_obj->iova_size = iova_align(iovad, cache_size + rknpu_obj->size);
 	rknpu_obj->iova_start = rknpu_iommu_dma_alloc_iova(
@@ -534,8 +565,8 @@
 	iommu_unmap(domain, rknpu_obj->iova_start, cache_size);
 
 free_iova:
-	rknpu_iommu_dma_free_iova(domain->iova_cookie, rknpu_obj->iova_start,
-				  rknpu_obj->iova_size);
+	rknpu_iommu_dma_free_iova((void *)domain->iova_cookie,
+				  rknpu_obj->iova_start, rknpu_obj->iova_size);
 
 	return ret;
 }
@@ -566,7 +597,7 @@
 		if (rknpu_obj->size > 0)
 			iommu_unmap(domain, rknpu_obj->iova_start + cache_size,
 				    rknpu_obj->size);
-		rknpu_iommu_dma_free_iova(domain->iova_cookie,
+		rknpu_iommu_dma_free_iova((void *)domain->iova_cookie,
 					  rknpu_obj->iova_start,
 					  rknpu_obj->iova_size);
 	}
@@ -954,6 +985,7 @@
 	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
 	 * the whole buffer.
 	 */
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_pgoff = 0;
 
@@ -1148,8 +1180,7 @@
 }
 #endif
 
-static int rknpu_gem_mmap_obj(struct drm_gem_object *obj,
-			      struct vm_area_struct *vma)
+int rknpu_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
 	struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
 	int ret = -EINVAL;
@@ -1246,8 +1277,12 @@
 		goto err;
 	}
 
+#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
 	ret = drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL,
 					       npages);
+#else
+	ret = drm_prime_sg_to_page_array(sgt, rknpu_obj->pages, npages);
+#endif
 	if (ret < 0)
 		goto err_free_large;
 
@@ -1275,6 +1310,7 @@
 	return ERR_PTR(ret);
 }
 
+#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
 void *rknpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
 	struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
@@ -1290,6 +1326,35 @@
 {
 	vunmap(vaddr);
 }
+#else
+int rknpu_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
+	void *vaddr = NULL;
+
+	if (!rknpu_obj->pages)
+		return -EINVAL;
+
+	vaddr = vmap(rknpu_obj->pages, rknpu_obj->num_pages, VM_MAP,
+		     PAGE_KERNEL);
+	if (!vaddr)
+		return -ENOMEM;
+
+	iosys_map_set_vaddr(map, vaddr);
+
+	return 0;
+}
+
+void rknpu_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
+
+	if (rknpu_obj->pages) {
+		vunmap(map->vaddr);
+		map->vaddr = NULL;
+	}
+}
+#endif
 
 int rknpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
@@ -1306,6 +1371,7 @@
 			    unsigned long *length, unsigned long *offset,
 			    enum rknpu_cache_type cache_type)
 {
+#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
 	struct drm_gem_object *obj = &rknpu_obj->base;
 	struct rknpu_device *rknpu_dev = obj->dev->dev_private;
 	void __iomem *cache_base_io = NULL;
@@ -1348,6 +1414,8 @@
 		*length -= cache_length;
 		*offset = 0;
 	}
+#endif
+
 	return 0;
 }
 
@@ -1355,10 +1423,12 @@
 			 struct drm_file *file_priv)
 {
 	struct rknpu_gem_object *rknpu_obj = NULL;
+	struct rknpu_device *rknpu_dev = dev->dev_private;
 	struct rknpu_mem_sync *args = data;
 	struct scatterlist *sg;
+	dma_addr_t sg_phys_addr;
 	unsigned long length, offset = 0;
-	unsigned long sg_left, size = 0;
+	unsigned long sg_offset, sg_left, size = 0;
 	unsigned long len = 0;
 	int i;
 
@@ -1382,6 +1452,8 @@
 						      DMA_FROM_DEVICE);
 		}
 	} else {
+		if (WARN_ON(!rknpu_dev->fake_dev))
+			return -EINVAL;
 		length = args->size;
 		offset = args->offset;
 
@@ -1405,17 +1477,23 @@
 			if (len <= offset)
 				continue;
 
+			sg_phys_addr = sg_phys(sg);
+
 			sg_left = len - offset;
+			sg_offset = sg->length - sg_left;
+
 			size = (length < sg_left) ? length : sg_left;
 
 			if (args->flags & RKNPU_MEM_SYNC_TO_DEVICE) {
-				dma_sync_sg_for_device(dev->dev, sg, 1,
-						       DMA_TO_DEVICE);
+				dma_sync_single_range_for_device(
+					rknpu_dev->fake_dev, sg_phys_addr,
+					sg_offset, size, DMA_TO_DEVICE);
 			}
 
 			if (args->flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
-				dma_sync_sg_for_cpu(dev->dev, sg, 1,
-						    DMA_FROM_DEVICE);
+				dma_sync_single_range_for_cpu(
+					rknpu_dev->fake_dev, sg_phys_addr,
+					sg_offset, size, DMA_FROM_DEVICE);
 			}
 
 			offset += size;

--
Gitblit v1.6.2