From 1c055e55a242a33e574e48be530e06770a210dcd Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 19 Feb 2024 03:26:26 +0000
Subject: [PATCH] rknpu: track mem objects per session and add dma-buf sync fallback

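Rework the RKNPU DMA heap memory path in rknpu_mem.c:

- Build the rknpu_mem ioctl implementations only when
  CONFIG_ROCKCHIP_RKNPU_DMA_HEAP is enabled.
- Pass the struct file into the create/destroy ioctls so each buffer is
  added to the per-session list when it is created and is only freed on
  destroy if it is still registered in that list.
- Log errors on the dma-buf allocation, fd export, attach and map
  failure paths.
- Allocate the temporary page-pointer array with vmalloc() and only
  when RKNPU_MEM_KERNEL_MAPPING is requested.
- When CONFIG_DMABUF_PARTIAL is not available, fall back to walking the
  sg table and using dma_sync_single_range_for_cpu()/_for_device() for
  partial cache maintenance in rknpu_mem_sync_ioctl().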
---
 kernel/drivers/rknpu/rknpu_mem.c |  155 +++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 130 insertions(+), 25 deletions(-)

diff --git a/kernel/drivers/rknpu/rknpu_mem.c b/kernel/drivers/rknpu/rknpu_mem.c
index a3bf982..858c21f 100644
--- a/kernel/drivers/rknpu/rknpu_mem.c
+++ b/kernel/drivers/rknpu/rknpu_mem.c
@@ -15,7 +15,10 @@
 #include "rknpu_ioctl.h"
 #include "rknpu_mem.h"
 
-int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
+#ifdef CONFIG_ROCKCHIP_RKNPU_DMA_HEAP
+
+int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
+			   struct file *file)
 {
 	struct rknpu_mem_create args;
 	int ret = -EINVAL;
@@ -27,6 +30,7 @@
 	struct page **pages;
 	struct page *page;
 	struct rknpu_mem_object *rknpu_obj = NULL;
+	struct rknpu_session *session = NULL;
 	int i, fd;
 	unsigned int length, page_count;
 
@@ -65,6 +69,8 @@
 						  O_CLOEXEC | O_RDWR, 0x0,
 						  dev_name(rknpu_dev->dev));
 		if (IS_ERR(dmabuf)) {
+			LOG_ERROR("dmabuf alloc failed, args.size = %llu\n",
+				  args.size);
 			ret = PTR_ERR(dmabuf);
 			goto err_free_obj;
 		}
@@ -74,6 +80,7 @@
 
 		fd = dma_buf_fd(dmabuf, O_CLOEXEC | O_RDWR);
 		if (fd < 0) {
+			LOG_ERROR("dmabuf fd get failed\n");
 			ret = -EFAULT;
 			goto err_free_dma_buf;
 		}
@@ -81,12 +88,14 @@
 
 	attachment = dma_buf_attach(dmabuf, rknpu_dev->dev);
 	if (IS_ERR(attachment)) {
+		LOG_ERROR("dma_buf_attach failed\n");
 		ret = PTR_ERR(attachment);
 		goto err_free_dma_buf;
 	}
 
 	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 	if (IS_ERR(table)) {
+		LOG_ERROR("dma_buf_map_attachment failed\n");
 		dma_buf_detach(dmabuf, attachment);
 		ret = PTR_ERR(table);
 		goto err_free_dma_buf;
@@ -100,20 +109,27 @@
 			  __LINE__, &phys, length);
 	}
 
-	page_count = length >> PAGE_SHIFT;
-	pages = kmalloc_array(page_count, sizeof(struct page), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		goto err_detach_dma_buf;
-	}
+	if (args.flags & RKNPU_MEM_KERNEL_MAPPING) {
+		page_count = length >> PAGE_SHIFT;
+		pages = vmalloc(page_count * sizeof(struct page));
+		if (!pages) {
+			LOG_ERROR("alloc pages failed\n");
+			ret = -ENOMEM;
+			goto err_detach_dma_buf;
+		}
 
-	for (i = 0; i < page_count; i++)
-		pages[i] = &page[i];
+		for (i = 0; i < page_count; i++)
+			pages[i] = &page[i];
 
-	rknpu_obj->kv_addr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
-	if (!rknpu_obj->kv_addr) {
-		ret = -ENOMEM;
-		goto err_free_pages;
+		rknpu_obj->kv_addr =
+			vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
+		if (!rknpu_obj->kv_addr) {
+			LOG_ERROR("vmap pages addr failed\n");
+			ret = -ENOMEM;
+			goto err_free_pages;
+		}
+		vfree(pages);
+		pages = NULL;
 	}
 
 	rknpu_obj->size = PAGE_ALIGN(args.size);
@@ -137,9 +153,20 @@
 		goto err_unmap_kv_addr;
 	}
 
-	kfree(pages);
+	spin_lock(&rknpu_dev->lock);
+
+	session = file->private_data;
+	if (!session) {
+		spin_unlock(&rknpu_dev->lock);
+		ret = -EFAULT;
+		goto err_unmap_kv_addr;
+	}
+	list_add_tail(&rknpu_obj->head, &session->list);
+
+	spin_unlock(&rknpu_dev->lock);
+
 	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
 	dma_buf_detach(dmabuf, attachment);
 
 	return 0;
 
@@ -148,7 +175,8 @@
 	rknpu_obj->kv_addr = NULL;
 
 err_free_pages:
-	kfree(pages);
+	vfree(pages);
+	pages = NULL;
 
 err_detach_dma_buf:
 	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
@@ -166,11 +194,12 @@
 	return ret;
 }
 
-int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
+int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
+			    struct file *file)
 {
-	struct rknpu_mem_object *rknpu_obj = NULL;
+	struct rknpu_mem_object *rknpu_obj, *entry, *q;
+	struct rknpu_session *session = NULL;
 	struct rknpu_mem_destroy args;
-	struct dma_buf *dmabuf;
 	int ret = -EFAULT;
 
 	if (unlikely(copy_from_user(&args, (struct rknpu_mem_destroy *)data,
@@ -188,28 +217,91 @@
 	}
 
 	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
-	dmabuf = rknpu_obj->dmabuf;
 	LOG_DEBUG(
 		"free args.handle: %d, rknpu_obj: %#llx, rknpu_obj->dma_addr: %#llx\n",
 		args.handle, (__u64)(uintptr_t)rknpu_obj,
 		(__u64)rknpu_obj->dma_addr);
 
-	vunmap(rknpu_obj->kv_addr);
-	rknpu_obj->kv_addr = NULL;
+	spin_lock(&rknpu_dev->lock);
+	session = file->private_data;
+	if (!session) {
+		spin_unlock(&rknpu_dev->lock);
+		ret = -EFAULT;
+		return ret;
+	}
+	list_for_each_entry_safe(entry, q, &session->list, head) {
+		if (entry == rknpu_obj) {
+			list_del(&entry->head);
+			break;
+		}
+	}
+	spin_unlock(&rknpu_dev->lock);
 
-	if (!rknpu_obj->owner)
-		dma_buf_put(dmabuf);
+	if (rknpu_obj == entry) {
+		vunmap(rknpu_obj->kv_addr);
+		rknpu_obj->kv_addr = NULL;
 
-	kfree(rknpu_obj);
+		if (!rknpu_obj->owner)
+			dma_buf_put(rknpu_obj->dmabuf);
+
+		kfree(rknpu_obj);
+	}
 
 	return 0;
+}
+
+/*
+ * begin cpu access => for_cpu = true
+ * end cpu access => for_cpu = false
+ */
+static void __maybe_unused rknpu_dma_buf_sync(
+	struct rknpu_device *rknpu_dev, struct rknpu_mem_object *rknpu_obj,
+	u32 offset, u32 length, enum dma_data_direction dir, bool for_cpu)
+{
+	struct device *dev = rknpu_dev->dev;
+	struct sg_table *sgt = rknpu_obj->sgt;
+	struct scatterlist *sg = sgt->sgl;
+	dma_addr_t sg_dma_addr = sg_dma_address(sg);
+	unsigned int len = 0;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
+		len += sg->length;
+		if (len <= offset) {
+			sg_dma_addr += sg->length;
+			continue;
+		}
+
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
+
+		size = (length < sg_left) ? length : sg_left;
+
+		if (for_cpu)
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
+						      sg_offset, size, dir);
+		else
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
+							 sg_offset, size, dir);
+
+		offset += size;
+		length -= size;
+		sg_dma_addr += sg->length;
+
+		if (length == 0)
+			break;
+	}
 }
 
 int rknpu_mem_sync_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
 {
 	struct rknpu_mem_object *rknpu_obj = NULL;
 	struct rknpu_mem_sync args;
+#ifdef CONFIG_DMABUF_PARTIAL
 	struct dma_buf *dmabuf;
+#endif
 	int ret = -EFAULT;
 
 	if (unlikely(copy_from_user(&args, (struct rknpu_mem_sync *)data,
@@ -227,8 +319,18 @@
 	}
 
 	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
-	dmabuf = rknpu_obj->dmabuf;
 
+#ifndef CONFIG_DMABUF_PARTIAL
+	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
+		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
+				   DMA_TO_DEVICE, false);
+	}
+	if (args.flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
+		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
+				   DMA_FROM_DEVICE, true);
+	}
+#else
+	dmabuf = rknpu_obj->dmabuf;
 	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
 		dmabuf->ops->end_cpu_access_partial(dmabuf, DMA_TO_DEVICE,
 						    args.offset, args.size);
@@ -237,6 +339,9 @@
 		dmabuf->ops->begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE,
 						      args.offset, args.size);
 	}
+#endif
 
 	return 0;
 }
+
+#endif /* CONFIG_ROCKCHIP_RKNPU_DMA_HEAP */

--
Gitblit v1.6.2