hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/rknpu/rknpu_mem.c
@@ -15,7 +15,10 @@
 #include "rknpu_ioctl.h"
 #include "rknpu_mem.h"
 
-int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
+#ifdef CONFIG_ROCKCHIP_RKNPU_DMA_HEAP
+
+int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
+			   struct file *file)
 {
 	struct rknpu_mem_create args;
 	int ret = -EINVAL;
@@ -27,6 +30,7 @@
 	struct page **pages;
 	struct page *page;
 	struct rknpu_mem_object *rknpu_obj = NULL;
+	struct rknpu_session *session = NULL;
 	int i, fd;
 	unsigned int length, page_count;
 
@@ -65,6 +69,8 @@
 					 O_CLOEXEC | O_RDWR, 0x0,
 					 dev_name(rknpu_dev->dev));
 	if (IS_ERR(dmabuf)) {
+		LOG_ERROR("dmabuf alloc failed, args.size = %llu\n",
+			  args.size);
 		ret = PTR_ERR(dmabuf);
 		goto err_free_obj;
 	}
@@ -74,6 +80,7 @@
 
 	fd = dma_buf_fd(dmabuf, O_CLOEXEC | O_RDWR);
 	if (fd < 0) {
+		LOG_ERROR("dmabuf fd get failed\n");
 		ret = -EFAULT;
 		goto err_free_dma_buf;
 	}
@@ -81,12 +88,14 @@
 
 	attachment = dma_buf_attach(dmabuf, rknpu_dev->dev);
 	if (IS_ERR(attachment)) {
+		LOG_ERROR("dma_buf_attach failed\n");
 		ret = PTR_ERR(attachment);
 		goto err_free_dma_buf;
 	}
 
 	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 	if (IS_ERR(table)) {
+		LOG_ERROR("dma_buf_map_attachment failed\n");
 		dma_buf_detach(dmabuf, attachment);
 		ret = PTR_ERR(table);
 		goto err_free_dma_buf;
@@ -100,20 +109,27 @@
 			  __LINE__, &phys, length);
 	}
 
-	page_count = length >> PAGE_SHIFT;
-	pages = kmalloc_array(page_count, sizeof(struct page), GFP_KERNEL);
-	if (!pages) {
-		ret = -ENOMEM;
-		goto err_detach_dma_buf;
-	}
+	if (args.flags & RKNPU_MEM_KERNEL_MAPPING) {
+		page_count = length >> PAGE_SHIFT;
+		pages = vmalloc(page_count * sizeof(struct page));
+		if (!pages) {
+			LOG_ERROR("alloc pages failed\n");
+			ret = -ENOMEM;
+			goto err_detach_dma_buf;
+		}
 
-	for (i = 0; i < page_count; i++)
-		pages[i] = &page[i];
+		for (i = 0; i < page_count; i++)
+			pages[i] = &page[i];
 
-	rknpu_obj->kv_addr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
-	if (!rknpu_obj->kv_addr) {
-		ret = -ENOMEM;
-		goto err_free_pages;
+		rknpu_obj->kv_addr =
+			vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
+		if (!rknpu_obj->kv_addr) {
+			LOG_ERROR("vmap pages addr failed\n");
+			ret = -ENOMEM;
+			goto err_free_pages;
+		}
+		vfree(pages);
+		pages = NULL;
 	}
 
 	rknpu_obj->size = PAGE_ALIGN(args.size);
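A note on the vmap pattern this hunk introduces: pages[] is only consumed while vmap() builds the mapping, so it can be freed immediately on the success path as well as the error path, exactly as the hunk does. Below is a minimal standalone sketch of the same pattern, assuming a physically contiguous buffer (map_contig_pages is a hypothetical name, not part of the driver; it allocates sizeof(struct page *) per slot, since only pointers are stored):

    #include <linux/mm.h>
    #include <linux/overflow.h>
    #include <linux/vmalloc.h>

    /* Map `count` physically contiguous pages into the kernel's vmalloc
     * area; returns the virtual address, or NULL on failure. */
    static void *map_contig_pages(struct page *first, unsigned int count)
    {
            struct page **pages;
            void *vaddr;
            unsigned int i;

            pages = vmalloc(array_size(count, sizeof(*pages)));
            if (!pages)
                    return NULL;

            for (i = 0; i < count; i++)
                    pages[i] = first + i; /* contiguous, so a simple pointer walk */

            vaddr = vmap(pages, count, VM_MAP, PAGE_KERNEL);
            vfree(pages); /* the PTEs are installed; the array is no longer needed */
            return vaddr;
    }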
@@ -137,9 +153,20 @@
 		goto err_unmap_kv_addr;
 	}
 
-	kfree(pages);
 	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
 	dma_buf_detach(dmabuf, attachment);
+
+	spin_lock(&rknpu_dev->lock);
+
+	session = file->private_data;
+	if (!session) {
+		spin_unlock(&rknpu_dev->lock);
+		ret = -EFAULT;
+		goto err_unmap_kv_addr;
+	}
+	list_add_tail(&rknpu_obj->head, &session->list);
+
+	spin_unlock(&rknpu_dev->lock);
 
 	return 0;
 
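Tracking every allocation on the per-fd session list is what lets the driver reclaim buffers when a process exits without calling destroy. A sketch of what that teardown could look like from the file release path (rknpu_session_free_all is a hypothetical helper, not in this diff; since dma_buf_put() can sleep, entries are spliced off under the spinlock and freed outside it):

    #include <linux/dma-buf.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void rknpu_session_free_all(struct rknpu_device *rknpu_dev,
                                       struct rknpu_session *session)
    {
            struct rknpu_mem_object *entry, *tmp;
            LIST_HEAD(local);

            /* Detach the whole list under the lock, then free at leisure. */
            spin_lock(&rknpu_dev->lock);
            list_splice_init(&session->list, &local);
            spin_unlock(&rknpu_dev->lock);

            list_for_each_entry_safe(entry, tmp, &local, head) {
                    list_del(&entry->head);
                    vunmap(entry->kv_addr);
                    entry->kv_addr = NULL;
                    if (!entry->owner)
                            dma_buf_put(entry->dmabuf); /* may sleep: outside the lock */
                    kfree(entry);
            }
    }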
@@ -148,7 +175,8 @@
 	rknpu_obj->kv_addr = NULL;
 
 err_free_pages:
-	kfree(pages);
+	vfree(pages);
+	pages = NULL;
 
 err_detach_dma_buf:
 	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
@@ -166,11 +194,12 @@
 	return ret;
 }
 
-int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
+int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
+			    struct file *file)
 {
-	struct rknpu_mem_object *rknpu_obj = NULL;
+	struct rknpu_mem_object *rknpu_obj, *entry, *q;
+	struct rknpu_session *session = NULL;
 	struct rknpu_mem_destroy args;
-	struct dma_buf *dmabuf;
 	int ret = -EFAULT;
 
 	if (unlikely(copy_from_user(&args, (struct rknpu_mem_destroy *)data,
....@@ -188,28 +217,91 @@
188217 }
189218
190219 rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
191
- dmabuf = rknpu_obj->dmabuf;
192220 LOG_DEBUG(
193221 "free args.handle: %d, rknpu_obj: %#llx, rknpu_obj->dma_addr: %#llx\n",
194222 args.handle, (__u64)(uintptr_t)rknpu_obj,
195223 (__u64)rknpu_obj->dma_addr);
196224
197
- vunmap(rknpu_obj->kv_addr);
198
- rknpu_obj->kv_addr = NULL;
225
+ spin_lock(&rknpu_dev->lock);
226
+ session = file->private_data;
227
+ if (!session) {
228
+ spin_unlock(&rknpu_dev->lock);
229
+ ret = -EFAULT;
230
+ return ret;
231
+ }
232
+ list_for_each_entry_safe(entry, q, &session->list, head) {
233
+ if (entry == rknpu_obj) {
234
+ list_del(&entry->head);
235
+ break;
236
+ }
237
+ }
238
+ spin_unlock(&rknpu_dev->lock);
199239
200
- if (!rknpu_obj->owner)
201
- dma_buf_put(dmabuf);
240
+ if (rknpu_obj == entry) {
241
+ vunmap(rknpu_obj->kv_addr);
242
+ rknpu_obj->kv_addr = NULL;
202243
203
- kfree(rknpu_obj);
244
+ if (!rknpu_obj->owner)
245
+ dma_buf_put(rknpu_obj->dmabuf);
246
+
247
+ kfree(rknpu_obj);
248
+ }
204249
205250 return 0;
251
+}
252
+
253
+/*
254
+ * begin cpu access => for_cpu = true
255
+ * end cpu access => for_cpu = false
256
+ */
257
+static void __maybe_unused rknpu_dma_buf_sync(
258
+ struct rknpu_device *rknpu_dev, struct rknpu_mem_object *rknpu_obj,
259
+ u32 offset, u32 length, enum dma_data_direction dir, bool for_cpu)
260
+{
261
+ struct device *dev = rknpu_dev->dev;
262
+ struct sg_table *sgt = rknpu_obj->sgt;
263
+ struct scatterlist *sg = sgt->sgl;
264
+ dma_addr_t sg_dma_addr = sg_dma_address(sg);
265
+ unsigned int len = 0;
266
+ int i;
267
+
268
+ for_each_sgtable_sg(sgt, sg, i) {
269
+ unsigned int sg_offset, sg_left, size = 0;
270
+
271
+ len += sg->length;
272
+ if (len <= offset) {
273
+ sg_dma_addr += sg->length;
274
+ continue;
275
+ }
276
+
277
+ sg_left = len - offset;
278
+ sg_offset = sg->length - sg_left;
279
+
280
+ size = (length < sg_left) ? length : sg_left;
281
+
282
+ if (for_cpu)
283
+ dma_sync_single_range_for_cpu(dev, sg_dma_addr,
284
+ sg_offset, size, dir);
285
+ else
286
+ dma_sync_single_range_for_device(dev, sg_dma_addr,
287
+ sg_offset, size, dir);
288
+
289
+ offset += size;
290
+ length -= size;
291
+ sg_dma_addr += sg->length;
292
+
293
+ if (length == 0)
294
+ break;
295
+ }
206296 }
207297
208298 int rknpu_mem_sync_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
209299 {
210300 struct rknpu_mem_object *rknpu_obj = NULL;
211301 struct rknpu_mem_sync args;
302
+#ifdef CONFIG_DMABUF_PARTIAL
212303 struct dma_buf *dmabuf;
304
+#endif
213305 int ret = -EFAULT;
214306
215307 if (unlikely(copy_from_user(&args, (struct rknpu_mem_sync *)data,
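To make the arithmetic in rknpu_dma_buf_sync() concrete, here is one worked pass with illustrative numbers, assuming a table of two 16 KiB segments and a sync of 8 KiB starting 20 KiB into the buffer:

    /* rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, 20480, 8192, DMA_TO_DEVICE, false);
     *
     * Segment 0 (16384 bytes): len = 16384 <= offset (20480)
     *   -> skipped; sg_dma_addr advances by 16384.
     * Segment 1 (16384 bytes): len = 32768 > offset
     *   sg_left   = 32768 - 20480 = 12288
     *   sg_offset = 16384 - 12288 = 4096
     *   size      = min(8192, 12288) = 8192
     *   -> dma_sync_single_range_for_device() covers bytes [4096, 12288)
     *      of segment 1; length reaches 0 and the walk stops.
     */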
@@ -227,8 +319,18 @@
 	}
 
 	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
-	dmabuf = rknpu_obj->dmabuf;
 
+#ifndef CONFIG_DMABUF_PARTIAL
+	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
+		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
+				   DMA_TO_DEVICE, false);
+	}
+	if (args.flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
+		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
+				   DMA_FROM_DEVICE, true);
+	}
+#else
+	dmabuf = rknpu_obj->dmabuf;
 	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
 		dmabuf->ops->end_cpu_access_partial(dmabuf, DMA_TO_DEVICE,
 						    args.offset, args.size);
@@ -237,6 +339,9 @@
 		dmabuf->ops->begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE,
 						      args.offset, args.size);
 	}
+#endif
 
 	return 0;
 }
+
+#endif
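For context on the new struct file * parameter: both handlers need the per-fd session stored in file->private_data, so the ioctl dispatcher has to pass the file through. A sketch of that wiring (the command macros and file_to_rknpu_dev() are assumptions for illustration; the real definitions live in rknpu_ioctl.h):

    static long rknpu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
            struct rknpu_device *rknpu_dev = file_to_rknpu_dev(file); /* hypothetical */

            switch (cmd) {
            case RKNPU_MEM_CREATE:   /* assumed macro names */
                    return rknpu_mem_create_ioctl(rknpu_dev, arg, file);
            case RKNPU_MEM_DESTROY:
                    return rknpu_mem_destroy_ioctl(rknpu_dev, arg, file);
            case RKNPU_MEM_SYNC:
                    return rknpu_mem_sync_ioctl(rknpu_dev, arg);
            default:
                    return -EINVAL;
            }
    }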