.. | ..
1 | 1 | // SPDX-License-Identifier: GPL-2.0
2 | 2 | /*
3 | | - * drivers/staging/android/ion/ion.c
| 3 | + * ION Memory Allocator
4 | 4 | *
5 | 5 | * Copyright (C) 2011 Google, Inc.
| 6 | + * Copyright (c) 2019, The Linux Foundation. All rights reserved.
| 7 | + *
6 | 8 | */
7 | 9 |
8 | | -#include <linux/anon_inodes.h>
| 10 | +#include <linux/bitmap.h>
9 | 11 | #include <linux/debugfs.h>
10 | | -#include <linux/of_device.h>
11 | | -#include <linux/platform_device.h>
| 12 | +#include <linux/device.h>
12 | 13 | #include <linux/dma-buf.h>
13 | 14 | #include <linux/err.h>
14 | 15 | #include <linux/export.h>
15 | 16 | #include <linux/file.h>
16 | 17 | #include <linux/freezer.h>
17 | 18 | #include <linux/fs.h>
18 | | -#include <linux/idr.h>
19 | 19 | #include <linux/kthread.h>
20 | 20 | #include <linux/list.h>
21 | | -#include <linux/memblock.h>
22 | | -#include <linux/miscdevice.h>
23 | 21 | #include <linux/mm.h>
24 | 22 | #include <linux/mm_types.h>
25 | | -#include <linux/module.h>
26 | 23 | #include <linux/rbtree.h>
27 | 24 | #include <linux/sched/task.h>
28 | | -#include <linux/seq_file.h>
29 | 25 | #include <linux/slab.h>
30 | 26 | #include <linux/uaccess.h>
31 | | -#include <linux/vmalloc.h>
32 | | -#include <asm/cacheflush.h>
33 | 27 |
34 | | -#define CREATE_TRACE_POINTS
35 | | -#include "ion_trace.h"
36 | | -#include "ion.h"
| 28 | +#include "ion_private.h"
| 29 | +
| 30 | +#define ION_CURRENT_ABI_VERSION 2
37 | 31 |
38 | 32 | static struct ion_device *internal_dev;
39 | | -static struct device *ion_dev;
40 | 33 |
41 | | -static int heap_id;
42 | | -static atomic_long_t total_heap_bytes;
43 | | -
44 | | -/* this function should only be called while dev->lock is held */
45 | | -static void ion_buffer_add(struct ion_device *dev,
46 | | - struct ion_buffer *buffer)
| 34 | +/* Entry into ION allocator for rest of the kernel */
| 35 | +struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
| 36 | + unsigned int flags)
47 | 37 | {
48 | | - struct rb_node **p = &dev->buffers.rb_node;
49 | | - struct rb_node *parent = NULL;
50 | | - struct ion_buffer *entry;
51 | | -
52 | | - while (*p) {
53 | | - parent = *p;
54 | | - entry = rb_entry(parent, struct ion_buffer, node);
55 | | -
56 | | - if (buffer < entry) {
57 | | - p = &(*p)->rb_left;
58 | | - } else if (buffer > entry) {
59 | | - p = &(*p)->rb_right;
60 | | - } else {
61 | | - pr_err("%s: buffer already found.", __func__);
62 | | - BUG();
63 | | - }
64 | | - }
65 | | -
66 | | - rb_link_node(&buffer->node, parent, p);
67 | | - rb_insert_color(&buffer->node, &dev->buffers);
| 38 | + return ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
68 | 39 | }
| 40 | +EXPORT_SYMBOL_GPL(ion_alloc);
69 | 41 |
70 | | -static void track_buffer_created(struct ion_buffer *buffer)
| 42 | +int ion_free(struct ion_buffer *buffer)
71 | 43 | {
72 | | - long total = atomic_long_add_return(buffer->size, &total_heap_bytes);
73 | | -
74 | | - trace_ion_stat(buffer->sg_table, buffer->size, total);
| 44 | + return ion_buffer_destroy(internal_dev, buffer);
75 | 45 | }
| 46 | +EXPORT_SYMBOL_GPL(ion_free);
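
With this change the in-kernel interface hands back a dma-buf directly, so a kernel client works through the standard dma-buf calls rather than poking at struct ion_buffer. A minimal sketch, assuming a caller that already has its own struct device (client_dev); error unwinding is abbreviated:

	/* Sketch only: allocate 1 MiB from the system heap and map it for DMA. */
	struct dma_buf *dbuf = ion_alloc(SZ_1M, ION_HEAP_SYSTEM, ION_FLAG_CACHED);
	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);

	att = dma_buf_attach(dbuf, client_dev);	/* client_dev: the caller's struct device */
	sgt = dma_buf_map_attachment(att, DMA_TO_DEVICE);

	/* ... program the device with sgt ... */

	dma_buf_unmap_attachment(att, sgt, DMA_TO_DEVICE);
	dma_buf_detach(dbuf, att);
	dma_buf_put(dbuf);	/* dropping the last reference frees the ION buffer */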
---|
76 | 47 |
77 | | -static void track_buffer_destroyed(struct ion_buffer *buffer)
| 48 | +static int ion_alloc_fd(size_t len, unsigned int heap_id_mask,
| 49 | + unsigned int flags)
78 | 50 | {
79 | | - long total = atomic_long_sub_return(buffer->size, &total_heap_bytes);
80 | | -
81 | | - trace_ion_stat(buffer->sg_table, -buffer->size, total);
82 | | -}
83 | | -
84 | | -/* this function should only be called while dev->lock is held */
85 | | -static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
86 | | - struct ion_device *dev,
87 | | - unsigned long len,
88 | | - unsigned long flags)
89 | | -{
90 | | - struct ion_buffer *buffer;
91 | | - int ret;
92 | | -
93 | | - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
94 | | - if (!buffer)
95 | | - return ERR_PTR(-ENOMEM);
96 | | -
97 | | - buffer->heap = heap;
98 | | - buffer->flags = flags;
99 | | - buffer->dev = dev;
100 | | - buffer->size = len;
101 | | -
102 | | - ret = heap->ops->allocate(heap, buffer, len, flags);
103 | | -
104 | | - if (ret) {
105 | | - if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
106 | | - goto err2;
107 | | -
108 | | - ion_heap_freelist_drain(heap, 0);
109 | | - ret = heap->ops->allocate(heap, buffer, len, flags);
110 | | - if (ret)
111 | | - goto err2;
112 | | - }
113 | | -
114 | | - if (!buffer->sg_table) {
115 | | - WARN_ONCE(1, "This heap needs to set the sgtable");
116 | | - ret = -EINVAL;
117 | | - goto err1;
118 | | - }
119 | | -
120 | | - INIT_LIST_HEAD(&buffer->attachments);
121 | | - mutex_init(&buffer->lock);
122 | | -
123 | | - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
124 | | - struct scatterlist *sg;
125 | | - struct sg_table *table = buffer->sg_table;
126 | | - int i;
127 | | -
128 | | - /*
129 | | - * this will set up dma addresses for the sglist -- it is not
130 | | - * technically correct as per the dma api -- a specific
131 | | - * device isn't really taking ownership here. However, in
132 | | - * practice on our systems the only dma_address space is
133 | | - * physical addresses.
134 | | - */
135 | | - for_each_sg(table->sgl, sg, table->nents, i) {
136 | | - sg_dma_address(sg) = sg_phys(sg);
137 | | - sg_dma_len(sg) = sg->length;
138 | | - }
139 | | - }
140 | | -
141 | | - mutex_lock(&dev->buffer_lock);
142 | | - ion_buffer_add(dev, buffer);
143 | | - mutex_unlock(&dev->buffer_lock);
144 | | - track_buffer_created(buffer);
145 | | - return buffer;
146 | | -
147 | | -err1:
148 | | - heap->ops->free(buffer);
149 | | -err2:
150 | | - kfree(buffer);
151 | | - return ERR_PTR(ret);
152 | | -}
153 | | -
154 | | -void ion_buffer_destroy(struct ion_buffer *buffer)
155 | | -{
156 | | - if (buffer->kmap_cnt > 0) {
157 | | - pr_warn_once("%s: buffer still mapped in the kernel\n",
158 | | - __func__);
159 | | - buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
160 | | - }
161 | | - buffer->heap->ops->free(buffer);
162 | | - kfree(buffer);
163 | | -}
164 | | -
165 | | -static void _ion_buffer_destroy(struct ion_buffer *buffer)
166 | | -{
167 | | - struct ion_heap *heap = buffer->heap;
168 | | - struct ion_device *dev = buffer->dev;
169 | | -
170 | | - mutex_lock(&dev->buffer_lock);
171 | | - rb_erase(&buffer->node, &dev->buffers);
172 | | - mutex_unlock(&dev->buffer_lock);
173 | | - track_buffer_destroyed(buffer);
174 | | -
175 | | - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
176 | | - ion_heap_freelist_add(heap, buffer);
177 | | - else
178 | | - ion_buffer_destroy(buffer);
179 | | -}
180 | | -
181 | | -static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
182 | | -{
183 | | - void *vaddr;
184 | | -
185 | | - if (buffer->kmap_cnt) {
186 | | - buffer->kmap_cnt++;
187 | | - return buffer->vaddr;
188 | | - }
189 | | - vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
190 | | - if (WARN_ONCE(!vaddr,
191 | | - "heap->ops->map_kernel should return ERR_PTR on error"))
192 | | - return ERR_PTR(-EINVAL);
193 | | - if (IS_ERR(vaddr))
194 | | - return vaddr;
195 | | - buffer->vaddr = vaddr;
196 | | - buffer->kmap_cnt++;
197 | | - return vaddr;
198 | | -}
199 | | -
200 | | -static void ion_buffer_kmap_put(struct ion_buffer *buffer)
201 | | -{
202 | | - buffer->kmap_cnt--;
203 | | - if (!buffer->kmap_cnt) {
204 | | - buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
205 | | - buffer->vaddr = NULL;
206 | | - }
207 | | -}
208 | | -
209 | | -static struct sg_table *dup_sg_table(struct sg_table *table)
210 | | -{
211 | | - struct sg_table *new_table;
212 | | - int ret, i;
213 | | - struct scatterlist *sg, *new_sg;
214 | | -
215 | | - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
216 | | - if (!new_table)
217 | | - return ERR_PTR(-ENOMEM);
218 | | -
219 | | - ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
220 | | - if (ret) {
221 | | - kfree(new_table);
222 | | - return ERR_PTR(-ENOMEM);
223 | | - }
224 | | -
225 | | - new_sg = new_table->sgl;
226 | | - for_each_sg(table->sgl, sg, table->nents, i) {
227 | | - memcpy(new_sg, sg, sizeof(*sg));
228 | | - sg_dma_address(new_sg) = 0;
229 | | - sg_dma_len(new_sg) = 0;
230 | | - new_sg = sg_next(new_sg);
231 | | - }
232 | | -
233 | | - return new_table;
234 | | -}
235 | | -
236 | | -static void free_duped_table(struct sg_table *table)
237 | | -{
238 | | - sg_free_table(table);
239 | | - kfree(table);
240 | | -}
241 | | -
242 | | -struct ion_dma_buf_attachment {
243 | | - struct device *dev;
244 | | - struct sg_table *table;
245 | | - struct list_head list;
246 | | - bool mapped:1;
247 | | -};
---|
248 | | -
249 | | -static int ion_dma_buf_attach(struct dma_buf *dmabuf,
250 | | - struct dma_buf_attachment *attachment)
251 | | -{
252 | | - struct ion_dma_buf_attachment *a;
253 | | - struct sg_table *table;
254 | | - struct ion_buffer *buffer = dmabuf->priv;
255 | | -
256 | | - a = kzalloc(sizeof(*a), GFP_KERNEL);
257 | | - if (!a)
258 | | - return -ENOMEM;
259 | | -
260 | | - table = dup_sg_table(buffer->sg_table);
261 | | - if (IS_ERR(table)) {
262 | | - kfree(a);
263 | | - return -ENOMEM;
264 | | - }
265 | | -
266 | | - a->table = table;
267 | | - a->dev = attachment->dev;
268 | | - INIT_LIST_HEAD(&a->list);
269 | | - a->mapped = false;
270 | | -
271 | | - attachment->priv = a;
272 | | -
273 | | - mutex_lock(&buffer->lock);
274 | | - list_add(&a->list, &buffer->attachments);
275 | | - mutex_unlock(&buffer->lock);
276 | | -
277 | | - return 0;
278 | | -}
279 | | -
280 | | -static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
281 | | - struct dma_buf_attachment *attachment)
282 | | -{
283 | | - struct ion_dma_buf_attachment *a = attachment->priv;
284 | | - struct ion_buffer *buffer = dmabuf->priv;
285 | | -
286 | | - mutex_lock(&buffer->lock);
287 | | - list_del(&a->list);
288 | | - mutex_unlock(&buffer->lock);
289 | | - free_duped_table(a->table);
290 | | -
291 | | - kfree(a);
292 | | -}
293 | | -
294 | | -static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
295 | | - enum dma_data_direction direction)
296 | | -{
297 | | - struct sg_table *table;
298 | | - unsigned long map_attrs;
299 | | - int count;
300 | | - struct ion_dma_buf_attachment *a = attachment->priv;
301 | | - struct ion_buffer *buffer = attachment->dmabuf->priv;
302 | | -
303 | | - table = a->table;
304 | | -
305 | | - map_attrs = attachment->dma_map_attrs;
306 | | - if (!(buffer->flags & ION_FLAG_CACHED))
307 | | - map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
308 | | -
309 | | - mutex_lock(&buffer->lock);
310 | | - count = dma_map_sg_attrs(attachment->dev, table->sgl,
311 | | - table->nents, direction,
312 | | - map_attrs);
313 | | - if (count <= 0) {
314 | | - mutex_unlock(&buffer->lock);
315 | | - return ERR_PTR(-ENOMEM);
316 | | - }
317 | | -
318 | | - a->mapped = true;
319 | | - mutex_unlock(&buffer->lock);
320 | | - return table;
321 | | -}
322 | | -
323 | | -static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
324 | | - struct sg_table *table,
325 | | - enum dma_data_direction direction)
326 | | -{
327 | | - unsigned long map_attrs;
328 | | - struct ion_buffer *buffer = attachment->dmabuf->priv;
329 | | - struct ion_dma_buf_attachment *a = attachment->priv;
330 | | -
331 | | - map_attrs = attachment->dma_map_attrs;
332 | | - if (!(buffer->flags & ION_FLAG_CACHED))
333 | | - map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
334 | | -
335 | | - mutex_lock(&buffer->lock);
336 | | - dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
337 | | - direction, map_attrs);
338 | | - a->mapped = false;
339 | | - mutex_unlock(&buffer->lock);
340 | | -}
341 | | -
342 | | -static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
343 | | -{
344 | | - struct ion_buffer *buffer = dmabuf->priv;
345 | | - int ret = 0;
346 | | -
347 | | - if (!buffer->heap->ops->map_user) {
348 | | - pr_err("%s: this heap does not define a method for mapping to userspace\n",
349 | | - __func__);
350 | | - return -EINVAL;
351 | | - }
352 | | -
353 | | - if (!(buffer->flags & ION_FLAG_CACHED))
354 | | - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
355 | | -
356 | | - mutex_lock(&buffer->lock);
357 | | - /* now map it to userspace */
358 | | - ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
359 | | - mutex_unlock(&buffer->lock);
360 | | -
361 | | - if (ret)
362 | | - pr_err("%s: failure mapping buffer to userspace\n",
363 | | - __func__);
364 | | -
365 | | - return ret;
366 | | -}
367 | | -
368 | | -static void ion_dma_buf_release(struct dma_buf *dmabuf)
369 | | -{
370 | | - struct ion_buffer *buffer = dmabuf->priv;
371 | | -
372 | | - _ion_buffer_destroy(buffer);
373 | | - kfree(dmabuf->exp_name);
374 | | -}
375 | | -
376 | | -static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
377 | | -{
378 | | - struct ion_buffer *buffer = dmabuf->priv;
379 | | - void *vaddr = ERR_PTR(-EINVAL);
380 | | -
381 | | - if (buffer->heap->ops->map_kernel) {
382 | | - mutex_lock(&buffer->lock);
383 | | - vaddr = ion_buffer_kmap_get(buffer);
384 | | - mutex_unlock(&buffer->lock);
385 | | - } else {
386 | | - pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
387 | | - buffer->heap->name);
388 | | - }
389 | | -
390 | | - return vaddr;
391 | | -}
392 | | -
393 | | -static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
394 | | -{
395 | | - struct ion_buffer *buffer = dmabuf->priv;
396 | | -
397 | | - if (buffer->heap->ops->map_kernel) {
398 | | - mutex_lock(&buffer->lock);
399 | | - ion_buffer_kmap_put(buffer);
400 | | - mutex_unlock(&buffer->lock);
401 | | - }
402 | | -}
403 | | -
404 | | -static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
405 | | -{
406 | | - /*
407 | | - * TODO: Once clients remove their hacks where they assume kmap(ed)
408 | | - * addresses are virtually contiguous implement this properly
409 | | - */
410 | | - void *vaddr = ion_dma_buf_vmap(dmabuf);
411 | | -
412 | | - if (IS_ERR(vaddr))
413 | | - return vaddr;
414 | | -
415 | | - return vaddr + offset * PAGE_SIZE;
416 | | -}
417 | | -
418 | | -static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
419 | | - void *ptr)
420 | | -{
421 | | - /*
422 | | - * TODO: Once clients remove their hacks where they assume kmap(ed)
423 | | - * addresses are virtually contiguous implement this properly
424 | | - */
425 | | - ion_dma_buf_vunmap(dmabuf, ptr);
426 | | -}
427 | | -
428 | | -static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
429 | | - unsigned int nents, unsigned int offset,
430 | | - unsigned int length,
431 | | - enum dma_data_direction dir, bool for_cpu)
432 | | -{
433 | | - int i;
434 | | - struct scatterlist *sg;
435 | | - unsigned int len = 0;
436 | | - dma_addr_t sg_dma_addr;
437 | | -
438 | | - for_each_sg(sgl, sg, nents, i) {
439 | | - unsigned int sg_offset, sg_left, size = 0;
440 | | -
441 | | - sg_dma_addr = sg_dma_address(sg);
442 | | -
443 | | - len += sg->length;
444 | | - if (len <= offset)
445 | | - continue;
446 | | -
447 | | - sg_left = len - offset;
448 | | - sg_offset = sg->length - sg_left;
449 | | -
450 | | - size = (length < sg_left) ? length : sg_left;
451 | | - if (for_cpu)
452 | | - dma_sync_single_range_for_cpu(dev, sg_dma_addr,
453 | | - sg_offset, size, dir);
454 | | - else
455 | | - dma_sync_single_range_for_device(dev, sg_dma_addr,
456 | | - sg_offset, size, dir);
457 | | -
458 | | - offset += size;
459 | | - length -= size;
460 | | -
461 | | - if (length == 0)
462 | | - break;
463 | | - }
464 | | -
465 | | - return 0;
466 | | -}
---|
467 | | -
468 | | -static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
469 | | - enum dma_data_direction direction)
470 | | -{
471 | | - struct ion_buffer *buffer = dmabuf->priv;
472 | | - struct ion_dma_buf_attachment *a;
473 | | -
474 | | - if (direction == DMA_TO_DEVICE)
475 | | - return 0;
476 | | -
477 | | - mutex_lock(&buffer->lock);
478 | | - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
479 | | - struct device *dev = ion_dev;
480 | | - struct sg_table *table = buffer->sg_table;
481 | | -
482 | | - if (dev) {
483 | | - if (buffer->heap->type == ION_HEAP_TYPE_DMA)
484 | | - dma_sync_single_range_for_cpu(dev,
485 | | - sg_dma_address(table->sgl),
486 | | - 0, buffer->size,
487 | | - direction);
488 | | - else
489 | | - dma_sync_sg_for_cpu(dev, table->sgl, table->nents,
490 | | - direction);
491 | | - goto unlock;
492 | | - }
493 | | - }
494 | | -
495 | | - list_for_each_entry(a, &buffer->attachments, list) {
496 | | - if (!a->mapped)
497 | | - continue;
498 | | - dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
499 | | - direction);
500 | | - }
501 | | -unlock:
502 | | - mutex_unlock(&buffer->lock);
503 | | -
504 | | - return 0;
505 | | -}
506 | | -
507 | | -static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
508 | | - enum dma_data_direction direction)
509 | | -{
510 | | - struct ion_buffer *buffer = dmabuf->priv;
511 | | - struct ion_dma_buf_attachment *a;
512 | | -
513 | | - if (buffer->size >= SZ_1M) {
514 | | - if (direction == DMA_FROM_DEVICE) {
515 | | - flush_cache_all();
516 | | - goto exit;
517 | | - } else {
518 | | -#ifdef CONFIG_ARM64
519 | | - __flush_dcache_all();
520 | | - goto exit;
521 | | -#endif
522 | | - }
523 | | - }
524 | | -
525 | | - mutex_lock(&buffer->lock);
526 | | - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
527 | | - struct device *dev = ion_dev;
528 | | - struct sg_table *table = buffer->sg_table;
529 | | -
530 | | - if (dev) {
531 | | - if (buffer->heap->type == ION_HEAP_TYPE_DMA)
532 | | - dma_sync_single_range_for_device(dev,
533 | | - sg_dma_address(table->sgl),
534 | | - 0, buffer->size,
535 | | - direction);
536 | | - else
537 | | -
538 | | - dma_sync_sg_for_device(dev, table->sgl, table->nents,
539 | | - direction);
540 | | - goto unlock;
541 | | - }
542 | | - }
543 | | -
544 | | - list_for_each_entry(a, &buffer->attachments, list) {
545 | | - if (!a->mapped)
546 | | - continue;
547 | | - dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
548 | | - direction);
549 | | - }
550 | | -unlock:
551 | | - mutex_unlock(&buffer->lock);
552 | | -exit:
553 | | - return 0;
554 | | -}
555 | | -
556 | | -static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
557 | | - enum dma_data_direction direction,
558 | | - unsigned int offset,
559 | | - unsigned int len)
560 | | -{
561 | | - struct device *dev = ion_dev;
562 | | - struct ion_buffer *buffer = dmabuf->priv;
563 | | - struct sg_table *table = buffer->sg_table;
564 | | - struct ion_dma_buf_attachment *a;
565 | | - int ret = 0;
566 | | -
567 | | - if (direction == DMA_TO_DEVICE)
568 | | - return 0;
569 | | -
570 | | - mutex_lock(&buffer->lock);
571 | | - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
572 | | - if (dev) {
573 | | - if (buffer->heap->type == ION_HEAP_TYPE_DMA)
574 | | - dma_sync_single_range_for_cpu(dev,
575 | | - sg_dma_address(table->sgl),
576 | | - offset, len,
577 | | - direction);
578 | | - else
579 | | - ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
580 | | - offset, len, direction, true);
581 | | - goto unlock;
582 | | - }
583 | | - }
584 | | -
585 | | - list_for_each_entry(a, &buffer->attachments, list) {
586 | | - if (!a->mapped)
587 | | - continue;
588 | | -
589 | | - ret = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
590 | | - offset, len, direction, true);
591 | | - }
592 | | -unlock:
593 | | - mutex_unlock(&buffer->lock);
594 | | -
595 | | - return ret;
596 | | -}
597 | | -
598 | | -static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
599 | | - enum dma_data_direction direction,
600 | | - unsigned int offset,
601 | | - unsigned int len)
602 | | -{
603 | | - struct device *dev = ion_dev;
604 | | - struct ion_buffer *buffer = dmabuf->priv;
605 | | - struct sg_table *table = buffer->sg_table;
606 | | - struct ion_dma_buf_attachment *a;
607 | | - int ret = 0;
608 | | -
609 | | - if (len >= SZ_1M) {
610 | | - if (direction == DMA_FROM_DEVICE) {
611 | | - flush_cache_all();
612 | | - goto exit;
613 | | - } else {
614 | | -#ifdef CONFIG_ARM64
615 | | - __flush_dcache_all();
616 | | - goto exit;
617 | | -#endif
618 | | - }
619 | | - }
620 | | -
621 | | - mutex_lock(&buffer->lock);
622 | | - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
623 | | - if (dev) {
624 | | - if (buffer->heap->type == ION_HEAP_TYPE_DMA)
625 | | - dma_sync_single_range_for_device(dev,
626 | | - sg_dma_address(table->sgl),
627 | | - offset, len,
628 | | - direction);
629 | | - else
630 | | - ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
631 | | - offset, len, direction, false);
632 | | - goto unlock;
633 | | - }
634 | | - }
635 | | -
636 | | - list_for_each_entry(a, &buffer->attachments, list) {
637 | | - if (!a->mapped)
638 | | - continue;
639 | | -
640 | | - ret = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
641 | | - offset, len, direction, false);
642 | | - }
643 | | -unlock:
644 | | - mutex_unlock(&buffer->lock);
645 | | -exit:
646 | | - return ret;
647 | | -}
648 | | -
649 | | -static const struct dma_buf_ops dma_buf_ops = {
650 | | - .map_dma_buf = ion_map_dma_buf,
651 | | - .unmap_dma_buf = ion_unmap_dma_buf,
652 | | - .mmap = ion_mmap,
653 | | - .release = ion_dma_buf_release,
654 | | - .attach = ion_dma_buf_attach,
655 | | - .detach = ion_dma_buf_detatch,
656 | | - .begin_cpu_access = ion_dma_buf_begin_cpu_access,
657 | | - .end_cpu_access = ion_dma_buf_end_cpu_access,
658 | | - .begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial,
659 | | - .end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial,
660 | | - .map = ion_dma_buf_kmap,
661 | | - .unmap = ion_dma_buf_kunmap,
662 | | - .vmap = ion_dma_buf_vmap,
663 | | - .vunmap = ion_dma_buf_vunmap,
664 | | -};
---|
665 | | -
666 | | -int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
667 | | -{
668 | | - struct ion_device *dev = internal_dev;
669 | | - struct ion_buffer *buffer = NULL;
670 | | - struct ion_heap *heap;
671 | | - DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
672 | 51 | int fd;
673 | 52 | struct dma_buf *dmabuf;
674 | | - char task_comm[TASK_COMM_LEN];
675 | 53 |
676 | | - pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
677 | | - len, heap_id_mask, flags);
678 | | - /*
679 | | - * traverse the list of heaps available in this system in priority
680 | | - * order. If the heap type is supported by the client, and matches the
681 | | - * request of the caller allocate from it. Repeat until allocate has
682 | | - * succeeded or all heaps have been tried
683 | | - */
684 | | - len = PAGE_ALIGN(len);
685 | | -
686 | | - if (!len)
687 | | - return -EINVAL;
688 | | -
689 | | - down_read(&dev->lock);
690 | | - plist_for_each_entry(heap, &dev->heaps, node) {
691 | | - /* if the caller didn't specify this heap id */
692 | | - if (!((1 << heap->id) & heap_id_mask))
693 | | - continue;
694 | | - buffer = ion_buffer_create(heap, dev, len, flags);
695 | | - if (!IS_ERR(buffer))
696 | | - break;
697 | | - }
698 | | - up_read(&dev->lock);
699 | | -
700 | | - if (!buffer)
701 | | - return -ENODEV;
702 | | -
703 | | - if (IS_ERR(buffer))
704 | | - return PTR_ERR(buffer);
705 | | -
706 | | - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) {
707 | | - struct device *dev = ion_dev;
708 | | - struct sg_table *table = buffer->sg_table;
709 | | -
710 | | - if (dev)
711 | | - dma_sync_sg_for_device(dev, table->sgl, table->nents,
712 | | - DMA_BIDIRECTIONAL);
713 | | - }
714 | | -
715 | | - get_task_comm(task_comm, current->group_leader);
716 | | -
717 | | - exp_info.ops = &dma_buf_ops;
718 | | - exp_info.size = buffer->size;
719 | | - exp_info.flags = O_RDWR;
720 | | - exp_info.priv = buffer;
721 | | - exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME,
722 | | - heap->name, current->tgid, task_comm);
723 | | -
724 | | - dmabuf = dma_buf_export(&exp_info);
725 | | - if (IS_ERR(dmabuf)) {
726 | | - _ion_buffer_destroy(buffer);
727 | | - kfree(exp_info.exp_name);
| 54 | + dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
| 55 | + if (IS_ERR(dmabuf))
728 | 56 | return PTR_ERR(dmabuf);
729 | | - }
730 | 57 |
731 | 58 | fd = dma_buf_fd(dmabuf, O_CLOEXEC);
732 | 59 | if (fd < 0)
.. | ..
735 | 62 | return fd;
736 | 63 | }
737 | 64 |
738 | | -int ion_query_heaps(struct ion_heap_query *query)
| 65 | +size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size)
| 66 | +{
| 67 | + struct ion_device *dev = internal_dev;
| 68 | + size_t i = 0, num_heaps = 0;
| 69 | + struct ion_heap *heap;
| 70 | +
| 71 | + down_read(&dev->lock);
| 72 | +
| 73 | + // If size is 0, return without updating hdata.
| 74 | + if (size == 0) {
| 75 | + num_heaps = dev->heap_cnt;
| 76 | + goto out;
| 77 | + }
| 78 | +
| 79 | + plist_for_each_entry(heap, &dev->heaps, node) {
| 80 | + strncpy(hdata[i].name, heap->name, MAX_HEAP_NAME);
| 81 | + hdata[i].name[MAX_HEAP_NAME - 1] = '\0';
| 82 | + hdata[i].type = heap->type;
| 83 | + hdata[i].heap_id = heap->id;
| 84 | +
| 85 | + i++;
| 86 | + if (i >= size)
| 87 | + break;
| 88 | + }
| 89 | +
| 90 | + num_heaps = i;
| 91 | +out:
| 92 | + up_read(&dev->lock);
| 93 | + return num_heaps;
| 94 | +}
| 95 | +EXPORT_SYMBOL_GPL(ion_query_heaps_kernel);
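
ion_query_heaps_kernel() follows a two-call convention: a size of 0 returns the current heap count without touching hdata, and a second call fills in at most size entries. A hedged sketch of an in-kernel caller (allocation-failure handling abbreviated):

	size_t cnt = ion_query_heaps_kernel(NULL, 0);	/* size == 0: count only */
	struct ion_heap_data *hdata = kcalloc(cnt, sizeof(*hdata), GFP_KERNEL);
	size_t i, filled;

	filled = ion_query_heaps_kernel(hdata, cnt);
	for (i = 0; i < filled; i++)
		pr_info("heap %u: %s (type %u)\n",
			hdata[i].heap_id, hdata[i].name, hdata[i].type);
	kfree(hdata);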
---|
| 96 | +
| 97 | +static int ion_query_heaps(struct ion_heap_query *query)
739 | 98 | {
740 | 99 | struct ion_device *dev = internal_dev;
741 | 100 | struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
.. | ..
780 | 139 | return ret;
781 | 140 | }
782 | 141 |
783 | | -int ion_get_phys(struct ion_phys_data *phys)
| 142 | +union ion_ioctl_arg {
| 143 | + struct ion_allocation_data allocation;
| 144 | + struct ion_heap_query query;
| 145 | + u32 ion_abi_version;
| 146 | +};
| 147 | +
| 148 | +static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
784 | 149 | {
785 | | - struct dma_buf *dmabuf;
786 | | - struct ion_buffer *buffer;
787 | | -
788 | | - if (IS_ERR_OR_NULL(phys))
789 | | - return -EINVAL;
790 | | -
791 | | - dmabuf = dma_buf_get(phys->fd);
792 | | - if (IS_ERR_OR_NULL(dmabuf))
793 | | - return -ENOENT;
794 | | -
795 | | - phys->paddr = (__u64)-1;
796 | | - buffer = dmabuf->priv;
797 | | - if (!IS_ERR_OR_NULL(buffer) &&
798 | | - (buffer->heap->type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
799 | | - buffer->heap->type == ION_HEAP_TYPE_DMA ||
800 | | - buffer->heap->type == ION_HEAP_TYPE_CARVEOUT))
801 | | - phys->paddr = sg_phys(buffer->sg_table->sgl);
802 | | -
803 | | - dma_buf_put(dmabuf);
| 150 | + switch (cmd) {
| 151 | + case ION_IOC_HEAP_QUERY:
| 152 | + if (arg->query.reserved0 ||
| 153 | + arg->query.reserved1 ||
| 154 | + arg->query.reserved2)
| 155 | + return -EINVAL;
| 156 | + break;
| 157 | + default:
| 158 | + break;
| 159 | + }
804 | 160 |
805 | 161 | return 0;
| 162 | +}
| 163 | +
| 164 | +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
| 165 | +{
| 166 | + int ret = 0;
| 167 | + union ion_ioctl_arg data;
| 168 | +
| 169 | + if (_IOC_SIZE(cmd) > sizeof(data))
| 170 | + return -EINVAL;
| 171 | +
| 172 | + /*
| 173 | + * The copy_from_user is unconditional here for both read and write
| 174 | + * to do the validate. If there is no write for the ioctl, the
| 175 | + * buffer is cleared
| 176 | + */
| 177 | + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
| 178 | + return -EFAULT;
| 179 | +
| 180 | + ret = validate_ioctl_arg(cmd, &data);
| 181 | + if (ret) {
| 182 | + pr_warn_once("%s: ioctl validate failed\n", __func__);
| 183 | + return ret;
| 184 | + }
| 185 | +
| 186 | + if (!(_IOC_DIR(cmd) & _IOC_WRITE))
| 187 | + memset(&data, 0, sizeof(data));
| 188 | +
| 189 | + switch (cmd) {
| 190 | + case ION_IOC_ALLOC:
| 191 | + {
| 192 | + int fd;
| 193 | +
| 194 | + fd = ion_alloc_fd(data.allocation.len,
| 195 | + data.allocation.heap_id_mask,
| 196 | + data.allocation.flags);
| 197 | + if (fd < 0)
| 198 | + return fd;
| 199 | +
| 200 | + data.allocation.fd = fd;
| 201 | +
| 202 | + break;
| 203 | + }
| 204 | + case ION_IOC_HEAP_QUERY:
| 205 | + ret = ion_query_heaps(&data.query);
| 206 | + break;
| 207 | + case ION_IOC_ABI_VERSION:
| 208 | + data.ion_abi_version = ION_CURRENT_ABI_VERSION;
| 209 | + break;
| 210 | + default:
| 211 | + return -ENOTTY;
| 212 | + }
| 213 | +
| 214 | + if (_IOC_DIR(cmd) & _IOC_READ) {
| 215 | + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
| 216 | + return -EFAULT;
| 217 | + }
| 218 | + return ret;
806 | 219 | }
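
From user space these paths are reached through /dev/ion. A sketch of the ioctl flow, assuming the UAPI header that defines struct ion_allocation_data and the ION_IOC_* numbers is installed as <linux/ion.h>, and that bit 0 of heap_id_mask selects the system heap on this target:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ion.h>

	int main(void)
	{
		int ion_fd = open("/dev/ion", O_RDONLY | O_CLOEXEC);
		unsigned int abi = 0;
		struct ion_allocation_data alloc = {
			.len = 4096,
			.heap_id_mask = 1 << 0,	/* assumed heap id for illustration */
			.flags = 0,
		};

		if (ion_fd < 0)
			return 1;

		if (ioctl(ion_fd, ION_IOC_ABI_VERSION, &abi) == 0)
			printf("ION ABI version %u\n", abi);	/* 2 with this patch */

		if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) == 0) {
			printf("got dma-buf fd %d\n", alloc.fd);
			close(alloc.fd);	/* releasing the fd frees the buffer */
		}

		close(ion_fd);
		return 0;
	}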
---|
807 | 220 |
808 | 221 | static const struct file_operations ion_fops = {
809 | 222 | .owner = THIS_MODULE,
810 | 223 | .unlocked_ioctl = ion_ioctl,
811 | | -#ifdef CONFIG_COMPAT
812 | | - .compat_ioctl = ion_ioctl,
813 | | -#endif
814 | | -};
815 | | -
816 | | -static int ion_debug_heap_show(struct seq_file *s, void *unused)
817 | | -{
818 | | - struct ion_heap *heap = s->private;
819 | | -
820 | | - if (heap->debug_show)
821 | | - heap->debug_show(heap, s, unused);
822 | | -
823 | | - return 0;
824 | | -}
825 | | -
826 | | -static int ion_debug_heap_open(struct inode *inode, struct file *file)
827 | | -{
828 | | - return single_open(file, ion_debug_heap_show, inode->i_private);
829 | | -}
830 | | -
831 | | -static const struct file_operations debug_heap_fops = {
832 | | - .open = ion_debug_heap_open,
833 | | - .read = seq_read,
834 | | - .llseek = seq_lseek,
835 | | - .release = single_release,
| 224 | + .compat_ioctl = compat_ptr_ioctl,
836 | 225 | };
837 | 226 |
838 | 227 | static int debug_shrink_set(void *data, u64 val)
.. | ..
870 | 259 | DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
871 | 260 | debug_shrink_set, "%llu\n");
872 | 261 |
873 | | -void ion_device_add_heap(struct ion_heap *heap)
| 262 | +static int ion_assign_heap_id(struct ion_heap *heap, struct ion_device *dev)
| 263 | +{
| 264 | + int id_bit = -EINVAL;
| 265 | + int start_bit = -1, end_bit = -1;
| 266 | +
| 267 | + switch (heap->type) {
| 268 | + case ION_HEAP_TYPE_SYSTEM:
| 269 | + id_bit = __ffs(ION_HEAP_SYSTEM);
| 270 | + break;
| 271 | + case ION_HEAP_TYPE_DMA:
| 272 | + start_bit = __ffs(ION_HEAP_DMA_START);
| 273 | + end_bit = __ffs(ION_HEAP_DMA_END);
| 274 | + break;
| 275 | + case ION_HEAP_TYPE_CUSTOM ... ION_HEAP_TYPE_MAX:
| 276 | + start_bit = __ffs(ION_HEAP_CUSTOM_START);
| 277 | + end_bit = __ffs(ION_HEAP_CUSTOM_END);
| 278 | + break;
| 279 | + default:
| 280 | + return -EINVAL;
| 281 | + }
| 282 | +
| 283 | + /* For carveout, dma & custom heaps, we first let the heaps choose their
| 284 | + * own IDs. This allows the old behaviour of knowing the heap ids
| 285 | + * of these type of heaps in advance in user space. If a heap with
| 286 | + * that ID already exists, it is an error.
| 287 | + *
| 288 | + * If the heap hasn't picked an id by itself, then we assign it
| 289 | + * one.
| 290 | + */
| 291 | + if (id_bit < 0) {
| 292 | + if (heap->id) {
| 293 | + id_bit = __ffs(heap->id);
| 294 | + if (id_bit < start_bit || id_bit > end_bit)
| 295 | + return -EINVAL;
| 296 | + } else {
| 297 | + id_bit = find_next_zero_bit(dev->heap_ids, end_bit + 1,
| 298 | + start_bit);
| 299 | + if (id_bit > end_bit)
| 300 | + return -ENOSPC;
| 301 | + }
| 302 | + }
| 303 | +
| 304 | + if (test_and_set_bit(id_bit, dev->heap_ids))
| 305 | + return -EEXIST;
| 306 | + heap->id = id_bit;
| 307 | + dev->heap_cnt++;
| 308 | +
| 309 | + return 0;
| 310 | +}
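
Heap IDs are now bit positions carved out of fixed per-type ranges, so a heap module can either pre-pick a bit from its range (preserving the IDs user space already expects) or pass id == 0 and let the core pick the next free bit. A sketch of a custom heap registering under that policy; my_heap_ops, the chosen mask, and the ion_device_add_heap() wrapper that supplies THIS_MODULE are assumptions for illustration:

	static struct ion_heap my_heap = {
		.ops  = &my_heap_ops,		/* must provide allocate and free */
		.type = ION_HEAP_TYPE_CUSTOM,
		.name = "my_custom_heap",
		.id   = ION_HEAP_CUSTOM_START,	/* pre-picked bit; 0 means "assign one for me" */
	};

	ret = ion_device_add_heap(&my_heap);	/* presumed wrapper for __ion_device_add_heap(heap, THIS_MODULE) */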
---|
| 311 | +
| 312 | +int __ion_device_add_heap(struct ion_heap *heap, struct module *owner)
874 | 313 | {
875 | 314 | struct ion_device *dev = internal_dev;
876 | 315 | int ret;
| 316 | + struct dentry *heap_root;
| 317 | + char debug_name[64];
877 | 318 |
878 | | - if (!heap->ops->allocate || !heap->ops->free)
879 | | - pr_err("%s: can not add heap with invalid ops struct.\n",
880 | | - __func__);
| 319 | + if (!heap || !heap->ops || !heap->ops->allocate || !heap->ops->free) {
| 320 | + pr_err("%s: invalid heap or heap_ops\n", __func__);
| 321 | + ret = -EINVAL;
| 322 | + goto out;
| 323 | + }
881 | 324 |
| 325 | + heap->owner = owner;
882 | 326 | spin_lock_init(&heap->free_lock);
| 327 | + spin_lock_init(&heap->stat_lock);
883 | 328 | heap->free_list_size = 0;
884 | 329 |
885 | | - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
886 | | - ion_heap_init_deferred_free(heap);
| 330 | + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
| 331 | + ret = ion_heap_init_deferred_free(heap);
| 332 | + if (ret)
| 333 | + goto out_heap_cleanup;
| 334 | + }
887 | 335 |
888 | 336 | if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
889 | 337 | ret = ion_heap_init_shrinker(heap);
890 | | - if (ret)
| 338 | + if (ret) {
891 | 339 | pr_err("%s: Failed to register shrinker\n", __func__);
| 340 | + goto out_heap_cleanup;
| 341 | + }
892 | 342 | }
893 | 343 |
894 | | - heap->dev = dev;
| 344 | + heap->num_of_buffers = 0;
| 345 | + heap->num_of_alloc_bytes = 0;
| 346 | + heap->alloc_bytes_wm = 0;
| 347 | +
| 348 | + heap_root = debugfs_create_dir(heap->name, dev->debug_root);
| 349 | + debugfs_create_u64("num_of_buffers",
| 350 | + 0444, heap_root,
| 351 | + &heap->num_of_buffers);
| 352 | + debugfs_create_u64("num_of_alloc_bytes",
| 353 | + 0444,
| 354 | + heap_root,
| 355 | + &heap->num_of_alloc_bytes);
| 356 | + debugfs_create_u64("alloc_bytes_wm",
| 357 | + 0444,
| 358 | + heap_root,
| 359 | + &heap->alloc_bytes_wm);
| 360 | +
| 361 | + if (heap->shrinker.count_objects &&
| 362 | + heap->shrinker.scan_objects) {
| 363 | + snprintf(debug_name, 64, "%s_shrink", heap->name);
| 364 | + debugfs_create_file(debug_name,
| 365 | + 0644,
| 366 | + heap_root,
| 367 | + heap,
| 368 | + &debug_shrink_fops);
| 369 | + }
| 370 | +
| 371 | + heap->debugfs_dir = heap_root;
895 | 372 | down_write(&dev->lock);
896 | | - heap->id = heap_id++;
| 373 | + ret = ion_assign_heap_id(heap, dev);
| 374 | + if (ret) {
| 375 | + pr_err("%s: Failed to assign heap id for heap type %x\n",
| 376 | + __func__, heap->type);
| 377 | + up_write(&dev->lock);
| 378 | + goto out_debugfs_cleanup;
| 379 | + }
| 380 | +
897 | 381 | /*
898 | 382 | * use negative heap->id to reverse the priority -- when traversing
899 | 383 | * the list later attempt higher id numbers first
.. | ..
901 | 385 | plist_node_init(&heap->node, -heap->id);
902 | 386 | plist_add(&heap->node, &dev->heaps);
903 | 387 |
904 | | - if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
905 | | - char debug_name[64];
906 | | -
907 | | - snprintf(debug_name, 64, "%s_shrink", heap->name);
908 | | - debugfs_create_file(debug_name, 0644, dev->debug_root,
909 | | - heap, &debug_shrink_fops);
910 | | - }
911 | | -
912 | | - if (heap->debug_show) {
913 | | - char debug_name[64];
914 | | -
915 | | - snprintf(debug_name, 64, "%s_stats", heap->name);
916 | | - debugfs_create_file(debug_name, 0644, dev->debug_root,
917 | | - heap, &debug_heap_fops);
918 | | - }
919 | | -
920 | | - dev->heap_cnt++;
921 | 388 | up_write(&dev->lock);
922 | 389 |
923 | | - pr_info("%s: %s id=%d type=%d\n", __func__, heap->name, heap->id, heap->type);
| 390 | + return 0;
| 391 | +
| 392 | +out_debugfs_cleanup:
| 393 | + debugfs_remove_recursive(heap->debugfs_dir);
| 394 | +out_heap_cleanup:
| 395 | + ion_heap_cleanup(heap);
| 396 | +out:
| 397 | + return ret;
924 | 398 | }
925 | | -EXPORT_SYMBOL(ion_device_add_heap);
| 399 | +EXPORT_SYMBOL_GPL(__ion_device_add_heap);
| 400 | +
| 401 | +void ion_device_remove_heap(struct ion_heap *heap)
| 402 | +{
| 403 | + struct ion_device *dev = internal_dev;
| 404 | +
| 405 | + if (!heap) {
| 406 | + pr_err("%s: Invalid argument\n", __func__);
| 407 | + return;
| 408 | + }
| 409 | +
| 410 | + // take semaphore and remove the heap from dev->heap list
| 411 | + down_write(&dev->lock);
| 412 | + /* So no new allocations can happen from this heap */
| 413 | + plist_del(&heap->node, &dev->heaps);
| 414 | + if (ion_heap_cleanup(heap) != 0) {
| 415 | + pr_warn("%s: failed to cleanup heap (%s)\n",
| 416 | + __func__, heap->name);
| 417 | + }
| 418 | + debugfs_remove_recursive(heap->debugfs_dir);
| 419 | + clear_bit(heap->id, dev->heap_ids);
| 420 | + dev->heap_cnt--;
| 421 | + up_write(&dev->lock);
| 422 | +}
| 423 | +EXPORT_SYMBOL_GPL(ion_device_remove_heap);
---|
926 | 424 |
927 | 425 | static ssize_t
928 | 426 | total_heaps_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
929 | 427 | char *buf)
930 | 428 | {
931 | | - u64 size_in_bytes = atomic_long_read(&total_heap_bytes);
932 | | -
933 | | - return sprintf(buf, "%llu\n", div_u64(size_in_bytes, 1024));
| 429 | + return sprintf(buf, "%llu\n",
| 430 | + div_u64(ion_get_total_heap_bytes(), 1024));
934 | 431 | }
935 | 432 |
936 | 433 | static ssize_t
937 | 434 | total_pools_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
938 | 435 | char *buf)
939 | 436 | {
940 | | - u64 size_in_bytes = ion_page_pool_nr_pages() * PAGE_SIZE;
| 437 | + struct ion_device *dev = internal_dev;
| 438 | + struct ion_heap *heap;
| 439 | + u64 total_pages = 0;
941 | 440 |
942 | | - return sprintf(buf, "%llu\n", div_u64(size_in_bytes, 1024));
| 441 | + down_read(&dev->lock);
| 442 | + plist_for_each_entry(heap, &dev->heaps, node)
| 443 | + if (heap->ops->get_pool_size)
| 444 | + total_pages += heap->ops->get_pool_size(heap);
| 445 | + up_read(&dev->lock);
| 446 | +
| 447 | + return sprintf(buf, "%llu\n", total_pages * (PAGE_SIZE / 1024));
943 | 448 | }
944 | 449 |
945 | 450 | static struct kobj_attribute total_heaps_kb_attr =
.. | ..
974 | 479 | return 0;
975 | 480 | }
976 | 481 |
977 | | -#ifdef CONFIG_DEBUG_FS
978 | | -static int ion_heaps_show(struct seq_file *s, void *unused)
979 | | -{
980 | | - struct ion_device *dev = internal_dev;
981 | | - struct ion_heap *heap;
982 | | -
983 | | - down_read(&dev->lock);
984 | | - seq_printf(s, "%s\t%s\t%s\n", "id", "type", "name");
985 | | - plist_for_each_entry(heap, &dev->heaps, node) {
986 | | - seq_printf(s, "%u\t%u\t%s\n", heap->id, heap->type, heap->name);
987 | | - }
988 | | - up_read(&dev->lock);
989 | | - return 0;
990 | | -}
991 | | -
992 | | -static int ion_heaps_open(struct inode *inode, struct file *file)
993 | | -{
994 | | - return single_open(file, ion_heaps_show, NULL);
995 | | -}
996 | | -
997 | | -static const struct file_operations ion_heaps_operations = {
998 | | - .open = ion_heaps_open,
999 | | - .read = seq_read,
1000 | | - .llseek = seq_lseek,
1001 | | - .release = single_release,
1002 | | -};
1003 | | -#endif
1004 | | -
1005 | | -static const struct platform_device_info ion_dev_info = {
1006 | | - .name = "ion",
1007 | | - .id = PLATFORM_DEVID_AUTO,
1008 | | - .dma_mask = DMA_BIT_MASK(32),
1009 | | -};
1010 | | -
1011 | | -static void ion_device_register(void)
1012 | | -{
1013 | | - struct platform_device *pdev;
1014 | | - int ret;
1015 | | -
1016 | | - pdev = platform_device_register_full(&ion_dev_info);
1017 | | - if (pdev) {
1018 | | - ret = of_dma_configure(&pdev->dev, NULL, true);
1019 | | - if (ret) {
1020 | | - platform_device_unregister(pdev);
1021 | | - pdev = NULL;
1022 | | - }
1023 | | - }
1024 | | -
1025 | | - ion_dev = pdev ? &pdev->dev : NULL;
1026 | | -}
1027 | | -
1028 | 482 | static int ion_device_create(void)
1029 | 483 | {
1030 | 484 | struct ion_device *idev;
.. | ..
1051 | 505 | }
1052 | 506 |
1053 | 507 | idev->debug_root = debugfs_create_dir("ion", NULL);
1054 | | -#ifdef CONFIG_DEBUG_FS
1055 | | - debugfs_create_file("heaps", 0444, idev->debug_root, NULL,
1056 | | - &ion_heaps_operations);
1057 | | -#endif
1058 | | - idev->buffers = RB_ROOT;
1059 | | - mutex_init(&idev->buffer_lock);
1060 | 508 | init_rwsem(&idev->lock);
1061 | 509 | plist_head_init(&idev->heaps);
1062 | 510 | internal_dev = idev;
1063 | | - ion_device_register();
1064 | | -
1065 | 511 | return 0;
1066 | 512 |
1067 | 513 | err_sysfs:
.. | ..
1070 | 516 | kfree(idev);
1071 | 517 | return ret;
1072 | 518 | }
1073 | | -
1074 | | -#ifdef CONFIG_ION_MODULE
1075 | | -int ion_module_init(void)
1076 | | -{
1077 | | - int ret;
1078 | | -
1079 | | - ret = ion_device_create();
1080 | | -#ifdef CONFIG_ION_SYSTEM_HEAP
1081 | | - if (ret)
1082 | | - return ret;
1083 | | -
1084 | | - ret = ion_system_heap_create();
1085 | | - if (ret)
1086 | | - return ret;
1087 | | -
1088 | | - ret = ion_system_contig_heap_create();
1089 | | -#endif
1090 | | -#ifdef CONFIG_ION_CMA_HEAP
1091 | | - if (ret)
1092 | | - return ret;
1093 | | -
1094 | | - ret = ion_add_cma_heaps();
1095 | | -#endif
1096 | | -#ifdef CONFIG_ION_PROTECTED_HEAP
1097 | | - if (ret)
1098 | | - return ret;
1099 | | -
1100 | | - ret = ion_protected_heap_create();
1101 | | -#endif
1102 | | - return ret;
1103 | | -}
1104 | | -
1105 | | -module_init(ion_module_init);
1106 | | -#else
1107 | 519 | subsys_initcall(ion_device_create);
1108 | | -#endif
1109 | | -
1110 | | -MODULE_LICENSE("GPL v2");
1111 | | -MODULE_DESCRIPTION("Ion memory allocator");
---|