.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Framework for buffer objects that can be shared across devices/subsystems. |
---|
3 | 4 | * |
---|
.. | .. |
---|
8 | 9 | * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and |
---|
9 | 10 | * Daniel Vetter <daniel@ffwll.ch> for their support in creation and |
---|
10 | 11 | * refining of this idea. |
---|
11 | | - * |
---|
12 | | - * This program is free software; you can redistribute it and/or modify it |
---|
13 | | - * under the terms of the GNU General Public License version 2 as published by |
---|
14 | | - * the Free Software Foundation. |
---|
15 | | - * |
---|
16 | | - * This program is distributed in the hope that it will be useful, but WITHOUT |
---|
17 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
---|
18 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
19 | | - * more details. |
---|
20 | | - * |
---|
21 | | - * You should have received a copy of the GNU General Public License along with |
---|
22 | | - * this program. If not, see <http://www.gnu.org/licenses/>. |
---|
23 | 12 | */ |
---|
24 | 13 | |
---|
25 | 14 | #include <linux/fs.h> |
---|
.. | .. |
---|
32 | 21 | #include <linux/module.h> |
---|
33 | 22 | #include <linux/seq_file.h> |
---|
34 | 23 | #include <linux/poll.h> |
---|
35 | | -#include <linux/reservation.h> |
---|
| 24 | +#include <linux/dma-resv.h> |
---|
36 | 25 | #include <linux/mm.h> |
---|
37 | | -#include <linux/sched/signal.h> |
---|
38 | | -#include <linux/fdtable.h> |
---|
39 | | -#include <linux/hashtable.h> |
---|
40 | | -#include <linux/list_sort.h> |
---|
41 | 26 | #include <linux/mount.h> |
---|
42 | | -#include <linux/cache.h> |
---|
| 27 | +#include <linux/pseudo_fs.h> |
---|
| 28 | +#include <linux/sched/task.h> |
---|
43 | 29 | |
---|
44 | 30 | #include <uapi/linux/dma-buf.h> |
---|
45 | 31 | #include <uapi/linux/magic.h> |
---|
46 | 32 | |
---|
47 | | -static inline int is_dma_buf_file(struct file *); |
---|
48 | | - |
---|
49 | | -#ifdef CONFIG_ARCH_ROCKCHIP |
---|
50 | | -struct dma_buf_callback { |
---|
51 | | - struct list_head list; |
---|
52 | | - void (*callback)(void *); |
---|
53 | | - void *data; |
---|
54 | | -}; |
---|
55 | | -#endif |
---|
| 33 | +#include "dma-buf-sysfs-stats.h" |
---|
56 | 34 | |
---|
57 | 35 | struct dma_buf_list { |
---|
58 | 36 | struct list_head head; |
---|
.. | .. |
---|
60 | 38 | }; |
---|
61 | 39 | |
---|
62 | 40 | static struct dma_buf_list db_list; |
---|
| 41 | + |
---|
| 42 | +/* |
---|
| 43 | + * Traverse db_list and invoke the callback on each dmabuf so the |
---|
| 44 | + * caller can extract the required info from every buffer; iteration |
---|
| 45 | + * stops early if the callback returns non-zero. |
---|
| 46 | + */ |
---|
| 47 | +int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf, |
---|
| 48 | + void *private), void *private) |
---|
| 49 | +{ |
---|
| 50 | + struct dma_buf *buf; |
---|
| 51 | + int ret = mutex_lock_interruptible(&db_list.lock); |
---|
| 52 | + |
---|
| 53 | + if (ret) |
---|
| 54 | + return ret; |
---|
| 55 | + |
---|
| 56 | + list_for_each_entry(buf, &db_list.head, list_node) { |
---|
| 57 | + ret = callback(buf, private); |
---|
| 58 | + if (ret) |
---|
| 59 | + break; |
---|
| 60 | + } |
---|
| 61 | + mutex_unlock(&db_list.lock); |
---|
| 62 | + return ret; |
---|
| 63 | +} |
---|
| 64 | +EXPORT_SYMBOL_GPL(get_each_dmabuf); |
---|
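As a quick illustration of the iterator added above, here is a hypothetical callback (the names `sum_dmabuf_size` and `report_dmabuf_total` are not part of this patch) that tallies the size of every exported buffer. Note that the callback runs with `db_list.lock` held, so it must not sleep on dma-buf paths that take that lock again:

```c
#include <linux/dma-buf.h>

/* Hypothetical sketch, not part of this patch: total up all dma-buf sizes. */
static int sum_dmabuf_size(const struct dma_buf *dmabuf, void *private)
{
	size_t *total = private;

	*total += dmabuf->size;
	return 0;		/* returning non-zero would stop the walk */
}

static size_t report_dmabuf_total(void)
{
	size_t total = 0;

	/* get_each_dmabuf() runs the callback under db_list.lock */
	if (get_each_dmabuf(sum_dmabuf_size, &total))
		return 0;	/* lock acquisition was interrupted */
	return total;
}
```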
| 65 | + |
---|
| 66 | +#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG) |
---|
| 67 | +static size_t db_total_size; |
---|
| 68 | +static size_t db_peak_size; |
---|
| 69 | + |
---|
| 70 | +void dma_buf_reset_peak_size(void) |
---|
| 71 | +{ |
---|
| 72 | + mutex_lock(&db_list.lock); |
---|
| 73 | + db_peak_size = 0; |
---|
| 74 | + mutex_unlock(&db_list.lock); |
---|
| 75 | +} |
---|
| 76 | +EXPORT_SYMBOL_GPL(dma_buf_reset_peak_size); |
---|
| 77 | + |
---|
| 78 | +size_t dma_buf_get_peak_size(void) |
---|
| 79 | +{ |
---|
| 80 | + size_t sz; |
---|
| 81 | + |
---|
| 82 | + mutex_lock(&db_list.lock); |
---|
| 83 | + sz = db_peak_size; |
---|
| 84 | + mutex_unlock(&db_list.lock); |
---|
| 85 | + |
---|
| 86 | + return sz; |
---|
| 87 | +} |
---|
| 88 | +EXPORT_SYMBOL_GPL(dma_buf_get_peak_size); |
---|
| 89 | + |
---|
| 90 | +size_t dma_buf_get_total_size(void) |
---|
| 91 | +{ |
---|
| 92 | + size_t sz; |
---|
| 93 | + |
---|
| 94 | + mutex_lock(&db_list.lock); |
---|
| 95 | + sz = db_total_size; |
---|
| 96 | + mutex_unlock(&db_list.lock); |
---|
| 97 | + |
---|
| 98 | + return sz; |
---|
| 99 | +} |
---|
| 100 | +EXPORT_SYMBOL_GPL(dma_buf_get_total_size); |
---|
| 101 | +#endif |
---|
63 | 102 | |
---|
64 | 103 | static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) |
---|
65 | 104 | { |
---|
.. | .. |
---|
80 | 119 | static void dma_buf_release(struct dentry *dentry) |
---|
81 | 120 | { |
---|
82 | 121 | struct dma_buf *dmabuf; |
---|
83 | | -#ifdef CONFIG_ARCH_ROCKCHIP |
---|
84 | | - struct dma_buf_callback *cb, *tmp; |
---|
85 | | -#endif |
---|
| 122 | +#ifdef CONFIG_DMABUF_CACHE |
---|
86 | 123 | int dtor_ret = 0; |
---|
| 124 | +#endif |
---|
87 | 125 | |
---|
88 | 126 | dmabuf = dentry->d_fsdata; |
---|
89 | 127 | if (unlikely(!dmabuf)) |
---|
.. | .. |
---|
101 | 139 | */ |
---|
102 | 140 | BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); |
---|
103 | 141 | |
---|
104 | | -#ifdef CONFIG_ARCH_ROCKCHIP |
---|
105 | | - mutex_lock(&dmabuf->release_lock); |
---|
106 | | - list_for_each_entry_safe(cb, tmp, &dmabuf->release_callbacks, list) { |
---|
107 | | - if (cb->callback) |
---|
108 | | - cb->callback(cb->data); |
---|
109 | | - list_del(&cb->list); |
---|
110 | | - kfree(cb); |
---|
111 | | - } |
---|
112 | | - mutex_unlock(&dmabuf->release_lock); |
---|
113 | | -#endif |
---|
| 142 | + dma_buf_stats_teardown(dmabuf); |
---|
| 143 | +#ifdef CONFIG_DMABUF_CACHE |
---|
114 | 144 | if (dmabuf->dtor) |
---|
115 | 145 | dtor_ret = dmabuf->dtor(dmabuf, dmabuf->dtor_data); |
---|
116 | 146 | |
---|
117 | 147 | if (!dtor_ret) |
---|
| 148 | +#endif |
---|
118 | 149 | dmabuf->ops->release(dmabuf); |
---|
119 | | - else |
---|
120 | | - pr_warn_ratelimited("Leaking dmabuf %s because destructor failed error:%d\n", |
---|
121 | | - dmabuf->name, dtor_ret); |
---|
122 | 150 | |
---|
123 | | - if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) |
---|
124 | | - reservation_object_fini(dmabuf->resv); |
---|
| 151 | + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) |
---|
| 152 | + dma_resv_fini(dmabuf->resv); |
---|
125 | 153 | |
---|
| 154 | + WARN_ON(!list_empty(&dmabuf->attachments)); |
---|
126 | 155 | module_put(dmabuf->owner); |
---|
127 | 156 | kfree(dmabuf->name); |
---|
128 | 157 | kfree(dmabuf); |
---|
.. | .. |
---|
138 | 167 | dmabuf = file->private_data; |
---|
139 | 168 | |
---|
140 | 169 | mutex_lock(&db_list.lock); |
---|
| 170 | +#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG) |
---|
| 171 | + db_total_size -= dmabuf->size; |
---|
| 172 | +#endif |
---|
141 | 173 | list_del(&dmabuf->list_node); |
---|
142 | 174 | mutex_unlock(&db_list.lock); |
---|
143 | 175 | |
---|
.. | .. |
---|
151 | 183 | |
---|
152 | 184 | static struct vfsmount *dma_buf_mnt; |
---|
153 | 185 | |
---|
154 | | -static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type, |
---|
155 | | - int flags, const char *name, void *data) |
---|
| 186 | +static int dma_buf_fs_init_context(struct fs_context *fc) |
---|
156 | 187 | { |
---|
157 | | - return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops, |
---|
158 | | - DMA_BUF_MAGIC); |
---|
| 188 | + struct pseudo_fs_context *ctx; |
---|
| 189 | + |
---|
| 190 | + ctx = init_pseudo(fc, DMA_BUF_MAGIC); |
---|
| 191 | + if (!ctx) |
---|
| 192 | + return -ENOMEM; |
---|
| 193 | + ctx->dops = &dma_buf_dentry_ops; |
---|
| 194 | + return 0; |
---|
159 | 195 | } |
---|
160 | 196 | |
---|
161 | 197 | static struct file_system_type dma_buf_fs_type = { |
---|
162 | 198 | .name = "dmabuf", |
---|
163 | | - .mount = dma_buf_fs_mount, |
---|
| 199 | + .init_fs_context = dma_buf_fs_init_context, |
---|
164 | 200 | .kill_sb = kill_anon_super, |
---|
165 | 201 | }; |
---|
166 | 202 | |
---|
.. | .. |
---|
212 | 248 | } |
---|
213 | 249 | |
---|
214 | 250 | /** |
---|
215 | | - * DOC: fence polling |
---|
| 251 | + * DOC: implicit fence polling |
---|
216 | 252 | * |
---|
217 | 253 | * To support cross-device and cross-driver synchronization of buffer access |
---|
218 | | - * implicit fences (represented internally in the kernel with &struct fence) can |
---|
219 | | - * be attached to a &dma_buf. The glue for that and a few related things are |
---|
220 | | - * provided in the &reservation_object structure. |
---|
| 254 | + * implicit fences (represented internally in the kernel with &struct dma_fence) |
---|
| 255 | + * can be attached to a &dma_buf. The glue for that and a few related things are |
---|
| 256 | + * provided in the &dma_resv structure. |
---|
221 | 257 | * |
---|
222 | 258 | * Userspace can query the state of these implicitly tracked fences using poll() |
---|
223 | 259 | * and related system calls: |
---|
.. | .. |
---|
247 | 283 | static __poll_t dma_buf_poll(struct file *file, poll_table *poll) |
---|
248 | 284 | { |
---|
249 | 285 | struct dma_buf *dmabuf; |
---|
250 | | - struct reservation_object *resv; |
---|
251 | | - struct reservation_object_list *fobj; |
---|
| 286 | + struct dma_resv *resv; |
---|
| 287 | + struct dma_resv_list *fobj; |
---|
252 | 288 | struct dma_fence *fence_excl; |
---|
253 | 289 | __poll_t events; |
---|
254 | 290 | unsigned shared_count, seq; |
---|
.. | .. |
---|
266 | 302 | return 0; |
---|
267 | 303 | |
---|
268 | 304 | retry: |
---|
269 | | - seq = read_seqbegin(&resv->seq); |
---|
| 305 | + seq = read_seqcount_begin(&resv->seq); |
---|
270 | 306 | rcu_read_lock(); |
---|
271 | 307 | |
---|
272 | 308 | fobj = rcu_dereference(resv->fence); |
---|
.. | .. |
---|
275 | 311 | else |
---|
276 | 312 | shared_count = 0; |
---|
277 | 313 | fence_excl = rcu_dereference(resv->fence_excl); |
---|
278 | | - if (read_seqretry(&resv->seq, seq)) { |
---|
| 314 | + if (read_seqcount_retry(&resv->seq, seq)) { |
---|
279 | 315 | rcu_read_unlock(); |
---|
280 | 316 | goto retry; |
---|
281 | 317 | } |
---|
.. | .. |
---|
363 | 399 | return events; |
---|
364 | 400 | } |
---|
365 | 401 | |
---|
| 402 | +static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name) |
---|
| 403 | +{ |
---|
| 404 | + spin_lock(&dmabuf->name_lock); |
---|
| 405 | + kfree(dmabuf->name); |
---|
| 406 | + dmabuf->name = name; |
---|
| 407 | + spin_unlock(&dmabuf->name_lock); |
---|
| 408 | + |
---|
| 409 | + return 0; |
---|
| 410 | +} |
---|
| 411 | + |
---|
366 | 412 | /** |
---|
367 | 413 | * dma_buf_set_name - Set a name to a specific dma_buf to track the usage. |
---|
368 | | - * The name of the dma-buf buffer can only be set when the dma-buf is not |
---|
369 | | - * attached to any devices. It could theoritically support changing the |
---|
370 | | - * name of the dma-buf if the same piece of memory is used for multiple |
---|
371 | | - * purpose between different devices. |
---|
| 414 | + * The name of the dma-buf can be changed, e.g. when the same piece of |
---|
| 415 | + * memory is reused for multiple purposes by different devices. |
---|
372 | 416 | * |
---|
373 | | - * @dmabuf [in] dmabuf buffer that will be renamed. |
---|
374 | | - * @buf: [in] A piece of userspace memory that contains the name of |
---|
375 | | - * the dma-buf. |
---|
| 417 | + * @dmabuf: [in] dmabuf buffer that will be renamed. |
---|
| 418 | + * @buf: [in] A piece of userspace memory that contains the name of |
---|
| 419 | + * the dma-buf. |
---|
376 | 420 | * |
---|
377 | 421 | * Returns 0 on success. If the dma-buf buffer is already attached to |
---|
378 | 422 | * devices, return -EBUSY. |
---|
379 | 423 | * |
---|
380 | 424 | */ |
---|
381 | | -static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) |
---|
| 425 | +long dma_buf_set_name(struct dma_buf *dmabuf, const char *name) |
---|
| 426 | +{ |
---|
| 427 | + long ret = 0; |
---|
| 428 | + char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL); |
---|
| 429 | + |
---|
| 430 | + if (!buf) |
---|
| 431 | + return -ENOMEM; |
---|
| 432 | + |
---|
| 433 | + ret = _dma_buf_set_name(dmabuf, buf); |
---|
| 434 | + if (ret) |
---|
| 435 | + kfree(buf); |
---|
| 436 | + |
---|
| 437 | + return ret; |
---|
| 438 | +} |
---|
| 439 | +EXPORT_SYMBOL_GPL(dma_buf_set_name); |
---|
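From userspace the same rename path is reached through the SET_NAME ioctl; a minimal sketch, assuming a valid dma-buf fd (`name_dmabuf` is an illustrative helper, not part of this patch):

```c
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Sketch: name a dma-buf from userspace via the SET_NAME ioctl. */
static int name_dmabuf(int fd, const char *name)
{
	/* The kernel copies at most DMA_BUF_NAME_LEN bytes via strndup_user(). */
	return ioctl(fd, DMA_BUF_SET_NAME, name);
}
```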
| 440 | + |
---|
| 441 | +static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf) |
---|
382 | 442 | { |
---|
383 | 443 | char *name = strndup_user(buf, DMA_BUF_NAME_LEN); |
---|
384 | 444 | long ret = 0; |
---|
.. | .. |
---|
386 | 446 | if (IS_ERR(name)) |
---|
387 | 447 | return PTR_ERR(name); |
---|
388 | 448 | |
---|
389 | | - mutex_lock(&dmabuf->lock); |
---|
390 | | - spin_lock(&dmabuf->name_lock); |
---|
391 | | - if (!list_empty(&dmabuf->attachments)) { |
---|
392 | | - ret = -EBUSY; |
---|
| 449 | + ret = _dma_buf_set_name(dmabuf, name); |
---|
| 450 | + if (ret) |
---|
393 | 451 | kfree(name); |
---|
394 | | - goto out_unlock; |
---|
395 | | - } |
---|
396 | | - kfree(dmabuf->name); |
---|
397 | | - dmabuf->name = name; |
---|
398 | 452 | |
---|
399 | | -out_unlock: |
---|
400 | | - spin_unlock(&dmabuf->name_lock); |
---|
401 | | - mutex_unlock(&dmabuf->lock); |
---|
402 | 453 | return ret; |
---|
403 | 454 | } |
---|
404 | | - |
---|
405 | | -static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf, |
---|
406 | | - enum dma_data_direction direction); |
---|
407 | | - |
---|
408 | | - |
---|
409 | | -static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf, |
---|
410 | | - enum dma_data_direction direction); |
---|
411 | 455 | |
---|
412 | 456 | static long dma_buf_ioctl(struct file *file, |
---|
413 | 457 | unsigned int cmd, unsigned long arg) |
---|
.. | .. |
---|
415 | 459 | struct dma_buf *dmabuf; |
---|
416 | 460 | struct dma_buf_sync sync; |
---|
417 | 461 | struct dma_buf_sync_partial sync_p; |
---|
418 | | - enum dma_data_direction dir; |
---|
| 462 | + enum dma_data_direction direction; |
---|
419 | 463 | int ret; |
---|
420 | 464 | |
---|
421 | 465 | dmabuf = file->private_data; |
---|
.. | .. |
---|
430 | 474 | |
---|
431 | 475 | switch (sync.flags & DMA_BUF_SYNC_RW) { |
---|
432 | 476 | case DMA_BUF_SYNC_READ: |
---|
433 | | - dir = DMA_FROM_DEVICE; |
---|
| 477 | + direction = DMA_FROM_DEVICE; |
---|
434 | 478 | break; |
---|
435 | 479 | case DMA_BUF_SYNC_WRITE: |
---|
436 | | - dir = DMA_TO_DEVICE; |
---|
| 480 | + direction = DMA_TO_DEVICE; |
---|
437 | 481 | break; |
---|
438 | 482 | case DMA_BUF_SYNC_RW: |
---|
439 | | - dir = DMA_BIDIRECTIONAL; |
---|
| 483 | + direction = DMA_BIDIRECTIONAL; |
---|
440 | 484 | break; |
---|
441 | 485 | default: |
---|
442 | 486 | return -EINVAL; |
---|
443 | 487 | } |
---|
444 | 488 | |
---|
445 | 489 | if (sync.flags & DMA_BUF_SYNC_END) |
---|
446 | | - if (sync.flags & DMA_BUF_SYNC_USER_MAPPED) |
---|
447 | | - ret = dma_buf_end_cpu_access_umapped(dmabuf, |
---|
448 | | - dir); |
---|
449 | | - else |
---|
450 | | - ret = dma_buf_end_cpu_access(dmabuf, dir); |
---|
| 490 | + ret = dma_buf_end_cpu_access(dmabuf, direction); |
---|
451 | 491 | else |
---|
452 | | - if (sync.flags & DMA_BUF_SYNC_USER_MAPPED) |
---|
453 | | - ret = dma_buf_begin_cpu_access_umapped(dmabuf, |
---|
454 | | - dir); |
---|
455 | | - else |
---|
456 | | - ret = dma_buf_begin_cpu_access(dmabuf, dir); |
---|
| 492 | + ret = dma_buf_begin_cpu_access(dmabuf, direction); |
---|
457 | 493 | |
---|
458 | 494 | return ret; |
---|
459 | 495 | |
---|
460 | 496 | case DMA_BUF_SET_NAME_A: |
---|
461 | 497 | case DMA_BUF_SET_NAME_B: |
---|
462 | | - return dma_buf_set_name(dmabuf, (const char __user *)arg); |
---|
| 498 | + return dma_buf_set_name_user(dmabuf, (const char __user *)arg); |
---|
463 | 499 | |
---|
464 | 500 | case DMA_BUF_IOCTL_SYNC_PARTIAL: |
---|
465 | 501 | if (copy_from_user(&sync_p, (void __user *) arg, sizeof(sync_p))) |
---|
466 | 502 | return -EFAULT; |
---|
467 | 503 | |
---|
468 | 504 | if (sync_p.len == 0) |
---|
469 | | - return -EINVAL; |
---|
470 | | - |
---|
471 | | - if ((sync_p.offset % cache_line_size()) || (sync_p.len % cache_line_size())) |
---|
472 | | - return -EINVAL; |
---|
| 505 | + return 0; |
---|
473 | 506 | |
---|
474 | 507 | if (sync_p.len > dmabuf->size || sync_p.offset > dmabuf->size - sync_p.len) |
---|
475 | 508 | return -EINVAL; |
---|
.. | .. |
---|
479 | 512 | |
---|
480 | 513 | switch (sync_p.flags & DMA_BUF_SYNC_RW) { |
---|
481 | 514 | case DMA_BUF_SYNC_READ: |
---|
482 | | - dir = DMA_FROM_DEVICE; |
---|
| 515 | + direction = DMA_FROM_DEVICE; |
---|
483 | 516 | break; |
---|
484 | 517 | case DMA_BUF_SYNC_WRITE: |
---|
485 | | - dir = DMA_TO_DEVICE; |
---|
| 518 | + direction = DMA_TO_DEVICE; |
---|
486 | 519 | break; |
---|
487 | 520 | case DMA_BUF_SYNC_RW: |
---|
488 | | - dir = DMA_BIDIRECTIONAL; |
---|
| 521 | + direction = DMA_BIDIRECTIONAL; |
---|
489 | 522 | break; |
---|
490 | 523 | default: |
---|
491 | 524 | return -EINVAL; |
---|
492 | 525 | } |
---|
493 | 526 | |
---|
494 | 527 | if (sync_p.flags & DMA_BUF_SYNC_END) |
---|
495 | | - ret = dma_buf_end_cpu_access_partial(dmabuf, dir, |
---|
| 528 | + ret = dma_buf_end_cpu_access_partial(dmabuf, direction, |
---|
496 | 529 | sync_p.offset, |
---|
497 | 530 | sync_p.len); |
---|
498 | 531 | else |
---|
499 | | - ret = dma_buf_begin_cpu_access_partial(dmabuf, dir, |
---|
| 532 | + ret = dma_buf_begin_cpu_access_partial(dmabuf, direction, |
---|
500 | 533 | sync_p.offset, |
---|
501 | 534 | sync_p.len); |
---|
502 | 535 | |
---|
.. | .. |
---|
527 | 560 | .llseek = dma_buf_llseek, |
---|
528 | 561 | .poll = dma_buf_poll, |
---|
529 | 562 | .unlocked_ioctl = dma_buf_ioctl, |
---|
530 | | -#ifdef CONFIG_COMPAT |
---|
531 | | - .compat_ioctl = dma_buf_ioctl, |
---|
532 | | -#endif |
---|
| 563 | + .compat_ioctl = compat_ptr_ioctl, |
---|
533 | 564 | .show_fdinfo = dma_buf_show_fdinfo, |
---|
534 | 565 | }; |
---|
535 | 566 | |
---|
536 | 567 | /* |
---|
537 | 568 | * is_dma_buf_file - Check if struct file* is associated with dma_buf |
---|
538 | 569 | */ |
---|
539 | | -static inline int is_dma_buf_file(struct file *file) |
---|
| 570 | +int is_dma_buf_file(struct file *file) |
---|
540 | 571 | { |
---|
541 | 572 | return file->f_op == &dma_buf_fops; |
---|
542 | 573 | } |
---|
| 574 | +EXPORT_SYMBOL_GPL(is_dma_buf_file); |
---|
543 | 575 | |
---|
544 | 576 | static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags) |
---|
545 | 577 | { |
---|
| 578 | + static atomic64_t dmabuf_inode = ATOMIC64_INIT(0); |
---|
546 | 579 | struct file *file; |
---|
547 | 580 | struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb); |
---|
548 | 581 | |
---|
.. | .. |
---|
552 | 585 | inode->i_size = dmabuf->size; |
---|
553 | 586 | inode_set_bytes(inode, dmabuf->size); |
---|
554 | 587 | |
---|
| 588 | + /* |
---|
| 589 | + * The ->i_ino acquired from get_next_ino() is not unique and is |
---|
| 590 | + * therefore not suitable for use as the dentry name by dmabuf stats. |
---|
| 591 | + * Override ->i_ino with a unique, dmabuffs-specific |
---|
| 592 | + * value. |
---|
| 593 | + */ |
---|
| 594 | + inode->i_ino = atomic64_add_return(1, &dmabuf_inode); |
---|
555 | 595 | file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", |
---|
556 | 596 | flags, &dma_buf_fops); |
---|
557 | 597 | if (IS_ERR(file)) |
---|
.. | .. |
---|
567 | 607 | return file; |
---|
568 | 608 | } |
---|
569 | 609 | |
---|
570 | | -#ifdef CONFIG_ARCH_ROCKCHIP |
---|
571 | | -void *dma_buf_get_release_callback_data(struct dma_buf *dmabuf, |
---|
572 | | - void (*callback)(void *)) |
---|
| 610 | +static void dma_buf_set_default_name(struct dma_buf *dmabuf) |
---|
573 | 611 | { |
---|
574 | | - struct dma_buf_callback *cb, *tmp; |
---|
575 | | - void *result = NULL; |
---|
| 612 | + char task_comm[TASK_COMM_LEN]; |
---|
| 613 | + char *name; |
---|
576 | 614 | |
---|
577 | | - mutex_lock(&dmabuf->release_lock); |
---|
578 | | - list_for_each_entry_safe(cb, tmp, &dmabuf->release_callbacks, list) { |
---|
579 | | - if (cb->callback == callback) { |
---|
580 | | - result = cb->data; |
---|
581 | | - break; |
---|
582 | | - } |
---|
583 | | - } |
---|
584 | | - mutex_unlock(&dmabuf->release_lock); |
---|
585 | | - |
---|
586 | | - return result; |
---|
| 615 | + get_task_comm(task_comm, current->group_leader); |
---|
| 616 | + name = kasprintf(GFP_KERNEL, "%d-%s", current->tgid, task_comm); |
---|
| 617 | + dma_buf_set_name(dmabuf, name); |
---|
| 618 | + kfree(name); |
---|
587 | 619 | } |
---|
588 | | -EXPORT_SYMBOL_GPL(dma_buf_get_release_callback_data); |
---|
589 | | - |
---|
590 | | -int dma_buf_set_release_callback(struct dma_buf *dmabuf, |
---|
591 | | - void (*callback)(void *), void *data) |
---|
592 | | -{ |
---|
593 | | - struct dma_buf_callback *cb; |
---|
594 | | - |
---|
595 | | - if (WARN_ON(dma_buf_get_release_callback_data(dmabuf, callback))) |
---|
596 | | - return -EINVAL; |
---|
597 | | - |
---|
598 | | - cb = kzalloc(sizeof(*cb), GFP_KERNEL); |
---|
599 | | - if (!cb) |
---|
600 | | - return -ENOMEM; |
---|
601 | | - |
---|
602 | | - cb->callback = callback; |
---|
603 | | - cb->data = data; |
---|
604 | | - mutex_lock(&dmabuf->release_lock); |
---|
605 | | - list_add_tail(&cb->list, &dmabuf->release_callbacks); |
---|
606 | | - mutex_unlock(&dmabuf->release_lock); |
---|
607 | | - |
---|
608 | | - return 0; |
---|
609 | | -} |
---|
610 | | -EXPORT_SYMBOL_GPL(dma_buf_set_release_callback); |
---|
611 | | -#endif |
---|
612 | 620 | |
---|
613 | 621 | /** |
---|
614 | 622 | * DOC: dma buf device access |
---|
.. | .. |
---|
661 | 669 | struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) |
---|
662 | 670 | { |
---|
663 | 671 | struct dma_buf *dmabuf; |
---|
664 | | - struct reservation_object *resv = exp_info->resv; |
---|
| 672 | + struct dma_resv *resv = exp_info->resv; |
---|
665 | 673 | struct file *file; |
---|
666 | 674 | size_t alloc_size = sizeof(struct dma_buf); |
---|
667 | 675 | int ret; |
---|
668 | 676 | |
---|
669 | 677 | if (!exp_info->resv) |
---|
670 | | - alloc_size += sizeof(struct reservation_object); |
---|
| 678 | + alloc_size += sizeof(struct dma_resv); |
---|
671 | 679 | else |
---|
672 | 680 | /* prevent &dma_buf[1] == dma_buf->resv */ |
---|
673 | 681 | alloc_size += 1; |
---|
.. | .. |
---|
679 | 687 | || !exp_info->ops->release)) { |
---|
680 | 688 | return ERR_PTR(-EINVAL); |
---|
681 | 689 | } |
---|
| 690 | + |
---|
| 691 | + if (WARN_ON(exp_info->ops->cache_sgt_mapping && |
---|
| 692 | + (exp_info->ops->pin || exp_info->ops->unpin))) |
---|
| 693 | + return ERR_PTR(-EINVAL); |
---|
| 694 | + |
---|
| 695 | + if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) |
---|
| 696 | + return ERR_PTR(-EINVAL); |
---|
682 | 697 | |
---|
683 | 698 | if (!try_module_get(exp_info->owner)) |
---|
684 | 699 | return ERR_PTR(-ENOENT); |
---|
.. | .. |
---|
694 | 709 | dmabuf->size = exp_info->size; |
---|
695 | 710 | dmabuf->exp_name = exp_info->exp_name; |
---|
696 | 711 | dmabuf->owner = exp_info->owner; |
---|
| 712 | + spin_lock_init(&dmabuf->name_lock); |
---|
| 713 | +#ifdef CONFIG_DMABUF_CACHE |
---|
| 714 | + mutex_init(&dmabuf->cache_lock); |
---|
| 715 | +#endif |
---|
697 | 716 | init_waitqueue_head(&dmabuf->poll); |
---|
698 | 717 | dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; |
---|
699 | 718 | dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; |
---|
700 | | -#if defined(CONFIG_DEBUG_FS) |
---|
701 | | - dmabuf->ktime = ktime_get(); |
---|
702 | | -#endif |
---|
703 | 719 | |
---|
704 | 720 | if (!resv) { |
---|
705 | | - resv = (struct reservation_object *)&dmabuf[1]; |
---|
706 | | - reservation_object_init(resv); |
---|
| 721 | + resv = (struct dma_resv *)&dmabuf[1]; |
---|
| 722 | + dma_resv_init(resv); |
---|
707 | 723 | } |
---|
708 | 724 | dmabuf->resv = resv; |
---|
709 | 725 | |
---|
.. | .. |
---|
717 | 733 | dmabuf->file = file; |
---|
718 | 734 | |
---|
719 | 735 | mutex_init(&dmabuf->lock); |
---|
720 | | - spin_lock_init(&dmabuf->name_lock); |
---|
721 | 736 | INIT_LIST_HEAD(&dmabuf->attachments); |
---|
722 | 737 | |
---|
723 | | -#ifdef CONFIG_ARCH_ROCKCHIP |
---|
724 | | - mutex_init(&dmabuf->release_lock); |
---|
725 | | - INIT_LIST_HEAD(&dmabuf->release_callbacks); |
---|
726 | | -#endif |
---|
727 | 738 | mutex_lock(&db_list.lock); |
---|
728 | 739 | list_add(&dmabuf->list_node, &db_list.head); |
---|
| 740 | +#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG) |
---|
| 741 | + db_total_size += dmabuf->size; |
---|
| 742 | + db_peak_size = max(db_total_size, db_peak_size); |
---|
| 743 | +#endif |
---|
729 | 744 | mutex_unlock(&db_list.lock); |
---|
| 745 | + |
---|
| 746 | + ret = dma_buf_stats_setup(dmabuf); |
---|
| 747 | + if (ret) |
---|
| 748 | + goto err_sysfs; |
---|
| 749 | + |
---|
| 750 | + if (IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)) |
---|
| 751 | + dma_buf_set_default_name(dmabuf); |
---|
730 | 752 | |
---|
731 | 753 | return dmabuf; |
---|
732 | 754 | |
---|
| 755 | +err_sysfs: |
---|
| 756 | + /* |
---|
| 757 | + * Set file->f_path.dentry->d_fsdata to NULL so that when |
---|
| 758 | + * dma_buf_release() gets invoked by dentry_ops, it exits |
---|
| 759 | + * early before calling the release() dma_buf op. |
---|
| 760 | + */ |
---|
| 761 | + file->f_path.dentry->d_fsdata = NULL; |
---|
| 762 | + fput(file); |
---|
733 | 763 | err_dmabuf: |
---|
734 | 764 | kfree(dmabuf); |
---|
735 | 765 | err_module: |
---|
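For reference, a minimal exporter-side sketch of how dma_buf_export() is typically driven (`my_buffer` and `my_dmabuf_ops` are placeholders; the ops must at least provide map_dma_buf, unmap_dma_buf and release, per the WARN_ON checks above):

```c
#include <linux/dma-buf.h>
#include <linux/fcntl.h>

/* Exporter sketch, assuming my_buffer/my_dmabuf_ops are defined elsewhere. */
static struct dma_buf *my_export(struct my_buffer *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* fills exp_name/owner */

	exp_info.ops = &my_dmabuf_ops;
	exp_info.size = buf->size;		/* PAGE_SIZE aligned */
	exp_info.flags = O_RDWR | O_CLOEXEC;
	exp_info.priv = buf;

	return dma_buf_export(&exp_info);	/* ERR_PTR() on failure */
}
```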
.. | .. |
---|
808 | 838 | EXPORT_SYMBOL_GPL(dma_buf_put); |
---|
809 | 839 | |
---|
810 | 840 | /** |
---|
811 | | - * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, |
---|
| 841 | + * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally, |
---|
812 | 842 | * calls attach() of dma_buf_ops to allow device-specific attach functionality |
---|
813 | | - * @dmabuf: [in] buffer to attach device to. |
---|
814 | | - * @dev: [in] device to be attached. |
---|
| 843 | + * @dmabuf: [in] buffer to attach device to. |
---|
| 844 | + * @dev: [in] device to be attached. |
---|
| 845 | + * @importer_ops: [in] importer operations for the attachment |
---|
| 846 | + * @importer_priv: [in] importer private pointer for the attachment |
---|
815 | 847 | * |
---|
816 | 848 | * Returns struct dma_buf_attachment pointer for this attachment. Attachments |
---|
817 | 849 | * must be cleaned up by calling dma_buf_detach(). |
---|
.. | .. |
---|
825 | 857 | * accessible to @dev, and cannot be moved to a more suitable place. This is |
---|
826 | 858 | * indicated with the error code -EBUSY. |
---|
827 | 859 | */ |
---|
828 | | -struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, |
---|
829 | | - struct device *dev) |
---|
| 860 | +struct dma_buf_attachment * |
---|
| 861 | +dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, |
---|
| 862 | + const struct dma_buf_attach_ops *importer_ops, |
---|
| 863 | + void *importer_priv) |
---|
830 | 864 | { |
---|
831 | 865 | struct dma_buf_attachment *attach; |
---|
832 | 866 | int ret; |
---|
833 | 867 | |
---|
834 | 868 | if (WARN_ON(!dmabuf || !dev)) |
---|
| 869 | + return ERR_PTR(-EINVAL); |
---|
| 870 | + |
---|
| 871 | + if (WARN_ON(importer_ops && !importer_ops->move_notify)) |
---|
835 | 872 | return ERR_PTR(-EINVAL); |
---|
836 | 873 | |
---|
837 | 874 | attach = kzalloc(sizeof(*attach), GFP_KERNEL); |
---|
.. | .. |
---|
840 | 877 | |
---|
841 | 878 | attach->dev = dev; |
---|
842 | 879 | attach->dmabuf = dmabuf; |
---|
843 | | - |
---|
844 | | - mutex_lock(&dmabuf->lock); |
---|
| 880 | + if (importer_ops) |
---|
| 881 | + attach->peer2peer = importer_ops->allow_peer2peer; |
---|
| 882 | + attach->importer_ops = importer_ops; |
---|
| 883 | + attach->importer_priv = importer_priv; |
---|
845 | 884 | |
---|
846 | 885 | if (dmabuf->ops->attach) { |
---|
847 | 886 | ret = dmabuf->ops->attach(dmabuf, attach); |
---|
848 | 887 | if (ret) |
---|
849 | 888 | goto err_attach; |
---|
850 | 889 | } |
---|
| 890 | + dma_resv_lock(dmabuf->resv, NULL); |
---|
851 | 891 | list_add(&attach->node, &dmabuf->attachments); |
---|
| 892 | + dma_resv_unlock(dmabuf->resv); |
---|
852 | 893 | |
---|
853 | | - mutex_unlock(&dmabuf->lock); |
---|
| 894 | + /* When either the importer or the exporter can't handle dynamic |
---|
| 895 | + * mappings, we cache the mapping here to avoid issues with the |
---|
| 896 | + * reservation object lock. |
---|
| 897 | + */ |
---|
| 898 | + if (dma_buf_attachment_is_dynamic(attach) != |
---|
| 899 | + dma_buf_is_dynamic(dmabuf)) { |
---|
| 900 | + struct sg_table *sgt; |
---|
| 901 | + |
---|
| 902 | + if (dma_buf_is_dynamic(attach->dmabuf)) { |
---|
| 903 | + dma_resv_lock(attach->dmabuf->resv, NULL); |
---|
| 904 | + ret = dma_buf_pin(attach); |
---|
| 905 | + if (ret) |
---|
| 906 | + goto err_unlock; |
---|
| 907 | + } |
---|
| 908 | + |
---|
| 909 | + sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL); |
---|
| 910 | + if (!sgt) |
---|
| 911 | + sgt = ERR_PTR(-ENOMEM); |
---|
| 912 | + if (IS_ERR(sgt)) { |
---|
| 913 | + ret = PTR_ERR(sgt); |
---|
| 914 | + goto err_unpin; |
---|
| 915 | + } |
---|
| 916 | + if (dma_buf_is_dynamic(attach->dmabuf)) |
---|
| 917 | + dma_resv_unlock(attach->dmabuf->resv); |
---|
| 918 | + attach->sgt = sgt; |
---|
| 919 | + attach->dir = DMA_BIDIRECTIONAL; |
---|
| 920 | + } |
---|
| 921 | + |
---|
854 | 922 | return attach; |
---|
855 | 923 | |
---|
856 | 924 | err_attach: |
---|
857 | 925 | kfree(attach); |
---|
858 | | - mutex_unlock(&dmabuf->lock); |
---|
859 | 926 | return ERR_PTR(ret); |
---|
| 927 | + |
---|
| 928 | +err_unpin: |
---|
| 929 | + if (dma_buf_is_dynamic(attach->dmabuf)) |
---|
| 930 | + dma_buf_unpin(attach); |
---|
| 931 | + |
---|
| 932 | +err_unlock: |
---|
| 933 | + if (dma_buf_is_dynamic(attach->dmabuf)) |
---|
| 934 | + dma_resv_unlock(attach->dmabuf->resv); |
---|
| 935 | + |
---|
| 936 | + dma_buf_detach(dmabuf, attach); |
---|
| 937 | + return ERR_PTR(ret); |
---|
| 938 | +} |
---|
| 939 | +EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach); |
---|
| 940 | + |
---|
| 941 | +/** |
---|
| 942 | + * dma_buf_attach - Wrapper for dma_buf_dynamic_attach |
---|
| 943 | + * @dmabuf: [in] buffer to attach device to. |
---|
| 944 | + * @dev: [in] device to be attached. |
---|
| 945 | + * |
---|
| 946 | + * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static |
---|
| 947 | + * mapping. |
---|
| 948 | + */ |
---|
| 949 | +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, |
---|
| 950 | + struct device *dev) |
---|
| 951 | +{ |
---|
| 952 | + return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); |
---|
860 | 953 | } |
---|
861 | 954 | EXPORT_SYMBOL_GPL(dma_buf_attach); |
---|
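A dynamic importer wires the new importer_ops in roughly as follows (a sketch: `my_importer` and `my_invalidate_mappings` are hypothetical; only move_notify is mandatory for a dynamic attachment, as the WARN_ON above enforces):

```c
#include <linux/dma-buf.h>

static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;

	/* Mappings are about to become stale; remap on next use. */
	my_invalidate_mappings(imp);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

/* attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp); */
```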
862 | 955 | |
---|
.. | .. |
---|
873 | 966 | if (WARN_ON(!dmabuf || !attach)) |
---|
874 | 967 | return; |
---|
875 | 968 | |
---|
876 | | - mutex_lock(&dmabuf->lock); |
---|
| 969 | + if (attach->sgt) { |
---|
| 970 | + if (dma_buf_is_dynamic(attach->dmabuf)) |
---|
| 971 | + dma_resv_lock(attach->dmabuf->resv, NULL); |
---|
| 972 | + |
---|
| 973 | + dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir); |
---|
| 974 | + |
---|
| 975 | + if (dma_buf_is_dynamic(attach->dmabuf)) { |
---|
| 976 | + dma_buf_unpin(attach); |
---|
| 977 | + dma_resv_unlock(attach->dmabuf->resv); |
---|
| 978 | + } |
---|
| 979 | + } |
---|
| 980 | + |
---|
| 981 | + dma_resv_lock(dmabuf->resv, NULL); |
---|
877 | 982 | list_del(&attach->node); |
---|
| 983 | + dma_resv_unlock(dmabuf->resv); |
---|
878 | 984 | if (dmabuf->ops->detach) |
---|
879 | 985 | dmabuf->ops->detach(dmabuf, attach); |
---|
880 | 986 | |
---|
881 | | - mutex_unlock(&dmabuf->lock); |
---|
882 | 987 | kfree(attach); |
---|
883 | 988 | } |
---|
884 | 989 | EXPORT_SYMBOL_GPL(dma_buf_detach); |
---|
| 990 | + |
---|
| 991 | +/** |
---|
| 992 | + * dma_buf_pin - Lock down the DMA-buf |
---|
| 993 | + * |
---|
| 994 | + * @attach: [in] attachment which should be pinned |
---|
| 995 | + * |
---|
| 996 | + * Returns: |
---|
| 997 | + * 0 on success, negative error code on failure. |
---|
| 998 | + */ |
---|
| 999 | +int dma_buf_pin(struct dma_buf_attachment *attach) |
---|
| 1000 | +{ |
---|
| 1001 | + struct dma_buf *dmabuf = attach->dmabuf; |
---|
| 1002 | + int ret = 0; |
---|
| 1003 | + |
---|
| 1004 | + dma_resv_assert_held(dmabuf->resv); |
---|
| 1005 | + |
---|
| 1006 | + if (dmabuf->ops->pin) |
---|
| 1007 | + ret = dmabuf->ops->pin(attach); |
---|
| 1008 | + |
---|
| 1009 | + return ret; |
---|
| 1010 | +} |
---|
| 1011 | +EXPORT_SYMBOL_GPL(dma_buf_pin); |
---|
| 1012 | + |
---|
| 1013 | +/** |
---|
| 1014 | + * dma_buf_unpin - Remove lock from DMA-buf |
---|
| 1015 | + * |
---|
| 1016 | + * @attach: [in] attachment which should be unpinned |
---|
| 1017 | + */ |
---|
| 1018 | +void dma_buf_unpin(struct dma_buf_attachment *attach) |
---|
| 1019 | +{ |
---|
| 1020 | + struct dma_buf *dmabuf = attach->dmabuf; |
---|
| 1021 | + |
---|
| 1022 | + dma_resv_assert_held(dmabuf->resv); |
---|
| 1023 | + |
---|
| 1024 | + if (dmabuf->ops->unpin) |
---|
| 1025 | + dmabuf->ops->unpin(attach); |
---|
| 1026 | +} |
---|
| 1027 | +EXPORT_SYMBOL_GPL(dma_buf_unpin); |
---|
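Both helpers require the reservation lock to be held, so a pin/unpin cycle looks roughly like this sketch (`my_pin_cycle` is illustrative):

```c
static int my_pin_cycle(struct dma_buf_attachment *attach)
{
	int ret;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	ret = dma_buf_pin(attach);	/* storage can't move while pinned */
	if (!ret) {
		/* ... set up long-lived DMA mappings ... */
		dma_buf_unpin(attach);
	}
	dma_resv_unlock(attach->dmabuf->resv);
	return ret;
}
```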
885 | 1028 | |
---|
886 | 1029 | /** |
---|
887 | 1030 | * dma_buf_map_attachment - Returns the scatterlist table of the attachment; |
---|
.. | .. |
---|
902 | 1045 | enum dma_data_direction direction) |
---|
903 | 1046 | { |
---|
904 | 1047 | struct sg_table *sg_table; |
---|
| 1048 | + int r; |
---|
905 | 1049 | |
---|
906 | 1050 | might_sleep(); |
---|
907 | 1051 | |
---|
908 | 1052 | if (WARN_ON(!attach || !attach->dmabuf)) |
---|
909 | 1053 | return ERR_PTR(-EINVAL); |
---|
910 | 1054 | |
---|
| 1055 | + if (dma_buf_attachment_is_dynamic(attach)) |
---|
| 1056 | + dma_resv_assert_held(attach->dmabuf->resv); |
---|
| 1057 | + |
---|
| 1058 | + if (attach->sgt) { |
---|
| 1059 | + /* |
---|
| 1060 | + * Two mappings with different directions for the same |
---|
| 1061 | + * attachment are not allowed. |
---|
| 1062 | + */ |
---|
| 1063 | + if (attach->dir != direction && |
---|
| 1064 | + attach->dir != DMA_BIDIRECTIONAL) |
---|
| 1065 | + return ERR_PTR(-EBUSY); |
---|
| 1066 | + |
---|
| 1067 | + return attach->sgt; |
---|
| 1068 | + } |
---|
| 1069 | + |
---|
| 1070 | + if (dma_buf_is_dynamic(attach->dmabuf)) { |
---|
| 1071 | + dma_resv_assert_held(attach->dmabuf->resv); |
---|
| 1072 | + if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { |
---|
| 1073 | + r = dma_buf_pin(attach); |
---|
| 1074 | + if (r) |
---|
| 1075 | + return ERR_PTR(r); |
---|
| 1076 | + } |
---|
| 1077 | + } |
---|
| 1078 | + |
---|
911 | 1079 | sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); |
---|
912 | 1080 | if (!sg_table) |
---|
913 | 1081 | sg_table = ERR_PTR(-ENOMEM); |
---|
| 1082 | + |
---|
| 1083 | + if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && |
---|
| 1084 | + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) |
---|
| 1085 | + dma_buf_unpin(attach); |
---|
| 1086 | + |
---|
| 1087 | + if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { |
---|
| 1088 | + attach->sgt = sg_table; |
---|
| 1089 | + attach->dir = direction; |
---|
| 1090 | + } |
---|
914 | 1091 | |
---|
915 | 1092 | return sg_table; |
---|
916 | 1093 | } |
---|
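Seen from a static importer, the map/unmap pair is used like this minimal sketch (`my_dma_cycle` is illustrative; a dynamic importer would additionally hold the reservation lock, as asserted above):

```c
static int my_dma_cycle(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... program the device with sgt and run the transfer ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	return 0;
}
```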
.. | .. |
---|
935 | 1112 | if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) |
---|
936 | 1113 | return; |
---|
937 | 1114 | |
---|
938 | | - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, |
---|
939 | | - direction); |
---|
| 1115 | + if (dma_buf_attachment_is_dynamic(attach)) |
---|
| 1116 | + dma_resv_assert_held(attach->dmabuf->resv); |
---|
| 1117 | + |
---|
| 1118 | + if (attach->sgt == sg_table) |
---|
| 1119 | + return; |
---|
| 1120 | + |
---|
| 1121 | + if (dma_buf_is_dynamic(attach->dmabuf)) |
---|
| 1122 | + dma_resv_assert_held(attach->dmabuf->resv); |
---|
| 1123 | + |
---|
| 1124 | + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); |
---|
| 1125 | + |
---|
| 1126 | + if (dma_buf_is_dynamic(attach->dmabuf) && |
---|
| 1127 | + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) |
---|
| 1128 | + dma_buf_unpin(attach); |
---|
940 | 1129 | } |
---|
941 | 1130 | EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); |
---|
| 1131 | + |
---|
| 1132 | +/** |
---|
| 1133 | + * dma_buf_move_notify - notify attachments that DMA-buf is moving |
---|
| 1134 | + * |
---|
| 1135 | + * @dmabuf: [in] buffer which is moving |
---|
| 1136 | + * |
---|
| 1137 | + * Informs all attachmenst that they need to destroy and recreated all their |
---|
| 1138 | + * mappings. |
---|
| 1139 | + */ |
---|
| 1140 | +void dma_buf_move_notify(struct dma_buf *dmabuf) |
---|
| 1141 | +{ |
---|
| 1142 | + struct dma_buf_attachment *attach; |
---|
| 1143 | + |
---|
| 1144 | + dma_resv_assert_held(dmabuf->resv); |
---|
| 1145 | + |
---|
| 1146 | + list_for_each_entry(attach, &dmabuf->attachments, node) |
---|
| 1147 | + if (attach->importer_ops) |
---|
| 1148 | + attach->importer_ops->move_notify(attach); |
---|
| 1149 | +} |
---|
| 1150 | +EXPORT_SYMBOL_GPL(dma_buf_move_notify); |
---|
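An exporter's eviction path would call this while holding the reservation lock, before actually moving the storage; a sketch with hypothetical `my_buf` and migration helpers:

```c
static void my_evict(struct my_buf *buf)
{
	dma_resv_lock(buf->dmabuf->resv, NULL);
	dma_buf_move_notify(buf->dmabuf);	/* importers tear down mappings */
	/* ... migrate the backing pages ... */
	dma_resv_unlock(buf->dmabuf->resv);
}
```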
942 | 1151 | |
---|
943 | 1152 | /** |
---|
944 | 1153 | * DOC: cpu access |
---|
.. | .. |
---|
951 | 1160 | * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() |
---|
952 | 1161 | * access. |
---|
953 | 1162 | * |
---|
954 | | - * To support dma_buf objects residing in highmem cpu access is page-based |
---|
955 | | - * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks |
---|
956 | | - * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which |
---|
957 | | - * returns a pointer in kernel virtual address space. Afterwards the chunk |
---|
958 | | - * needs to be unmapped again. There is no limit on how often a given chunk |
---|
959 | | - * can be mapped and unmapped, i.e. the importer does not need to call |
---|
960 | | - * begin_cpu_access again before mapping the same chunk again. |
---|
961 | | - * |
---|
962 | | - * Interfaces:: |
---|
963 | | - * void \*dma_buf_kmap(struct dma_buf \*, unsigned long); |
---|
964 | | - * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*); |
---|
965 | | - * |
---|
966 | | - * Implementing the functions is optional for exporters and for importers all |
---|
967 | | - * the restrictions of using kmap apply. |
---|
968 | | - * |
---|
969 | | - * dma_buf kmap calls outside of the range specified in begin_cpu_access are |
---|
970 | | - * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on |
---|
971 | | - * the partial chunks at the beginning and end but may return stale or bogus |
---|
972 | | - * data outside of the range (in these partial chunks). |
---|
973 | | - * |
---|
974 | | - * For some cases the overhead of kmap can be too high, a vmap interface |
---|
975 | | - * is introduced. This interface should be used very carefully, as vmalloc |
---|
976 | | - * space is a limited resources on many architectures. |
---|
| 1163 | + * Since most kernel-internal dma-buf accesses need the entire buffer, a |
---|
| 1164 | + * vmap interface is introduced. Note that on very old 32-bit architectures |
---|
| 1165 | + * vmalloc space might be limited and result in vmap calls failing. |
---|
977 | 1166 | * |
---|
978 | 1167 | * Interfaces:: |
---|
979 | 1168 | * void \*dma_buf_vmap(struct dma_buf \*dmabuf) |
---|
.. | .. |
---|
1010 | 1199 | * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write |
---|
1011 | 1200 | * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you |
---|
1012 | 1201 | * want (with the new data being consumed by say the GPU or the scanout |
---|
1013 | | - * device). Optionally SYNC_USER_MAPPED can be set to restrict cache |
---|
1014 | | - * maintenance to only the parts of the buffer which are mmap(ed). |
---|
| 1202 | + * device) |
---|
1015 | 1203 | * - munmap once you don't need the buffer any more |
---|
1016 | 1204 | * |
---|
1017 | 1205 | * For correctness and optimal performance, it is always required to use |
---|
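The SYNC_START/SYNC_END cycle described above looks like this from userspace (a sketch assuming a valid dma-buf fd; `cpu_fill` is illustrative and error handling is abbreviated):

```c
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int cpu_fill(int fd, size_t size, const void *src)
{
	struct dma_buf_sync sync = { 0 };
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (map == MAP_FAILED)
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* 1. SYNC_START */

	memcpy(map, src, size);			/* 2. CPU access via the mmap */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* 3. SYNC_END */

	munmap(map, size);
	return 0;
}
```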
.. | .. |
---|
1050 | 1238 | { |
---|
1051 | 1239 | bool write = (direction == DMA_BIDIRECTIONAL || |
---|
1052 | 1240 | direction == DMA_TO_DEVICE); |
---|
1053 | | - struct reservation_object *resv = dmabuf->resv; |
---|
| 1241 | + struct dma_resv *resv = dmabuf->resv; |
---|
1054 | 1242 | long ret; |
---|
1055 | 1243 | |
---|
1056 | 1244 | /* Wait on any implicit rendering fences */ |
---|
1057 | | - ret = reservation_object_wait_timeout_rcu(resv, write, true, |
---|
| 1245 | + ret = dma_resv_wait_timeout_rcu(resv, write, true, |
---|
1058 | 1246 | MAX_SCHEDULE_TIMEOUT); |
---|
1059 | 1247 | if (ret < 0) |
---|
1060 | 1248 | return ret; |
---|
.. | .. |
---|
1097 | 1285 | return ret; |
---|
1098 | 1286 | } |
---|
1099 | 1287 | EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); |
---|
1100 | | - |
---|
1101 | | -static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf, |
---|
1102 | | - enum dma_data_direction direction) |
---|
1103 | | -{ |
---|
1104 | | - int ret = 0; |
---|
1105 | | - |
---|
1106 | | - if (WARN_ON(!dmabuf)) |
---|
1107 | | - return -EINVAL; |
---|
1108 | | - |
---|
1109 | | - if (dmabuf->ops->begin_cpu_access_umapped) |
---|
1110 | | - ret = dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction); |
---|
1111 | | - |
---|
1112 | | - /* Ensure that all fences are waited upon - but we first allow |
---|
1113 | | - * the native handler the chance to do so more efficiently if it |
---|
1114 | | - * chooses. A double invocation here will be reasonably cheap no-op. |
---|
1115 | | - */ |
---|
1116 | | - if (ret == 0) |
---|
1117 | | - ret = __dma_buf_begin_cpu_access(dmabuf, direction); |
---|
1118 | | - |
---|
1119 | | - return ret; |
---|
1120 | | -} |
---|
1121 | 1288 | |
---|
1122 | 1289 | int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, |
---|
1123 | 1290 | enum dma_data_direction direction, |
---|
.. | .. |
---|
1169 | 1336 | } |
---|
1170 | 1337 | EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); |
---|
1171 | 1338 | |
---|
1172 | | -static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf, |
---|
1173 | | - enum dma_data_direction direction) |
---|
1174 | | -{ |
---|
1175 | | - int ret = 0; |
---|
1176 | | - |
---|
1177 | | - WARN_ON(!dmabuf); |
---|
1178 | | - |
---|
1179 | | - if (dmabuf->ops->end_cpu_access_umapped) |
---|
1180 | | - ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction); |
---|
1181 | | - |
---|
1182 | | - return ret; |
---|
1183 | | -} |
---|
1184 | | - |
---|
1185 | 1339 | int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, |
---|
1186 | 1340 | enum dma_data_direction direction, |
---|
1187 | 1341 | unsigned int offset, unsigned int len) |
---|
.. | .. |
---|
1197 | 1351 | return ret; |
---|
1198 | 1352 | } |
---|
1199 | 1353 | EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial); |
---|
1200 | | - |
---|
1201 | | -/** |
---|
1202 | | - * dma_buf_kmap - Map a page of the buffer object into kernel address space. The |
---|
1203 | | - * same restrictions as for kmap and friends apply. |
---|
1204 | | - * @dmabuf: [in] buffer to map page from. |
---|
1205 | | - * @page_num: [in] page in PAGE_SIZE units to map. |
---|
1206 | | - * |
---|
1207 | | - * This call must always succeed, any necessary preparations that might fail |
---|
1208 | | - * need to be done in begin_cpu_access. |
---|
1209 | | - */ |
---|
1210 | | -void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) |
---|
1211 | | -{ |
---|
1212 | | - WARN_ON(!dmabuf); |
---|
1213 | | - |
---|
1214 | | - if (!dmabuf->ops->map) |
---|
1215 | | - return NULL; |
---|
1216 | | - return dmabuf->ops->map(dmabuf, page_num); |
---|
1217 | | -} |
---|
1218 | | -EXPORT_SYMBOL_GPL(dma_buf_kmap); |
---|
1219 | | - |
---|
1220 | | -/** |
---|
1221 | | - * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap. |
---|
1222 | | - * @dmabuf: [in] buffer to unmap page from. |
---|
1223 | | - * @page_num: [in] page in PAGE_SIZE units to unmap. |
---|
1224 | | - * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap. |
---|
1225 | | - * |
---|
1226 | | - * This call must always succeed. |
---|
1227 | | - */ |
---|
1228 | | -void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, |
---|
1229 | | - void *vaddr) |
---|
1230 | | -{ |
---|
1231 | | - WARN_ON(!dmabuf); |
---|
1232 | | - |
---|
1233 | | - if (dmabuf->ops->unmap) |
---|
1234 | | - dmabuf->ops->unmap(dmabuf, page_num, vaddr); |
---|
1235 | | -} |
---|
1236 | | -EXPORT_SYMBOL_GPL(dma_buf_kunmap); |
---|
1237 | | - |
---|
1238 | 1354 | |
---|
1239 | 1355 | /** |
---|
1240 | 1356 | * dma_buf_mmap - Setup up a userspace mmap with the given vma |
---|
.. | .. |
---|
1395 | 1511 | int ret; |
---|
1396 | 1512 | struct dma_buf *buf_obj; |
---|
1397 | 1513 | struct dma_buf_attachment *attach_obj; |
---|
1398 | | - struct reservation_object *robj; |
---|
1399 | | - struct reservation_object_list *fobj; |
---|
| 1514 | + struct dma_resv *robj; |
---|
| 1515 | + struct dma_resv_list *fobj; |
---|
1400 | 1516 | struct dma_fence *fence; |
---|
1401 | 1517 | unsigned seq; |
---|
1402 | 1518 | int count = 0, attach_count, shared_count, i; |
---|
.. | .. |
---|
1408 | 1524 | return ret; |
---|
1409 | 1525 | |
---|
1410 | 1526 | seq_puts(s, "\nDma-buf Objects:\n"); |
---|
1411 | | - seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-60s\t%-8s\n", |
---|
1412 | | - "size", "flags", "mode", "count", "exp_name", "ino"); |
---|
| 1527 | + seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n", |
---|
| 1528 | + "size", "flags", "mode", "count", "ino"); |
---|
1413 | 1529 | |
---|
1414 | 1530 | list_for_each_entry(buf_obj, &db_list.head, list_node) { |
---|
1415 | | - ret = mutex_lock_interruptible(&buf_obj->lock); |
---|
1416 | 1531 | |
---|
1417 | | - if (ret) { |
---|
1418 | | - seq_puts(s, |
---|
1419 | | - "\tERROR locking buffer object: skipping\n"); |
---|
1420 | | - continue; |
---|
1421 | | - } |
---|
| 1532 | + ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); |
---|
| 1533 | + if (ret) |
---|
| 1534 | + goto error_unlock; |
---|
1422 | 1535 | |
---|
1423 | 1536 | spin_lock(&buf_obj->name_lock); |
---|
1424 | | - seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-60s\t%08lu\t%s\n", |
---|
| 1537 | + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n", |
---|
1425 | 1538 | buf_obj->size, |
---|
1426 | 1539 | buf_obj->file->f_flags, buf_obj->file->f_mode, |
---|
1427 | 1540 | file_count(buf_obj->file), |
---|
.. | .. |
---|
1432 | 1545 | |
---|
1433 | 1546 | robj = buf_obj->resv; |
---|
1434 | 1547 | while (true) { |
---|
1435 | | - seq = read_seqbegin(&robj->seq); |
---|
| 1548 | + seq = read_seqcount_begin(&robj->seq); |
---|
1436 | 1549 | rcu_read_lock(); |
---|
1437 | 1550 | fobj = rcu_dereference(robj->fence); |
---|
1438 | 1551 | shared_count = fobj ? fobj->shared_count : 0; |
---|
1439 | 1552 | fence = rcu_dereference(robj->fence_excl); |
---|
1440 | | - if (!read_seqretry(&robj->seq, seq)) |
---|
| 1553 | + if (!read_seqcount_retry(&robj->seq, seq)) |
---|
1441 | 1554 | break; |
---|
1442 | 1555 | rcu_read_unlock(); |
---|
1443 | 1556 | } |
---|
.. | .. |
---|
1466 | 1579 | seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); |
---|
1467 | 1580 | attach_count++; |
---|
1468 | 1581 | } |
---|
| 1582 | + dma_resv_unlock(buf_obj->resv); |
---|
1469 | 1583 | |
---|
1470 | 1584 | seq_printf(s, "Total %d devices attached\n\n", |
---|
1471 | 1585 | attach_count); |
---|
1472 | 1586 | |
---|
1473 | 1587 | count++; |
---|
1474 | 1588 | size += buf_obj->size; |
---|
1475 | | - mutex_unlock(&buf_obj->lock); |
---|
1476 | 1589 | } |
---|
1477 | 1590 | |
---|
1478 | 1591 | seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); |
---|
1479 | 1592 | |
---|
1480 | 1593 | mutex_unlock(&db_list.lock); |
---|
1481 | 1594 | return 0; |
---|
1482 | | -} |
---|
1483 | 1595 | |
---|
1484 | | -static int dma_buf_debug_open(struct inode *inode, struct file *file) |
---|
1485 | | -{ |
---|
1486 | | - return single_open(file, dma_buf_debug_show, NULL); |
---|
1487 | | -} |
---|
1488 | | - |
---|
1489 | | -static const struct file_operations dma_buf_debug_fops = { |
---|
1490 | | - .open = dma_buf_debug_open, |
---|
1491 | | - .read = seq_read, |
---|
1492 | | - .llseek = seq_lseek, |
---|
1493 | | - .release = single_release, |
---|
1494 | | -}; |
---|
1495 | | - |
---|
1496 | | -struct dma_info { |
---|
1497 | | - struct dma_buf *dmabuf; |
---|
1498 | | - struct hlist_node head; |
---|
1499 | | -}; |
---|
1500 | | - |
---|
1501 | | -struct dma_proc { |
---|
1502 | | - char name[TASK_COMM_LEN]; |
---|
1503 | | - pid_t pid; |
---|
1504 | | - size_t size; |
---|
1505 | | - struct hlist_head dma_bufs[1 << 10]; |
---|
1506 | | - struct list_head head; |
---|
1507 | | -}; |
---|
1508 | | - |
---|
1509 | | -static int get_dma_info(const void *data, struct file *file, unsigned int n) |
---|
1510 | | -{ |
---|
1511 | | - struct dma_proc *dma_proc; |
---|
1512 | | - struct dma_info *dma_info; |
---|
1513 | | - |
---|
1514 | | - dma_proc = (struct dma_proc *)data; |
---|
1515 | | - if (!is_dma_buf_file(file)) |
---|
1516 | | - return 0; |
---|
1517 | | - |
---|
1518 | | - hash_for_each_possible(dma_proc->dma_bufs, dma_info, |
---|
1519 | | - head, (unsigned long)file->private_data) { |
---|
1520 | | - if (file->private_data == dma_info->dmabuf) |
---|
1521 | | - return 0; |
---|
1522 | | - } |
---|
1523 | | - |
---|
1524 | | - dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC); |
---|
1525 | | - if (!dma_info) |
---|
1526 | | - return -ENOMEM; |
---|
1527 | | - |
---|
1528 | | - get_file(file); |
---|
1529 | | - dma_info->dmabuf = file->private_data; |
---|
1530 | | - dma_proc->size += dma_info->dmabuf->size / SZ_1K; |
---|
1531 | | - hash_add(dma_proc->dma_bufs, &dma_info->head, |
---|
1532 | | - (unsigned long)dma_info->dmabuf); |
---|
1533 | | - return 0; |
---|
1534 | | -} |
---|
1535 | | - |
---|
1536 | | -static void write_proc(struct seq_file *s, struct dma_proc *proc) |
---|
1537 | | -{ |
---|
1538 | | - struct dma_info *tmp; |
---|
1539 | | - int i; |
---|
1540 | | - |
---|
1541 | | - seq_printf(s, "\n%s (PID %d) size: %zu\nDMA Buffers:\n", |
---|
1542 | | - proc->name, proc->pid, proc->size); |
---|
1543 | | - seq_printf(s, "%-8s\t%-60s\t%-8s\t%-8s\t%s\n", |
---|
1544 | | - "Name", "Exp_name", "Size (KB)", "Alive (sec)", "Attached Devices"); |
---|
1545 | | - |
---|
1546 | | - hash_for_each(proc->dma_bufs, i, tmp, head) { |
---|
1547 | | - struct dma_buf *dmabuf = tmp->dmabuf; |
---|
1548 | | - struct dma_buf_attachment *a; |
---|
1549 | | - ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime); |
---|
1550 | | - |
---|
1551 | | - elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC); |
---|
1552 | | - seq_printf(s, "%-8s\t%-60s\t%-8zu\t%-8lld", |
---|
1553 | | - dmabuf->name, |
---|
1554 | | - dmabuf->exp_name, |
---|
1555 | | - dmabuf->size / SZ_1K, |
---|
1556 | | - elapmstime); |
---|
1557 | | - |
---|
1558 | | - list_for_each_entry(a, &dmabuf->attachments, node) { |
---|
1559 | | - seq_printf(s, "\t%s", dev_name(a->dev)); |
---|
1560 | | - } |
---|
1561 | | - seq_printf(s, "\n"); |
---|
1562 | | - } |
---|
1563 | | -} |
---|
1564 | | - |
---|
1565 | | -static void free_proc(struct dma_proc *proc) |
---|
1566 | | -{ |
---|
1567 | | - struct dma_info *tmp; |
---|
1568 | | - struct hlist_node *n; |
---|
1569 | | - int i; |
---|
1570 | | - |
---|
1571 | | - hash_for_each_safe(proc->dma_bufs, i, n, tmp, head) { |
---|
1572 | | - fput(tmp->dmabuf->file); |
---|
1573 | | - hash_del(&tmp->head); |
---|
1574 | | - kfree(tmp); |
---|
1575 | | - } |
---|
1576 | | - kfree(proc); |
---|
1577 | | -} |
---|
1578 | | - |
---|
1579 | | -static int cmp_proc(void *unused, struct list_head *a, struct list_head *b) |
---|
1580 | | -{ |
---|
1581 | | - struct dma_proc *a_proc, *b_proc; |
---|
1582 | | - |
---|
1583 | | - a_proc = list_entry(a, struct dma_proc, head); |
---|
1584 | | - b_proc = list_entry(b, struct dma_proc, head); |
---|
1585 | | - return b_proc->size - a_proc->size; |
---|
1586 | | -} |
---|
1587 | | - |
---|
1588 | | -static int dma_procs_debug_show(struct seq_file *s, void *unused) |
---|
1589 | | -{ |
---|
1590 | | - struct task_struct *task, *thread; |
---|
1591 | | - struct files_struct *files; |
---|
1592 | | - int ret = 0; |
---|
1593 | | - struct dma_proc *tmp, *n; |
---|
1594 | | - LIST_HEAD(plist); |
---|
1595 | | - |
---|
1596 | | - rcu_read_lock(); |
---|
1597 | | - for_each_process(task) { |
---|
1598 | | - struct files_struct *group_leader_files = NULL; |
---|
1599 | | - |
---|
1600 | | - tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); |
---|
1601 | | - if (!tmp) { |
---|
1602 | | - ret = -ENOMEM; |
---|
1603 | | - rcu_read_unlock(); |
---|
1604 | | - goto mem_err; |
---|
1605 | | - } |
---|
1606 | | - hash_init(tmp->dma_bufs); |
---|
1607 | | - for_each_thread(task, thread) { |
---|
1608 | | - task_lock(thread); |
---|
1609 | | - if (unlikely(!group_leader_files)) |
---|
1610 | | - group_leader_files = task->group_leader->files; |
---|
1611 | | - files = thread->files; |
---|
1612 | | - if (files && (group_leader_files != files || |
---|
1613 | | - thread == task->group_leader)) |
---|
1614 | | - ret = iterate_fd(files, 0, get_dma_info, tmp); |
---|
1615 | | - task_unlock(thread); |
---|
1616 | | - } |
---|
1617 | | - if (ret || hash_empty(tmp->dma_bufs)) |
---|
1618 | | - goto skip; |
---|
1619 | | - get_task_comm(tmp->name, task); |
---|
1620 | | - tmp->pid = task->tgid; |
---|
1621 | | - list_add(&tmp->head, &plist); |
---|
1622 | | - continue; |
---|
1623 | | -skip: |
---|
1624 | | - free_proc(tmp); |
---|
1625 | | - } |
---|
1626 | | - rcu_read_unlock(); |
---|
1627 | | - |
---|
1628 | | - list_sort(NULL, &plist, cmp_proc); |
---|
1629 | | - list_for_each_entry(tmp, &plist, head) |
---|
1630 | | - write_proc(s, tmp); |
---|
1631 | | - |
---|
1632 | | - ret = 0; |
---|
1633 | | -mem_err: |
---|
1634 | | - list_for_each_entry_safe(tmp, n, &plist, head) { |
---|
1635 | | - list_del(&tmp->head); |
---|
1636 | | - free_proc(tmp); |
---|
1637 | | - } |
---|
| 1596 | +error_unlock: |
---|
| 1597 | + mutex_unlock(&db_list.lock); |
---|
1638 | 1598 | return ret; |
---|
1639 | 1599 | } |
---|
1640 | | -DEFINE_SHOW_ATTRIBUTE(dma_procs_debug); |
---|
| 1600 | + |
---|
| 1601 | +DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); |
---|
1641 | 1602 | |
---|
1642 | 1603 | static struct dentry *dma_buf_debugfs_dir; |
---|
1643 | 1604 | |
---|
.. | .. |
---|
1656 | 1617 | NULL, &dma_buf_debug_fops); |
---|
1657 | 1618 | if (IS_ERR(d)) { |
---|
1658 | 1619 | pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); |
---|
1659 | | - debugfs_remove_recursive(dma_buf_debugfs_dir); |
---|
1660 | | - dma_buf_debugfs_dir = NULL; |
---|
1661 | | - err = PTR_ERR(d); |
---|
1662 | | - return err; |
---|
1663 | | - } |
---|
1664 | | - |
---|
1665 | | - d = debugfs_create_file("bufprocs", 0444, dma_buf_debugfs_dir, |
---|
1666 | | - NULL, &dma_procs_debug_fops); |
---|
1667 | | - |
---|
1668 | | - if (IS_ERR(d)) { |
---|
1669 | | - pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n"); |
---|
1670 | 1620 | debugfs_remove_recursive(dma_buf_debugfs_dir); |
---|
1671 | 1621 | dma_buf_debugfs_dir = NULL; |
---|
1672 | 1622 | err = PTR_ERR(d); |
---|
.. | .. |
---|
1691 | 1641 | |
---|
1692 | 1642 | static int __init dma_buf_init(void) |
---|
1693 | 1643 | { |
---|
| 1644 | + int ret; |
---|
| 1645 | + |
---|
| 1646 | + ret = dma_buf_init_sysfs_statistics(); |
---|
| 1647 | + if (ret) |
---|
| 1648 | + return ret; |
---|
| 1649 | + |
---|
1694 | 1650 | dma_buf_mnt = kern_mount(&dma_buf_fs_type); |
---|
1695 | 1651 | if (IS_ERR(dma_buf_mnt)) |
---|
1696 | 1652 | return PTR_ERR(dma_buf_mnt); |
---|
.. | .. |
---|
1706 | 1662 | { |
---|
1707 | 1663 | dma_buf_uninit_debugfs(); |
---|
1708 | 1664 | kern_unmount(dma_buf_mnt); |
---|
| 1665 | + dma_buf_uninit_sysfs_statistics(); |
---|
1709 | 1666 | } |
---|
1710 | 1667 | __exitcall(dma_buf_deinit); |
---|