2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/dma-buf/dma-buf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Framework for buffer objects that can be shared across devices/subsystems.
  *
@@ -8,18 +9,6 @@
  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  * refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #include <linux/fs.h>
@@ -32,27 +21,16 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/fdtable.h>
-#include <linux/hashtable.h>
-#include <linux/list_sort.h>
 #include <linux/mount.h>
-#include <linux/cache.h>
+#include <linux/pseudo_fs.h>
+#include <linux/sched/task.h>

 #include <uapi/linux/dma-buf.h>
 #include <uapi/linux/magic.h>

-static inline int is_dma_buf_file(struct file *);
-
-#ifdef CONFIG_ARCH_ROCKCHIP
-struct dma_buf_callback {
-	struct list_head list;
-	void (*callback)(void *);
-	void *data;
-};
-#endif
+#include "dma-buf-sysfs-stats.h"

 struct dma_buf_list {
 	struct list_head head;
@@ -60,6 +38,67 @@
 };

 static struct dma_buf_list db_list;
+
+/*
+ * This function helps in traversing the db_list and calls the
+ * callback function which can extract required info out of each
+ * dmabuf.
+ */
+int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
+				    void *private), void *private)
+{
+	struct dma_buf *buf;
+	int ret = mutex_lock_interruptible(&db_list.lock);
+
+	if (ret)
+		return ret;
+
+	list_for_each_entry(buf, &db_list.head, list_node) {
+		ret = callback(buf, private);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&db_list.lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(get_each_dmabuf);
+
+#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
+static size_t db_total_size;
+static size_t db_peak_size;
+
+void dma_buf_reset_peak_size(void)
+{
+	mutex_lock(&db_list.lock);
+	db_peak_size = 0;
+	mutex_unlock(&db_list.lock);
+}
+EXPORT_SYMBOL_GPL(dma_buf_reset_peak_size);
+
+size_t dma_buf_get_peak_size(void)
+{
+	size_t sz;
+
+	mutex_lock(&db_list.lock);
+	sz = db_peak_size;
+	mutex_unlock(&db_list.lock);
+
+	return sz;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_peak_size);
+
+size_t dma_buf_get_total_size(void)
+{
+	size_t sz;
+
+	mutex_lock(&db_list.lock);
+	sz = db_total_size;
+	mutex_unlock(&db_list.lock);
+
+	return sz;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_total_size);
+#endif

 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
 {
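For orientation, a minimal sketch (not part of the patch) of how a caller might use the get_each_dmabuf() helper added above to walk the global buffer list; the accounting callback and the rk_dmabuf_* names are hypothetical:

/* Hypothetical callback: sum up the size of every exported dma-buf. */
static int rk_dmabuf_account(const struct dma_buf *dmabuf, void *private)
{
	size_t *total = private;

	*total += dmabuf->size;
	return 0;		/* returning non-zero stops the walk */
}

static size_t rk_dmabuf_total(void)
{
	size_t total = 0;

	/* get_each_dmabuf() takes db_list.lock and may return -EINTR. */
	if (get_each_dmabuf(rk_dmabuf_account, &total))
		return 0;
	return total;
}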
@@ -80,10 +119,9 @@
 static void dma_buf_release(struct dentry *dentry)
 {
 	struct dma_buf *dmabuf;
-#ifdef CONFIG_ARCH_ROCKCHIP
-	struct dma_buf_callback *cb, *tmp;
-#endif
+#ifdef CONFIG_DMABUF_CACHE
 	int dtor_ret = 0;
+#endif

 	dmabuf = dentry->d_fsdata;
 	if (unlikely(!dmabuf))
@@ -101,28 +139,19 @@
 	 */
 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

-#ifdef CONFIG_ARCH_ROCKCHIP
-	mutex_lock(&dmabuf->release_lock);
-	list_for_each_entry_safe(cb, tmp, &dmabuf->release_callbacks, list) {
-		if (cb->callback)
-			cb->callback(cb->data);
-		list_del(&cb->list);
-		kfree(cb);
-	}
-	mutex_unlock(&dmabuf->release_lock);
-#endif
+	dma_buf_stats_teardown(dmabuf);
+#ifdef CONFIG_DMABUF_CACHE
 	if (dmabuf->dtor)
 		dtor_ret = dmabuf->dtor(dmabuf, dmabuf->dtor_data);

 	if (!dtor_ret)
+#endif
 		dmabuf->ops->release(dmabuf);
-	else
-		pr_warn_ratelimited("Leaking dmabuf %s because destructor failed error:%d\n",
-				    dmabuf->name, dtor_ret);

-	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-		reservation_object_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+		dma_resv_fini(dmabuf->resv);

+	WARN_ON(!list_empty(&dmabuf->attachments));
 	module_put(dmabuf->owner);
 	kfree(dmabuf->name);
 	kfree(dmabuf);
@@ -138,6 +167,9 @@
 	dmabuf = file->private_data;

 	mutex_lock(&db_list.lock);
+#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
+	db_total_size -= dmabuf->size;
+#endif
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);

@@ -151,16 +183,20 @@

 static struct vfsmount *dma_buf_mnt;

-static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
-		int flags, const char *name, void *data)
+static int dma_buf_fs_init_context(struct fs_context *fc)
 {
-	return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
-			DMA_BUF_MAGIC);
+	struct pseudo_fs_context *ctx;
+
+	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+	if (!ctx)
+		return -ENOMEM;
+	ctx->dops = &dma_buf_dentry_ops;
+	return 0;
 }

 static struct file_system_type dma_buf_fs_type = {
 	.name = "dmabuf",
-	.mount = dma_buf_fs_mount,
+	.init_fs_context = dma_buf_fs_init_context,
 	.kill_sb = kill_anon_super,
 };

@@ -212,12 +248,12 @@
 }

 /**
- * DOC: fence polling
+ * DOC: implicit fence polling
  *
  * To support cross-device and cross-driver synchronization of buffer access
- * implicit fences (represented internally in the kernel with &struct fence) can
- * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
@@ -247,8 +283,8 @@
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count, seq;
@@ -363,22 +399,46 @@
 	return events;
 }

+static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
+{
+	spin_lock(&dmabuf->name_lock);
+	kfree(dmabuf->name);
+	dmabuf->name = name;
+	spin_unlock(&dmabuf->name_lock);
+
+	return 0;
+}
+
 /**
  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It could support changing the name of the dma-buf if the same piece of
+ * memory is used for multiple purpose between different devices.
  *
- * @dmabuf [in]	dmabuf buffer that will be renamed.
- * @buf:   [in]	A piece of userspace memory that contains the name of
- *		the dma-buf.
+ * @dmabuf: [in]	dmabuf buffer that will be renamed.
+ * @buf:    [in]	A piece of userspace memory that contains the name of
+ *			the dma-buf.
  *
  * Returns 0 on success. If the dma-buf buffer is already attached to
  * devices, return -EBUSY.
  *
 */
-static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
+{
+	long ret = 0;
+	char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	ret = _dma_buf_set_name(dmabuf, buf);
+	if (ret)
+		kfree(buf);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_set_name);
+
+static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
 {
 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
 	long ret = 0;
@@ -386,28 +446,12 @@
 	if (IS_ERR(name))
 		return PTR_ERR(name);

-	mutex_lock(&dmabuf->lock);
-	spin_lock(&dmabuf->name_lock);
-	if (!list_empty(&dmabuf->attachments)) {
-		ret = -EBUSY;
+	ret = _dma_buf_set_name(dmabuf, name);
+	if (ret)
 		kfree(name);
-		goto out_unlock;
-	}
-	kfree(dmabuf->name);
-	dmabuf->name = name;

-out_unlock:
-	spin_unlock(&dmabuf->name_lock);
-	mutex_unlock(&dmabuf->lock);
 	return ret;
 }
-
-static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
-					    enum dma_data_direction direction);
-
-
-static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
-					  enum dma_data_direction direction);

 static long dma_buf_ioctl(struct file *file,
 			  unsigned int cmd, unsigned long arg)
@@ -415,7 +459,7 @@
 	struct dma_buf *dmabuf;
 	struct dma_buf_sync sync;
 	struct dma_buf_sync_partial sync_p;
-	enum dma_data_direction dir;
+	enum dma_data_direction direction;
 	int ret;

 	dmabuf = file->private_data;
@@ -430,46 +474,35 @@

 	switch (sync.flags & DMA_BUF_SYNC_RW) {
 	case DMA_BUF_SYNC_READ:
-		dir = DMA_FROM_DEVICE;
+		direction = DMA_FROM_DEVICE;
 		break;
 	case DMA_BUF_SYNC_WRITE:
-		dir = DMA_TO_DEVICE;
+		direction = DMA_TO_DEVICE;
 		break;
 	case DMA_BUF_SYNC_RW:
-		dir = DMA_BIDIRECTIONAL;
+		direction = DMA_BIDIRECTIONAL;
 		break;
 	default:
 		return -EINVAL;
 	}

 	if (sync.flags & DMA_BUF_SYNC_END)
-		if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
-			ret = dma_buf_end_cpu_access_umapped(dmabuf,
-							     dir);
-		else
-			ret = dma_buf_end_cpu_access(dmabuf, dir);
+		ret = dma_buf_end_cpu_access(dmabuf, direction);
 	else
-		if (sync.flags & DMA_BUF_SYNC_USER_MAPPED)
-			ret = dma_buf_begin_cpu_access_umapped(dmabuf,
-							       dir);
-		else
-			ret = dma_buf_begin_cpu_access(dmabuf, dir);
+		ret = dma_buf_begin_cpu_access(dmabuf, direction);

 		return ret;

 	case DMA_BUF_SET_NAME_A:
 	case DMA_BUF_SET_NAME_B:
-		return dma_buf_set_name(dmabuf, (const char __user *)arg);
+		return dma_buf_set_name_user(dmabuf, (const char __user *)arg);

 	case DMA_BUF_IOCTL_SYNC_PARTIAL:
 		if (copy_from_user(&sync_p, (void __user *) arg, sizeof(sync_p)))
 			return -EFAULT;

 		if (sync_p.len == 0)
-			return -EINVAL;
-
-		if ((sync_p.offset % cache_line_size()) || (sync_p.len % cache_line_size()))
-			return -EINVAL;
+			return 0;

 		if (sync_p.len > dmabuf->size || sync_p.offset > dmabuf->size - sync_p.len)
 			return -EINVAL;
@@ -479,24 +512,24 @@

 		switch (sync_p.flags & DMA_BUF_SYNC_RW) {
 		case DMA_BUF_SYNC_READ:
-			dir = DMA_FROM_DEVICE;
+			direction = DMA_FROM_DEVICE;
 			break;
 		case DMA_BUF_SYNC_WRITE:
-			dir = DMA_TO_DEVICE;
+			direction = DMA_TO_DEVICE;
 			break;
 		case DMA_BUF_SYNC_RW:
-			dir = DMA_BIDIRECTIONAL;
+			direction = DMA_BIDIRECTIONAL;
 			break;
 		default:
 			return -EINVAL;
 		}

 		if (sync_p.flags & DMA_BUF_SYNC_END)
-			ret = dma_buf_end_cpu_access_partial(dmabuf, dir,
+			ret = dma_buf_end_cpu_access_partial(dmabuf, direction,
 							     sync_p.offset,
 							     sync_p.len);
 		else
-			ret = dma_buf_begin_cpu_access_partial(dmabuf, dir,
+			ret = dma_buf_begin_cpu_access_partial(dmabuf, direction,
 							       sync_p.offset,
 							       sync_p.len);

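For reference, a hedged userspace sketch of the vendor DMA_BUF_IOCTL_SYNC_PARTIAL path reworked above (a zero-length sync now succeeds instead of returning -EINVAL, and the cache-line alignment check is gone). It assumes this tree's uapi <linux/dma-buf.h> exposes struct dma_buf_sync_partial with the flags/offset/len fields used by the handler:

#include <linux/dma-buf.h>
#include <sys/ioctl.h>

/* Flush CPU writes in [offset, offset + len) of the buffer back to the device. */
static int sync_partial_write(int dmabuf_fd, unsigned int offset, unsigned int len)
{
	struct dma_buf_sync_partial sync = {
		.flags  = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE,
		.offset = offset,
		.len    = len,	/* len == 0 is now a no-op rather than -EINVAL */
	};

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sync);
}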
@@ -527,22 +560,22 @@
 	.llseek		= dma_buf_llseek,
 	.poll		= dma_buf_poll,
 	.unlocked_ioctl	= dma_buf_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= dma_buf_ioctl,
-#endif
+	.compat_ioctl	= compat_ptr_ioctl,
 	.show_fdinfo	= dma_buf_show_fdinfo,
 };

 /*
  * is_dma_buf_file - Check if struct file* is associated with dma_buf
  */
-static inline int is_dma_buf_file(struct file *file)
+int is_dma_buf_file(struct file *file)
 {
 	return file->f_op == &dma_buf_fops;
 }
+EXPORT_SYMBOL_GPL(is_dma_buf_file);

 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 {
+	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
 	struct file *file;
 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

@@ -552,6 +585,13 @@
 	inode->i_size = dmabuf->size;
 	inode_set_bytes(inode, dmabuf->size);

+	/*
+	 * The ->i_ino acquired from get_next_ino() is not unique thus
+	 * not suitable for using it as dentry name by dmabuf stats.
+	 * Override ->i_ino with the unique and dmabuffs specific
+	 * value.
+	 */
+	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
 				 flags, &dma_buf_fops);
 	if (IS_ERR(file))
@@ -567,48 +607,16 @@
 	return file;
 }

-#ifdef CONFIG_ARCH_ROCKCHIP
-void *dma_buf_get_release_callback_data(struct dma_buf *dmabuf,
-					void (*callback)(void *))
+static void dma_buf_set_default_name(struct dma_buf *dmabuf)
 {
-	struct dma_buf_callback *cb, *tmp;
-	void *result = NULL;
+	char task_comm[TASK_COMM_LEN];
+	char *name;

-	mutex_lock(&dmabuf->release_lock);
-	list_for_each_entry_safe(cb, tmp, &dmabuf->release_callbacks, list) {
-		if (cb->callback == callback) {
-			result = cb->data;
-			break;
-		}
-	}
-	mutex_unlock(&dmabuf->release_lock);
-
-	return result;
+	get_task_comm(task_comm, current->group_leader);
+	name = kasprintf(GFP_KERNEL, "%d-%s", current->tgid, task_comm);
+	dma_buf_set_name(dmabuf, name);
+	kfree(name);
 }
-EXPORT_SYMBOL_GPL(dma_buf_get_release_callback_data);
-
-int dma_buf_set_release_callback(struct dma_buf *dmabuf,
-				 void (*callback)(void *), void *data)
-{
-	struct dma_buf_callback *cb;
-
-	if (WARN_ON(dma_buf_get_release_callback_data(dmabuf, callback)))
-		return -EINVAL;
-
-	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
-	if (!cb)
-		return -ENOMEM;
-
-	cb->callback = callback;
-	cb->data = data;
-	mutex_lock(&dmabuf->release_lock);
-	list_add_tail(&cb->list, &dmabuf->release_callbacks);
-	mutex_unlock(&dmabuf->release_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dma_buf_set_release_callback);
-#endif

 /**
  * DOC: dma buf device access
@@ -661,13 +669,13 @@
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv = exp_info->resv;
+	struct dma_resv *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;

 	if (!exp_info->resv)
-		alloc_size += sizeof(struct reservation_object);
+		alloc_size += sizeof(struct dma_resv);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
@@ -679,6 +687,13 @@
 			  || !exp_info->ops->release)) {
 		return ERR_PTR(-EINVAL);
 	}
+
+	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+		    (exp_info->ops->pin || exp_info->ops->unpin)))
+		return ERR_PTR(-EINVAL);
+
+	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
+		return ERR_PTR(-EINVAL);

 	if (!try_module_get(exp_info->owner))
 		return ERR_PTR(-ENOENT);
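To illustrate the invariant the two new WARN_ON checks in dma_buf_export() enforce (pin and unpin must be provided as a pair, and must not be combined with cache_sgt_mapping), a minimal exporter sketch; all my_* names are hypothetical:

static const struct dma_buf_ops my_dmabuf_ops = {
	/* either provide both pin and unpin, or neither */
	.pin		= my_pin,
	.unpin		= my_unpin,
	/* .cache_sgt_mapping = true, -- would now trip the WARN_ON above */
	.map_dma_buf	= my_map_dma_buf,
	.unmap_dma_buf	= my_unmap_dma_buf,
	.release	= my_release,
};

static struct dma_buf *my_export(struct my_buffer *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops  = &my_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.priv = buf;

	return dma_buf_export(&exp_info);
}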
@@ -694,16 +709,17 @@
 	dmabuf->size = exp_info->size;
 	dmabuf->exp_name = exp_info->exp_name;
 	dmabuf->owner = exp_info->owner;
+	spin_lock_init(&dmabuf->name_lock);
+#ifdef CONFIG_DMABUF_CACHE
+	mutex_init(&dmabuf->cache_lock);
+#endif
 	init_waitqueue_head(&dmabuf->poll);
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
-#if defined(CONFIG_DEBUG_FS)
-	dmabuf->ktime = ktime_get();
-#endif

 	if (!resv) {
-		resv = (struct reservation_object *)&dmabuf[1];
-		reservation_object_init(resv);
+		resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(resv);
 	}
 	dmabuf->resv = resv;

@@ -717,19 +733,33 @@
 	dmabuf->file = file;

 	mutex_init(&dmabuf->lock);
-	spin_lock_init(&dmabuf->name_lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);

-#ifdef CONFIG_ARCH_ROCKCHIP
-	mutex_init(&dmabuf->release_lock);
-	INIT_LIST_HEAD(&dmabuf->release_callbacks);
-#endif
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
+#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
+	db_total_size += dmabuf->size;
+	db_peak_size = max(db_total_size, db_peak_size);
+#endif
 	mutex_unlock(&db_list.lock);
+
+	ret = dma_buf_stats_setup(dmabuf);
+	if (ret)
+		goto err_sysfs;
+
+	if (IS_ENABLED(CONFIG_RK_DMABUF_DEBUG))
+		dma_buf_set_default_name(dmabuf);

 	return dmabuf;

+err_sysfs:
+	/*
+	 * Set file->f_path.dentry->d_fsdata to NULL so that when
+	 * dma_buf_release() gets invoked by dentry_ops, it exits
+	 * early before calling the release() dma_buf op.
+	 */
+	file->f_path.dentry->d_fsdata = NULL;
+	fput(file);
 err_dmabuf:
 	kfree(dmabuf);
 err_module:
@@ -808,10 +838,12 @@
 EXPORT_SYMBOL_GPL(dma_buf_put);

 /**
- * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
- * @dmabuf:	[in]	buffer to attach device to.
- * @dev:	[in]	device to be attached.
+ * @dmabuf:		[in]	buffer to attach device to.
+ * @dev:		[in]	device to be attached.
+ * @importer_ops:	[in]	importer operations for the attachment
+ * @importer_priv:	[in]	importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
@@ -825,13 +857,18 @@
  * accessible to @dev, and cannot be moved to a more suitable place. This is
  * indicated with the error code -EBUSY.
  */
-struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-					  struct device *dev)
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+		       const struct dma_buf_attach_ops *importer_ops,
+		       void *importer_priv)
 {
 	struct dma_buf_attachment *attach;
 	int ret;

 	if (WARN_ON(!dmabuf || !dev))
+		return ERR_PTR(-EINVAL);
+
+	if (WARN_ON(importer_ops && !importer_ops->move_notify))
 		return ERR_PTR(-EINVAL);

 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
@@ -840,23 +877,79 @@

 	attach->dev = dev;
 	attach->dmabuf = dmabuf;
-
-	mutex_lock(&dmabuf->lock);
+	if (importer_ops)
+		attach->peer2peer = importer_ops->allow_peer2peer;
+	attach->importer_ops = importer_ops;
+	attach->importer_priv = importer_priv;

 	if (dmabuf->ops->attach) {
 		ret = dmabuf->ops->attach(dmabuf, attach);
 		if (ret)
 			goto err_attach;
 	}
+	dma_resv_lock(dmabuf->resv, NULL);
 	list_add(&attach->node, &dmabuf->attachments);
+	dma_resv_unlock(dmabuf->resv);

-	mutex_unlock(&dmabuf->lock);
+	/* When either the importer or the exporter can't handle dynamic
+	 * mappings we cache the mapping here to avoid issues with the
+	 * reservation object lock.
+	 */
+	if (dma_buf_attachment_is_dynamic(attach) !=
+	    dma_buf_is_dynamic(dmabuf)) {
+		struct sg_table *sgt;
+
+		if (dma_buf_is_dynamic(attach->dmabuf)) {
+			dma_resv_lock(attach->dmabuf->resv, NULL);
+			ret = dma_buf_pin(attach);
+			if (ret)
+				goto err_unlock;
+		}
+
+		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+		if (!sgt)
+			sgt = ERR_PTR(-ENOMEM);
+		if (IS_ERR(sgt)) {
+			ret = PTR_ERR(sgt);
+			goto err_unpin;
+		}
+		if (dma_buf_is_dynamic(attach->dmabuf))
+			dma_resv_unlock(attach->dmabuf->resv);
+		attach->sgt = sgt;
+		attach->dir = DMA_BIDIRECTIONAL;
+	}
+
 	return attach;

 err_attach:
 	kfree(attach);
-	mutex_unlock(&dmabuf->lock);
 	return ERR_PTR(ret);
+
+err_unpin:
+	if (dma_buf_is_dynamic(attach->dmabuf))
+		dma_buf_unpin(attach);
+
+err_unlock:
+	if (dma_buf_is_dynamic(attach->dmabuf))
+		dma_resv_unlock(attach->dmabuf->resv);
+
+	dma_buf_detach(dmabuf, attach);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf:	[in]	buffer to attach device to.
+ * @dev:	[in]	device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+					  struct device *dev)
+{
+	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);

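A minimal importer-side sketch (not part of the patch) of the new dma_buf_dynamic_attach() interface; the my_importer driver and its fields are hypothetical, while the attach ops and move_notify semantics follow the code above:

/* Hypothetical importer: re-validate mappings when the exporter moves the buffer. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;

	/* Called with the dma_resv lock held; mark cached mappings stale. */
	imp->needs_remap = true;
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify	 = my_move_notify,
};

static int my_import(struct my_importer *imp, struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	attach = dma_buf_dynamic_attach(dmabuf, imp->dev, &my_attach_ops, imp);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	imp->attach = attach;
	return 0;
}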
@@ -873,15 +966,65 @@
 	if (WARN_ON(!dmabuf || !attach))
 		return;

-	mutex_lock(&dmabuf->lock);
+	if (attach->sgt) {
+		if (dma_buf_is_dynamic(attach->dmabuf))
+			dma_resv_lock(attach->dmabuf->resv, NULL);
+
+		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+
+		if (dma_buf_is_dynamic(attach->dmabuf)) {
+			dma_buf_unpin(attach);
+			dma_resv_unlock(attach->dmabuf->resv);
+		}
+	}
+
+	dma_resv_lock(dmabuf->resv, NULL);
 	list_del(&attach->node);
+	dma_resv_unlock(dmabuf->resv);
 	if (dmabuf->ops->detach)
 		dmabuf->ops->detach(dmabuf, attach);

-	mutex_unlock(&dmabuf->lock);
 	kfree(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_detach);
+
+/**
+ * dma_buf_pin - Lock down the DMA-buf
+ *
+ * @attach:	[in]	attachment which should be pinned
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+	struct dma_buf *dmabuf = attach->dmabuf;
+	int ret = 0;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	if (dmabuf->ops->pin)
+		ret = dmabuf->ops->pin(attach);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_pin);
+
+/**
+ * dma_buf_unpin - Remove lock from DMA-buf
+ *
+ * @attach:	[in]	attachment which should be unpinned
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+	struct dma_buf *dmabuf = attach->dmabuf;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	if (dmabuf->ops->unpin)
+		dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unpin);

 /**
  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
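A short sketch of how a dynamic importer might combine dma_buf_pin() with dma_buf_map_attachment() under the reservation lock that both new helpers assert; the my_map_pinned() wrapper is hypothetical:

/* Hypothetical helper: pin and map an attachment with the required locking. */
static struct sg_table *my_map_pinned(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;
	int ret;

	dma_resv_lock(attach->dmabuf->resv, NULL);	/* dma_buf_pin() asserts this */
	ret = dma_buf_pin(attach);
	if (ret) {
		dma_resv_unlock(attach->dmabuf->resv);
		return ERR_PTR(ret);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_unpin(attach);
	dma_resv_unlock(attach->dmabuf->resv);

	return sgt;
}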
@@ -902,15 +1045,49 @@
 					enum dma_data_direction direction)
 {
 	struct sg_table *sg_table;
+	int r;

 	might_sleep();

 	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);

+	if (dma_buf_attachment_is_dynamic(attach))
+		dma_resv_assert_held(attach->dmabuf->resv);
+
+	if (attach->sgt) {
+		/*
+		 * Two mappings with different directions for the same
+		 * attachment are not allowed.
+		 */
+		if (attach->dir != direction &&
+		    attach->dir != DMA_BIDIRECTIONAL)
+			return ERR_PTR(-EBUSY);
+
+		return attach->sgt;
+	}
+
+	if (dma_buf_is_dynamic(attach->dmabuf)) {
+		dma_resv_assert_held(attach->dmabuf->resv);
+		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+			r = dma_buf_pin(attach);
+			if (r)
+				return ERR_PTR(r);
+		}
+	}
+
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
+
+	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
+
+	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+		attach->sgt = sg_table;
+		attach->dir = direction;
+	}

 	return sg_table;
 }
@@ -935,10 +1112,42 @@
 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;

-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-					   direction);
+	if (dma_buf_attachment_is_dynamic(attach))
+		dma_resv_assert_held(attach->dmabuf->resv);
+
+	if (attach->sgt == sg_table)
+		return;
+
+	if (dma_buf_is_dynamic(attach->dmabuf))
+		dma_resv_assert_held(attach->dmabuf->resv);
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+	if (dma_buf_is_dynamic(attach->dmabuf) &&
+	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf:	[in]	buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		if (attach->importer_ops)
+			attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);

 /**
  * DOC: cpu access
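On the exporter side, dma_buf_move_notify() is meant to be called with the reservation lock held before the backing storage actually moves; a hypothetical sketch (my_migrate_buffer() is not part of the patch):

/* Hypothetical exporter path: tell importers before migrating the backing storage. */
static void my_migrate_buffer(struct dma_buf *dmabuf)
{
	dma_resv_lock(dmabuf->resv, NULL);

	dma_buf_move_notify(dmabuf);	/* importers invalidate their cached mappings */
	/* ... move the pages, then publish the new placement ... */

	dma_resv_unlock(dmabuf->resv);
}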
@@ -951,29 +1160,9 @@
  * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
  * access.
  *
- * To support dma_buf objects residing in highmem cpu access is page-based
- * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
- * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
- * returns a pointer in kernel virtual address space. Afterwards the chunk
- * needs to be unmapped again. There is no limit on how often a given chunk
- * can be mapped and unmapped, i.e. the importer does not need to call
- * begin_cpu_access again before mapping the same chunk again.
- *
- * Interfaces::
- *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
- *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
- *
- * Implementing the functions is optional for exporters and for importers all
- * the restrictions of using kmap apply.
- *
- * dma_buf kmap calls outside of the range specified in begin_cpu_access are
- * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
- * the partial chunks at the beginning and end but may return stale or bogus
- * data outside of the range (in these partial chunks).
- *
- * For some cases the overhead of kmap can be too high, a vmap interface
- * is introduced. This interface should be used very carefully, as vmalloc
- * space is a limited resources on many architectures.
+ * Since for most kernel internal dma-buf accesses need the entire buffer, a
+ * vmap interface is introduced. Note that on very old 32-bit architectures
+ * vmalloc space might be limited and result in vmap calls failing.
  *
  * Interfaces::
  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
@@ -1010,8 +1199,7 @@
  *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
  *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
  *     want (with the new data being consumed by say the GPU or the scanout
- *     device). Optionally SYNC_USER_MAPPED can be set to restrict cache
- *     maintenance to only the parts of the buffer which are mmap(ed).
+ *     device)
  *   - munmap once you don't need the buffer any more
  *
  *    For correctness and optimal performance, it is always required to use
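The CPU-access cycle described above, as a minimal userspace sketch using the stock DMA_BUF_IOCTL_SYNC interface (the vendor-only SYNC_USER_MAPPED flag is gone after this patch):

#include <linux/dma-buf.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* One CPU write cycle against an mmap()ed dma-buf fd of the given size. */
static int cpu_fill(int dmabuf_fd, size_t size)
{
	struct dma_buf_sync sync = { 0 };
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	if (map == MAP_FAILED)
		return -1;

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* begin CPU access */

	memset(map, 0, size);				/* CPU writes */

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	/* end CPU access */

	munmap(map, size);
	return 0;
}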
@@ -1050,11 +1238,11 @@
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct reservation_object *resv = dmabuf->resv;
+	struct dma_resv *resv = dmabuf->resv;
 	long ret;

 	/* Wait on any implicit rendering fences */
-	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+	ret = dma_resv_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
@@ -1097,27 +1285,6 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
-
-static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
-					    enum dma_data_direction direction)
-{
-	int ret = 0;
-
-	if (WARN_ON(!dmabuf))
-		return -EINVAL;
-
-	if (dmabuf->ops->begin_cpu_access_umapped)
-		ret = dmabuf->ops->begin_cpu_access_umapped(dmabuf, direction);
-
-	/* Ensure that all fences are waited upon - but we first allow
-	 * the native handler the chance to do so more efficiently if it
-	 * chooses. A double invocation here will be reasonably cheap no-op.
-	 */
-	if (ret == 0)
-		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
-
-	return ret;
-}

 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 				     enum dma_data_direction direction,
@@ -1169,19 +1336,6 @@
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

-static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
-					  enum dma_data_direction direction)
-{
-	int ret = 0;
-
-	WARN_ON(!dmabuf);
-
-	if (dmabuf->ops->end_cpu_access_umapped)
-		ret = dmabuf->ops->end_cpu_access_umapped(dmabuf, direction);
-
-	return ret;
-}
-
 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 				   enum dma_data_direction direction,
 				   unsigned int offset, unsigned int len)
@@ -1197,44 +1351,6 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
-
-/**
- * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
- * same restrictions as for kmap and friends apply.
- * @dmabuf:	[in]	buffer to map page from.
- * @page_num:	[in]	page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
-{
-	WARN_ON(!dmabuf);
-
-	if (!dmabuf->ops->map)
-		return NULL;
-	return dmabuf->ops->map(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap);
-
-/**
- * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
- * @dmabuf:	[in]	buffer to unmap page from.
- * @page_num:	[in]	page in PAGE_SIZE units to unmap.
- * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
-		    void *vaddr)
-{
-	WARN_ON(!dmabuf);
-
-	if (dmabuf->ops->unmap)
-		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap);
-

 /**
  * dma_buf_mmap - Setup up a userspace mmap with the given vma
@@ -1395,8 +1511,8 @@
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct reservation_object *robj;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
@@ -1408,20 +1524,17 @@
 		return ret;

 	seq_puts(s, "\nDma-buf Objects:\n");
-	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-60s\t%-8s\n",
-		   "size", "flags", "mode", "count", "exp_name", "ino");
+	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
+		   "size", "flags", "mode", "count", "ino");

 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
-		ret = mutex_lock_interruptible(&buf_obj->lock);

-		if (ret) {
-			seq_puts(s,
-				 "\tERROR locking buffer object: skipping\n");
-			continue;
-		}
+		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+		if (ret)
+			goto error_unlock;

 		spin_lock(&buf_obj->name_lock);
-		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-60s\t%08lu\t%s\n",
+		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
 			   buf_obj->size,
 			   buf_obj->file->f_flags, buf_obj->file->f_mode,
 			   file_count(buf_obj->file),
@@ -1466,178 +1579,26 @@
 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
 			attach_count++;
 		}
+		dma_resv_unlock(buf_obj->resv);

 		seq_printf(s, "Total %d devices attached\n\n",
 			   attach_count);

 		count++;
 		size += buf_obj->size;
-		mutex_unlock(&buf_obj->lock);
 	}

 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

 	mutex_unlock(&db_list.lock);
 	return 0;
-}

-static int dma_buf_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dma_buf_debug_show, NULL);
-}
-
-static const struct file_operations dma_buf_debug_fops = {
-	.open		= dma_buf_debug_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-struct dma_info {
-	struct dma_buf *dmabuf;
-	struct hlist_node head;
-};
-
-struct dma_proc {
-	char name[TASK_COMM_LEN];
-	pid_t pid;
-	size_t size;
-	struct hlist_head dma_bufs[1 << 10];
-	struct list_head head;
-};
-
-static int get_dma_info(const void *data, struct file *file, unsigned int n)
-{
-	struct dma_proc *dma_proc;
-	struct dma_info *dma_info;
-
-	dma_proc = (struct dma_proc *)data;
-	if (!is_dma_buf_file(file))
-		return 0;
-
-	hash_for_each_possible(dma_proc->dma_bufs, dma_info,
-			       head, (unsigned long)file->private_data) {
-		if (file->private_data == dma_info->dmabuf)
-			return 0;
-	}
-
-	dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC);
-	if (!dma_info)
-		return -ENOMEM;
-
-	get_file(file);
-	dma_info->dmabuf = file->private_data;
-	dma_proc->size += dma_info->dmabuf->size / SZ_1K;
-	hash_add(dma_proc->dma_bufs, &dma_info->head,
-		 (unsigned long)dma_info->dmabuf);
-	return 0;
-}
-
-static void write_proc(struct seq_file *s, struct dma_proc *proc)
-{
-	struct dma_info *tmp;
-	int i;
-
-	seq_printf(s, "\n%s (PID %d) size: %zu\nDMA Buffers:\n",
-		   proc->name, proc->pid, proc->size);
-	seq_printf(s, "%-8s\t%-60s\t%-8s\t%-8s\t%s\n",
-		   "Name", "Exp_name", "Size (KB)", "Alive (sec)", "Attached Devices");
-
-	hash_for_each(proc->dma_bufs, i, tmp, head) {
-		struct dma_buf *dmabuf = tmp->dmabuf;
-		struct dma_buf_attachment *a;
-		ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime);
-
-		elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC);
-		seq_printf(s, "%-8s\t%-60s\t%-8zu\t%-8lld",
-			   dmabuf->name,
-			   dmabuf->exp_name,
-			   dmabuf->size / SZ_1K,
-			   elapmstime);
-
-		list_for_each_entry(a, &dmabuf->attachments, node) {
-			seq_printf(s, "\t%s", dev_name(a->dev));
-		}
-		seq_printf(s, "\n");
-	}
-}
-
-static void free_proc(struct dma_proc *proc)
-{
-	struct dma_info *tmp;
-	struct hlist_node *n;
-	int i;
-
-	hash_for_each_safe(proc->dma_bufs, i, n, tmp, head) {
-		fput(tmp->dmabuf->file);
-		hash_del(&tmp->head);
-		kfree(tmp);
-	}
-	kfree(proc);
-}
-
-static int cmp_proc(void *unused, struct list_head *a, struct list_head *b)
-{
-	struct dma_proc *a_proc, *b_proc;
-
-	a_proc = list_entry(a, struct dma_proc, head);
-	b_proc = list_entry(b, struct dma_proc, head);
-	return b_proc->size - a_proc->size;
-}
-
-static int dma_procs_debug_show(struct seq_file *s, void *unused)
-{
-	struct task_struct *task, *thread;
-	struct files_struct *files;
-	int ret = 0;
-	struct dma_proc *tmp, *n;
-	LIST_HEAD(plist);
-
-	rcu_read_lock();
-	for_each_process(task) {
-		struct files_struct *group_leader_files = NULL;
-
-		tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
-		if (!tmp) {
-			ret = -ENOMEM;
-			rcu_read_unlock();
-			goto mem_err;
-		}
-		hash_init(tmp->dma_bufs);
-		for_each_thread(task, thread) {
-			task_lock(thread);
-			if (unlikely(!group_leader_files))
-				group_leader_files = task->group_leader->files;
-			files = thread->files;
-			if (files && (group_leader_files != files ||
-				      thread == task->group_leader))
-				ret = iterate_fd(files, 0, get_dma_info, tmp);
-			task_unlock(thread);
-		}
-		if (ret || hash_empty(tmp->dma_bufs))
-			goto skip;
-		get_task_comm(tmp->name, task);
-		tmp->pid = task->tgid;
-		list_add(&tmp->head, &plist);
-		continue;
-skip:
-		free_proc(tmp);
-	}
-	rcu_read_unlock();
-
-	list_sort(NULL, &plist, cmp_proc);
-	list_for_each_entry(tmp, &plist, head)
-		write_proc(s, tmp);
-
-	ret = 0;
-mem_err:
-	list_for_each_entry_safe(tmp, n, &plist, head) {
-		list_del(&tmp->head);
-		free_proc(tmp);
-	}
+error_unlock:
+	mutex_unlock(&db_list.lock);
 	return ret;
 }
-DEFINE_SHOW_ATTRIBUTE(dma_procs_debug);
+
+DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

 static struct dentry *dma_buf_debugfs_dir;

@@ -1656,17 +1617,6 @@
 					NULL, &dma_buf_debug_fops);
 	if (IS_ERR(d)) {
 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
-		debugfs_remove_recursive(dma_buf_debugfs_dir);
-		dma_buf_debugfs_dir = NULL;
-		err = PTR_ERR(d);
-		return err;
-	}
-
-	d = debugfs_create_file("bufprocs", 0444, dma_buf_debugfs_dir,
-				NULL, &dma_procs_debug_fops);
-
-	if (IS_ERR(d)) {
-		pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n");
 		debugfs_remove_recursive(dma_buf_debugfs_dir);
 		dma_buf_debugfs_dir = NULL;
 		err = PTR_ERR(d);
@@ -1691,6 +1641,12 @@

 static int __init dma_buf_init(void)
 {
+	int ret;
+
+	ret = dma_buf_init_sysfs_statistics();
+	if (ret)
+		return ret;
+
 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
 	if (IS_ERR(dma_buf_mnt))
 		return PTR_ERR(dma_buf_mnt);
@@ -1706,5 +1662,6 @@
 {
 	dma_buf_uninit_debugfs();
 	kern_unmount(dma_buf_mnt);
+	dma_buf_uninit_sysfs_statistics();
 }
 __exitcall(dma_buf_deinit);