2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/include/linux/dma-buf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Header file for dma buffer sharing framework.
  *
@@ -8,18 +9,6 @@
  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  * refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __DMA_BUF_H__
 #define __DMA_BUF_H__
@@ -32,6 +21,7 @@
 #include <linux/fs.h>
 #include <linux/dma-fence.h>
 #include <linux/wait.h>
+#include <linux/android_kabi.h>

 struct device;
 struct dma_buf;
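Editor's aside: <linux/android_kabi.h> supplies the ANDROID_KABI_RESERVE() padding used throughout this patch. A minimal sketch of the idea, assuming the usual Android definition (the real macro lives in android_kabi.h, not here):

        /* Assumed expansion: a u64 placeholder that keeps the struct layout
         * stable, so a later field can reuse the slot without breaking the
         * Android kernel ABI.
         */
        #define ANDROID_KABI_RESERVE(n)  u64 android_kabi_reserved##n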
@@ -39,18 +29,20 @@

 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: [optional] maps a page from the buffer into kernel address
- *              space, users may not block until the subsequent unmap call.
- *              This callback must not sleep.
- * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- *                This Callback must not sleep.
- * @map: [optional] maps a page from the buffer into kernel address space.
- * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  *        address space. Same restrictions as for vmap and friends apply.
  * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
+        /**
+         * @cache_sgt_mapping:
+         *
+         * If true the framework will cache the first mapping made for each
+         * attachment. This avoids creating mappings for attachments multiple
+         * times.
+         */
+        bool cache_sgt_mapping;
+
         /**
          * @attach:
          *
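An exporter opts into this cached-mapping behaviour simply by setting the flag in its ops table. A minimal sketch, where all the my_* callbacks are hypothetical:

        static const struct dma_buf_ops my_exporter_ops = {
                .cache_sgt_mapping = true, /* reuse first mapping per attachment */
                .map_dma_buf       = my_map_dma_buf,
                .unmap_dma_buf     = my_unmap_dma_buf,
                .release           = my_release,
                .mmap              = my_mmap,
        };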
@@ -91,13 +83,42 @@
         void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

         /**
+         * @pin:
+         *
+         * This is called by dma_buf_pin and lets the exporter know that the
+         * DMA-buf can't be moved any more.
+         *
+         * This is called with the dmabuf->resv object locked and is mutually
+         * exclusive with @cache_sgt_mapping.
+         *
+         * This callback is optional and should only be used in limited use
+         * cases like scanout and not for temporary pin operations.
+         *
+         * Returns:
+         *
+         * 0 on success, negative error code on failure.
+         */
+        int (*pin)(struct dma_buf_attachment *attach);
+
+        /**
+         * @unpin:
+         *
+         * This is called by dma_buf_unpin and lets the exporter know that the
+         * DMA-buf can be moved again.
+         *
+         * This is called with the dmabuf->resv object locked and is mutually
+         * exclusive with @cache_sgt_mapping.
+         *
+         * This callback is optional.
+         */
+        void (*unpin)(struct dma_buf_attachment *attach);
+
+        /**
          * @map_dma_buf:
          *
          * This is called by dma_buf_map_attachment() and is used to map a
          * shared &dma_buf into device address space, and it is mandatory. It
-         * can only be called if @attach has been called successfully. This
-         * essentially pins the DMA buffer into place, and it cannot be moved
-         * any more
+         * can only be called if @attach has been called successfully.
          *
          * This call may sleep, e.g. when the backing storage first needs to be
          * allocated, or moved to a location suitable for all currently attached
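A sketch of how an exporter might back the new @pin/@unpin hooks. struct my_buffer and its pin_count are hypothetical, and dma_buf_pin()/dma_buf_unpin() already hold dmabuf->resv when these run:

        static int my_pin(struct dma_buf_attachment *attach)
        {
                struct my_buffer *buf = attach->dmabuf->priv;

                /* resv is held by the caller; forbid moving the backing store */
                buf->pin_count++;
                return 0;
        }

        static void my_unpin(struct dma_buf_attachment *attach)
        {
                struct my_buffer *buf = attach->dmabuf->priv;

                buf->pin_count--; /* buffer may migrate again once this is zero */
        }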
@@ -118,6 +139,9 @@
          * any other kind of sharing that the exporter might wish to make
          * available to buffer-users.
          *
+         * This is always called with the dmabuf->resv object locked when
+         * the dynamic_mapping flag is true.
+         *
          * Returns:
          *
          * A &sg_table scatter list of or the backing storage of the DMA buffer,
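For a dynamic attachment this means the importer brackets the mapping call with the reservation lock, roughly like this sketch (error handling elided):

        struct sg_table *sgt;

        dma_resv_lock(attach->dmabuf->resv, NULL);
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        dma_resv_unlock(attach->dmabuf->resv);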
@@ -135,9 +159,8 @@
          *
          * This is called by dma_buf_unmap_attachment() and should unmap and
          * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
-         * It should also unpin the backing storage if this is the last mapping
-         * of the DMA buffer, it the exporter supports backing storage
-         * migration.
+         * For static dma_buf handling this might also unpin the backing
+         * storage if this is the last mapping of the DMA buffer.
          */
         void (*unmap_dma_buf)(struct dma_buf_attachment *,
                               struct sg_table *,
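For reference, a typical exporter-side @unmap_dma_buf simply undoes what @map_dma_buf built; a sketch, assuming the sg_table was kmalloc'ed and dma_map_sg'ed there:

        static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
                                     struct sg_table *sgt,
                                     enum dma_data_direction dir)
        {
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
                sg_free_table(sgt);
                kfree(sgt);
        }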
@@ -146,12 +169,7 @@
         /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
          * if the call would block.
          */
-#ifdef CONFIG_ARCH_ROCKCHIP
-        int (*set_release_callback)(void (*release_callback)(void *data),
-                                    void *data);
-        void *(*get_release_callback_data)(void *callback);
-        /* after final dma_buf_put() */
-#endif
+
         /**
          * @release:
          *
@@ -191,33 +209,6 @@
          * needs to be restarted.
          */
         int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
-
-        /**
-         * @begin_cpu_access_umapped:
-         *
-         * This is called as a result of the DMA_BUF_IOCTL_SYNC IOCTL being
-         * called with the DMA_BUF_SYNC_START and DMA_BUF_SYNC_USER_MAPPED flags
-         * set. It allows the exporter to ensure that the mmap(ed) portions of
-         * the buffer are available for cpu access - the exporter might need to
-         * allocate or swap-in and pin the backing storage.
-         * The exporter also needs to ensure that cpu access is
-         * coherent for the access direction. The direction can be used by the
-         * exporter to optimize the cache flushing, i.e. access with a different
-         * direction (read instead of write) might return stale or even bogus
-         * data (e.g. when the exporter needs to copy the data to temporary
-         * storage).
-         *
-         * This callback is optional.
-         *
-         * Returns:
-         *
-         * 0 on success or a negative error code on failure. This can for
-         * example fail when the backing storage can't be allocated. Can also
-         * return -ERESTARTSYS or -EINTR when the call has been interrupted and
-         * needs to be restarted.
-         */
-        int (*begin_cpu_access_umapped)(struct dma_buf *dmabuf,
-                                        enum dma_data_direction);

         /**
          * @begin_cpu_access_partial:
@@ -274,28 +265,6 @@
         int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

         /**
-         * @end_cpu_access_umapped:
-         *
-         * This is called as result a of the DMA_BUF_IOCTL_SYNC IOCTL being
-         * called with the DMA_BUF_SYNC_END and DMA_BUF_SYNC_USER_MAPPED flags
-         * set. The exporter can use to limit cache flushing to only those parts
-         * of the buffer which are mmap(ed) and to unpin any resources pinned in
-         * @begin_cpu_access_umapped.
-         * The result of any dma_buf kmap calls after end_cpu_access_umapped is
-         * undefined.
-         *
-         * This callback is optional.
-         *
-         * Returns:
-         *
-         * 0 on success or a negative error code on failure. Can return
-         * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
-         * to be restarted.
-         */
-        int (*end_cpu_access_umapped)(struct dma_buf *dmabuf,
-                                      enum dma_data_direction);
-
-        /**
          * @end_cpu_access_partial:
          *
          * This is called from dma_buf_end_cpu_access_partial() when the
@@ -316,9 +285,6 @@
         int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
                                       enum dma_data_direction,
                                       unsigned int offset, unsigned int len);
-
-        void *(*map)(struct dma_buf *, unsigned long);
-        void (*unmap)(struct dma_buf *, unsigned long, void *);

         /**
          * @mmap:
@@ -388,8 +354,12 @@
          * will be populated with the buffer's flags.
          */
         int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
+
+        ANDROID_KABI_RESERVE(1);
+        ANDROID_KABI_RESERVE(2);
 };

+#ifdef CONFIG_DMABUF_CACHE
 /**
  * dma_buf_destructor - dma-buf destructor function
  * @dmabuf: [in] pointer to dma-buf
@@ -401,21 +371,23 @@
  * won't be called.
  */
 typedef int (*dma_buf_destructor)(struct dma_buf *dmabuf, void *dtor_data);
+#endif

 /**
  * struct dma_buf - shared buffer object
  * @size: size of the buffer
  * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached,
+ *               protected by dma_resv lock.
  * @ops: dma_buf_ops associated with this buffer object.
  * @lock: used internally to serialize list manipulation, attach/detach and
- *        vmap/unmap, and accesses to name
+ *        vmap/unmap
  * @vmapping_counter: used internally to refcnt the vmaps
  * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging.
- * @name_lock: lock to protect name.
- * @ktime: time (in jiffies) at which the buffer was born
+ * @name: userspace-provided name; useful for accounting and debugging,
+ *        protected by @resv.
+ * @name_lock: spinlock to protect name access
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
@@ -424,6 +396,7 @@
  * @poll: for userspace poll support
  * @cb_excl: for userspace poll support
  * @cb_shared: for userspace poll support
+ * @sysfs_entry: for exposing information about this buffer in sysfs.
  *
  * This represents a shared buffer, created by calling dma_buf_export(). The
  * userspace representation is a normal file descriptor, which can be created by
@@ -438,10 +411,6 @@
         size_t size;
         struct file *file;
         struct list_head attachments;
-#ifdef CONFIG_ARCH_ROCKCHIP
-        struct list_head release_callbacks;
-        struct mutex release_lock;
-#endif
         const struct dma_buf_ops *ops;
         struct mutex lock;
         unsigned vmapping_counter;
@@ -449,13 +418,10 @@
         const char *exp_name;
         const char *name;
         spinlock_t name_lock;
-#if defined(CONFIG_DEBUG_FS)
-        ktime_t ktime;
-#endif
         struct module *owner;
         struct list_head list_node;
         void *priv;
-        struct reservation_object *resv;
+        struct dma_resv *resv;

         /* poll support */
         wait_queue_head_t poll;
@@ -466,17 +432,70 @@

                 __poll_t active;
         } cb_excl, cb_shared;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+        /* for sysfs stats */
+        struct dma_buf_sysfs_entry {
+                struct kobject kobj;
+                struct dma_buf *dmabuf;
+        } *sysfs_entry;
+#endif
+#ifdef CONFIG_DMABUF_CACHE
         dma_buf_destructor dtor;
         void *dtor_data;
-        atomic_t dent_count;
+        struct mutex cache_lock;
+#endif
+
+        ANDROID_KABI_RESERVE(1);
+        ANDROID_KABI_RESERVE(2);
+};
+
+/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+        /**
+         * @allow_peer2peer:
+         *
+         * If this is set to true the importer must be able to handle peer
+         * resources without struct pages.
+         */
+        bool allow_peer2peer;
+
+        /**
+         * @move_notify: [optional] notification that the DMA-buf is moving
+         *
+         * If this callback is provided the framework can avoid pinning the
+         * backing store while mappings exist.
+         *
+         * This callback is called with the lock of the reservation object
+         * associated with the dma_buf held and the mapping function must be
+         * called with this lock held as well. This makes sure that no mapping
+         * is created concurrently with an ongoing move operation.
+         *
+         * Mappings stay valid and are not directly affected by this callback.
+         * But the DMA-buf can now be in a different physical location, so all
+         * mappings should be destroyed and re-created as soon as possible.
+         *
+         * New mappings can be created after this callback returns, and will
+         * point to the new location of the DMA-buf.
+         */
+        void (*move_notify)(struct dma_buf_attachment *attach);
 };

 /**
  * struct dma_buf_attachment - holds device-buffer attachment data
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
+ * @peer2peer: true if the importer can handle peer resources without pages.
  * @priv: exporter specific attachment data.
+ * @importer_ops: importer operations for this attachment, if provided
+ *                dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
  * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
  *                 through dma_buf_map_attachment.
  *
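Pulling the importer side together: a dynamic importer provides a struct dma_buf_attach_ops and attaches with dma_buf_dynamic_attach() (declared further down). A sketch, where struct my_importer and its needs_remap flag are hypothetical:

        static void my_move_notify(struct dma_buf_attachment *attach)
        {
                struct my_importer *imp = attach->importer_priv;

                /* runs with dmabuf->resv held: the old mapping is now stale,
                 * so flag it for re-creation under the same lock later */
                imp->needs_remap = true;
        }

        static const struct dma_buf_attach_ops my_attach_ops = {
                .allow_peer2peer = true, /* peer resources without pages are OK */
                .move_notify     = my_move_notify,
        };

        attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);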
@@ -493,8 +512,16 @@
         struct dma_buf *dmabuf;
         struct device *dev;
         struct list_head node;
+        struct sg_table *sgt;
+        enum dma_data_direction dir;
+        bool peer2peer;
+        const struct dma_buf_attach_ops *importer_ops;
+        void *importer_priv;
         void *priv;
         unsigned long dma_map_attrs;
+
+        ANDROID_KABI_RESERVE(1);
+        ANDROID_KABI_RESERVE(2);
 };

 /**
@@ -516,8 +543,11 @@
         const struct dma_buf_ops *ops;
         size_t size;
         int flags;
-        struct reservation_object *resv;
+        struct dma_resv *resv;
         void *priv;
+
+        ANDROID_KABI_RESERVE(1);
+        ANDROID_KABI_RESERVE(2);
 };

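On the exporter side, this structure is normally filled via the header's DEFINE_DMA_BUF_EXPORT_INFO() helper and handed to dma_buf_export(); a short sketch reusing the hypothetical my_exporter_ops from above (error paths elided):

        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dmabuf;

        exp_info.ops   = &my_exporter_ops;
        exp_info.size  = PAGE_SIZE;
        exp_info.flags = O_RDWR;
        exp_info.resv  = NULL;   /* let the core allocate a dma_resv */
        exp_info.priv  = my_buf; /* hypothetical driver object */

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);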
 /**
@@ -545,18 +575,46 @@
         get_file(dmabuf->file);
 }

-#ifdef CONFIG_ARCH_ROCKCHIP
-int dma_buf_set_release_callback(struct dma_buf *dmabuf,
-                                 void (*callback)(void *), void *data);
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+        return !!dmabuf->ops->pin;
+}

-void *dma_buf_get_release_callback_data(struct dma_buf *dmabuf,
-                                        void (*callback)(void *));
-#endif
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+ * the dma_resv lock held.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+        return !!attach->importer_ops;
+}

+int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
+                                    void *private), void *private);
+int is_dma_buf_file(struct file *file);
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-                                          struct device *dev);
+                                          struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+                       const struct dma_buf_attach_ops *importer_ops,
+                       void *importer_priv);
 void dma_buf_detach(struct dma_buf *dmabuf,
-                    struct dma_buf_attachment *dmabuf_attach);
+                    struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);

 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

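Usage of the pin interface from an importer that must keep a buffer resident (scanout-style), sketched under the locking rules documented above:

        dma_resv_lock(dmabuf->resv, NULL);
        ret = dma_buf_pin(attach);      /* invokes the exporter's @pin */
        dma_resv_unlock(dmabuf->resv);
        if (ret)
                return ret;

        /* ... buffer cannot move while pinned ... */

        dma_resv_lock(dmabuf->resv, NULL);
        dma_buf_unpin(attach);          /* buffer may move again */
        dma_resv_unlock(dmabuf->resv);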
@@ -568,6 +626,7 @@
                                        enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
                               enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                              enum dma_data_direction dir);
 int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
@@ -578,16 +637,16 @@
 int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
                                    enum dma_data_direction dir,
                                    unsigned int offset, unsigned int len);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
                  unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
 int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid);

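With the kmap interface gone, whole-buffer CPU access goes through dma_buf_vmap()/dma_buf_vunmap(), bracketed by the begin/end CPU-access calls; a sketch with error checks elided:

        void *vaddr;

        dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
        vaddr = dma_buf_vmap(dmabuf);
        if (vaddr) {
                /* read or write the buffer through vaddr */
                dma_buf_vunmap(dmabuf, vaddr);
        }
        dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);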
+#ifdef CONFIG_DMABUF_CACHE
 /**
  * dma_buf_set_destructor - set the dma-buf's destructor
  * @dmabuf: [in] pointer to dma-buf
@@ -601,5 +660,16 @@
         dmabuf->dtor = dtor;
         dmabuf->dtor_data = dtor_data;
 }
+#endif
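With CONFIG_DMABUF_CACHE enabled, a caller pairs the setter above with a destructor along these lines (my_dtor and my_data are hypothetical):

        static int my_dtor(struct dma_buf *dmabuf, void *dtor_data)
        {
                kfree(dtor_data); /* drop caller-side bookkeeping */
                return 0;         /* see the dma_buf_destructor comment above
                                   * for the meaning of a nonzero return */
        }

        dma_buf_set_destructor(dmabuf, my_dtor, my_data);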
+
+#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
+void dma_buf_reset_peak_size(void);
+size_t dma_buf_get_peak_size(void);
+size_t dma_buf_get_total_size(void);
+#else
+static inline void dma_buf_reset_peak_size(void) {}
+static inline size_t dma_buf_get_peak_size(void) { return 0; }
+static inline size_t dma_buf_get_total_size(void) { return 0; }
+#endif
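These Rockchip accounting helpers compile to no-ops when CONFIG_RK_DMABUF_DEBUG is off, so callers need no #ifdef guards; hypothetical usage:

        dma_buf_reset_peak_size();
        /* ... exercise some allocations ... */
        pr_info("dma-buf peak %zu, total %zu bytes\n",
                dma_buf_get_peak_size(), dma_buf_get_total_size());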

 #endif /* __DMA_BUF_H__ */