2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/include/linux/dma-fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Fence mechanism for dma-buf to allow for asynchronous dma access
  *
@@ -7,15 +8,6 @@
  * Authors:
  * Rob Clark <robdclark@gmail.com>
  * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 
 #ifndef __LINUX_DMA_FENCE_H
@@ -71,15 +63,35 @@
  * been completed, or never called at all.
  */
 struct dma_fence {
-        struct kref refcount;
-        const struct dma_fence_ops *ops;
-        struct rcu_head rcu;
-        struct list_head cb_list;
         spinlock_t *lock;
+        const struct dma_fence_ops *ops;
+        /*
+         * We clear the callback list on kref_put so that by the time we
+         * release the fence it is unused. No one should be adding to the
+         * cb_list that they don't themselves hold a reference for.
+         *
+         * The lifetime of the timestamp is similarly tied to both the
+         * rcu freelist and the cb_list. The timestamp is only set upon
+         * signaling while simultaneously notifying the cb_list. Ergo, we
+         * only use either the cb_list or the timestamp. Upon destruction,
+         * neither are accessible, and so we can use the rcu. This means
+         * that the cb_list is *only* valid until the signal bit is set,
+         * and to read either you *must* hold a reference to the fence,
+         * and not just the rcu_read_lock.
+         *
+         * Listed in chronological order.
+         */
+        union {
+                struct list_head cb_list;
+                /* @cb_list replaced by @timestamp on dma_fence_signal() */
+                ktime_t timestamp;
+                /* @timestamp replaced by @rcu on dma_fence_release() */
+                struct rcu_head rcu;
+        };
         u64 context;
-        unsigned seqno;
+        u64 seqno;
         unsigned long flags;
-        ktime_t timestamp;
+        struct kref refcount;
         int error;
 };
 
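The union makes the access rules in the comment above load-bearing: cb_list, timestamp and rcu now share storage, so a reader has to know which member is currently live. A minimal reader sketch, assuming (as in mainline) that DMA_FENCE_FLAG_TIMESTAMP_BIT is set once the timestamp has been published, and that the caller holds a full reference rather than just rcu_read_lock(); the helper name is hypothetical:

/* Sketch only: example_read_timestamp() is not part of this header. */
static ktime_t example_read_timestamp(struct dma_fence *fence)
{
        /* Before this bit is set the union still holds cb_list. */
        if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
                return fence->timestamp;

        return ktime_set(0, 0); /* not signalled yet, no timestamp */
}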
@@ -111,6 +123,14 @@
  *
  */
 struct dma_fence_ops {
+        /**
+         * @use_64bit_seqno:
+         *
+         * True if this dma_fence implementation uses 64bit seqno, false
+         * otherwise.
+         */
+        bool use_64bit_seqno;
+
         /**
          * @get_driver_name:
          *
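A driver that keeps a monotonically increasing 64-bit counter per timeline can opt into full-width comparisons by setting the new flag in its ops. The names below are hypothetical and only the mandatory get_driver_name/get_timeline_name hooks are filled in:

static const char *example_get_driver_name(struct dma_fence *fence)
{
        return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
        return "example-timeline";
}

static const struct dma_fence_ops example_fence_ops = {
        .use_64bit_seqno   = true, /* compare the full 64-bit seqno */
        .get_driver_name   = example_get_driver_name,
        .get_timeline_name = example_get_timeline_name,
};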
@@ -244,7 +264,7 @@
 };
 
 void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
-                    spinlock_t *lock, u64 context, unsigned seqno);
+                    spinlock_t *lock, u64 context, u64 seqno);
 
 void dma_fence_release(struct kref *kref);
 void dma_fence_free(struct dma_fence *fence);
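With the widened prototype, initialization only changes in the type of the final argument. A sketch using the hypothetical ops above:

/* Sketch: ctx would normally come from dma_fence_context_alloc(). */
static void example_fence_setup(struct dma_fence *fence, spinlock_t *lock,
                                u64 ctx, u64 seqno)
{
        dma_fence_init(fence, &example_fence_ops, lock, ctx, seqno);
}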
@@ -273,7 +293,7 @@
 }
 
 /**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
  * rcu read lock
  * @fence: fence to increase refcount of
  *
@@ -297,7 +317,7 @@
  * so long as the caller is using RCU on the pointer to the fence.
  *
  * An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
  * the seqlock must be taken before and checked after a reference to the
  * fence is acquired (as shown here).
  *
....@@ -337,8 +357,24 @@
337357 } while (1);
338358 }
339359
360
+#ifdef CONFIG_LOCKDEP
361
+bool dma_fence_begin_signalling(void);
362
+void dma_fence_end_signalling(bool cookie);
363
+void __dma_fence_might_wait(void);
364
+#else
365
+static inline bool dma_fence_begin_signalling(void)
366
+{
367
+ return true;
368
+}
369
+static inline void dma_fence_end_signalling(bool cookie) {}
370
+static inline void __dma_fence_might_wait(void) {}
371
+#endif
372
+
340373 int dma_fence_signal(struct dma_fence *fence);
341374 int dma_fence_signal_locked(struct dma_fence *fence);
375
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
376
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
377
+ ktime_t timestamp);
342378 signed long dma_fence_default_wait(struct dma_fence *fence,
343379 bool intr, signed long timeout);
344380 int dma_fence_add_callback(struct dma_fence *fence,
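The begin/end pair brackets code that must stay able to signal fences (no blocking allocations, no waiting on other fences); with CONFIG_LOCKDEP disabled they compile to no-ops. The timestamp variants let a driver report the hardware completion time instead of the moment dma_fence_signal() happened to run. A hedged sketch of a driver completion path using both; the function name and hw_timestamp source are illustrative only:

/* Sketch: publish a fence completion from a driver's completion path. */
static void example_complete(struct dma_fence *fence, ktime_t hw_timestamp)
{
        bool cookie = dma_fence_begin_signalling();

        dma_fence_signal_timestamp(fence, hw_timestamp);
        dma_fence_end_signalling(cookie);
}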
@@ -410,13 +446,22 @@
  * __dma_fence_is_later - return if f1 is chronologically later than f2
  * @f1: the first fence's seqno
  * @f2: the second fence's seqno from the same context
+ * @ops: dma_fence_ops associated with the seqno
  *
  * Returns true if f1 is chronologically later than f2. Both fences must be
  * from the same context, since a seqno is not common across contexts.
  */
-static inline bool __dma_fence_is_later(u32 f1, u32 f2)
+static inline bool __dma_fence_is_later(u64 f1, u64 f2,
+                                        const struct dma_fence_ops *ops)
 {
-        return (int)(f1 - f2) > 0;
+        /* This is for backward compatibility with drivers which can only handle
+         * 32bit sequence numbers. Use a 64bit compare when the driver says to
+         * do so.
+         */
+        if (ops->use_64bit_seqno)
+                return f1 > f2;
+
+        return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
 }
 
 /**
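The 32-bit path keeps the old signed-wraparound semantics, so a seqno that has just wrapped still counts as later; the 64-bit path assumes the counter never wraps and uses a plain compare. An illustrative check of the difference (the partially initialized ops are for demonstration only):

static void example_seqno_wrap_check(void)
{
        static const struct dma_fence_ops ops32 = { .use_64bit_seqno = false };
        static const struct dma_fence_ops ops64 = { .use_64bit_seqno = true };

        /* 32-bit semantics: 1 wrapped past 0xfffffffe, so it is later. */
        WARN_ON(!__dma_fence_is_later(1, 0xfffffffe, &ops32));
        /* 64-bit semantics: 1 < 0xfffffffe, so it is earlier. */
        WARN_ON(__dma_fence_is_later(1, 0xfffffffe, &ops64));
}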
@@ -433,7 +478,7 @@
         if (WARN_ON(f1->context != f2->context))
                 return false;
 
-        return __dma_fence_is_later(f1->seqno, f2->seqno);
+        return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
 }
 
 /**
439484 /**
....@@ -541,27 +586,28 @@
541586 return ret < 0 ? ret : 0;
542587 }
543588
589
+struct dma_fence *dma_fence_get_stub(void);
544590 u64 dma_fence_context_alloc(unsigned num);
545591
546592 #define DMA_FENCE_TRACE(f, fmt, args...) \
547593 do { \
548594 struct dma_fence *__ff = (f); \
549595 if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
550
- pr_info("f %llu#%u: " fmt, \
596
+ pr_info("f %llu#%llu: " fmt, \
551597 __ff->context, __ff->seqno, ##args); \
552598 } while (0)
553599
554600 #define DMA_FENCE_WARN(f, fmt, args...) \
555601 do { \
556602 struct dma_fence *__ff = (f); \
557
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
603
+ pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
558604 ##args); \
559605 } while (0)
560606
561607 #define DMA_FENCE_ERR(f, fmt, args...) \
562608 do { \
563609 struct dma_fence *__ff = (f); \
564
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
610
+ pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
565611 ##args); \
566612 } while (0)
567613
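dma_fence_get_stub(), declared above dma_fence_context_alloc(), returns a reference to a shared, already-signalled fence; it is useful when an interface requires a fence but there is no outstanding work to wait for. A short sketch (the helper name is hypothetical), with the caller still responsible for dma_fence_put():

/* Sketch: hand back something that is immediately waitable. */
static struct dma_fence *example_no_pending_work_fence(void)
{
        return dma_fence_get_stub(); /* already signalled; caller must put it */
}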