.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
---|
1 | 2 | /* |
---|
2 | 3 | * Fence mechanism for dma-buf to allow for asynchronous dma access |
---|
3 | 4 | * |
---|
.. | .. |
---|
7 | 8 | * Authors: |
---|
8 | 9 | * Rob Clark <robdclark@gmail.com> |
---|
9 | 10 | * Maarten Lankhorst <maarten.lankhorst@canonical.com> |
---|
10 | | - * |
---|
11 | | - * This program is free software; you can redistribute it and/or modify it |
---|
12 | | - * under the terms of the GNU General Public License version 2 as published by |
---|
13 | | - * the Free Software Foundation. |
---|
14 | | - * |
---|
15 | | - * This program is distributed in the hope that it will be useful, but WITHOUT |
---|
16 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
---|
17 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
18 | | - * more details. |
---|
19 | 11 | */ |
---|
20 | 12 | |
---|
21 | 13 | #ifndef __LINUX_DMA_FENCE_H |
---|
.. | .. |
---|
71 | 63 | * been completed, or never called at all. |
---|
72 | 64 | */ |
---|
struct dma_fence {
	spinlock_t *lock;
	const struct dma_fence_ops *ops;
	/*
	 * We clear the callback list on kref_put so that by the time we
	 * release the fence it is unused. No one should be adding to the
	 * cb_list that they don't themselves hold a reference for.
	 *
	 * The lifetime of the timestamp is similarly tied to both the
	 * rcu freelist and the cb_list. The timestamp is only set upon
	 * signaling while simultaneously notifying the cb_list. Ergo, we
	 * only use either the cb_list or the timestamp. Upon destruction,
	 * neither are accessible, and so we can use the rcu. This means
	 * that the cb_list is *only* valid until the signal bit is set,
	 * and to read either you *must* hold a reference to the fence,
	 * and not just the rcu_read_lock.
	 *
	 * Listed in chronological order.
	 */
	union {
		struct list_head cb_list;
		/* @cb_list replaced by @timestamp on dma_fence_signal() */
		ktime_t timestamp;
		/* @timestamp replaced by @rcu on dma_fence_release() */
		struct rcu_head rcu;
	};
	u64 context;
	u64 seqno;
	unsigned long flags;
	struct kref refcount;
	int error;
};
---|
85 | 97 | |
---|
.. | .. |
---|
111 | 123 | * |
---|
112 | 124 | */ |
---|
113 | 125 | struct dma_fence_ops { |
---|
| 126 | + /** |
---|
| 127 | + * @use_64bit_seqno: |
---|
| 128 | + * |
---|
| 129 | + * True if this dma_fence implementation uses 64bit seqno, false |
---|
| 130 | + * otherwise. |
---|
| 131 | + */ |
---|
| 132 | + bool use_64bit_seqno; |
---|
| 133 | + |
---|
114 | 134 | /** |
---|
115 | 135 | * @get_driver_name: |
---|
116 | 136 | * |
---|
.. | .. |
---|
244 | 264 | }; |
---|
245 | 265 | |
---|
246 | 266 | void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, |
---|
247 | | - spinlock_t *lock, u64 context, unsigned seqno); |
---|
| 267 | + spinlock_t *lock, u64 context, u64 seqno); |
---|
248 | 268 | |
---|
249 | 269 | void dma_fence_release(struct kref *kref); |
---|
250 | 270 | void dma_fence_free(struct dma_fence *fence); |
---|
.. | .. |
---|
273 | 293 | } |
---|
274 | 294 | |
---|
275 | 295 | /** |
---|
276 | | - * dma_fence_get_rcu - get a fence from a reservation_object_list with |
---|
| 296 | + * dma_fence_get_rcu - get a fence from a dma_resv_list with |
---|
277 | 297 | * rcu read lock |
---|
278 | 298 | * @fence: fence to increase refcount of |
---|
279 | 299 | * |
---|
.. | .. |
---|
297 | 317 | * so long as the caller is using RCU on the pointer to the fence. |
---|
298 | 318 | * |
---|
299 | 319 | * An alternative mechanism is to employ a seqlock to protect a bunch of |
---|
300 | | - * fences, such as used by struct reservation_object. When using a seqlock, |
---|
| 320 | + * fences, such as used by struct dma_resv. When using a seqlock, |
---|
301 | 321 | * the seqlock must be taken before and checked after a reference to the |
---|
302 | 322 | * fence is acquired (as shown here). |
---|
303 | 323 | * |
---|
.. | .. |
---|
337 | 357 | } while (1); |
---|
338 | 358 | } |
---|
339 | 359 | |
---|
#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
void __dma_fence_might_wait(void);
#else
/*
 * Without lockdep the signalling annotations compile away to no-ops;
 * dma_fence_begin_signalling() still returns a cookie so callers can
 * pass it unconditionally to dma_fence_end_signalling().
 */
static inline bool dma_fence_begin_signalling(void)
{
	return true;
}
static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif
---|
| 372 | + |
---|
340 | 373 | int dma_fence_signal(struct dma_fence *fence); |
---|
341 | 374 | int dma_fence_signal_locked(struct dma_fence *fence); |
---|
| 375 | +int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp); |
---|
| 376 | +int dma_fence_signal_timestamp_locked(struct dma_fence *fence, |
---|
| 377 | + ktime_t timestamp); |
---|
342 | 378 | signed long dma_fence_default_wait(struct dma_fence *fence, |
---|
343 | 379 | bool intr, signed long timeout); |
---|
344 | 380 | int dma_fence_add_callback(struct dma_fence *fence, |
---|
.. | .. |
---|
410 | 446 | * __dma_fence_is_later - return if f1 is chronologically later than f2 |
---|
411 | 447 | * @f1: the first fence's seqno |
---|
412 | 448 | * @f2: the second fence's seqno from the same context |
---|
| 449 | + * @ops: dma_fence_ops associated with the seqno |
---|
413 | 450 | * |
---|
414 | 451 | * Returns true if f1 is chronologically later than f2. Both fences must be |
---|
415 | 452 | * from the same context, since a seqno is not common across contexts. |
---|
416 | 453 | */ |
---|
417 | | -static inline bool __dma_fence_is_later(u32 f1, u32 f2) |
---|
| 454 | +static inline bool __dma_fence_is_later(u64 f1, u64 f2, |
---|
| 455 | + const struct dma_fence_ops *ops) |
---|
418 | 456 | { |
---|
419 | | - return (int)(f1 - f2) > 0; |
---|
| 457 | + /* This is for backward compatibility with drivers which can only handle |
---|
| 458 | + * 32bit sequence numbers. Use a 64bit compare when the driver says to |
---|
| 459 | + * do so. |
---|
| 460 | + */ |
---|
| 461 | + if (ops->use_64bit_seqno) |
---|
| 462 | + return f1 > f2; |
---|
| 463 | + |
---|
| 464 | + return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; |
---|
420 | 465 | } |
---|
421 | 466 | |
---|
422 | 467 | /** |
---|
.. | .. |
---|
433 | 478 | if (WARN_ON(f1->context != f2->context)) |
---|
434 | 479 | return false; |
---|
435 | 480 | |
---|
436 | | - return __dma_fence_is_later(f1->seqno, f2->seqno); |
---|
| 481 | + return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops); |
---|
437 | 482 | } |
---|
438 | 483 | |
---|
439 | 484 | /** |
---|
.. | .. |
---|
541 | 586 | return ret < 0 ? ret : 0; |
---|
542 | 587 | } |
---|
543 | 588 | |
---|
| 589 | +struct dma_fence *dma_fence_get_stub(void); |
---|
544 | 590 | u64 dma_fence_context_alloc(unsigned num); |
---|
545 | 591 | |
---|
/*
 * Print a pr_info line prefixed with "f <context>#<seqno>: " — only when
 * CONFIG_DMA_FENCE_TRACE is enabled; compiles to nothing otherwise.
 */
#define DMA_FENCE_TRACE(f, fmt, args...) \
	do {								\
		struct dma_fence *__ff = (f);				\
		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))			\
			pr_info("f %llu#%llu: " fmt,			\
				__ff->context, __ff->seqno, ##args);	\
	} while (0)

/* Unconditional pr_warn with the same "f <context>#<seqno>: " prefix. */
#define DMA_FENCE_WARN(f, fmt, args...) \
	do {								\
		struct dma_fence *__ff = (f);				\
		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
			##args); \
	} while (0)

/* Unconditional pr_err with the same "f <context>#<seqno>: " prefix. */
#define DMA_FENCE_ERR(f, fmt, args...) \
	do {								\
		struct dma_fence *__ff = (f);				\
		pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
			##args); \
	} while (0)
---|
567 | 613 | |
---|