forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/i915/i915_vma.h
@@ -30,187 +30,112 @@
 
 #include <drm/drm_mm.h>
 
+#include "gt/intel_ggtt_fencing.h"
+#include "gem/i915_gem_object.h"
+
 #include "i915_gem_gtt.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
 
+#include "i915_active.h"
 #include "i915_request.h"
-
-enum i915_cache_level;
-
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-        struct drm_mm_node node;
-        struct drm_i915_gem_object *obj;
-        struct i915_address_space *vm;
-        const struct i915_vma_ops *ops;
-        struct drm_i915_fence_reg *fence;
-        struct reservation_object *resv; /** Alias of obj->resv */
-        struct sg_table *pages;
-        void __iomem *iomap;
-        void *private; /* owned by creator */
-        u64 size;
-        u64 display_alignment;
-        struct i915_page_sizes page_sizes;
-
-        u32 fence_size;
-        u32 fence_alignment;
-
-        /**
-         * Count of the number of times this vma has been opened by different
-         * handles (but same file) for execbuf, i.e. the number of aliases
-         * that exist in the ctx->handle_vmas LUT for this vma.
-         */
-        unsigned int open_count;
-        unsigned long flags;
-        /**
-         * How many users have pinned this object in GTT space. The following
-         * users can each hold at most one reference: pwrite/pread, execbuffer
-         * (objects are not allowed multiple times for the same batchbuffer),
-         * and the framebuffer code. When switching/pageflipping, the
-         * framebuffer code has at most two buffers pinned per crtc.
-         *
-         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-         * bits with absolutely no headroom. So use 4 bits.
-         */
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW   BIT(5)
-
-        /** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND    BIT(6)
-#define I915_VMA_LOCAL_BIND     BIT(7)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-
-#define I915_VMA_GGTT           BIT(8)
-#define I915_VMA_CAN_FENCE      BIT(9)
-#define I915_VMA_CLOSED         BIT(10)
-#define I915_VMA_USERFAULT_BIT  11
-#define I915_VMA_USERFAULT      BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE     BIT(12)
-
-        unsigned int active_count;
-        struct rb_root active;
-        struct i915_gem_active last_active;
-        struct i915_gem_active last_fence;
-
-        /**
-         * Support different GGTT views into the same object.
-         * This means there can be multiple VMA mappings per object and per VM.
-         * i915_ggtt_view_type is used to distinguish between those entries.
-         * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
-         * assumed in GEM functions which take no ggtt view parameter.
-         */
-        struct i915_ggtt_view ggtt_view;
-
-        /** This object's place on the active/inactive lists */
-        struct list_head vm_link;
-
-        struct list_head obj_link; /* Link in the object's VMA list */
-        struct rb_node obj_node;
-        struct hlist_node obj_hash;
-
-        /** This vma's place in the execbuf reservation list */
-        struct list_head exec_link;
-        struct list_head reloc_link;
-
-        /** This vma's place in the eviction list */
-        struct list_head evict_link;
-
-        struct list_head closed_link;
-
-        /**
-         * Used for performing relocations during execbuffer insertion.
-         */
-        unsigned int *exec_flags;
-        struct hlist_node exec_node;
-        u32 exec_handle;
-};
+#include "i915_vma_types.h"
 
 struct i915_vma *
 i915_vma_instance(struct drm_i915_gem_object *obj,
                   struct i915_address_space *vm,
                   const struct i915_ggtt_view *view);
 
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
+#define I915_VMA_RELEASE_MAP BIT(0)
 
-static inline bool i915_vma_is_active(struct i915_vma *vma)
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
 {
-        return vma->active_count;
+        return !i915_active_is_idle(&vma->active);
 }
 
+int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
+                                           struct i915_request *rq);
 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
                                          struct i915_request *rq,
                                          unsigned int flags);
 
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
-        return vma->flags & I915_VMA_GGTT;
+        return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 }
 
 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
 {
-        return vma->flags & I915_VMA_GGTT_WRITE;
+        return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
 }
 
 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
 {
         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-        vma->flags |= I915_VMA_GGTT_WRITE;
+        set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
 }
 
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
 {
-        vma->flags &= ~I915_VMA_GGTT_WRITE;
+        return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+                                  __i915_vma_flags(vma));
 }
 
 void i915_vma_flush_writes(struct i915_vma *vma);
 
 static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
 {
-        return vma->flags & I915_VMA_CAN_FENCE;
-}
-
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
-        return vma->flags & I915_VMA_CLOSED;
+        return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
 }
 
 static inline bool i915_vma_set_userfault(struct i915_vma *vma)
 {
         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-        return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+        return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
 }
 
 static inline void i915_vma_unset_userfault(struct i915_vma *vma)
 {
-        return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+        return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
 }
 
 static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
 {
-        return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+        return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+        return !list_empty(&vma->closed_link);
 }
 
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
-        GEM_BUG_ON(!vma->node.allocated);
+        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
         GEM_BUG_ON(upper_32_bits(vma->node.start));
         GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
         return lower_32_bits(vma->node.start);
+}
+
+static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
+{
+        return i915_vm_to_ggtt(vma->vm)->pin_bias;
 }
 
 static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
 {
         i915_gem_object_get(vma->obj);
         return vma;
+}
+
+static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
+{
+        if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
+                return vma;
+
+        return NULL;
 }
 
 static inline void i915_vma_put(struct i915_vma *vma)
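This hunk moves the vma state bits into the atomic flags word (accessed through __i915_vma_flags()) and adds i915_vma_tryget() alongside i915_vma_get(). A minimal, hypothetical caller-side sketch of the tryget/put pairing (example_try_use_vma is made up for illustration and is not part of the patch):

/* Hypothetical caller, not from the patch: i915_vma_tryget() only succeeds
 * while the backing GEM object still has a non-zero refcount, so it pairs
 * with i915_vma_put(). */
static bool example_try_use_vma(struct i915_vma *vma)
{
        if (!i915_vma_tryget(vma))
                return false;   /* object already on its way out */

        /* ... safe to dereference vma->obj here ... */

        i915_vma_put(vma);
        return true;
}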
@@ -245,6 +170,8 @@
         if (cmp)
                 return cmp;
 
+        assert_i915_gem_gtt_types();
+
         /* ggtt_view.type also encodes its size so that we both distinguish
          * different views using it as a "type" and also use a compact (no
          * accessing of uninitialised padding bytes) memcmp without storing
@@ -257,48 +184,74 @@
          */
         BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
         BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+        BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
         BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
                      offsetof(typeof(*view), partial));
+        BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+                     offsetof(typeof(*view), remapped));
         return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
 }
 
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
-                  u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+                  enum i915_cache_level cache_level,
+                  u32 flags,
+                  struct i915_vma_work *work);
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
 bool i915_vma_misplaced(const struct i915_vma *vma,
                         u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
+int __i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
 void i915_vma_reopen(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
 
-int __i915_vma_do_pin(struct i915_vma *vma,
-                      u64 size, u64 alignment, u64 flags);
+static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
+{
+        if (kref_get_unless_zero(&vma->ref))
+                return vma;
+
+        return NULL;
+}
+
+void i915_vma_release(struct kref *ref);
+static inline void __i915_vma_put(struct i915_vma *vma)
+{
+        kref_put(&vma->ref, i915_vma_release);
+}
+
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
+
+static inline void i915_vma_lock(struct i915_vma *vma)
+{
+        dma_resv_lock(vma->resv, NULL);
+}
+
+static inline void i915_vma_unlock(struct i915_vma *vma)
+{
+        dma_resv_unlock(vma->resv);
+}
+
+int __must_check
+i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+                u64 size, u64 alignment, u64 flags);
+
 static inline int __must_check
 i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-        BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
-        BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
-        BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
-        /* Pin early to prevent the shrinker/eviction logic from destroying
-         * our vma as we insert and bind.
-         */
-        if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
-                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-                GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
-                return 0;
-        }
-
-        return __i915_vma_do_pin(vma, size, alignment, flags);
+        return i915_vma_pin_ww(vma, NULL, size, alignment, flags);
 }
+
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+                  u32 align, unsigned int flags);
 
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-        return vma->flags & I915_VMA_PIN_MASK;
+        return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
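With this hunk, i915_vma_pin() becomes a thin wrapper around i915_vma_pin_ww(vma, NULL, ...) and the pin count lives in the low bits of the atomic flags word. A hypothetical caller sketch of the usual pin/offset/unpin flow (example_get_ggtt_offset is invented for illustration; PIN_GLOBAL is the existing GGTT-bind flag and the vma is assumed to already be a GGTT vma):

/* Hypothetical usage, not from the patch: pin a GGTT vma, read its offset,
 * then drop the pin again. */
static int example_get_ggtt_offset(struct i915_vma *vma, u32 *offset)
{
        int err;

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                return err;

        *offset = i915_ggtt_offset(vma); /* asserts the drm_mm node is allocated */
        i915_vma_unpin(vma);
        return 0;
}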
@@ -308,18 +261,18 @@
 
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-        vma->flags++;
-        GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+        atomic_inc(&vma->flags);
+        GEM_BUG_ON(!i915_vma_is_pinned(vma));
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
-        vma->flags--;
+        GEM_BUG_ON(!i915_vma_is_pinned(vma));
+        atomic_dec(&vma->flags);
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
 {
-        GEM_BUG_ON(!i915_vma_is_pinned(vma));
         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
         __i915_vma_unpin(vma);
 }
@@ -327,7 +280,13 @@
 static inline bool i915_vma_is_bound(const struct i915_vma *vma,
                                      unsigned int where)
 {
-        return vma->flags & where;
+        return atomic_read(&vma->flags) & where;
+}
+
+static inline bool i915_node_color_differs(const struct drm_mm_node *node,
+                                           unsigned long color)
+{
+        return drm_mm_node_allocated(node) && node->color != color;
 }
 
 /**
@@ -338,8 +297,6 @@
  * An extra pinning of the VMA is acquired for the return iomapping,
  * the caller must call i915_vma_unpin_iomap to relinquish the pinning
  * after the iomapping is no longer required.
- *
- * Callers must hold the struct_mutex.
  *
  * Returns a valid iomapped pointer or ERR_PTR.
  */
@@ -352,8 +309,8 @@
  *
  * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
  *
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
  */
 void i915_vma_unpin_iomap(struct i915_vma *vma);
 
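The kernel-doc above keeps its core contract: every successful i915_vma_pin_iomap() takes an extra pin that i915_vma_unpin_iomap() releases; only the struct_mutex requirement is dropped. A hypothetical sketch of that pairing (example_poke_through_ggtt is made up for illustration):

/* Hypothetical caller, not from the patch. */
static int example_poke_through_ggtt(struct i915_vma *vma, u32 value)
{
        void __iomem *map;

        map = i915_vma_pin_iomap(vma);  /* takes an extra pin on success */
        if (IS_ERR(map))
                return PTR_ERR(map);

        writel(value, map);             /* write through the GGTT aperture */
        i915_vma_unpin_iomap(vma);      /* drop the extra pin */
        return 0;
}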
@@ -378,13 +335,15 @@
  *
  * True if the vma has a fence, false otherwise.
  */
-int i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+int __must_check i915_vma_pin_fence(struct i915_vma *vma);
+void i915_vma_revoke_fence(struct i915_vma *vma);
+
+int __i915_vma_pin_fence(struct i915_vma *vma);
 
 static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 {
-        GEM_BUG_ON(vma->fence->pin_count <= 0);
-        vma->fence->pin_count--;
+        GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
+        atomic_dec(&vma->fence->pin_count);
 }
 
 /**
@@ -398,12 +357,11 @@
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-        /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
         if (vma->fence)
                 __i915_vma_unpin_fence(vma);
 }
 
-void i915_vma_parked(struct drm_i915_private *i915);
+void i915_vma_parked(struct intel_gt *gt);
 
 #define for_each_until(cond) if (cond) break; else
 
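i915_vma_pin_fence() is now __must_check, and i915_vma_unpin_fence() quietly does nothing when no fence register was assigned, so a caller can pair the two unconditionally. A hypothetical sketch (example_with_fence is invented for illustration):

/* Hypothetical caller, not from the patch. */
static int example_with_fence(struct i915_vma *vma)
{
        int err;

        err = i915_vma_pin_fence(vma); /* may fail, hence __must_check */
        if (err)
                return err;

        /* ... access the fenced GGTT mapping here ... */

        i915_vma_unpin_fence(vma);     /* no-op if no fence was assigned */
        return 0;
}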
@@ -417,7 +375,22 @@
  * or the list is empty ofc.
  */
 #define for_each_ggtt_vma(V, OBJ) \
-        list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \
+        list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
                 for_each_until(!i915_vma_is_ggtt(V))
 
+struct i915_vma *i915_vma_alloc(void);
+void i915_vma_free(struct i915_vma *vma);
+
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
+void i915_vma_make_shrinkable(struct i915_vma *vma);
+void i915_vma_make_purgeable(struct i915_vma *vma);
+
+int i915_vma_wait_for_bind(struct i915_vma *vma);
+
+static inline int i915_vma_sync(struct i915_vma *vma)
+{
+        /* Wait for the asynchronous bindings and pending GPU reads */
+        return i915_active_wait(&vma->active);
+}
+
 #endif
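for_each_ggtt_vma() now walks obj->vma.list and, because GGTT vmas are kept at the head of that list, stops at the first non-GGTT entry. A hypothetical walk (example_count_ggtt_vmas is made up; the caller is assumed to hold a reference on obj):

/* Hypothetical caller, not from the patch. */
static unsigned int example_count_ggtt_vmas(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        unsigned int count = 0;

        for_each_ggtt_vma(vma, obj)
                count++;

        return count;
}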