2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/drm/i915/i915_vma.c
@@ -22,13 +22,36 @@
2222 *
2323 */
2424
25
-#include "i915_vma.h"
25
+#include <linux/sched/mm.h>
26
+#include <drm/drm_gem.h>
27
+
28
+#include "display/intel_frontbuffer.h"
29
+
30
+#include "gt/intel_engine.h"
31
+#include "gt/intel_engine_heartbeat.h"
32
+#include "gt/intel_gt.h"
33
+#include "gt/intel_gt_requests.h"
2634
2735 #include "i915_drv.h"
28
-#include "intel_ringbuffer.h"
29
-#include "intel_frontbuffer.h"
36
+#include "i915_globals.h"
37
+#include "i915_sw_fence_work.h"
38
+#include "i915_trace.h"
39
+#include "i915_vma.h"
3040
31
-#include <drm/drm_gem.h>
41
+static struct i915_global_vma {
42
+ struct i915_global base;
43
+ struct kmem_cache *slab_vmas;
44
+} global;
45
+
46
+struct i915_vma *i915_vma_alloc(void)
47
+{
48
+ return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
49
+}
50
+
51
+void i915_vma_free(struct i915_vma *vma)
52
+{
53
+ return kmem_cache_free(global.slab_vmas, vma);
54
+}
3255
3356 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
3457
@@ -36,11 +59,8 @@
3659
3760 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
3861 {
39
- unsigned long entries[12];
40
- struct stack_trace trace = {
41
- .entries = entries,
42
- .max_entries = ARRAY_SIZE(entries),
43
- };
62
+ unsigned long *entries;
63
+ unsigned int nr_entries;
4464 char buf[512];
4565
4666 if (!vma->node.stack) {
@@ -49,8 +69,8 @@
4969 return;
5070 }
5171
52
- depot_fetch_stack(vma->node.stack, &trace);
53
- snprint_stack_trace(buf, sizeof(buf), &trace, 0);
72
+ nr_entries = stack_depot_fetch(vma->node.stack, &entries);
73
+ stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
5474 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
5575 vma->node.start, vma->node.size, reason, buf);
5676 }
@@ -63,66 +83,20 @@
6383
6484 #endif
6585
66
-struct i915_vma_active {
67
- struct i915_gem_active base;
68
- struct i915_vma *vma;
69
- struct rb_node node;
70
- u64 timeline;
71
-};
72
-
73
-static void
74
-__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
86
+static inline struct i915_vma *active_to_vma(struct i915_active *ref)
7587 {
76
- struct drm_i915_gem_object *obj = vma->obj;
77
-
78
- GEM_BUG_ON(!i915_vma_is_active(vma));
79
- if (--vma->active_count)
80
- return;
81
-
82
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
83
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
84
-
85
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
86
- if (--obj->active_count)
87
- return;
88
-
89
- /* Prune the shared fence arrays iff completely idle (inc. external) */
90
- if (reservation_object_trylock(obj->resv)) {
91
- if (reservation_object_test_signaled_rcu(obj->resv, true))
92
- reservation_object_add_excl_fence(obj->resv, NULL);
93
- reservation_object_unlock(obj->resv);
94
- }
95
-
96
- /* Bump our place on the bound list to keep it roughly in LRU order
97
- * so that we don't steal from recently used but inactive objects
98
- * (unless we are forced to ofc!)
99
- */
100
- spin_lock(&rq->i915->mm.obj_lock);
101
- if (obj->bind_count)
102
- list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
103
- spin_unlock(&rq->i915->mm.obj_lock);
104
-
105
- obj->mm.dirty = true; /* be paranoid */
106
-
107
- if (i915_gem_object_has_active_reference(obj)) {
108
- i915_gem_object_clear_active_reference(obj);
109
- i915_gem_object_put(obj);
110
- }
88
+ return container_of(ref, typeof(struct i915_vma), active);
11189 }
11290
113
-static void
114
-i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
91
+static int __i915_vma_active(struct i915_active *ref)
11592 {
116
- struct i915_vma_active *active =
117
- container_of(base, typeof(*active), base);
118
-
119
- __i915_vma_retire(active->vma, rq);
93
+ return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
12094 }
12195
122
-static void
123
-i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
96
+__i915_active_call
97
+static void __i915_vma_retire(struct i915_active *ref)
12498 {
125
- __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
99
+ i915_vma_put(active_to_vma(ref));
126100 }
127101
128102 static struct i915_vma *
@@ -130,26 +104,36 @@
130104 struct i915_address_space *vm,
131105 const struct i915_ggtt_view *view)
132106 {
107
+ struct i915_vma *pos = ERR_PTR(-E2BIG);
133108 struct i915_vma *vma;
134109 struct rb_node *rb, **p;
135110
136111 /* The aliasing_ppgtt should never be used directly! */
137
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
112
+ GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
138113
139
- vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
114
+ vma = i915_vma_alloc();
140115 if (vma == NULL)
141116 return ERR_PTR(-ENOMEM);
142117
143
- vma->active = RB_ROOT;
144
-
145
- init_request_active(&vma->last_active, i915_vma_last_retire);
146
- init_request_active(&vma->last_fence, NULL);
147
- vma->vm = vm;
118
+ kref_init(&vma->ref);
119
+ mutex_init(&vma->pages_mutex);
120
+ vma->vm = i915_vm_get(vm);
148121 vma->ops = &vm->vma_ops;
149122 vma->obj = obj;
150
- vma->resv = obj->resv;
123
+ vma->resv = obj->base.resv;
151124 vma->size = obj->base.size;
152125 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
126
+
127
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
128
+
129
+ /* Declare ourselves safe for use inside shrinkers */
130
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
131
+ fs_reclaim_acquire(GFP_KERNEL);
132
+ might_lock(&vma->active.mutex);
133
+ fs_reclaim_release(GFP_KERNEL);
134
+ }
135
+
136
+ INIT_LIST_HEAD(&vma->closed_link);
153137
154138 if (view && view->type != I915_GGTT_VIEW_NORMAL) {
155139 vma->ggtt_view = *view;
@@ -164,6 +148,9 @@
164148 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
165149 vma->size = intel_rotation_info_size(&view->rotated);
166150 vma->size <<= PAGE_SHIFT;
151
+ } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
152
+ vma->size = intel_remapped_info_size(&view->remapped);
153
+ vma->size <<= PAGE_SHIFT;
167154 }
168155 }
169156
@@ -172,16 +159,18 @@
172159
173160 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
174161
162
+ spin_lock(&obj->vma.lock);
163
+
175164 if (i915_is_ggtt(vm)) {
176165 if (unlikely(overflows_type(vma->size, u32)))
177
- goto err_vma;
166
+ goto err_unlock;
178167
179168 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
180169 i915_gem_object_get_tiling(obj),
181170 i915_gem_object_get_stride(obj));
182171 if (unlikely(vma->fence_size < vma->size || /* overflow */
183172 vma->fence_size > vm->total))
184
- goto err_vma;
173
+ goto err_unlock;
185174
186175 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
187176
@@ -190,39 +179,54 @@
190179 i915_gem_object_get_stride(obj));
191180 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
192181
182
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
183
+ }
184
+
185
+ rb = NULL;
186
+ p = &obj->vma.tree.rb_node;
187
+ while (*p) {
188
+ long cmp;
189
+
190
+ rb = *p;
191
+ pos = rb_entry(rb, struct i915_vma, obj_node);
192
+
193
+ /*
194
+ * If the view already exists in the tree, another thread
195
+ * already created a matching vma, so return the older instance
196
+ * and dispose of ours.
197
+ */
198
+ cmp = i915_vma_compare(pos, vm, view);
199
+ if (cmp < 0)
200
+ p = &rb->rb_right;
201
+ else if (cmp > 0)
202
+ p = &rb->rb_left;
203
+ else
204
+ goto err_unlock;
205
+ }
206
+ rb_link_node(&vma->obj_node, rb, p);
207
+ rb_insert_color(&vma->obj_node, &obj->vma.tree);
208
+
209
+ if (i915_vma_is_ggtt(vma))
193210 /*
194211 * We put the GGTT vma at the start of the vma-list, followed
195212 * by the ppGGTT vma. This allows us to break early when
196213 * iterating over only the GGTT vma for an object, see
197214 * for_each_ggtt_vma()
198215 */
199
- vma->flags |= I915_VMA_GGTT;
200
- list_add(&vma->obj_link, &obj->vma_list);
201
- } else {
202
- list_add_tail(&vma->obj_link, &obj->vma_list);
203
- }
216
+ list_add(&vma->obj_link, &obj->vma.list);
217
+ else
218
+ list_add_tail(&vma->obj_link, &obj->vma.list);
204219
205
- rb = NULL;
206
- p = &obj->vma_tree.rb_node;
207
- while (*p) {
208
- struct i915_vma *pos;
209
-
210
- rb = *p;
211
- pos = rb_entry(rb, struct i915_vma, obj_node);
212
- if (i915_vma_compare(pos, vm, view) < 0)
213
- p = &rb->rb_right;
214
- else
215
- p = &rb->rb_left;
216
- }
217
- rb_link_node(&vma->obj_node, rb, p);
218
- rb_insert_color(&vma->obj_node, &obj->vma_tree);
219
- list_add(&vma->vm_link, &vm->unbound_list);
220
+ spin_unlock(&obj->vma.lock);
220221
221222 return vma;
222223
224
+err_unlock:
225
+ spin_unlock(&obj->vma.lock);
223226 err_vma:
224
- kmem_cache_free(vm->i915->vmas, vma);
225
- return ERR_PTR(-E2BIG);
227
+ i915_vm_put(vm);
228
+ i915_vma_free(vma);
229
+ return pos;
226230 }
227231
228232 static struct i915_vma *
@@ -232,7 +236,7 @@
232236 {
233237 struct rb_node *rb;
234238
235
- rb = obj->vma_tree.rb_node;
239
+ rb = obj->vma.tree.rb_node;
236240 while (rb) {
237241 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
238242 long cmp;
@@ -261,8 +265,6 @@
261265 * Once created, the VMA is kept until either the object is freed, or the
262266 * address space is closed.
263267 *
264
- * Must be called with struct_mutex held.
265
- *
266268 * Returns the vma, or an error pointer.
267269 */
268270 struct i915_vma *
@@ -272,17 +274,92 @@
272274 {
273275 struct i915_vma *vma;
274276
275
- lockdep_assert_held(&obj->base.dev->struct_mutex);
276277 GEM_BUG_ON(view && !i915_is_ggtt(vm));
277
- GEM_BUG_ON(vm->closed);
278
+ GEM_BUG_ON(!atomic_read(&vm->open));
278279
280
+ spin_lock(&obj->vma.lock);
279281 vma = vma_lookup(obj, vm, view);
280
- if (!vma)
282
+ spin_unlock(&obj->vma.lock);
283
+
284
+ /* vma_create() will resolve the race if another creates the vma */
285
+ if (unlikely(!vma))
281286 vma = vma_create(obj, vm, view);
282287
283288 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
284
- GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
285289 return vma;
290
+}
291
+
292
+struct i915_vma_work {
293
+ struct dma_fence_work base;
294
+ struct i915_address_space *vm;
295
+ struct i915_vm_pt_stash stash;
296
+ struct i915_vma *vma;
297
+ struct drm_i915_gem_object *pinned;
298
+ struct i915_sw_dma_fence_cb cb;
299
+ enum i915_cache_level cache_level;
300
+ unsigned int flags;
301
+};
302
+
303
+static int __vma_bind(struct dma_fence_work *work)
304
+{
305
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
306
+ struct i915_vma *vma = vw->vma;
307
+
308
+ vma->ops->bind_vma(vw->vm, &vw->stash,
309
+ vma, vw->cache_level, vw->flags);
310
+ return 0;
311
+}
312
+
313
+static void __vma_release(struct dma_fence_work *work)
314
+{
315
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
316
+
317
+ if (vw->pinned) {
318
+ __i915_gem_object_unpin_pages(vw->pinned);
319
+ i915_gem_object_put(vw->pinned);
320
+ }
321
+
322
+ i915_vm_free_pt_stash(vw->vm, &vw->stash);
323
+ i915_vm_put(vw->vm);
324
+}
325
+
326
+static const struct dma_fence_work_ops bind_ops = {
327
+ .name = "bind",
328
+ .work = __vma_bind,
329
+ .release = __vma_release,
330
+};
331
+
332
+struct i915_vma_work *i915_vma_work(void)
333
+{
334
+ struct i915_vma_work *vw;
335
+
336
+ vw = kzalloc(sizeof(*vw), GFP_KERNEL);
337
+ if (!vw)
338
+ return NULL;
339
+
340
+ dma_fence_work_init(&vw->base, &bind_ops);
341
+ vw->base.dma.error = -EAGAIN; /* disable the worker by default */
342
+
343
+ return vw;
344
+}
345
+
346
+int i915_vma_wait_for_bind(struct i915_vma *vma)
347
+{
348
+ int err = 0;
349
+
350
+ if (rcu_access_pointer(vma->active.excl.fence)) {
351
+ struct dma_fence *fence;
352
+
353
+ rcu_read_lock();
354
+ fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
355
+ rcu_read_unlock();
356
+ if (fence) {
357
+ err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
358
+ dma_fence_put(fence);
359
+ }
360
+ }
361
+
362
+ return err;
286363 }
287364
288365 /**
@@ -290,55 +367,82 @@
290367 * @vma: VMA to map
291368 * @cache_level: mapping cache level
292369 * @flags: flags like global or local mapping
370
+ * @work: preallocated worker for allocating and binding the PTE
293371 *
294372 * DMA addresses are taken from the scatter-gather table of this object (or of
295373 * this VMA in case of non-default GGTT views) and PTE entries set up.
296374 * Note that DMA addresses are also the only part of the SG table we care about.
297375 */
298
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
299
- u32 flags)
376
+int i915_vma_bind(struct i915_vma *vma,
377
+ enum i915_cache_level cache_level,
378
+ u32 flags,
379
+ struct i915_vma_work *work)
300380 {
301381 u32 bind_flags;
302382 u32 vma_flags;
303
- int ret;
304383
305384 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
306385 GEM_BUG_ON(vma->size > vma->node.size);
307386
308
- if (GEM_WARN_ON(range_overflows(vma->node.start,
309
- vma->node.size,
310
- vma->vm->total)))
387
+ if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
388
+ vma->node.size,
389
+ vma->vm->total)))
311390 return -ENODEV;
312391
313
- if (GEM_WARN_ON(!flags))
392
+ if (GEM_DEBUG_WARN_ON(!flags))
314393 return -EINVAL;
315394
316
- bind_flags = 0;
317
- if (flags & PIN_GLOBAL)
318
- bind_flags |= I915_VMA_GLOBAL_BIND;
319
- if (flags & PIN_USER)
320
- bind_flags |= I915_VMA_LOCAL_BIND;
395
+ bind_flags = flags;
396
+ bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
321397
322
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
323
- if (flags & PIN_UPDATE)
324
- bind_flags |= vma_flags;
325
- else
326
- bind_flags &= ~vma_flags;
398
+ vma_flags = atomic_read(&vma->flags);
399
+ vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
400
+
401
+ bind_flags &= ~vma_flags;
327402 if (bind_flags == 0)
328403 return 0;
329404
330405 GEM_BUG_ON(!vma->pages);
331406
332407 trace_i915_vma_bind(vma, bind_flags);
333
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
334
- if (ret)
335
- return ret;
408
+ if (work && bind_flags & vma->vm->bind_async_flags) {
409
+ struct dma_fence *prev;
336410
337
- vma->flags |= bind_flags;
411
+ work->vma = vma;
412
+ work->cache_level = cache_level;
413
+ work->flags = bind_flags;
414
+
415
+ /*
416
+ * Note we only want to chain up to the migration fence on
417
+ * the pages (not the object itself). As we don't track that,
418
+ * yet, we have to use the exclusive fence instead.
419
+ *
420
+ * Also note that we do not want to track the async vma as
421
+ * part of the obj->resv->excl_fence as it only affects
422
+ * execution and not content or object's backing store lifetime.
423
+ */
424
+ prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
425
+ if (prev) {
426
+ __i915_sw_fence_await_dma_fence(&work->base.chain,
427
+ prev,
428
+ &work->cb);
429
+ dma_fence_put(prev);
430
+ }
431
+
432
+ work->base.dma.error = 0; /* enable the queue_work() */
433
+
434
+ if (vma->obj) {
435
+ __i915_gem_object_pin_pages(vma->obj);
436
+ work->pinned = i915_gem_object_get(vma->obj);
437
+ }
438
+ } else {
439
+ vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
440
+ }
338441
339442 if (vma->obj)
340443 set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
341444
445
+ atomic_or(bind_flags, &vma->flags);
342446 return 0;
343447 }
344448
@@ -347,19 +451,15 @@
347451 void __iomem *ptr;
348452 int err;
349453
350
- /* Access through the GTT requires the device to be awake. */
351
- assert_rpm_wakelock_held(vma->vm->i915);
352
-
353
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
354
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
454
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
355455 err = -ENODEV;
356456 goto err;
357457 }
358458
359459 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
360
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
460
+ GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
361461
362
- ptr = vma->iomap;
462
+ ptr = READ_ONCE(vma->iomap);
363463 if (ptr == NULL) {
364464 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
365465 vma->node.start,
@@ -369,7 +469,10 @@
369469 goto err;
370470 }
371471
372
- vma->iomap = ptr;
472
+ if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
473
+ io_mapping_unmap(ptr);
474
+ ptr = vma->iomap;
475
+ }
373476 }
374477
375478 __i915_vma_pin(vma);
@@ -379,6 +482,8 @@
379482 goto err_unpin;
380483
381484 i915_vma_set_ggtt_write(vma);
485
+
486
+ /* NB Access through the GTT requires the device to be awake. */
382487 return ptr;
383488
384489 err_unpin:
@@ -389,18 +494,12 @@
389494
390495 void i915_vma_flush_writes(struct i915_vma *vma)
391496 {
392
- if (!i915_vma_has_ggtt_write(vma))
393
- return;
394
-
395
- i915_gem_flush_ggtt_writes(vma->vm->i915);
396
-
397
- i915_vma_unset_ggtt_write(vma);
497
+ if (i915_vma_unset_ggtt_write(vma))
498
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
398499 }
399500
400501 void i915_vma_unpin_iomap(struct i915_vma *vma)
401502 {
402
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
403
-
404503 GEM_BUG_ON(vma->iomap == NULL);
405504
406505 i915_vma_flush_writes(vma);
@@ -409,7 +508,7 @@
409508 i915_vma_unpin(vma);
410509 }
411510
412
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
511
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
413512 {
414513 struct i915_vma *vma;
415514 struct drm_i915_gem_object *obj;
@@ -422,9 +521,11 @@
422521 GEM_BUG_ON(!obj);
423522
424523 i915_vma_unpin(vma);
425
- i915_vma_close(vma);
426524
427
- __i915_gem_object_release_unless_active(obj);
525
+ if (flags & I915_VMA_RELEASE_MAP)
526
+ i915_gem_object_unpin_map(obj);
527
+
528
+ i915_gem_object_put(obj);
428529 }
429530
430531 bool i915_vma_misplaced(const struct i915_vma *vma,
@@ -432,6 +533,9 @@
432533 {
433534 if (!drm_mm_node_allocated(&vma->node))
434535 return false;
536
+
537
+ if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
538
+ return true;
435539
436540 if (vma->node.size < size)
437541 return true;
@@ -461,30 +565,18 @@
461565 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
462566 GEM_BUG_ON(!vma->fence_size);
463567
464
- /*
465
- * Explicitly disable for rotated VMA since the display does not
466
- * need the fence and the VMA is not accessible to other users.
467
- */
468
- if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
469
- return;
470
-
471568 fenceable = (vma->node.size >= vma->fence_size &&
472569 IS_ALIGNED(vma->node.start, vma->fence_alignment));
473570
474571 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
475572
476573 if (mappable && fenceable)
477
- vma->flags |= I915_VMA_CAN_FENCE;
574
+ set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
478575 else
479
- vma->flags &= ~I915_VMA_CAN_FENCE;
576
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
480577 }
481578
482
-static bool color_differs(struct drm_mm_node *node, unsigned long color)
483
-{
484
- return node->allocated && node->color != color;
485
-}
486
-
487
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
579
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
488580 {
489581 struct drm_mm_node *node = &vma->node;
490582 struct drm_mm_node *other;
@@ -496,7 +588,7 @@
496588 * these constraints apply and set the drm_mm.color_adjust
497589 * appropriately.
498590 */
499
- if (vma->vm->mm.color_adjust == NULL)
591
+ if (!i915_vm_has_cache_coloring(vma->vm))
500592 return true;
501593
502594 /* Only valid to be called on an already inserted vma */
@@ -504,26 +596,16 @@
504596 GEM_BUG_ON(list_empty(&node->node_list));
505597
506598 other = list_prev_entry(node, node_list);
507
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
599
+ if (i915_node_color_differs(other, color) &&
600
+ !drm_mm_hole_follows(other))
508601 return false;
509602
510603 other = list_next_entry(node, node_list);
511
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
604
+ if (i915_node_color_differs(other, color) &&
605
+ !drm_mm_hole_follows(node))
512606 return false;
513607
514608 return true;
515
-}
516
-
517
-static void assert_bind_count(const struct drm_i915_gem_object *obj)
518
-{
519
- /*
520
- * Combine the assertion that the object is bound and that we have
521
- * pinned its pages. But we should never have bound the object
522
- * more than we have pinned its pages. (For complete accuracy, we
523
- * assume that no else is pinning the pages, but as a rough assertion
524
- * that we will not run into problems later, this will do!)
525
- */
526
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
527609 }
528610
529611 /**
@@ -543,13 +625,11 @@
543625 static int
544626 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
545627 {
546
- struct drm_i915_private *dev_priv = vma->vm->i915;
547
- unsigned int cache_level;
628
+ unsigned long color;
548629 u64 start, end;
549630 int ret;
550631
551
- GEM_BUG_ON(i915_vma_is_closed(vma));
552
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
632
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
553633 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
554634
555635 size = max(size, vma->size);
@@ -569,7 +649,7 @@
569649
570650 end = vma->vm->total;
571651 if (flags & PIN_MAPPABLE)
572
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
652
+ end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
573653 if (flags & PIN_ZONE_4G)
574654 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
575655 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
@@ -585,35 +665,21 @@
585665 return -ENOSPC;
586666 }
587667
588
- if (vma->obj) {
589
- ret = i915_gem_object_pin_pages(vma->obj);
590
- if (ret)
591
- return ret;
592
-
593
- cache_level = vma->obj->cache_level;
594
- } else {
595
- cache_level = 0;
596
- }
597
-
598
- GEM_BUG_ON(vma->pages);
599
-
600
- ret = vma->ops->set_pages(vma);
601
- if (ret)
602
- goto err_unpin;
668
+ color = 0;
669
+ if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
670
+ color = vma->obj->cache_level;
603671
604672 if (flags & PIN_OFFSET_FIXED) {
605673 u64 offset = flags & PIN_OFFSET_MASK;
606674 if (!IS_ALIGNED(offset, alignment) ||
607
- range_overflows(offset, size, end)) {
608
- ret = -EINVAL;
609
- goto err_clear;
610
- }
675
+ range_overflows(offset, size, end))
676
+ return -EINVAL;
611677
612678 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
613
- size, offset, cache_level,
679
+ size, offset, color,
614680 flags);
615681 if (ret)
616
- goto err_clear;
682
+ return ret;
617683 } else {
618684 /*
619685 * We only support huge gtt pages through the 48b PPGTT,
@@ -649,127 +715,336 @@
649715 }
650716
651717 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
652
- size, alignment, cache_level,
718
+ size, alignment, color,
653719 start, end, flags);
654720 if (ret)
655
- goto err_clear;
721
+ return ret;
656722
657723 GEM_BUG_ON(vma->node.start < start);
658724 GEM_BUG_ON(vma->node.start + vma->node.size > end);
659725 }
660726 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
661
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
727
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
662728
663
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
664
-
665
- if (vma->obj) {
666
- struct drm_i915_gem_object *obj = vma->obj;
667
-
668
- spin_lock(&dev_priv->mm.obj_lock);
669
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
670
- obj->bind_count++;
671
- spin_unlock(&dev_priv->mm.obj_lock);
672
-
673
- assert_bind_count(obj);
674
- }
729
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
675730
676731 return 0;
677
-
678
-err_clear:
679
- vma->ops->clear_pages(vma);
680
-err_unpin:
681
- if (vma->obj)
682
- i915_gem_object_unpin_pages(vma->obj);
683
- return ret;
684732 }
685733
686734 static void
687
-i915_vma_remove(struct i915_vma *vma)
735
+i915_vma_detach(struct i915_vma *vma)
688736 {
689
- struct drm_i915_private *i915 = vma->vm->i915;
690
-
691737 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
692
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
693
-
694
- vma->ops->clear_pages(vma);
695
-
696
- drm_mm_remove_node(&vma->node);
697
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
738
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
698739
699740 /*
700
- * Since the unbound list is global, only move to that list if
701
- * no more VMAs exist.
741
+ * And finally now the object is completely decoupled from this
742
+ * vma, we can drop its hold on the backing storage and allow
743
+ * it to be reaped by the shrinker.
702744 */
703
- if (vma->obj) {
704
- struct drm_i915_gem_object *obj = vma->obj;
705
-
706
- spin_lock(&i915->mm.obj_lock);
707
- if (--obj->bind_count == 0)
708
- list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
709
- spin_unlock(&i915->mm.obj_lock);
710
-
711
- /*
712
- * And finally now the object is completely decoupled from this
713
- * vma, we can drop its hold on the backing storage and allow
714
- * it to be reaped by the shrinker.
715
- */
716
- i915_gem_object_unpin_pages(obj);
717
- assert_bind_count(obj);
718
- }
745
+ list_del(&vma->vm_link);
719746 }
720747
721
-int __i915_vma_do_pin(struct i915_vma *vma,
722
- u64 size, u64 alignment, u64 flags)
748
+static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
723749 {
724
- const unsigned int bound = vma->flags;
725
- int ret;
750
+ unsigned int bound;
751
+ bool pinned = true;
726752
727
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
728
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
729
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
753
+ bound = atomic_read(&vma->flags);
754
+ do {
755
+ if (unlikely(flags & ~bound))
756
+ return false;
730757
731
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
732
- ret = -EBUSY;
733
- goto err_unpin;
758
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
759
+ return false;
760
+
761
+ if (!(bound & I915_VMA_PIN_MASK))
762
+ goto unpinned;
763
+
764
+ GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
765
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
766
+
767
+ return true;
768
+
769
+unpinned:
770
+ /*
771
+ * If pin_count==0, but we are bound, check under the lock to avoid
772
+ * racing with a concurrent i915_vma_unbind().
773
+ */
774
+ mutex_lock(&vma->vm->mutex);
775
+ do {
776
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
777
+ pinned = false;
778
+ break;
779
+ }
780
+
781
+ if (unlikely(flags & ~bound)) {
782
+ pinned = false;
783
+ break;
784
+ }
785
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
786
+ mutex_unlock(&vma->vm->mutex);
787
+
788
+ return pinned;
789
+}
790
+
791
+static int vma_get_pages(struct i915_vma *vma)
792
+{
793
+ int err = 0;
794
+
795
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
796
+ return 0;
797
+
798
+ /* Allocations ahoy! */
799
+ if (mutex_lock_interruptible(&vma->pages_mutex))
800
+ return -EINTR;
801
+
802
+ if (!atomic_read(&vma->pages_count)) {
803
+ if (vma->obj) {
804
+ err = i915_gem_object_pin_pages(vma->obj);
805
+ if (err)
806
+ goto unlock;
807
+ }
808
+
809
+ err = vma->ops->set_pages(vma);
810
+ if (err) {
811
+ if (vma->obj)
812
+ i915_gem_object_unpin_pages(vma->obj);
813
+ goto unlock;
814
+ }
815
+ }
816
+ atomic_inc(&vma->pages_count);
817
+
818
+unlock:
819
+ mutex_unlock(&vma->pages_mutex);
820
+
821
+ return err;
822
+}
823
+
824
+static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
825
+{
826
+ /* We allocate under vma_get_pages, so beware the shrinker */
827
+ mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
828
+ GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
829
+ if (atomic_sub_return(count, &vma->pages_count) == 0) {
830
+ vma->ops->clear_pages(vma);
831
+ GEM_BUG_ON(vma->pages);
832
+ if (vma->obj)
833
+ i915_gem_object_unpin_pages(vma->obj);
834
+ }
835
+ mutex_unlock(&vma->pages_mutex);
836
+}
837
+
838
+static void vma_put_pages(struct i915_vma *vma)
839
+{
840
+ if (atomic_add_unless(&vma->pages_count, -1, 1))
841
+ return;
842
+
843
+ __vma_put_pages(vma, 1);
844
+}
845
+
846
+static void vma_unbind_pages(struct i915_vma *vma)
847
+{
848
+ unsigned int count;
849
+
850
+ lockdep_assert_held(&vma->vm->mutex);
851
+
852
+ /* The upper portion of pages_count is the number of bindings */
853
+ count = atomic_read(&vma->pages_count);
854
+ count >>= I915_VMA_PAGES_BIAS;
855
+ GEM_BUG_ON(!count);
856
+
857
+ __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
858
+}
859
+
860
+int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
861
+ u64 size, u64 alignment, u64 flags)
862
+{
863
+ struct i915_vma_work *work = NULL;
864
+ intel_wakeref_t wakeref = 0;
865
+ unsigned int bound;
866
+ int err;
867
+
868
+#ifdef CONFIG_PROVE_LOCKING
869
+ if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
870
+ WARN_ON(!ww);
871
+#endif
872
+
873
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
874
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
875
+
876
+ GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
877
+
878
+ /* First try and grab the pin without rebinding the vma */
879
+ if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
880
+ return 0;
881
+
882
+ err = vma_get_pages(vma);
883
+ if (err)
884
+ return err;
885
+
886
+ if (flags & PIN_GLOBAL)
887
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
888
+
889
+ if (flags & vma->vm->bind_async_flags) {
890
+ work = i915_vma_work();
891
+ if (!work) {
892
+ err = -ENOMEM;
893
+ goto err_rpm;
894
+ }
895
+
896
+ work->vm = i915_vm_get(vma->vm);
897
+
898
+ /* Allocate enough page directories to used PTE */
899
+ if (vma->vm->allocate_va_range) {
900
+ err = i915_vm_alloc_pt_stash(vma->vm,
901
+ &work->stash,
902
+ vma->size);
903
+ if (err)
904
+ goto err_fence;
905
+
906
+ err = i915_vm_pin_pt_stash(vma->vm,
907
+ &work->stash);
908
+ if (err)
909
+ goto err_fence;
910
+ }
734911 }
735912
736
- if ((bound & I915_VMA_BIND_MASK) == 0) {
737
- ret = i915_vma_insert(vma, size, alignment, flags);
738
- if (ret)
739
- goto err_unpin;
740
- }
741
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
913
+ /*
914
+ * Differentiate between user/kernel vma inside the aliasing-ppgtt.
915
+ *
916
+ * We conflate the Global GTT with the user's vma when using the
917
+ * aliasing-ppgtt, but it is still vitally important to try and
918
+ * keep the use cases distinct. For example, userptr objects are
919
+ * not allowed inside the Global GTT as that will cause lock
920
+ * inversions when we have to evict them the mmu_notifier callbacks -
921
+ * but they are allowed to be part of the user ppGTT which can never
922
+ * be mapped. As such we try to give the distinct users of the same
923
+ * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
924
+ * and i915_ppgtt separate].
925
+ *
926
+ * NB this may cause us to mask real lock inversions -- while the
927
+ * code is safe today, lockdep may not be able to spot future
928
+ * transgressions.
929
+ */
930
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex,
931
+ !(flags & PIN_GLOBAL));
932
+ if (err)
933
+ goto err_fence;
742934
743
- ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
744
- if (ret)
935
+ /* No more allocations allowed now we hold vm->mutex */
936
+
937
+ if (unlikely(i915_vma_is_closed(vma))) {
938
+ err = -ENOENT;
939
+ goto err_unlock;
940
+ }
941
+
942
+ bound = atomic_read(&vma->flags);
943
+ if (unlikely(bound & I915_VMA_ERROR)) {
944
+ err = -ENOMEM;
945
+ goto err_unlock;
946
+ }
947
+
948
+ if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
949
+ err = -EAGAIN; /* pins are meant to be fairly temporary */
950
+ goto err_unlock;
951
+ }
952
+
953
+ if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
954
+ __i915_vma_pin(vma);
955
+ goto err_unlock;
956
+ }
957
+
958
+ err = i915_active_acquire(&vma->active);
959
+ if (err)
960
+ goto err_unlock;
961
+
962
+ if (!(bound & I915_VMA_BIND_MASK)) {
963
+ err = i915_vma_insert(vma, size, alignment, flags);
964
+ if (err)
965
+ goto err_active;
966
+
967
+ if (i915_is_ggtt(vma->vm))
968
+ __i915_vma_set_map_and_fenceable(vma);
969
+ }
970
+
971
+ GEM_BUG_ON(!vma->pages);
972
+ err = i915_vma_bind(vma,
973
+ vma->obj ? vma->obj->cache_level : 0,
974
+ flags, work);
975
+ if (err)
745976 goto err_remove;
746977
747
- GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
978
+ /* There should only be at most 2 active bindings (user, global) */
979
+ GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
980
+ atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
981
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
748982
749
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
750
- __i915_vma_set_map_and_fenceable(vma);
751
-
983
+ __i915_vma_pin(vma);
984
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
985
+ GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
752986 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
753
- return 0;
754987
755988 err_remove:
756
- if ((bound & I915_VMA_BIND_MASK) == 0) {
757
- i915_vma_remove(vma);
758
- GEM_BUG_ON(vma->pages);
759
- GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
989
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
990
+ i915_vma_detach(vma);
991
+ drm_mm_remove_node(&vma->node);
760992 }
761
-err_unpin:
762
- __i915_vma_unpin(vma);
763
- return ret;
993
+err_active:
994
+ i915_active_release(&vma->active);
995
+err_unlock:
996
+ mutex_unlock(&vma->vm->mutex);
997
+err_fence:
998
+ if (work)
999
+ dma_fence_work_commit_imm(&work->base);
1000
+err_rpm:
1001
+ if (wakeref)
1002
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1003
+ vma_put_pages(vma);
1004
+ return err;
7641005 }
7651006
766
-void i915_vma_close(struct i915_vma *vma)
1007
+static void flush_idle_contexts(struct intel_gt *gt)
7671008 {
768
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
1009
+ struct intel_engine_cs *engine;
1010
+ enum intel_engine_id id;
7691011
770
- GEM_BUG_ON(i915_vma_is_closed(vma));
771
- vma->flags |= I915_VMA_CLOSED;
1012
+ for_each_engine(engine, gt, id)
1013
+ intel_engine_flush_barriers(engine);
7721014
1015
+ intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1016
+}
1017
+
1018
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1019
+ u32 align, unsigned int flags)
1020
+{
1021
+ struct i915_address_space *vm = vma->vm;
1022
+ int err;
1023
+
1024
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1025
+
1026
+ do {
1027
+ err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1028
+ if (err != -ENOSPC) {
1029
+ if (!err) {
1030
+ err = i915_vma_wait_for_bind(vma);
1031
+ if (err)
1032
+ i915_vma_unpin(vma);
1033
+ }
1034
+ return err;
1035
+ }
1036
+
1037
+ /* Unlike i915_vma_pin, we don't take no for an answer! */
1038
+ flush_idle_contexts(vm->gt);
1039
+ if (mutex_lock_interruptible(&vm->mutex) == 0) {
1040
+ i915_gem_evict_vm(vm);
1041
+ mutex_unlock(&vm->mutex);
1042
+ }
1043
+ } while (1);
1044
+}
1045
+
1046
+static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1047
+{
7731048 /*
7741049 * We defer actually closing, unbinding and destroying the VMA until
7751050 * the next idle point, or if the object is freed in the meantime. By
@@ -782,66 +1057,107 @@
7821057 * causing us to rebind the VMA once more. This ends up being a lot
7831058 * of wasted work for the steady state.
7841059 */
785
- list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
1060
+ GEM_BUG_ON(i915_vma_is_closed(vma));
1061
+ list_add(&vma->closed_link, &gt->closed_vma);
1062
+}
1063
+
1064
+void i915_vma_close(struct i915_vma *vma)
1065
+{
1066
+ struct intel_gt *gt = vma->vm->gt;
1067
+ unsigned long flags;
1068
+
1069
+ if (i915_vma_is_ggtt(vma))
1070
+ return;
1071
+
1072
+ GEM_BUG_ON(!atomic_read(&vma->open_count));
1073
+ if (atomic_dec_and_lock_irqsave(&vma->open_count,
1074
+ &gt->closed_lock,
1075
+ flags)) {
1076
+ __vma_close(vma, gt);
1077
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
1078
+ }
1079
+}
1080
+
1081
+static void __i915_vma_remove_closed(struct i915_vma *vma)
1082
+{
1083
+ struct intel_gt *gt = vma->vm->gt;
1084
+
1085
+ spin_lock_irq(&gt->closed_lock);
1086
+ list_del_init(&vma->closed_link);
1087
+ spin_unlock_irq(&gt->closed_lock);
7861088 }
7871089
7881090 void i915_vma_reopen(struct i915_vma *vma)
7891091 {
790
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
791
-
792
- if (vma->flags & I915_VMA_CLOSED) {
793
- vma->flags &= ~I915_VMA_CLOSED;
794
- list_del(&vma->closed_link);
795
- }
796
-}
797
-
798
-static void __i915_vma_destroy(struct i915_vma *vma)
799
-{
800
- struct drm_i915_private *i915 = vma->vm->i915;
801
- struct i915_vma_active *iter, *n;
802
-
803
- GEM_BUG_ON(vma->node.allocated);
804
- GEM_BUG_ON(vma->fence);
805
-
806
- GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
807
-
808
- list_del(&vma->obj_link);
809
- list_del(&vma->vm_link);
810
- if (vma->obj)
811
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
812
-
813
- rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
814
- GEM_BUG_ON(i915_gem_active_isset(&iter->base));
815
- kfree(iter);
816
- }
817
-
818
- kmem_cache_free(i915->vmas, vma);
819
-}
820
-
821
-void i915_vma_destroy(struct i915_vma *vma)
822
-{
823
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
824
-
825
- GEM_BUG_ON(i915_vma_is_active(vma));
826
- GEM_BUG_ON(i915_vma_is_pinned(vma));
827
-
8281092 if (i915_vma_is_closed(vma))
829
- list_del(&vma->closed_link);
830
-
831
- WARN_ON(i915_vma_unbind(vma));
832
- __i915_vma_destroy(vma);
1093
+ __i915_vma_remove_closed(vma);
8331094 }
8341095
835
-void i915_vma_parked(struct drm_i915_private *i915)
1096
+void i915_vma_release(struct kref *ref)
1097
+{
1098
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1099
+
1100
+ if (drm_mm_node_allocated(&vma->node)) {
1101
+ mutex_lock(&vma->vm->mutex);
1102
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1103
+ WARN_ON(__i915_vma_unbind(vma));
1104
+ mutex_unlock(&vma->vm->mutex);
1105
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1106
+ }
1107
+ GEM_BUG_ON(i915_vma_is_active(vma));
1108
+
1109
+ if (vma->obj) {
1110
+ struct drm_i915_gem_object *obj = vma->obj;
1111
+
1112
+ spin_lock(&obj->vma.lock);
1113
+ list_del(&vma->obj_link);
1114
+ if (!RB_EMPTY_NODE(&vma->obj_node))
1115
+ rb_erase(&vma->obj_node, &obj->vma.tree);
1116
+ spin_unlock(&obj->vma.lock);
1117
+ }
1118
+
1119
+ __i915_vma_remove_closed(vma);
1120
+ i915_vm_put(vma->vm);
1121
+
1122
+ i915_active_fini(&vma->active);
1123
+ i915_vma_free(vma);
1124
+}
1125
+
1126
+void i915_vma_parked(struct intel_gt *gt)
8361127 {
8371128 struct i915_vma *vma, *next;
1129
+ LIST_HEAD(closed);
8381130
839
- list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
840
- GEM_BUG_ON(!i915_vma_is_closed(vma));
841
- i915_vma_destroy(vma);
1131
+ spin_lock_irq(&gt->closed_lock);
1132
+ list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1133
+ struct drm_i915_gem_object *obj = vma->obj;
1134
+ struct i915_address_space *vm = vma->vm;
1135
+
1136
+ /* XXX All to avoid keeping a reference on i915_vma itself */
1137
+
1138
+ if (!kref_get_unless_zero(&obj->base.refcount))
1139
+ continue;
1140
+
1141
+ if (!i915_vm_tryopen(vm)) {
1142
+ i915_gem_object_put(obj);
1143
+ continue;
1144
+ }
1145
+
1146
+ list_move(&vma->closed_link, &closed);
8421147 }
1148
+ spin_unlock_irq(&gt->closed_lock);
8431149
844
- GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
1150
+ /* As the GT is held idle, no vma can be reopened as we destroy them */
1151
+ list_for_each_entry_safe(vma, next, &closed, closed_link) {
1152
+ struct drm_i915_gem_object *obj = vma->obj;
1153
+ struct i915_address_space *vm = vma->vm;
1154
+
1155
+ INIT_LIST_HEAD(&vma->closed_link);
1156
+ __i915_vma_put(vma);
1157
+
1158
+ i915_gem_object_put(obj);
1159
+ i915_vm_close(vm);
1160
+ }
8451161 }
8461162
8471163 static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -857,10 +1173,8 @@
8571173
8581174 void i915_vma_revoke_mmap(struct i915_vma *vma)
8591175 {
860
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
1176
+ struct drm_vma_offset_node *node;
8611177 u64 vma_offset;
862
-
863
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
8641178
8651179 if (!i915_vma_has_userfault(vma))
8661180 return;
@@ -868,6 +1182,7 @@
8681182 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
8691183 GEM_BUG_ON(!vma->obj->userfault_count);
8701184
1185
+ node = &vma->mmo->vma_node;
8711186 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
8721187 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
8731188 drm_vma_node_offset_addr(node) + vma_offset,
@@ -879,107 +1194,24 @@
8791194 list_del(&vma->obj->userfault_link);
8801195 }
8811196
882
-static void export_fence(struct i915_vma *vma,
883
- struct i915_request *rq,
884
- unsigned int flags)
1197
+static int
1198
+__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
8851199 {
886
- struct reservation_object *resv = vma->resv;
887
-
888
- /*
889
- * Ignore errors from failing to allocate the new fence, we can't
890
- * handle an error right now. Worst case should be missed
891
- * synchronisation leading to rendering corruption.
892
- */
893
- reservation_object_lock(resv, NULL);
894
- if (flags & EXEC_OBJECT_WRITE)
895
- reservation_object_add_excl_fence(resv, &rq->fence);
896
- else if (reservation_object_reserve_shared(resv) == 0)
897
- reservation_object_add_shared_fence(resv, &rq->fence);
898
- reservation_object_unlock(resv);
1200
+ return __i915_request_await_exclusive(rq, &vma->active);
8991201 }
9001202
901
-static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
1203
+int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
9021204 {
903
- struct i915_vma_active *active;
904
- struct rb_node **p, *parent;
905
- struct i915_request *old;
1205
+ int err;
9061206
907
- /*
908
- * We track the most recently used timeline to skip a rbtree search
909
- * for the common case, under typical loads we never need the rbtree
910
- * at all. We can reuse the last_active slot if it is empty, that is
911
- * after the previous activity has been retired, or if the active
912
- * matches the current timeline.
913
- *
914
- * Note that we allow the timeline to be active simultaneously in
915
- * the rbtree and the last_active cache. We do this to avoid having
916
- * to search and replace the rbtree element for a new timeline, with
917
- * the cost being that we must be aware that the vma may be retired
918
- * twice for the same timeline (as the older rbtree element will be
919
- * retired before the new request added to last_active).
920
- */
921
- old = i915_gem_active_raw(&vma->last_active,
922
- &vma->vm->i915->drm.struct_mutex);
923
- if (!old || old->fence.context == idx)
924
- goto out;
1207
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
9251208
926
- /* Move the currently active fence into the rbtree */
927
- idx = old->fence.context;
1209
+ /* Wait for the vma to be bound before we start! */
1210
+ err = __i915_request_await_bind(rq, vma);
1211
+ if (err)
1212
+ return err;
9281213
929
- parent = NULL;
930
- p = &vma->active.rb_node;
931
- while (*p) {
932
- parent = *p;
933
-
934
- active = rb_entry(parent, struct i915_vma_active, node);
935
- if (active->timeline == idx)
936
- goto replace;
937
-
938
- if (active->timeline < idx)
939
- p = &parent->rb_right;
940
- else
941
- p = &parent->rb_left;
942
- }
943
-
944
- active = kmalloc(sizeof(*active), GFP_KERNEL);
945
-
946
- /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
947
- if (unlikely(!i915_gem_active_raw(&vma->last_active,
948
- &vma->vm->i915->drm.struct_mutex))) {
949
- kfree(active);
950
- goto out;
951
- }
952
-
953
- if (unlikely(!active))
954
- return ERR_PTR(-ENOMEM);
955
-
956
- init_request_active(&active->base, i915_vma_retire);
957
- active->vma = vma;
958
- active->timeline = idx;
959
-
960
- rb_link_node(&active->node, parent, p);
961
- rb_insert_color(&active->node, &vma->active);
962
-
963
-replace:
964
- /*
965
- * Overwrite the previous active slot in the rbtree with last_active,
966
- * leaving last_active zeroed. If the previous slot is still active,
967
- * we must be careful as we now only expect to receive one retire
968
- * callback not two, and so much undo the active counting for the
969
- * overwritten slot.
970
- */
971
- if (i915_gem_active_isset(&active->base)) {
972
- /* Retire ourselves from the old rq->active_list */
973
- __list_del_entry(&active->base.link);
974
- vma->active_count--;
975
- GEM_BUG_ON(!vma->active_count);
976
- }
977
- GEM_BUG_ON(list_empty(&vma->last_active.link));
978
- list_replace_init(&vma->last_active.link, &active->base.link);
979
- active->base.request = fetch_and_zero(&vma->last_active.request);
980
-
981
-out:
982
- return &vma->last_active;
1214
+ return i915_active_add_request(&vma->active, rq);
9831215 }
9841216
9851217 int i915_vma_move_to_active(struct i915_vma *vma,
@@ -987,143 +1219,196 @@
9871219 unsigned int flags)
9881220 {
9891221 struct drm_i915_gem_object *obj = vma->obj;
990
- struct i915_gem_active *active;
1222
+ int err;
9911223
992
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
993
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1224
+ assert_object_held(obj);
9941225
995
- active = active_instance(vma, rq->fence.context);
996
- if (IS_ERR(active))
997
- return PTR_ERR(active);
1226
+ err = __i915_vma_move_to_active(vma, rq);
1227
+ if (unlikely(err))
1228
+ return err;
9981229
999
- /*
1000
- * Add a reference if we're newly entering the active list.
1001
- * The order in which we add operations to the retirement queue is
1002
- * vital here: mark_active adds to the start of the callback list,
1003
- * such that subsequent callbacks are called first. Therefore we
1004
- * add the active reference first and queue for it to be dropped
1005
- * *last*.
1006
- */
1007
- if (!i915_gem_active_isset(active) && !vma->active_count++) {
1008
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
1009
- obj->active_count++;
1010
- }
1011
- i915_gem_active_set(active, rq);
1012
- GEM_BUG_ON(!i915_vma_is_active(vma));
1013
- GEM_BUG_ON(!obj->active_count);
1014
-
1015
- obj->write_domain = 0;
10161230 if (flags & EXEC_OBJECT_WRITE) {
1231
+ struct intel_frontbuffer *front;
1232
+
1233
+ front = __intel_frontbuffer_get(obj);
1234
+ if (unlikely(front)) {
1235
+ if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1236
+ i915_active_add_request(&front->write, rq);
1237
+ intel_frontbuffer_put(front);
1238
+ }
1239
+
1240
+ dma_resv_add_excl_fence(vma->resv, &rq->fence);
10171241 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1018
-
1019
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
1020
- i915_gem_active_set(&obj->frontbuffer_write, rq);
1021
-
10221242 obj->read_domains = 0;
1243
+ } else {
1244
+ err = dma_resv_reserve_shared(vma->resv, 1);
1245
+ if (unlikely(err))
1246
+ return err;
1247
+
1248
+ dma_resv_add_shared_fence(vma->resv, &rq->fence);
1249
+ obj->write_domain = 0;
10231250 }
1251
+
1252
+ if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1253
+ i915_active_add_request(&vma->fence->active, rq);
1254
+
10241255 obj->read_domains |= I915_GEM_GPU_DOMAINS;
1256
+ obj->mm.dirty = true;
10251257
1026
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
1027
- i915_gem_active_set(&vma->last_fence, rq);
1028
-
1029
- export_fence(vma, rq, flags);
1258
+ GEM_BUG_ON(!i915_vma_is_active(vma));
10301259 return 0;
10311260 }
10321261
1033
-int i915_vma_unbind(struct i915_vma *vma)
1262
+void __i915_vma_evict(struct i915_vma *vma)
10341263 {
1035
- int ret;
1036
-
1037
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
1038
-
1039
- /*
1040
- * First wait upon any activity as retiring the request may
1041
- * have side-effects such as unpinning or even unbinding this vma.
1042
- */
1043
- might_sleep();
1044
- if (i915_vma_is_active(vma)) {
1045
- struct i915_vma_active *active, *n;
1046
-
1047
- /*
1048
- * When a closed VMA is retired, it is unbound - eek.
1049
- * In order to prevent it from being recursively closed,
1050
- * take a pin on the vma so that the second unbind is
1051
- * aborted.
1052
- *
1053
- * Even more scary is that the retire callback may free
1054
- * the object (last active vma). To prevent the explosion
1055
- * we defer the actual object free to a worker that can
1056
- * only proceed once it acquires the struct_mutex (which
1057
- * we currently hold, therefore it cannot free this object
1058
- * before we are finished).
1059
- */
1060
- __i915_vma_pin(vma);
1061
-
1062
- ret = i915_gem_active_retire(&vma->last_active,
1063
- &vma->vm->i915->drm.struct_mutex);
1064
- if (ret)
1065
- goto unpin;
1066
-
1067
- rbtree_postorder_for_each_entry_safe(active, n,
1068
- &vma->active, node) {
1069
- ret = i915_gem_active_retire(&active->base,
1070
- &vma->vm->i915->drm.struct_mutex);
1071
- if (ret)
1072
- goto unpin;
1073
- }
1074
-
1075
- ret = i915_gem_active_retire(&vma->last_fence,
1076
- &vma->vm->i915->drm.struct_mutex);
1077
-unpin:
1078
- __i915_vma_unpin(vma);
1079
- if (ret)
1080
- return ret;
1081
- }
1082
- GEM_BUG_ON(i915_vma_is_active(vma));
1083
-
1084
- if (i915_vma_is_pinned(vma)) {
1085
- vma_print_allocator(vma, "is pinned");
1086
- return -EBUSY;
1087
- }
1088
-
1089
- if (!drm_mm_node_allocated(&vma->node))
1090
- return 0;
1264
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
10911265
10921266 if (i915_vma_is_map_and_fenceable(vma)) {
1267
+ /* Force a pagefault for domain tracking on next user access */
1268
+ i915_vma_revoke_mmap(vma);
1269
+
10931270 /*
10941271 * Check that we have flushed all writes through the GGTT
10951272 * before the unbind, other due to non-strict nature of those
10961273 * indirect writes they may end up referencing the GGTT PTE
10971274 * after the unbind.
1275
+ *
1276
+ * Note that we may be concurrently poking at the GGTT_WRITE
1277
+ * bit from set-domain, as we mark all GGTT vma associated
1278
+ * with an object. We know this is for another vma, as we
1279
+ * are currently unbinding this one -- so if this vma will be
1280
+ * reused, it will be refaulted and have its dirty bit set
1281
+ * before the next write.
10981282 */
10991283 i915_vma_flush_writes(vma);
1100
- GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
11011284
11021285 /* release the fence reg _after_ flushing */
1103
- ret = i915_vma_put_fence(vma);
1104
- if (ret)
1105
- return ret;
1106
-
1107
- /* Force a pagefault for domain tracking on next user access */
1108
- i915_vma_revoke_mmap(vma);
1286
+ i915_vma_revoke_fence(vma);
11091287
11101288 __i915_vma_iounmap(vma);
1111
- vma->flags &= ~I915_VMA_CAN_FENCE;
1289
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
11121290 }
11131291 GEM_BUG_ON(vma->fence);
11141292 GEM_BUG_ON(i915_vma_has_userfault(vma));
11151293
1116
- if (likely(!vma->vm->closed)) {
1294
+ if (likely(atomic_read(&vma->vm->open))) {
11171295 trace_i915_vma_unbind(vma);
1118
- vma->ops->unbind_vma(vma);
1296
+ vma->ops->unbind_vma(vma->vm, vma);
11191297 }
1120
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
1298
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1299
+ &vma->flags);
11211300
1122
- i915_vma_remove(vma);
1301
+ i915_vma_detach(vma);
1302
+ vma_unbind_pages(vma);
1303
+}
11231304
1305
+int __i915_vma_unbind(struct i915_vma *vma)
1306
+{
1307
+ int ret;
1308
+
1309
+ lockdep_assert_held(&vma->vm->mutex);
1310
+
1311
+ if (!drm_mm_node_allocated(&vma->node))
1312
+ return 0;
1313
+
1314
+ if (i915_vma_is_pinned(vma)) {
1315
+ vma_print_allocator(vma, "is pinned");
1316
+ return -EAGAIN;
1317
+ }
1318
+
1319
+ /*
1320
+ * After confirming that no one else is pinning this vma, wait for
1321
+ * any laggards who may have crept in during the wait (through
1322
+ * a residual pin skipping the vm->mutex) to complete.
1323
+ */
1324
+ ret = i915_vma_sync(vma);
1325
+ if (ret)
1326
+ return ret;
1327
+
1328
+ GEM_BUG_ON(i915_vma_is_active(vma));
1329
+ __i915_vma_evict(vma);
1330
+
1331
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
11241332 return 0;
1333
+}
1334
+
1335
+int i915_vma_unbind(struct i915_vma *vma)
1336
+{
1337
+ struct i915_address_space *vm = vma->vm;
1338
+ intel_wakeref_t wakeref = 0;
1339
+ int err;
1340
+
1341
+ /* Optimistic wait before taking the mutex */
1342
+ err = i915_vma_sync(vma);
1343
+ if (err)
1344
+ return err;
1345
+
1346
+ if (!drm_mm_node_allocated(&vma->node))
1347
+ return 0;
1348
+
1349
+ if (i915_vma_is_pinned(vma)) {
1350
+ vma_print_allocator(vma, "is pinned");
1351
+ return -EAGAIN;
1352
+ }
1353
+
1354
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1355
+ /* XXX not always required: nop_clear_range */
1356
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1357
+
1358
+ err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1359
+ if (err)
1360
+ goto out_rpm;
1361
+
1362
+ err = __i915_vma_unbind(vma);
1363
+ mutex_unlock(&vm->mutex);
1364
+
1365
+out_rpm:
1366
+ if (wakeref)
1367
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1368
+ return err;
1369
+}
1370
+
1371
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1372
+{
1373
+ i915_gem_object_make_unshrinkable(vma->obj);
1374
+ return vma;
1375
+}
1376
+
1377
+void i915_vma_make_shrinkable(struct i915_vma *vma)
1378
+{
1379
+ i915_gem_object_make_shrinkable(vma->obj);
1380
+}
1381
+
1382
+void i915_vma_make_purgeable(struct i915_vma *vma)
1383
+{
1384
+ i915_gem_object_make_purgeable(vma->obj);
11251385 }
11261386
11271387 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
11281388 #include "selftests/i915_vma.c"
11291389 #endif
1390
+
1391
+static void i915_global_vma_shrink(void)
1392
+{
1393
+ kmem_cache_shrink(global.slab_vmas);
1394
+}
1395
+
1396
+static void i915_global_vma_exit(void)
1397
+{
1398
+ kmem_cache_destroy(global.slab_vmas);
1399
+}
1400
+
1401
+static struct i915_global_vma global = { {
1402
+ .shrink = i915_global_vma_shrink,
1403
+ .exit = i915_global_vma_exit,
1404
+} };
1405
+
1406
+int __init i915_global_vma_init(void)
1407
+{
1408
+ global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1409
+ if (!global.slab_vmas)
1410
+ return -ENOMEM;
1411
+
1412
+ i915_global_register(&global.base);
1413
+ return 0;
1414
+}