forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,25 +25,21 @@
 #include <linux/list_sort.h>
 #include <linux/prime_numbers.h>
 
-#include "../i915_selftest.h"
-#include "i915_random.h"
+#include "gem/i915_gem_context.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
 
-#include "mock_context.h"
+#include "i915_random.h"
+#include "i915_selftest.h"
+
 #include "mock_drm.h"
 #include "mock_gem_device.h"
+#include "mock_gtt.h"
+#include "igt_flush_test.h"
 
 static void cleanup_freed_objects(struct drm_i915_private *i915)
 {
-	/*
-	 * As we may hold onto the struct_mutex for inordinate lengths of
-	 * time, the NMI khungtaskd detector may fire for the free objects
-	 * worker.
-	 */
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	i915_gem_drain_freed_objects(i915);
-
-	mutex_lock(&i915->drm.struct_mutex);
 }
 
 static void fake_free_pages(struct drm_i915_gem_object *obj,
@@ -87,8 +83,6 @@
 	}
 	GEM_BUG_ON(rem);
 
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
 	return 0;
@@ -100,10 +94,10 @@
 {
 	fake_free_pages(obj, pages);
 	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
+	.name = "fake-gem",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 	.get_pages = fake_get_pages,
 	.put_pages = fake_put_pages,
@@ -112,6 +106,7 @@
 static struct drm_i915_gem_object *
 fake_dma_object(struct drm_i915_private *i915, u64 size)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 
 	GEM_BUG_ON(!size);
@@ -120,12 +115,14 @@
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		goto err;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &fake_ops);
+	i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+	i915_gem_object_set_volatile(obj);
 
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
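
Note on the hunk above: i915_gem_object_init() grew a third parameter, a per-caller struct lock_class_key, giving each object class its own lockdep class, and the fake object is now marked volatile, meaning its backing store may be discarded whenever it is unpinned. A minimal sketch of the updated construction sequence (same calls as the hunk, shown in isolation):

	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops, &lock_class);

	/* Scratch contents: the shrinker may drop the pages while unpinned. */
	i915_gem_object_set_volatile(obj);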
@@ -147,16 +144,16 @@
 static int igt_ppgtt_alloc(void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_ppgtt *ppgtt;
 	u64 size, last, limit;
 	int err = 0;
 
 	/* Allocate a ppggt and try to fill the entire range */
 
-	if (!USES_PPGTT(dev_priv))
+	if (!HAS_PPGTT(dev_priv))
 		return 0;
 
-	ppgtt = __hw_ppgtt_create(dev_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
@@ -170,59 +167,68 @@
 	 * This should ensure that we do not run into the oomkiller during
 	 * the test and take down the machine wilfully.
 	 */
-	limit = totalram_pages << PAGE_SHIFT;
+	limit = totalram_pages() << PAGE_SHIFT;
 	limit = min(ppgtt->vm.total, limit);
 
 	/* Check we can allocate the entire range */
 	for (size = 4096; size <= limit; size <<= 2) {
-		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
+		struct i915_vm_pt_stash stash = {};
+
+		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
+		if (err)
+			goto err_ppgtt_cleanup;
+
+		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
 		if (err) {
-			if (err == -ENOMEM) {
-				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
-					size, ilog2(size));
-				err = 0; /* virtual space too large! */
-			}
+			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 			goto err_ppgtt_cleanup;
 		}
 
+		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
 		cond_resched();
 
 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
+
+		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 	}
 
 	/* Check we can incrementally allocate the entire range */
 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
-		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
-						  last, size - last);
+		struct i915_vm_pt_stash stash = {};
+
+		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
+		if (err)
+			goto err_ppgtt_cleanup;
+
+		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
 		if (err) {
-			if (err == -ENOMEM) {
-				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
-					last, size - last, ilog2(size));
-				err = 0; /* virtual space too large! */
-			}
+			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 			goto err_ppgtt_cleanup;
 		}
 
+		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
+					    last, size - last);
 		cond_resched();
+
+		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 	}
 
 err_ppgtt_cleanup:
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_ppgtt_put(ppgtt);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_vm_put(&ppgtt->vm);
 	return err;
 }
 
-static int lowlevel_hole(struct drm_i915_private *i915,
-			 struct i915_address_space *vm,
+static int lowlevel_hole(struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
 {
 	I915_RND_STATE(seed_prng);
+	struct i915_vma *mock_vma;
 	unsigned int size;
-	struct i915_vma mock_vma;
 
-	memset(&mock_vma, 0, sizeof(struct i915_vma));
+	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
+	if (!mock_vma)
+		return -ENOMEM;
 
 	/* Keep creating larger objects until one cannot fit into the hole */
 	for (size = 12; (hole_end - hole_start) >> size; size++) {
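
The rewrite of igt_ppgtt_alloc() above replaces the old one-shot vm->allocate_va_range() call, which could fail with -ENOMEM deep inside the page-table code, with the two-phase i915_vm_pt_stash API: page-table memory is allocated and pinned up front, after which allocate_va_range() cannot fail. A sketch of the recurring pattern, assuming vm, addr and size are in scope:

	struct i915_vm_pt_stash stash = {};
	int err;

	/* Phase 1: preallocate page-table memory covering [addr, addr + size). */
	err = i915_vm_alloc_pt_stash(vm, &stash, size);
	if (err)
		return err;

	/* Phase 2: pin the stash; on failure the stash must still be freed. */
	err = i915_vm_pin_pt_stash(vm, &stash);
	if (err) {
		i915_vm_free_pt_stash(vm, &stash);
		return err;
	}

	/* With a populated stash, the range allocation itself cannot fail. */
	vm->allocate_va_range(vm, &stash, addr, size);

	/* Return whatever the allocation did not consume. */
	i915_vm_free_pt_stash(vm, &stash);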
@@ -246,8 +252,10 @@
 			if (order)
 				break;
 		} while (count >>= 1);
-		if (!count)
+		if (!count) {
+			kfree(mock_vma);
 			return -ENOMEM;
+		}
 		GEM_BUG_ON(!order);
 
 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
@@ -259,7 +267,7 @@
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(vm->i915, BIT_ULL(size));
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -275,6 +283,7 @@
 
 		for (n = 0; n < count; n++) {
 			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			intel_wakeref_t wakeref;
 
 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
 
@@ -285,26 +294,42 @@
 				break;
 			}
 
-			if (vm->allocate_va_range &&
-			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
-				break;
+			if (vm->allocate_va_range) {
+				struct i915_vm_pt_stash stash = {};
 
-			mock_vma.pages = obj->mm.pages;
-			mock_vma.node.size = BIT_ULL(size);
-			mock_vma.node.start = addr;
+				if (i915_vm_alloc_pt_stash(vm, &stash,
+							   BIT_ULL(size)))
+					break;
 
-			intel_runtime_pm_get(i915);
-			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
-			intel_runtime_pm_put(i915);
+				if (i915_vm_pin_pt_stash(vm, &stash)) {
+					i915_vm_free_pt_stash(vm, &stash);
+					break;
+				}
+
+				vm->allocate_va_range(vm, &stash,
+						      addr, BIT_ULL(size));
+
+				i915_vm_free_pt_stash(vm, &stash);
+			}
+
+			mock_vma->pages = obj->mm.pages;
+			mock_vma->node.size = BIT_ULL(size);
+			mock_vma->node.start = addr;
+
+			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+				vm->insert_entries(vm, mock_vma,
+						   I915_CACHE_NONE, 0);
 		}
 		count = n;
 
 		i915_random_reorder(order, count, &prng);
 		for (n = 0; n < count; n++) {
 			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			intel_wakeref_t wakeref;
 
 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
-			vm->clear_range(vm, addr, BIT_ULL(size));
+			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+				vm->clear_range(vm, addr, BIT_ULL(size));
 		}
 
 		i915_gem_object_unpin_pages(obj);
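
In the hunk above, lowlevel_hole() switches from the bare intel_runtime_pm_get()/intel_runtime_pm_put() pair on the whole device to the scoped with_intel_runtime_pm() helper, which holds an explicit wakeref only for the duration of the attached statement. A short sketch of the idiom as used here:

	intel_wakeref_t wakeref;

	/* The device is guaranteed awake while the statement executes;
	 * the wakeref is released automatically afterwards. */
	with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
		vm->insert_entries(vm, mock_vma, I915_CACHE_NONE, 0);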
@@ -312,9 +337,10 @@
 
 		kfree(order);
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
+	kfree(mock_vma);
 	return 0;
 }
 
@@ -330,17 +356,13 @@
 		vma = i915_vma_instance(obj, vm, NULL);
 		if (!IS_ERR(vma))
 			ignored = i915_vma_unbind(vma);
-		/* Only ppgtt vma may be closed before the object is freed */
-		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
 
 		list_del(&obj->st_link);
 		i915_gem_object_put(obj);
 	}
 }
 
-static int fill_hole(struct drm_i915_private *i915,
-		     struct i915_address_space *vm,
+static int fill_hole(struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
 {
@@ -373,7 +395,7 @@
			{ }
		}, *p;
 
-		obj = fake_dma_object(i915, full_size);
+		obj = fake_dma_object(vm->i915, full_size);
 		if (IS_ERR(obj))
 			break;
 
@@ -541,7 +563,7 @@
 		}
 
 		close_object_list(&objects, vm);
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
@@ -551,8 +573,7 @@
 	return err;
 }
 
-static int walk_hole(struct drm_i915_private *i915,
-		     struct i915_address_space *vm,
+static int walk_hole(struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
 {
@@ -574,7 +595,7 @@
 		u64 addr;
 		int err = 0;
 
-		obj = fake_dma_object(i915, size << PAGE_SHIFT);
+		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
 		if (IS_ERR(obj))
 			break;
 
@@ -592,7 +613,7 @@
				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
-				goto err_close;
+				goto err_put;
			}
			i915_vma_unpin(vma);
 
@@ -601,14 +622,14 @@
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
-				goto err_close;
+				goto err_put;
			}
 
			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
-				goto err_close;
+				goto err_put;
			}
 
			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -617,26 +638,22 @@
				    "%s timed out at %llx\n",
				    __func__, addr)) {
				err = -EINTR;
-				goto err_close;
+				goto err_put;
			}
		}
 
-err_close:
-		if (!i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
 err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
 }
 
-static int pot_hole(struct drm_i915_private *i915,
-		    struct i915_address_space *vm,
+static int pot_hole(struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
 {
@@ -650,7 +667,7 @@
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
-	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -677,7 +694,7 @@
			       addr,
			       hole_start, hole_end,
			       err);
-			goto err;
+			goto err_obj;
 		}
 
 		if (!drm_mm_node_allocated(&vma->node) ||
@@ -687,7 +704,7 @@
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
-			goto err;
+			goto err_obj;
 		}
 
 		i915_vma_unpin(vma);
@@ -699,20 +716,16 @@
				    "%s timed out after %d/%d\n",
				    __func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
-			goto err;
+			goto err_obj;
 		}
 	}
 
-err:
-	if (!i915_vma_is_ggtt(vma))
-		i915_vma_close(vma);
 err_obj:
 	i915_gem_object_put(obj);
 	return err;
 }
 
-static int drunk_hole(struct drm_i915_private *i915,
-		      struct i915_address_space *vm,
+static int drunk_hole(struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
 {
@@ -757,7 +770,7 @@
		 * memory. We expect to hit -ENOMEM.
		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(vm->i915, BIT_ULL(size));
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -781,7 +794,7 @@
			       addr, BIT_ULL(size),
			       hole_start, hole_end,
			       err);
-			goto err;
+			goto err_obj;
		}
 
		if (!drm_mm_node_allocated(&vma->node) ||
@@ -791,7 +804,7 @@
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
-			goto err;
+			goto err_obj;
		}
 
		i915_vma_unpin(vma);
@@ -802,27 +815,23 @@
				    "%s timed out after %d/%d\n",
				    __func__, n, count)) {
				err = -EINTR;
-				goto err;
+				goto err_obj;
			}
		}
 
-err:
-		if (!i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
 err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
 }
 
-static int __shrink_hole(struct drm_i915_private *i915,
-			 struct i915_address_space *vm,
+static int __shrink_hole(struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
 {
@@ -839,7 +848,7 @@
 		u64 size = BIT_ULL(order++);
 
 		size = min(size, hole_end - addr);
-		obj = fake_dma_object(i915, size);
+		obj = fake_dma_object(vm->i915, size);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -875,6 +884,15 @@
 		i915_vma_unpin(vma);
 		addr += size;
 
+		/*
+		 * Since we are injecting allocation faults at random intervals,
+		 * wait for this allocation to complete before we change the
+		 * faultinjection.
+		 */
+		err = i915_vma_sync(vma);
+		if (err)
+			break;
+
 		if (igt_timeout(end_time,
				"%s timed out at ofset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
@@ -884,12 +902,11 @@
 	}
 
 	close_object_list(&objects, vm);
-	cleanup_freed_objects(i915);
+	cleanup_freed_objects(vm->i915);
 	return err;
 }
 
-static int shrink_hole(struct drm_i915_private *i915,
-		       struct i915_address_space *vm,
+static int shrink_hole(struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
 {
@@ -901,7 +918,7 @@
 
 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
 		vm->fault_attr.interval = prime;
-		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+		err = __shrink_hole(vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 	}
@@ -911,8 +928,7 @@
 	return err;
 }
 
-static int shrink_boom(struct drm_i915_private *i915,
-		       struct i915_address_space *vm,
+static int shrink_boom(struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
 {
@@ -934,7 +950,7 @@
 		unsigned int size = sizes[i];
 		struct i915_vma *vma;
 
-		purge = fake_dma_object(i915, size);
+		purge = fake_dma_object(vm->i915, size);
 		if (IS_ERR(purge))
 			return PTR_ERR(purge);
 
@@ -951,7 +967,7 @@
 		/* Should now be ripe for purging */
 		i915_vma_unpin(vma);
 
-		explode = fake_dma_object(i915, size);
+		explode = fake_dma_object(vm->i915, size);
 		if (IS_ERR(explode)) {
 			err = PTR_ERR(explode);
 			goto err_purge;
@@ -977,7 +993,7 @@
 		i915_gem_object_put(explode);
 
 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
@@ -991,40 +1007,36 @@
 }
 
 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
-			  int (*func)(struct drm_i915_private *i915,
-				      struct i915_address_space *vm,
+			  int (*func)(struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
 {
-	struct drm_file *file;
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
+	struct file *file;
 	int err;
 
-	if (!USES_FULL_PPGTT(dev_priv))
+	if (!HAS_FULL_PPGTT(dev_priv))
 		return 0;
 
 	file = mock_file(dev_priv);
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
-		goto out_unlock;
+		goto out_free;
 	}
 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
-	GEM_BUG_ON(ppgtt->vm.closed);
+	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
 
-	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-	i915_ppgtt_close(&ppgtt->vm);
-	i915_ppgtt_put(ppgtt);
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_vm_put(&ppgtt->vm);
 
-	mock_file_free(dev_priv, file);
+out_free:
+	fput(file);
 	return err;
 }
 
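
exercise_ppgtt() above no longer takes struct_mutex around creation and teardown: the address space is reference counted, and dropping the last reference with i915_vm_put() frees it. The lifecycle, reduced to its essentials (a sketch, not the full function):

	struct i915_ppgtt *ppgtt;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* ... exercise ppgtt->vm ... */

	/* Drop the creation reference; no global lock is needed. */
	i915_vm_put(&ppgtt->vm);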
@@ -1075,8 +1087,7 @@
 }
 
 static int exercise_ggtt(struct drm_i915_private *i915,
-			 int (*func)(struct drm_i915_private *i915,
-				     struct i915_address_space *vm,
+			 int (*func)(struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
 {
@@ -1086,7 +1097,6 @@
 	IGT_TIMEOUT(end_time);
 	int err = 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
 restart:
 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
@@ -1099,7 +1109,7 @@
 		if (hole_start >= hole_end)
 			continue;
 
-		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
+		err = func(&ggtt->vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 
@@ -1107,7 +1117,6 @@
 		last = hole_end;
 		goto restart;
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
 }
@@ -1144,32 +1153,34 @@
 	struct drm_i915_private *i915 = arg;
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_i915_gem_object *obj;
+	intel_wakeref_t wakeref;
 	struct drm_mm_node tmp;
 	unsigned int *order, n;
 	int err;
 
-	mutex_lock(&i915->drm.struct_mutex);
+	if (!i915_ggtt_has_aperture(ggtt))
+		return 0;
 
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
-		goto out_unlock;
-	}
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
 		goto out_free;
 
 	memset(&tmp, 0, sizeof(tmp));
+	mutex_lock(&ggtt->vm.mutex);
 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
+	mutex_unlock(&ggtt->vm.mutex);
 	if (err)
 		goto out_unpin;
 
-	intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
 	for (n = 0; n < count; n++) {
 		u64 offset = tmp.start + n * PAGE_SIZE;
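
The mutex_lock()/mutex_unlock() pairs added above reflect that the GGTT's internal drm_mm allocator is now protected by the address space's own ggtt->vm.mutex instead of the device-global struct_mutex. Every direct drm_mm manipulation in this file picks up the same bracket, e.g.:

	mutex_lock(&ggtt->vm.mutex);
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	mutex_unlock(&ggtt->vm.mutex);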
@@ -1193,7 +1204,7 @@
 		iowrite32(n, vaddr + n);
 		io_mapping_unmap_atomic(vaddr);
 	}
-	i915_gem_flush_ggtt_writes(i915);
+	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 
 	i915_random_reorder(order, count, &prng);
 	for (n = 0; n < count; n++) {
@@ -1216,14 +1227,14 @@
 	kfree(order);
 out_remove:
 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+	mutex_lock(&ggtt->vm.mutex);
 	drm_mm_remove_node(&tmp);
+	mutex_unlock(&ggtt->vm.mutex);
out_unpin:
 	i915_gem_object_unpin_pages(obj);
out_free:
 	i915_gem_object_put(obj);
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -1231,22 +1242,26 @@
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	obj->bind_count++; /* track for eviction later */
 	__i915_gem_object_pin_pages(obj);
 
+	GEM_BUG_ON(vma->pages);
+	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+	__i915_gem_object_pin_pages(obj);
 	vma->pages = obj->mm.pages;
-	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+	mutex_lock(&vma->vm->mutex);
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+	mutex_unlock(&vma->vm->mutex);
 }
 
 static int exercise_mock(struct drm_i915_private *i915,
-			 int (*func)(struct drm_i915_private *i915,
-				     struct i915_address_space *vm,
+			 int (*func)(struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
 {
-	const u64 limit = totalram_pages << PAGE_SHIFT;
+	const u64 limit = totalram_pages() << PAGE_SHIFT;
+	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
-	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
 	int err;
 
@@ -1254,10 +1269,9 @@
 	if (!ctx)
 		return -ENOMEM;
 
-	ppgtt = ctx->ppgtt;
-	GEM_BUG_ON(!ppgtt);
-
-	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	err = func(vm, 0, min(vm->total, limit), end_time);
+	i915_vm_put(vm);
 
 	mock_context_close(ctx);
 	return err;
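
exercise_mock() above stops reaching into ctx->ppgtt directly; the context's VM is obtained through the reference-taking helper and released when done. The pattern, as a sketch:

	struct i915_address_space *vm;

	vm = i915_gem_context_get_vm_rcu(ctx);	/* takes a reference */
	err = func(vm, 0, min(vm->total, limit), end_time);
	i915_vm_put(vm);			/* drop it when done */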
@@ -1265,28 +1279,37 @@
 
 static int igt_mock_fill(void *arg)
 {
-	return exercise_mock(arg, fill_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, fill_hole);
 }
 
 static int igt_mock_walk(void *arg)
 {
-	return exercise_mock(arg, walk_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, walk_hole);
 }
 
 static int igt_mock_pot(void *arg)
 {
-	return exercise_mock(arg, pot_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, pot_hole);
 }
 
 static int igt_mock_drunk(void *arg)
 {
-	return exercise_mock(arg, drunk_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, drunk_hole);
 }
 
 static int igt_gtt_reserve(void *arg)
 {
-	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = arg;
 	struct drm_i915_gem_object *obj, *on;
+	I915_RND_STATE(prng);
 	LIST_HEAD(objects);
 	u64 total;
 	int err = -ENODEV;
@@ -1298,11 +1321,12 @@
 
 	/* Start by filling the GGTT */
 	for (total = 0;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     total += 2*I915_GTT_PAGE_SIZE) {
+	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+	     total += 2 * I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
+						      2 * PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1316,20 +1340,22 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1347,11 +1373,12 @@
 
 	/* Now we start forcing evictions */
 	for (total = I915_GTT_PAGE_SIZE;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     total += 2*I915_GTT_PAGE_SIZE) {
+	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+	     total += 2 * I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1365,20 +1392,22 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1399,7 +1428,7 @@
 		struct i915_vma *vma;
 		u64 offset;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1411,18 +1440,21 @@
 			goto out;
 		}
 
-		offset = random_offset(0, i915->ggtt.vm.total,
-				       2*I915_GTT_PAGE_SIZE,
-				       I915_GTT_MIN_ALIGNMENT);
+		offset = igt_random_offset(&prng,
					   0, ggtt->vm.total,
					   2 * I915_GTT_PAGE_SIZE,
					   I915_GTT_MIN_ALIGNMENT);
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1448,7 +1480,7 @@
 
 static int igt_gtt_insert(void *arg)
 {
-	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = arg;
 	struct drm_i915_gem_object *obj, *on;
 	struct drm_mm_node tmp = {};
 	const struct invalid_insert {
@@ -1457,8 +1489,8 @@
 		u64 start, end;
 	} invalid_insert[] = {
 		{
-			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
-			0, i915->ggtt.vm.total,
+			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
+			0, ggtt->vm.total,
 		},
 		{
 			2*I915_GTT_PAGE_SIZE, 0,
@@ -1488,11 +1520,13 @@
 
 	/* Check a couple of obviously invalid requests */
 	for (ii = invalid_insert; ii->size; ii++) {
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err != -ENOSPC) {
 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
@@ -1503,11 +1537,12 @@
 	/* Start by filling the GGTT */
 	for (total = 0;
-	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
+	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1521,16 +1556,18 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.vm.total,
+					  0, ggtt->vm.total,
					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err == -ENOSPC) {
 			/* maxed out the GGTT space */
 			i915_gem_object_put(obj);
@@ -1538,7 +1575,7 @@
 		}
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1550,7 +1587,7 @@
 	list_for_each_entry(obj, &objects, st_link) {
 		struct i915_vma *vma;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1570,7 +1607,7 @@
 		struct i915_vma *vma;
 		u64 offset;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1585,13 +1622,15 @@
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.vm.total,
+					  0, ggtt->vm.total,
					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1607,11 +1646,12 @@
 
 	/* And then force evictions */
 	for (total = 0;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     total += 2*I915_GTT_PAGE_SIZE) {
+	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+	     total += 2 * I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1625,19 +1665,21 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.vm.total,
+					  0, ggtt->vm.total,
					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1664,17 +1706,345 @@
 		SUBTEST(igt_gtt_insert),
 	};
 	struct drm_i915_private *i915;
+	struct i915_ggtt *ggtt;
 	int err;
 
 	i915 = mock_gem_device();
 	if (!i915)
 		return -ENOMEM;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_subtests(tests, i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+	if (!ggtt) {
+		err = -ENOMEM;
+		goto out_put;
+	}
+	mock_init_ggtt(i915, ggtt);
 
-	drm_dev_put(&i915->drm);
+	err = i915_subtests(tests, ggtt);
+
+	mock_device_flush(i915);
+	i915_gem_drain_freed_objects(i915);
+	mock_fini_ggtt(ggtt);
+	kfree(ggtt);
+out_put:
+	mock_destroy_device(i915);
+	return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+	struct i915_request *rq;
+	long timeout;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	timeout = i915_request_wait(rq, 0, HZ / 5);
+	i915_request_put(rq);
+
+	return timeout < 0 ? -EIO : 0;
+}
+
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = 0;
+	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+		err = rq->engine->emit_init_breadcrumb(rq);
+	if (err == 0)
+		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+	if (err == 0)
+		i915_request_get(rq);
+	i915_request_add(rq);
+
+	return err ? ERR_PTR(err) : rq;
+}
+
+static u32 *spinner(u32 *batch, int i)
+{
+	return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+	*spinner(batch, i) = MI_BATCH_BUFFER_END;
+	wmb();
+}
+
+static int igt_cs_tlb(void *arg)
+{
+	const unsigned int count = PAGE_SIZE / 64;
+	const unsigned int chunk_size = count * PAGE_SIZE;
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *bbe, *act, *out;
+	struct i915_gem_engines_iter it;
+	struct i915_address_space *vm;
+	struct i915_gem_context *ctx;
+	struct intel_context *ce;
+	struct i915_vma *vma;
+	I915_RND_STATE(prng);
+	struct file *file;
+	unsigned int i;
+	u32 *result;
+	u32 *batch;
+	int err = 0;
+
+	/*
+	 * Our mission here is to fool the hardware to execute something
+	 * from scratch as it has not seen the batch move (due to missing
+	 * the TLB invalidate).
+	 */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_unlock;
+	}
+
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	if (i915_is_ggtt(vm))
+		goto out_vm;
+
+	/* Create two pages; dummy we prefill the TLB, and intended */
+	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(bbe)) {
+		err = PTR_ERR(bbe);
+		goto out_vm;
+	}
+
+	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put_bbe;
+	}
+	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+	i915_gem_object_flush_map(bbe);
+	i915_gem_object_unpin_map(bbe);
+
+	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(act)) {
+		err = PTR_ERR(act);
+		goto out_put_bbe;
+	}
+
+	/* Track the execution of each request by writing into different slot */
+	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put_act;
+	}
+	for (i = 0; i < count; i++) {
+		u32 *cs = batch + i * 64 / sizeof(*cs);
+		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+		GEM_BUG_ON(INTEL_GEN(i915) < 6);
+		cs[0] = MI_STORE_DWORD_IMM_GEN4;
+		if (INTEL_GEN(i915) >= 8) {
+			cs[1] = lower_32_bits(addr);
+			cs[2] = upper_32_bits(addr);
+			cs[3] = i;
+			cs[4] = MI_NOOP;
+			cs[5] = MI_BATCH_BUFFER_START_GEN8;
+		} else {
+			cs[1] = 0;
+			cs[2] = lower_32_bits(addr);
+			cs[3] = i;
+			cs[4] = MI_NOOP;
+			cs[5] = MI_BATCH_BUFFER_START;
+		}
+	}
+
+	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(out)) {
+		err = PTR_ERR(out);
+		goto out_put_batch;
+	}
+	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+	vma = i915_vma_instance(out, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_put_out;
+	}
+
+	err = i915_vma_pin(vma, 0, 0,
+			   PIN_USER |
+			   PIN_OFFSET_FIXED |
+			   (vm->total - PAGE_SIZE));
+	if (err)
+		goto out_put_out;
+	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+	result = i915_gem_object_pin_map(out, I915_MAP_WB);
+	if (IS_ERR(result)) {
+		err = PTR_ERR(result);
+		goto out_put_out;
+	}
+
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		IGT_TIMEOUT(end_time);
+		unsigned long pass = 0;
+
+		if (!intel_engine_can_store_dword(ce->engine))
+			continue;
+
+		while (!__igt_timeout(end_time, NULL)) {
+			struct i915_vm_pt_stash stash = {};
+			struct i915_request *rq;
+			u64 offset;
+
+			offset = igt_random_offset(&prng,
+						   0, vm->total - PAGE_SIZE,
+						   chunk_size, PAGE_SIZE);
+
+			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+			vma = i915_vma_instance(bbe, vm, NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto end;
+			}
+
+			err = vma->ops->set_pages(vma);
+			if (err)
+				goto end;
+
+			err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
+			if (err)
+				goto end;
+
+			err = i915_vm_pin_pt_stash(vm, &stash);
+			if (err) {
+				i915_vm_free_pt_stash(vm, &stash);
+				goto end;
+			}
+
+			vm->allocate_va_range(vm, &stash, offset, chunk_size);
+
+			i915_vm_free_pt_stash(vm, &stash);
+
+			/* Prime the TLB with the dummy pages */
+			for (i = 0; i < count; i++) {
+				vma->node.start = offset + i * PAGE_SIZE;
+				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+				rq = submit_batch(ce, vma->node.start);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					goto end;
+				}
+				i915_request_put(rq);
+			}
+
+			vma->ops->clear_pages(vma);
+
+			err = context_sync(ce);
+			if (err) {
+				pr_err("%s: dummy setup timed out\n",
+				       ce->engine->name);
+				goto end;
+			}
+
+			vma = i915_vma_instance(act, vm, NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto end;
+			}
+
+			err = vma->ops->set_pages(vma);
+			if (err)
+				goto end;
+
+			/* Replace the TLB with target batches */
+			for (i = 0; i < count; i++) {
+				struct i915_request *rq;
+				u32 *cs = batch + i * 64 / sizeof(*cs);
+				u64 addr;
+
+				vma->node.start = offset + i * PAGE_SIZE;
+				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+				addr = vma->node.start + i * 64;
+				cs[4] = MI_NOOP;
+				cs[6] = lower_32_bits(addr);
+				cs[7] = upper_32_bits(addr);
+				wmb();
+
+				rq = submit_batch(ce, addr);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					goto end;
+				}
+
+				/* Wait until the context chain has started */
+				if (i == 0) {
+					while (READ_ONCE(result[i]) &&
+					       !i915_request_completed(rq))
+						cond_resched();
+				} else {
+					end_spin(batch, i - 1);
+				}
+
+				i915_request_put(rq);
+			}
+			end_spin(batch, count - 1);
+
+			vma->ops->clear_pages(vma);
+
+			err = context_sync(ce);
+			if (err) {
+				pr_err("%s: writes timed out\n",
+				       ce->engine->name);
+				goto end;
+			}
+
+			for (i = 0; i < count; i++) {
+				if (result[i] != i) {
+					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+					       ce->engine->name, pass,
+					       offset, i, result[i], i);
+					err = -EINVAL;
+					goto end;
+				}
+			}
+
+			vm->clear_range(vm, offset, chunk_size);
+			pass++;
+		}
+	}
+end:
+	if (igt_flush_test(i915))
+		err = -EIO;
+	i915_gem_context_unlock_engines(ctx);
+	i915_gem_object_unpin_map(out);
+out_put_out:
+	i915_gem_object_put(out);
+out_put_batch:
+	i915_gem_object_unpin_map(act);
+out_put_act:
+	i915_gem_object_put(act);
+out_put_bbe:
+	i915_gem_object_put(bbe);
+out_vm:
+	i915_vm_put(vm);
+out_unlock:
+	fput(file);
 	return err;
 }
 
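
The new igt_cs_tlb() test above packs one self-looping batch into each 64-byte slot of a page: dwords 0-3 store the slot index into the shared result page, dword 4 is the spinner poked by end_spin(), and dwords 5-7 branch back to the slot's own start so the batch spins until released. A commented sketch of one slot on the gen8+ path (layout taken from the hunk above; the cs[6]/cs[7] target is only patched in just before submission):

	u32 *cs = batch + i * 64 / sizeof(*cs);	/* 64 bytes per slot */

	cs[0] = MI_STORE_DWORD_IMM_GEN4;
	cs[1] = lower_32_bits(addr);	/* &result[i], low 32 bits */
	cs[2] = upper_32_bits(addr);	/* &result[i], high 32 bits */
	cs[3] = i;			/* the value written: slot index */
	cs[4] = MI_NOOP;		/* spinner(); end_spin() rewrites this
					 * dword to MI_BATCH_BUFFER_END */
	cs[5] = MI_BATCH_BUFFER_START_GEN8;
	cs[6] = lower_32_bits(vma->node.start + i * 64);	/* loop to self */
	cs[7] = upper_32_bits(vma->node.start + i * 64);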
@@ -1695,6 +2065,7 @@
 		SUBTEST(igt_ggtt_pot),
 		SUBTEST(igt_ggtt_fill),
 		SUBTEST(igt_ggtt_page),
+		SUBTEST(igt_cs_tlb),
 	};
 
 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));