From 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 13 May 2024 10:30:14 +0000
Subject: [PATCH] drm/i915/selftests: update i915_gem_gtt for the reworked VM interfaces

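Update the i915_gem_gtt selftests for the reworked GT/VM interfaces:

- Drop struct_mutex locking; GGTT node reservation, insertion and
  removal are now serialised by vm->mutex instead.
- Allocate page tables through the i915_vm_pt_stash API (alloc, pin,
  allocate_va_range, free) rather than a fallible allocate_va_range().
- Switch to i915_ppgtt, i915_ppgtt_create(&gt), i915_vm_put(),
  HAS_PPGTT()/HAS_FULL_PPGTT(), totalram_pages() and the wakeref-based
  runtime-pm helpers.
- Pass the address space (not the device) to the hole exercisers, and
  run the mock subtests against a mock GGTT.
- Add the igt_cs_tlb subtest to check that TLBs are invalidated when a
  batch buffer is moved.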
---
 kernel/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  755 +++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 563 insertions(+), 192 deletions(-)

diff --git a/kernel/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/kernel/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 127d815..713770f 100644
--- a/kernel/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/kernel/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,25 +25,21 @@
 #include <linux/list_sort.h>
 #include <linux/prime_numbers.h>
 
-#include "../i915_selftest.h"
-#include "i915_random.h"
+#include "gem/i915_gem_context.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
 
-#include "mock_context.h"
+#include "i915_random.h"
+#include "i915_selftest.h"
+
 #include "mock_drm.h"
 #include "mock_gem_device.h"
+#include "mock_gtt.h"
+#include "igt_flush_test.h"
 
 static void cleanup_freed_objects(struct drm_i915_private *i915)
 {
-	/*
-	 * As we may hold onto the struct_mutex for inordinate lengths of
-	 * time, the NMI khungtaskd detector may fire for the free objects
-	 * worker.
-	 */
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	i915_gem_drain_freed_objects(i915);
-
-	mutex_lock(&i915->drm.struct_mutex);
 }
 
 static void fake_free_pages(struct drm_i915_gem_object *obj,
@@ -87,8 +83,6 @@
 	}
 	GEM_BUG_ON(rem);
 
-	obj->mm.madv = I915_MADV_DONTNEED;
-
 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
 	return 0;
@@ -100,10 +94,10 @@
 {
 	fake_free_pages(obj, pages);
 	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
+	.name = "fake-gem",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 	.get_pages = fake_get_pages,
 	.put_pages = fake_put_pages,
@@ -112,6 +106,7 @@
 static struct drm_i915_gem_object *
 fake_dma_object(struct drm_i915_private *i915, u64 size)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 
 	GEM_BUG_ON(!size);
@@ -120,12 +115,14 @@
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
 
-	obj = i915_gem_object_alloc(i915);
+	obj = i915_gem_object_alloc();
 	if (!obj)
 		goto err;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &fake_ops);
+	i915_gem_object_init(obj, &fake_ops, &lock_class);
+
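+	/* replaces the old I915_MADV_DONTNEED marking: contents are discardable */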
+	i915_gem_object_set_volatile(obj);
 
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -147,16 +144,16 @@
 static int igt_ppgtt_alloc(void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_ppgtt *ppgtt;
 	u64 size, last, limit;
 	int err = 0;
 
 	/* Allocate a ppgtt and try to fill the entire range */
 
-	if (!USES_PPGTT(dev_priv))
+	if (!HAS_PPGTT(dev_priv))
 		return 0;
 
-	ppgtt = __hw_ppgtt_create(dev_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
 
@@ -170,59 +167,68 @@
 	 * This should ensure that we do not run into the oomkiller during
 	 * the test and take down the machine wilfully.
 	 */
-	limit = totalram_pages << PAGE_SHIFT;
+	limit = totalram_pages() << PAGE_SHIFT;
 	limit = min(ppgtt->vm.total, limit);
 
 	/* Check we can allocate the entire range */
 	for (size = 4096; size <= limit; size <<= 2) {
-		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
+		struct i915_vm_pt_stash stash = {};
+
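+		/*
+		 * Page-table allocation is now a three-step dance: reserve
+		 * the stash, pin its backing pages, then hand it to
+		 * allocate_va_range(); the stash is freed afterwards.
+		 */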
+		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
+		if (err)
+			goto err_ppgtt_cleanup;
+
+		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
 		if (err) {
-			if (err == -ENOMEM) {
-				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
-					size, ilog2(size));
-				err = 0; /* virtual space too large! */
-			}
+			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 			goto err_ppgtt_cleanup;
 		}
 
+		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
 		cond_resched();
 
 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
+
+		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 	}
 
 	/* Check we can incrementally allocate the entire range */
 	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
-		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
-						  last, size - last);
+		struct i915_vm_pt_stash stash = {};
+
+		err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
+		if (err)
+			goto err_ppgtt_cleanup;
+
+		err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
 		if (err) {
-			if (err == -ENOMEM) {
-				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
-					last, size - last, ilog2(size));
-				err = 0; /* virtual space too large! */
-			}
+			i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 			goto err_ppgtt_cleanup;
 		}
 
+		ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
+					    last, size - last);
 		cond_resched();
+
+		i915_vm_free_pt_stash(&ppgtt->vm, &stash);
 	}
 
 err_ppgtt_cleanup:
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_ppgtt_put(ppgtt);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_vm_put(&ppgtt->vm);
 	return err;
 }
 
-static int lowlevel_hole(struct drm_i915_private *i915,
-			 struct i915_address_space *vm,
+static int lowlevel_hole(struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
 			 unsigned long end_time)
 {
 	I915_RND_STATE(seed_prng);
+	struct i915_vma *mock_vma;
 	unsigned int size;
-	struct i915_vma mock_vma;
 
-	memset(&mock_vma, 0, sizeof(struct i915_vma));
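+	/* scratch vma fed to vm->insert_entries(); zeroed and heap-allocated */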
+	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
+	if (!mock_vma)
+		return -ENOMEM;
 
 	/* Keep creating larger objects until one cannot fit into the hole */
 	for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -246,8 +252,10 @@
 			if (order)
 				break;
 		} while (count >>= 1);
-		if (!count)
+		if (!count) {
+			kfree(mock_vma);
 			return -ENOMEM;
+		}
 		GEM_BUG_ON(!order);
 
 		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
@@ -259,7 +267,7 @@
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(vm->i915, BIT_ULL(size));
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -275,6 +283,7 @@
 
 		for (n = 0; n < count; n++) {
 			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			intel_wakeref_t wakeref;
 
 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
 
@@ -285,26 +294,42 @@
 				break;
 			}
 
-			if (vm->allocate_va_range &&
-			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
-				break;
+			if (vm->allocate_va_range) {
+				struct i915_vm_pt_stash stash = {};
 
-			mock_vma.pages = obj->mm.pages;
-			mock_vma.node.size = BIT_ULL(size);
-			mock_vma.node.start = addr;
+				if (i915_vm_alloc_pt_stash(vm, &stash,
+							   BIT_ULL(size)))
+					break;
 
-			intel_runtime_pm_get(i915);
-			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
-			intel_runtime_pm_put(i915);
+				if (i915_vm_pin_pt_stash(vm, &stash)) {
+					i915_vm_free_pt_stash(vm, &stash);
+					break;
+				}
+
+				vm->allocate_va_range(vm, &stash,
+						      addr, BIT_ULL(size));
+
+				i915_vm_free_pt_stash(vm, &stash);
+			}
+
+			mock_vma->pages = obj->mm.pages;
+			mock_vma->node.size = BIT_ULL(size);
+			mock_vma->node.start = addr;
+
+			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+				vm->insert_entries(vm, mock_vma,
+						   I915_CACHE_NONE, 0);
 		}
 		count = n;
 
 		i915_random_reorder(order, count, &prng);
 		for (n = 0; n < count; n++) {
 			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			intel_wakeref_t wakeref;
 
 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
-			vm->clear_range(vm, addr, BIT_ULL(size));
+			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
+				vm->clear_range(vm, addr, BIT_ULL(size));
 		}
 
 		i915_gem_object_unpin_pages(obj);
@@ -312,9 +337,10 @@
 
 		kfree(order);
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
+	kfree(mock_vma);
 	return 0;
 }
 
@@ -330,17 +356,13 @@
 		vma = i915_vma_instance(obj, vm, NULL);
 		if (!IS_ERR(vma))
 			ignored = i915_vma_unbind(vma);
-		/* Only ppgtt vma may be closed before the object is freed */
-		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
 
 		list_del(&obj->st_link);
 		i915_gem_object_put(obj);
 	}
 }
 
-static int fill_hole(struct drm_i915_private *i915,
-		     struct i915_address_space *vm,
+static int fill_hole(struct i915_address_space *vm,
 		     u64 hole_start, u64 hole_end,
 		     unsigned long end_time)
 {
@@ -373,7 +395,7 @@
 				{ }
 			}, *p;
 
-			obj = fake_dma_object(i915, full_size);
+			obj = fake_dma_object(vm->i915, full_size);
 			if (IS_ERR(obj))
 				break;
 
@@ -541,7 +563,7 @@
 		}
 
 		close_object_list(&objects, vm);
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
@@ -551,8 +573,7 @@
 	return err;
 }
 
-static int walk_hole(struct drm_i915_private *i915,
-		     struct i915_address_space *vm,
+static int walk_hole(struct i915_address_space *vm,
 		     u64 hole_start, u64 hole_end,
 		     unsigned long end_time)
 {
@@ -574,7 +595,7 @@
 		u64 addr;
 		int err = 0;
 
-		obj = fake_dma_object(i915, size << PAGE_SHIFT);
+		obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
 		if (IS_ERR(obj))
 			break;
 
@@ -592,7 +613,7 @@
 				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
 				       __func__, addr, vma->size,
 				       hole_start, hole_end, err);
-				goto err_close;
+				goto err_put;
 			}
 			i915_vma_unpin(vma);
 
@@ -601,14 +622,14 @@
 				pr_err("%s incorrect at %llx + %llx\n",
 				       __func__, addr, vma->size);
 				err = -EINVAL;
-				goto err_close;
+				goto err_put;
 			}
 
 			err = i915_vma_unbind(vma);
 			if (err) {
 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
 				       __func__, addr, vma->size, err);
-				goto err_close;
+				goto err_put;
 			}
 
 			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -617,26 +638,22 @@
 					"%s timed out at %llx\n",
 					__func__, addr)) {
 				err = -EINTR;
-				goto err_close;
+				goto err_put;
 			}
 		}
 
-err_close:
-		if (!i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
 err_put:
 		i915_gem_object_put(obj);
 		if (err)
 			return err;
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
 }
 
-static int pot_hole(struct drm_i915_private *i915,
-		    struct i915_address_space *vm,
+static int pot_hole(struct i915_address_space *vm,
 		    u64 hole_start, u64 hole_end,
 		    unsigned long end_time)
 {
@@ -650,7 +667,7 @@
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
-	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -677,7 +694,7 @@
 				       addr,
 				       hole_start, hole_end,
 				       err);
-				goto err;
+				goto err_obj;
 			}
 
 			if (!drm_mm_node_allocated(&vma->node) ||
@@ -687,7 +704,7 @@
 				i915_vma_unpin(vma);
 				err = i915_vma_unbind(vma);
 				err = -EINVAL;
-				goto err;
+				goto err_obj;
 			}
 
 			i915_vma_unpin(vma);
@@ -699,20 +716,16 @@
 				"%s timed out after %d/%d\n",
 				__func__, pot, fls64(hole_end - 1) - 1)) {
 			err = -EINTR;
-			goto err;
+			goto err_obj;
 		}
 	}
 
-err:
-	if (!i915_vma_is_ggtt(vma))
-		i915_vma_close(vma);
 err_obj:
 	i915_gem_object_put(obj);
 	return err;
 }
 
-static int drunk_hole(struct drm_i915_private *i915,
-		      struct i915_address_space *vm,
+static int drunk_hole(struct i915_address_space *vm,
 		      u64 hole_start, u64 hole_end,
 		      unsigned long end_time)
 {
@@ -757,7 +770,7 @@
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(vm->i915, BIT_ULL(size));
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -781,7 +794,7 @@
 				       addr, BIT_ULL(size),
 				       hole_start, hole_end,
 				       err);
-				goto err;
+				goto err_obj;
 			}
 
 			if (!drm_mm_node_allocated(&vma->node) ||
@@ -791,7 +804,7 @@
 				i915_vma_unpin(vma);
 				err = i915_vma_unbind(vma);
 				err = -EINVAL;
-				goto err;
+				goto err_obj;
 			}
 
 			i915_vma_unpin(vma);
@@ -802,27 +815,23 @@
 					"%s timed out after %d/%d\n",
 					__func__, n, count)) {
 				err = -EINTR;
-				goto err;
+				goto err_obj;
 			}
 		}
 
-err:
-		if (!i915_vma_is_ggtt(vma))
-			i915_vma_close(vma);
 err_obj:
 		i915_gem_object_put(obj);
 		kfree(order);
 		if (err)
 			return err;
 
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
 }
 
-static int __shrink_hole(struct drm_i915_private *i915,
-			 struct i915_address_space *vm,
+static int __shrink_hole(struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
 			 unsigned long end_time)
 {
@@ -839,7 +848,7 @@
 		u64 size = BIT_ULL(order++);
 
 		size = min(size, hole_end - addr);
-		obj = fake_dma_object(i915, size);
+		obj = fake_dma_object(vm->i915, size);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -875,6 +884,15 @@
 		i915_vma_unpin(vma);
 		addr += size;
 
+		/*
+		 * Since we are injecting allocation faults at random intervals,
+		 * wait for this allocation to complete before we change the
+		 * fault injection.
+		 */
+		err = i915_vma_sync(vma);
+		if (err)
+			break;
+
 		if (igt_timeout(end_time,
 				"%s timed out at offset %llx [%llx - %llx]\n",
 				__func__, addr, hole_start, hole_end)) {
@@ -884,12 +902,11 @@
 	}
 
 	close_object_list(&objects, vm);
-	cleanup_freed_objects(i915);
+	cleanup_freed_objects(vm->i915);
 	return err;
 }
 
-static int shrink_hole(struct drm_i915_private *i915,
-		       struct i915_address_space *vm,
+static int shrink_hole(struct i915_address_space *vm,
 		       u64 hole_start, u64 hole_end,
 		       unsigned long end_time)
 {
@@ -901,7 +918,7 @@
 
 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
 		vm->fault_attr.interval = prime;
-		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+		err = __shrink_hole(vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 	}
@@ -911,8 +928,7 @@
 	return err;
 }
 
-static int shrink_boom(struct drm_i915_private *i915,
-		       struct i915_address_space *vm,
+static int shrink_boom(struct i915_address_space *vm,
 		       u64 hole_start, u64 hole_end,
 		       unsigned long end_time)
 {
@@ -934,7 +950,7 @@
 		unsigned int size = sizes[i];
 		struct i915_vma *vma;
 
-		purge = fake_dma_object(i915, size);
+		purge = fake_dma_object(vm->i915, size);
 		if (IS_ERR(purge))
 			return PTR_ERR(purge);
 
@@ -951,7 +967,7 @@
 		/* Should now be ripe for purging */
 		i915_vma_unpin(vma);
 
-		explode = fake_dma_object(i915, size);
+		explode = fake_dma_object(vm->i915, size);
 		if (IS_ERR(explode)) {
 			err = PTR_ERR(explode);
 			goto err_purge;
@@ -977,7 +993,7 @@
 		i915_gem_object_put(explode);
 
 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
-		cleanup_freed_objects(i915);
+		cleanup_freed_objects(vm->i915);
 	}
 
 	return 0;
@@ -991,40 +1007,36 @@
 }
 
 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
-			  int (*func)(struct drm_i915_private *i915,
-				      struct i915_address_space *vm,
+			  int (*func)(struct i915_address_space *vm,
 				      u64 hole_start, u64 hole_end,
 				      unsigned long end_time))
 {
-	struct drm_file *file;
-	struct i915_hw_ppgtt *ppgtt;
+	struct i915_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
+	struct file *file;
 	int err;
 
-	if (!USES_FULL_PPGTT(dev_priv))
+	if (!HAS_FULL_PPGTT(dev_priv))
 		return 0;
 
 	file = mock_file(dev_priv);
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+	ppgtt = i915_ppgtt_create(&dev_priv->gt);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
-		goto out_unlock;
+		goto out_free;
 	}
 	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
-	GEM_BUG_ON(ppgtt->vm.closed);
+	GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
 
-	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+	err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-	i915_ppgtt_close(&ppgtt->vm);
-	i915_ppgtt_put(ppgtt);
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_vm_put(&ppgtt->vm);
 
-	mock_file_free(dev_priv, file);
+out_free:
+	fput(file);
 	return err;
 }
 
@@ -1075,8 +1087,7 @@
 }
 
 static int exercise_ggtt(struct drm_i915_private *i915,
-			 int (*func)(struct drm_i915_private *i915,
-				     struct i915_address_space *vm,
+			 int (*func)(struct i915_address_space *vm,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
@@ -1086,7 +1097,6 @@
 	IGT_TIMEOUT(end_time);
 	int err = 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
 restart:
 	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
 	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
@@ -1099,7 +1109,7 @@
 		if (hole_start >= hole_end)
 			continue;
 
-		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
+		err = func(&ggtt->vm, hole_start, hole_end, end_time);
 		if (err)
 			break;
 
@@ -1107,7 +1117,6 @@
 		last = hole_end;
 		goto restart;
 	}
-	mutex_unlock(&i915->drm.struct_mutex);
 
 	return err;
 }
@@ -1144,32 +1153,34 @@
 	struct drm_i915_private *i915 = arg;
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_i915_gem_object *obj;
+	intel_wakeref_t wakeref;
 	struct drm_mm_node tmp;
 	unsigned int *order, n;
 	int err;
 
-	mutex_lock(&i915->drm.struct_mutex);
+	if (!i915_ggtt_has_aperture(ggtt))
+		return 0;
 
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
-		goto out_unlock;
-	}
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	err = i915_gem_object_pin_pages(obj);
 	if (err)
 		goto out_free;
 
 	memset(&tmp, 0, sizeof(tmp));
+	mutex_lock(&ggtt->vm.mutex);
 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
 					  count * PAGE_SIZE, 0,
 					  I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
 					  DRM_MM_INSERT_LOW);
+	mutex_unlock(&ggtt->vm.mutex);
 	if (err)
 		goto out_unpin;
 
-	intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
 	for (n = 0; n < count; n++) {
 		u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1193,7 +1204,7 @@
 		iowrite32(n, vaddr + n);
 		io_mapping_unmap_atomic(vaddr);
 	}
-	i915_gem_flush_ggtt_writes(i915);
+	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 
 	i915_random_reorder(order, count, &prng);
 	for (n = 0; n < count; n++) {
@@ -1216,14 +1227,14 @@
 	kfree(order);
 out_remove:
 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
-	intel_runtime_pm_put(i915);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+	mutex_lock(&ggtt->vm.mutex);
 	drm_mm_remove_node(&tmp);
+	mutex_unlock(&ggtt->vm.mutex);
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
 out_free:
 	i915_gem_object_put(obj);
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -1231,22 +1242,26 @@
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	obj->bind_count++; /* track for eviction later */
 	__i915_gem_object_pin_pages(obj);
 
+	GEM_BUG_ON(vma->pages);
+	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+	__i915_gem_object_pin_pages(obj);
 	vma->pages = obj->mm.pages;
-	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+	mutex_lock(&vma->vm->mutex);
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+	mutex_unlock(&vma->vm->mutex);
 }
 
 static int exercise_mock(struct drm_i915_private *i915,
-			 int (*func)(struct drm_i915_private *i915,
-				     struct i915_address_space *vm,
+			 int (*func)(struct i915_address_space *vm,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
-	const u64 limit = totalram_pages << PAGE_SHIFT;
+	const u64 limit = totalram_pages() << PAGE_SHIFT;
+	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
-	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
 	int err;
 
@@ -1254,10 +1269,9 @@
 	if (!ctx)
 		return -ENOMEM;
 
-	ppgtt = ctx->ppgtt;
-	GEM_BUG_ON(!ppgtt);
-
-	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	err = func(vm, 0, min(vm->total, limit), end_time);
+	i915_vm_put(vm);
 
 	mock_context_close(ctx);
 	return err;
@@ -1265,28 +1279,37 @@
 
 static int igt_mock_fill(void *arg)
 {
-	return exercise_mock(arg, fill_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, fill_hole);
 }
 
 static int igt_mock_walk(void *arg)
 {
-	return exercise_mock(arg, walk_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, walk_hole);
 }
 
 static int igt_mock_pot(void *arg)
 {
-	return exercise_mock(arg, pot_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, pot_hole);
 }
 
 static int igt_mock_drunk(void *arg)
 {
-	return exercise_mock(arg, drunk_hole);
+	struct i915_ggtt *ggtt = arg;
+
+	return exercise_mock(ggtt->vm.i915, drunk_hole);
 }
 
 static int igt_gtt_reserve(void *arg)
 {
-	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = arg;
 	struct drm_i915_gem_object *obj, *on;
+	I915_RND_STATE(prng);
 	LIST_HEAD(objects);
 	u64 total;
 	int err = -ENODEV;
@@ -1298,11 +1321,12 @@
 
 	/* Start by filling the GGTT */
 	for (total = 0;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     total += 2*I915_GTT_PAGE_SIZE) {
+	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+	     total += 2 * I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
+						      2 * PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1316,20 +1340,22 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
 					   obj->base.size,
 					   total,
 					   obj->cache_level,
 					   0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1347,11 +1373,12 @@
 
 	/* Now we start forcing evictions */
 	for (total = I915_GTT_PAGE_SIZE;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     total += 2*I915_GTT_PAGE_SIZE) {
+	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+	     total += 2 * I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
+						      2 * PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1365,20 +1392,22 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
 					   obj->base.size,
 					   total,
 					   obj->cache_level,
 					   0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1399,7 +1428,7 @@
 		struct i915_vma *vma;
 		u64 offset;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1411,18 +1440,21 @@
 			goto out;
 		}
 
-		offset = random_offset(0, i915->ggtt.vm.total,
-				       2*I915_GTT_PAGE_SIZE,
-				       I915_GTT_MIN_ALIGNMENT);
+		offset = igt_random_offset(&prng,
+					   0, ggtt->vm.total,
+					   2 * I915_GTT_PAGE_SIZE,
+					   I915_GTT_MIN_ALIGNMENT);
 
-		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
 					   obj->base.size,
 					   offset,
 					   obj->cache_level,
 					   0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1448,7 +1480,7 @@
 
 static int igt_gtt_insert(void *arg)
 {
-	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = arg;
 	struct drm_i915_gem_object *obj, *on;
 	struct drm_mm_node tmp = {};
 	const struct invalid_insert {
@@ -1457,8 +1489,8 @@
 		u64 start, end;
 	} invalid_insert[] = {
 		{
-			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
-			0, i915->ggtt.vm.total,
+			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
+			0, ggtt->vm.total,
 		},
 		{
 			2*I915_GTT_PAGE_SIZE, 0,
@@ -1488,11 +1520,13 @@
 
 	/* Check a couple of obviously invalid requests */
 	for (ii = invalid_insert; ii->size; ii++) {
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
 					  ii->size, ii->alignment,
 					  I915_COLOR_UNEVICTABLE,
 					  ii->start, ii->end,
 					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err != -ENOSPC) {
 			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
 			       ii->size, ii->alignment, ii->start, ii->end,
@@ -1503,11 +1537,12 @@
 
 	/* Start by filling the GGTT */
 	for (total = 0;
-	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
+	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
 	     total += I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
+						      I915_GTT_PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1521,16 +1556,18 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
 					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.vm.total,
+					  0, ggtt->vm.total,
 					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err == -ENOSPC) {
 			/* maxed out the GGTT space */
 			i915_gem_object_put(obj);
@@ -1538,7 +1575,7 @@
 		}
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1550,7 +1587,7 @@
 	list_for_each_entry(obj, &objects, st_link) {
 		struct i915_vma *vma;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1570,7 +1607,7 @@
 		struct i915_vma *vma;
 		u64 offset;
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
@@ -1585,13 +1622,15 @@
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
 					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.vm.total,
+					  0, ggtt->vm.total,
 					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1607,11 +1646,12 @@
 
 	/* And then force evictions */
 	for (total = 0;
-	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-	     total += 2*I915_GTT_PAGE_SIZE) {
+	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+	     total += 2 * I915_GTT_PAGE_SIZE) {
 		struct i915_vma *vma;
 
-		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+		obj = i915_gem_object_create_internal(ggtt->vm.i915,
+						      2 * I915_GTT_PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			goto out;
@@ -1625,19 +1665,21 @@
 
 		list_add(&obj->st_link, &objects);
 
-		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+		mutex_lock(&ggtt->vm.mutex);
+		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
 					  obj->base.size, 0, obj->cache_level,
-					  0, i915->ggtt.vm.total,
+					  0, ggtt->vm.total,
 					  0);
+		mutex_unlock(&ggtt->vm.mutex);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
-			       total, i915->ggtt.vm.total, err);
+			       total, ggtt->vm.total, err);
 			goto out;
 		}
 		track_vma_bind(vma);
@@ -1664,17 +1706,345 @@
 		SUBTEST(igt_gtt_insert),
 	};
 	struct drm_i915_private *i915;
+	struct i915_ggtt *ggtt;
 	int err;
 
 	i915 = mock_gem_device();
 	if (!i915)
 		return -ENOMEM;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_subtests(tests, i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+	if (!ggtt) {
+		err = -ENOMEM;
+		goto out_put;
+	}
+	mock_init_ggtt(i915, ggtt);
 
-	drm_dev_put(&i915->drm);
+	err = i915_subtests(tests, ggtt);
+
+	mock_device_flush(i915);
+	i915_gem_drain_freed_objects(i915);
+	mock_fini_ggtt(ggtt);
+	kfree(ggtt);
+out_put:
+	mock_destroy_device(i915);
+	return err;
+}
+
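+/*
+ * Submit an empty request on the context and wait briefly (HZ / 5) for it
+ * to complete; used as a barrier after the batches emitted below.
+ */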
+static int context_sync(struct intel_context *ce)
+{
+	struct i915_request *rq;
+	long timeout;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	timeout = i915_request_wait(rq, 0, HZ / 5);
+	i915_request_put(rq);
+
+	return timeout < 0 ? -EIO : 0;
+}
+
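+/*
+ * Emit a batch-buffer start pointing at @addr, preceded by an init
+ * breadcrumb (where available) so a hang inside the batch can be detected.
+ */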
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = 0;
+	if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+		err = rq->engine->emit_init_breadcrumb(rq);
+	if (err == 0)
+		err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+	if (err == 0)
+		i915_request_get(rq);
+	i915_request_add(rq);
+
+	return err ? ERR_PTR(err) : rq;
+}
+
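+/*
+ * Each 64-byte batch slot is made to loop back on itself; end_spin()
+ * breaks the loop by overwriting the MI_NOOP at dword 4 of slot @i with
+ * MI_BATCH_BUFFER_END.
+ */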
+static u32 *spinner(u32 *batch, int i)
+{
+	return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+	*spinner(batch, i) = MI_BATCH_BUFFER_END;
+	wmb();
+}
+
+static int igt_cs_tlb(void *arg)
+{
+	const unsigned int count = PAGE_SIZE / 64;
+	const unsigned int chunk_size = count * PAGE_SIZE;
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *bbe, *act, *out;
+	struct i915_gem_engines_iter it;
+	struct i915_address_space *vm;
+	struct i915_gem_context *ctx;
+	struct intel_context *ce;
+	struct i915_vma *vma;
+	I915_RND_STATE(prng);
+	struct file *file;
+	unsigned int i;
+	u32 *result;
+	u32 *batch;
+	int err = 0;
+
+	/*
+	 * Our mission here is to fool the hardware into executing something
+	 * from scratch, as it has not seen the batch move (due to the
+	 * missing TLB invalidate).
+	 */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_unlock;
+	}
+
+	vm = i915_gem_context_get_vm_rcu(ctx);
+	if (i915_is_ggtt(vm))
+		goto out_vm;
+
+	/* Create two pages: a dummy to prefill the TLB, and the intended one */
+	bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(bbe)) {
+		err = PTR_ERR(bbe);
+		goto out_vm;
+	}
+
+	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put_bbe;
+	}
+	memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+	i915_gem_object_flush_map(bbe);
+	i915_gem_object_unpin_map(bbe);
+
+	act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(act)) {
+		err = PTR_ERR(act);
+		goto out_put_bbe;
+	}
+
+	/* Track the execution of each request by writing into a different slot */
+	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put_act;
+	}
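+	/*
+	 * Each 64-byte slot stores its index into the result page and then
+	 * issues a batch-buffer start; the branch target (cs[6..7]) is left
+	 * unset here and patched to point back at the slot during each pass,
+	 * turning the slot into a spinner.
+	 */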
+	for (i = 0; i < count; i++) {
+		u32 *cs = batch + i * 64 / sizeof(*cs);
+		u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+		GEM_BUG_ON(INTEL_GEN(i915) < 6);
+		cs[0] = MI_STORE_DWORD_IMM_GEN4;
+		if (INTEL_GEN(i915) >= 8) {
+			cs[1] = lower_32_bits(addr);
+			cs[2] = upper_32_bits(addr);
+			cs[3] = i;
+			cs[4] = MI_NOOP;
+			cs[5] = MI_BATCH_BUFFER_START_GEN8;
+		} else {
+			cs[1] = 0;
+			cs[2] = lower_32_bits(addr);
+			cs[3] = i;
+			cs[4] = MI_NOOP;
+			cs[5] = MI_BATCH_BUFFER_START;
+		}
+	}
+
+	out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(out)) {
+		err = PTR_ERR(out);
+		goto out_put_batch;
+	}
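+	/* the result page is read back through a WB CPU map, keep it coherent */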
+	i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+	vma = i915_vma_instance(out, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_put_out;
+	}
+
+	err = i915_vma_pin(vma, 0, 0,
+			   PIN_USER |
+			   PIN_OFFSET_FIXED |
+			   (vm->total - PAGE_SIZE));
+	if (err)
+		goto out_put_out;
+	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+	result = i915_gem_object_pin_map(out, I915_MAP_WB);
+	if (IS_ERR(result)) {
+		err = PTR_ERR(result);
+		goto out_put_out;
+	}
+
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		IGT_TIMEOUT(end_time);
+		unsigned long pass = 0;
+
+		if (!intel_engine_can_store_dword(ce->engine))
+			continue;
+
+		while (!__igt_timeout(end_time, NULL)) {
+			struct i915_vm_pt_stash stash = {};
+			struct i915_request *rq;
+			u64 offset;
+
+			offset = igt_random_offset(&prng,
+						   0, vm->total - PAGE_SIZE,
+						   chunk_size, PAGE_SIZE);
+
+			memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+			vma = i915_vma_instance(bbe, vm, NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto end;
+			}
+
+			err = vma->ops->set_pages(vma);
+			if (err)
+				goto end;
+
+			err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
+			if (err)
+				goto end;
+
+			err = i915_vm_pin_pt_stash(vm, &stash);
+			if (err) {
+				i915_vm_free_pt_stash(vm, &stash);
+				goto end;
+			}
+
+			vm->allocate_va_range(vm, &stash, offset, chunk_size);
+
+			i915_vm_free_pt_stash(vm, &stash);
+
+			/* Prime the TLB with the dummy pages */
+			for (i = 0; i < count; i++) {
+				vma->node.start = offset + i * PAGE_SIZE;
+				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+				rq = submit_batch(ce, vma->node.start);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					goto end;
+				}
+				i915_request_put(rq);
+			}
+
+			vma->ops->clear_pages(vma);
+
+			err = context_sync(ce);
+			if (err) {
+				pr_err("%s: dummy setup timed out\n",
+				       ce->engine->name);
+				goto end;
+			}
+
+			vma = i915_vma_instance(act, vm, NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto end;
+			}
+
+			err = vma->ops->set_pages(vma);
+			if (err)
+				goto end;
+
+			/* Replace the TLB with target batches */
+			for (i = 0; i < count; i++) {
+				struct i915_request *rq;
+				u32 *cs = batch + i * 64 / sizeof(*cs);
+				u64 addr;
+
+				vma->node.start = offset + i * PAGE_SIZE;
+				vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+				addr = vma->node.start + i * 64;
+				cs[4] = MI_NOOP;
+				cs[6] = lower_32_bits(addr);
+				cs[7] = upper_32_bits(addr);
+				wmb();
+
+				rq = submit_batch(ce, addr);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					goto end;
+				}
+
+				/* Wait until the context chain has started */
+				if (i == 0) {
+					while (READ_ONCE(result[i]) &&
+					       !i915_request_completed(rq))
+						cond_resched();
+				} else {
+					end_spin(batch, i - 1);
+				}
+
+				i915_request_put(rq);
+			}
+			end_spin(batch, count - 1);
+
+			vma->ops->clear_pages(vma);
+
+			err = context_sync(ce);
+			if (err) {
+				pr_err("%s: writes timed out\n",
+				       ce->engine->name);
+				goto end;
+			}
+
+			for (i = 0; i < count; i++) {
+				if (result[i] != i) {
+					pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+					       ce->engine->name, pass,
+					       offset, i, result[i], i);
+					err = -EINVAL;
+					goto end;
+				}
+			}
+
+			vm->clear_range(vm, offset, chunk_size);
+			pass++;
+		}
+	}
+end:
+	if (igt_flush_test(i915))
+		err = -EIO;
+	i915_gem_context_unlock_engines(ctx);
+	i915_gem_object_unpin_map(out);
+out_put_out:
+	i915_gem_object_put(out);
+out_put_batch:
+	i915_gem_object_unpin_map(act);
+out_put_act:
+	i915_gem_object_put(act);
+out_put_bbe:
+	i915_gem_object_put(bbe);
+out_vm:
+	i915_vm_put(vm);
+out_unlock:
+	fput(file);
 	return err;
 }
 
@@ -1695,6 +2065,7 @@
 		SUBTEST(igt_ggtt_pot),
 		SUBTEST(igt_ggtt_fill),
 		SUBTEST(igt_ggtt_page),
+		SUBTEST(igt_cs_tlb),
 	};
 
 	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

--
Gitblit v1.6.2