From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB
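
The subject covers only part of this change: most of the diff looks
like a backport of upstream vmwgfx rework. DRM_AUTH is dropped from
render-capable ioctls, the vmw_master/ttm_lock master-switching
machinery is removed, VRAM/GMR/MOB setup is converted to the
per-manager TTM init/fini helpers, the has_dx/has_sm4_1 flags are
replaced by an sm_type enum with SM4/SM4_1/SM5 detection, a VMW_MSG
ioctl and a get_unmapped_area hook are added, and PCI probe/remove
are open-coded around drm_dev_alloc()/drm_dev_register() instead of
drm_get_pci_dev().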

---
 kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |  526 ++++++++++++++++++++++-----------------------------------
 1 file changed, 206 insertions(+), 320 deletions(-)

diff --git a/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 05a8008..31e3e5c 100644
--- a/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,17 +24,23 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
-#include <linux/module.h>
-#include <linux/console.h>
 
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-#include "vmwgfx_binding.h"
-#include <drm/ttm/ttm_placement.h>
+#include <linux/console.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mem_encrypt.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_sysfs.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
-#include <linux/dma_remapping.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_object.h"
+#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -48,6 +54,8 @@
 #endif
 
 #define VMWGFX_REPO "In Tree"
+
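+/*
+ * Granularity at which TTM memory is reserved for command-stream
+ * validation; see the vmw_validation_mem_init_ttm() call below.
+ */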
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
 
 
 /**
@@ -143,6 +151,9 @@
 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
 		union drm_vmw_gb_surface_reference_ext_arg)
+#define DRM_IOCTL_VMW_MSG						\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
+		struct drm_vmw_msg_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -158,9 +169,9 @@
 
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
@@ -175,16 +186,16 @@
 		      DRM_MASTER),
 
 	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+		      DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_RENDER_ALLOW),
@@ -194,9 +205,9 @@
 	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 
 	/* these allow direct access to the framebuffers; mark as master only */
 	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -214,28 +225,31 @@
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 		      vmw_shader_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 		      vmw_shader_destroy_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 		      vmw_gb_surface_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 		      vmw_gb_surface_reference_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_bo_synccpu_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
 		      vmw_gb_surface_define_ext_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
 		      vmw_gb_surface_reference_ext_ioctl,
-		      DRM_AUTH | DRM_RENDER_ALLOW),
+		      DRM_RENDER_ALLOW),
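+	/* Guest <-> host messaging channel. */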
+	VMW_IOCTL_DEF(VMW_MSG,
+		      vmw_msg_ioctl,
+		      DRM_RENDER_ALLOW),
 };
 
 static const struct pci_device_id vmw_pci_id_list[] = {
@@ -252,7 +266,6 @@
 static int vmw_assume_16bpp;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 			      void *ptr);
 
@@ -277,6 +290,8 @@
 		DRM_INFO("  Grow oTable.\n");
 	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
 		DRM_INFO("  IntraSurface copy.\n");
+	if (capabilities2 & SVGA_CAP2_DX3)
+		DRM_INFO("  DX3.\n");
 }
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -436,7 +451,7 @@
 	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
 	if (IS_ERR(dev_priv->cman)) {
 		dev_priv->cman = NULL;
-		dev_priv->has_dx = false;
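+		/*
+		 * Without a command buffer manager, DX contexts are
+		 * unavailable; fall back to the legacy shader model.
+		 */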
+		dev_priv->sm_type = VMW_SM_LEGACY;
 	}
 
 	ret = vmw_request_device_late(dev_priv);
@@ -549,9 +564,8 @@
  *
  * @dev_priv: Pointer to a struct vmw_private
  *
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This function tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
  * If this function decides that DMA is not possible, it returns -EINVAL.
  * The driver may then try to disable features of the device that require
  * DMA.
@@ -561,57 +575,25 @@
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
-		[vmw_dma_map_populate] = "Keeping DMA mappings.",
+		[vmw_dma_map_populate] = "Caching DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_enabled) {
-		dev_priv->map_mode = vmw_dma_map_populate;
-		goto out_fixup;
-	}
-#endif
-
-	if (!(vmw_force_iommu || vmw_force_coherent)) {
-		dev_priv->map_mode = vmw_dma_phys;
-		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-		return 0;
-	}
-
-	dev_priv->map_mode = vmw_dma_map_populate;
-
-	if (dma_ops->sync_single_for_cpu)
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl() == 0)
-		dev_priv->map_mode = vmw_dma_map_populate;
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
-	if (dev_priv->map_mode == vmw_dma_map_populate &&
-	    vmw_restrict_iommu)
-		dev_priv->map_mode = vmw_dma_map_bind;
+	/* TTM currently doesn't fully support SEV encryption. */
+	if (mem_encrypt_active())
+		return -EINVAL;
 
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
+	else
+		dev_priv->map_mode = vmw_dma_map_populate;
 
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-	/*
-	 * No coherent page pool
-	 */
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+	if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
+	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 		return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
-	dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
 
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
 	return 0;
 }
 
@@ -623,7 +605,6 @@
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -638,12 +619,28 @@
 
 	return ret;
 }
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
+
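+/*
+ * Set up the VRAM resource manager: the transparent-huge-page aware
+ * manager when THP is enabled, a plain range manager otherwise. The
+ * manager starts out disabled and is only enabled once SVGA is
+ * enabled and unhidden (see __vmw_svga_enable()).
+ */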
+static int vmw_vram_manager_init(struct vmw_private *dev_priv)
 {
-	return 0;
-}
+	int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	ret = vmw_thp_init(dev_priv);
+#else
+	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
+				 dev_priv->vram_size >> PAGE_SHIFT);
 #endif
+	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
+	return ret;
+}
+
+static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	vmw_thp_fini(dev_priv);
+#else
+	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
+#endif
+}
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -668,10 +665,9 @@
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
-	mutex_init(&dev_priv->requested_layout_mutex);
 	mutex_init(&dev_priv->global_kms_state_mutex);
-	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
+	spin_lock_init(&dev_priv->resource_lock);
 	spin_lock_init(&dev_priv->hw_lock);
 	spin_lock_init(&dev_priv->waiter_lock);
 	spin_lock_init(&dev_priv->cap_lock);
@@ -683,7 +679,6 @@
 		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
 	}
 
-	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
@@ -716,8 +711,10 @@
 
 	ret = vmw_dma_select_mode(dev_priv);
 	if (unlikely(ret != 0)) {
-		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+		DRM_INFO("Restricting capabilities since DMA is not available.\n");
 		refuse_dma = true;
+		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			DRM_INFO("Disabling 3D acceleration.\n");
 	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -745,9 +742,15 @@
 	dev_priv->max_mob_pages = 0;
 	dev_priv->max_mob_size = 0;
 	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-		uint64_t mem_size =
-			vmw_read(dev_priv,
-				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+		uint64_t mem_size;
+
+		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
+			mem_size = vmw_read(dev_priv,
+					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
+		else
+			mem_size =
+				vmw_read(dev_priv,
+					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
 		/*
 		 * Workaround for low memory 2D VMs to compensate for the
@@ -807,22 +810,13 @@
 	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
 		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 
-	ret = vmw_ttm_global_init(dev_priv);
-	if (unlikely(ret != 0))
-		goto out_err0;
-
-
-	vmw_master_init(&dev_priv->fbdev_master);
-	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
-	dev_priv->active_master = &dev_priv->fbdev_master;
-
 	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
 				       dev_priv->mmio_size, MEMREMAP_WB);
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
 		DRM_ERROR("Failed mapping MMIO.\n");
-		goto out_err3;
+		goto out_err0;
 	}
 
 	/* Need mmio memory to check for fifo pitchlock cap. */
@@ -834,8 +828,8 @@
 		goto out_err4;
 	}
 
-	dev_priv->tdev = ttm_object_device_init
-		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+						&vmw_prime_dmabuf_ops);
 
 	if (unlikely(dev_priv->tdev == NULL)) {
 		DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -875,11 +869,13 @@
 		goto out_no_fman;
 	}
 
+	drm_vma_offset_manager_init(&dev_priv->vma_manager,
+				    DRM_FILE_PAGE_OFFSET_START,
+				    DRM_FILE_PAGE_OFFSET_SIZE);
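+	/*
+	 * This driver-private offset manager is handed to TTM below and
+	 * is also used by vmw_get_unmapped_area().
+	 */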
 	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver,
 				 dev->anon_inode->i_mapping,
-				 VMWGFX_FILE_PAGE_OFFSET,
+				 &dev_priv->vma_manager,
 				 false);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -890,40 +886,63 @@
 	 * Enable VRAM, but initially don't use it until SVGA is enabled and
 	 * unhidden.
 	 */
-	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-			     (dev_priv->vram_size >> PAGE_SHIFT));
+
+	ret = vmw_vram_manager_init(dev_priv);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 		goto out_no_vram;
 	}
-	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
+	/*
+	 * "Guest Memory Regions" is an aperture-like feature with
+	 * one slot per bo. There is an upper limit on both the number
+	 * of slots and the bo size.
+	 */
 	dev_priv->has_gmr = true;
+	/* TODO: This is most likely not correct */
 	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-					 VMW_PL_GMR) != 0) {
+	    refuse_dma ||
+	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
 		DRM_INFO("No GMR memory available. "
 			 "Graphics memory resources are very limited.\n");
 		dev_priv->has_gmr = false;
 	}
 
-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
 		dev_priv->has_mob = true;
-		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-				   VMW_PL_MOB) != 0) {
+
+		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
 			DRM_INFO("No MOB memory available. "
 				 "3D will be disabled.\n");
 			dev_priv->has_mob = false;
 		}
 	}
 
-	if (dev_priv->has_mob) {
+	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
 		spin_lock(&dev_priv->cap_lock);
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
-		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+			dev_priv->sm_type = VMW_SM_4;
 		spin_unlock(&dev_priv->cap_lock);
 	}
 
+	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
+
+	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
+	if (has_sm4_context(dev_priv) &&
+	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
+
+		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+			dev_priv->sm_type = VMW_SM_4_1;
+
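+		/* SM5 additionally requires the SVGA_CAP2_DX3 capability. */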
+		if (has_sm4_1_context(dev_priv) &&
+		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
+			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
+			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+				dev_priv->sm_type = VMW_SM_5;
+		}
+	}
 
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
@@ -934,23 +953,14 @@
 	if (ret)
 		goto out_no_fifo;
 
-	if (dev_priv->has_dx) {
-		/*
-		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
-		 * support
-		 */
-		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
-			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
-					SVGA3D_DEVCAP_SM41);
-			dev_priv->has_sm4_1 = vmw_read(dev_priv,
-							SVGA_REG_DEV_CAP);
-		}
-	}
-
-	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
 		 ? "yes." : "no.");
-	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+	if (dev_priv->sm_type == VMW_SM_5)
+		DRM_INFO("SM5 support available.\n");
+	if (dev_priv->sm_type == VMW_SM_4_1)
+		DRM_INFO("SM4_1 support available.\n");
+	if (dev_priv->sm_type == VMW_SM_4)
+		DRM_INFO("SM4 support available.\n");
 
 	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
 		VMWGFX_REPO, VMWGFX_GIT_VERSION);
@@ -978,10 +988,10 @@
 	vmw_kms_close(dev_priv);
 out_no_kms:
 	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
+	vmw_vram_manager_fini(dev_priv);
 out_no_vram:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_no_bdev:
@@ -998,8 +1008,6 @@
 	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
 	memunmap(dev_priv->mmio_virt);
-out_err3:
-	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
@@ -1031,13 +1039,14 @@
 	vmw_overlay_close(dev_priv);
 
 	if (dev_priv->has_gmr)
-		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
 
 	vmw_release_device_early(dev_priv);
 	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+	vmw_vram_manager_fini(dev_priv);
 	(void) ttm_bo_device_release(&dev_priv->bdev);
+	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
 	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1051,7 +1060,6 @@
 	memunmap(dev_priv->mmio_virt);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
@@ -1062,18 +1070,7 @@
 static void vmw_postclose(struct drm_device *dev,
 			 struct drm_file *file_priv)
 {
-	struct vmw_fpriv *vmw_fp;
-
-	vmw_fp = vmw_fpriv(file_priv);
-
-	if (vmw_fp->locked_master) {
-		struct vmw_master *vmaster =
-			vmw_master(vmw_fp->locked_master);
-
-		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
-		ttm_vt_unlock(&vmaster->lock);
-		drm_master_put(&vmw_fp->locked_master);
-	}
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
@@ -1102,55 +1099,6 @@
 	return ret;
 }
 
-static struct vmw_master *vmw_master_check(struct drm_device *dev,
-					   struct drm_file *file_priv,
-					   unsigned int flags)
-{
-	int ret;
-	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-	struct vmw_master *vmaster;
-
-	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
-		return NULL;
-
-	ret = mutex_lock_interruptible(&dev->master_mutex);
-	if (unlikely(ret != 0))
-		return ERR_PTR(-ERESTARTSYS);
-
-	if (drm_is_current_master(file_priv)) {
-		mutex_unlock(&dev->master_mutex);
-		return NULL;
-	}
-
-	/*
-	 * Check if we were previously master, but now dropped. In that
-	 * case, allow at least render node functionality.
-	 */
-	if (vmw_fp->locked_master) {
-		mutex_unlock(&dev->master_mutex);
-
-		if (flags & DRM_RENDER_ALLOW)
-			return NULL;
-
-		DRM_ERROR("Dropped master trying to access ioctl that "
-			  "requires authentication.\n");
-		return ERR_PTR(-EACCES);
-	}
-	mutex_unlock(&dev->master_mutex);
-
-	/*
-	 * Take the TTM lock. Possibly sleep waiting for the authenticating
-	 * master to become master again, or for a SIGTERM if the
-	 * authenticating master exits.
-	 */
-	vmaster = vmw_master(file_priv->master);
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		vmaster = ERR_PTR(ret);
-
-	return vmaster;
-}
-
 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg,
 			      long (*ioctl_func)(struct file *, unsigned int,
@@ -1159,9 +1107,7 @@
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev = file_priv->minor->dev;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	struct vmw_master *vmaster;
 	unsigned int flags;
-	long ret;
 
 	/*
 	 * Do extra checking on driver private ioctls.
@@ -1173,15 +1119,7 @@
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
 		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
-			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
-			if (unlikely(ret != 0))
-				return ret;
-
-			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
-				goto out_io_encoding;
-
-			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
-							_IOC_SIZE(cmd));
+			return ioctl_func(filp, cmd, arg);
 		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
 			if (!drm_is_current_master(file_priv) &&
 			    !capable(CAP_SYS_ADMIN))
@@ -1195,21 +1133,7 @@
 	} else if (!drm_ioctl_flags(nr, &flags))
 		return -EINVAL;
 
-	vmaster = vmw_master_check(dev, file_priv, flags);
-	if (IS_ERR(vmaster)) {
-		ret = PTR_ERR(vmaster);
-
-		if (ret != -ERESTARTSYS)
-			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
-				 nr, ret);
-		return ret;
-	}
-
-	ret = ioctl_func(filp, cmd, arg);
-	if (vmaster)
-		ttm_read_unlock(&vmaster->lock);
-
-	return ret;
+	return ioctl_func(filp, cmd, arg);
 
 out_io_encoding:
 	DRM_ERROR("Invalid command format, ioctl %d\n",
@@ -1232,108 +1156,26 @@
 }
 #endif
 
-static void vmw_lastclose(struct drm_device *dev)
+static void vmw_master_set(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   bool from_open)
 {
-}
-
-static void vmw_master_init(struct vmw_master *vmaster)
-{
-	ttm_lock_init(&vmaster->lock);
-}
-
-static int vmw_master_create(struct drm_device *dev,
-			     struct drm_master *master)
-{
-	struct vmw_master *vmaster;
-
-	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-	if (unlikely(!vmaster))
-		return -ENOMEM;
-
-	vmw_master_init(vmaster);
-	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
-	master->driver_priv = vmaster;
-
-	return 0;
-}
-
-static void vmw_master_destroy(struct drm_device *dev,
-			       struct drm_master *master)
-{
-	struct vmw_master *vmaster = vmw_master(master);
-
-	master->driver_priv = NULL;
-	kfree(vmaster);
-}
-
-static int vmw_master_set(struct drm_device *dev,
-			  struct drm_file *file_priv,
-			  bool from_open)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-	struct vmw_master *active = dev_priv->active_master;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	int ret = 0;
-
-	if (active) {
-		BUG_ON(active != &dev_priv->fbdev_master);
-		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
-		if (unlikely(ret != 0))
-			return ret;
-
-		ttm_lock_set_kill(&active->lock, true, SIGTERM);
-		dev_priv->active_master = NULL;
-	}
-
-	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-	if (!from_open) {
-		ttm_vt_unlock(&vmaster->lock);
-		BUG_ON(vmw_fp->locked_master != file_priv->master);
-		drm_master_put(&vmw_fp->locked_master);
-	}
-
-	dev_priv->active_master = vmaster;
-
 	/*
 	 * Inform a new master that the layout may have changed while
 	 * it was gone.
 	 */
 	if (!from_open)
 		drm_sysfs_hotplug_event(dev);
-
-	return 0;
 }
 
 static void vmw_master_drop(struct drm_device *dev,
 			    struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	int ret;
 
-	/**
-	 * Make sure the master doesn't disappear while we have
-	 * it locked.
-	 */
-
-	vmw_fp->locked_master = drm_master_get(file_priv->master);
-	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
 	vmw_kms_legacy_hotspot_clear(dev_priv);
-	if (unlikely((ret != 0))) {
-		DRM_ERROR("Unable to lock TTM at VT switch.\n");
-		drm_master_put(&vmw_fp->locked_master);
-	}
-
-	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-
 	if (!dev_priv->enable_fb)
 		vmw_svga_disable(dev_priv);
-
-	dev_priv->active_master = &dev_priv->fbdev_master;
-	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
-	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 }
 
 /**
@@ -1344,10 +1186,12 @@
  */
 static void __vmw_svga_enable(struct vmw_private *dev_priv)
 {
+	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
 	spin_lock(&dev_priv->svga_lock);
-	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+	if (!ttm_resource_manager_used(man)) {
 		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
-		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+		ttm_resource_manager_set_used(man, true);
 	}
 	spin_unlock(&dev_priv->svga_lock);
 }
@@ -1373,9 +1217,11 @@
  */
 static void __vmw_svga_disable(struct vmw_private *dev_priv)
 {
+	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
 	spin_lock(&dev_priv->svga_lock);
-	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
-		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+	if (ttm_resource_manager_used(man)) {
+		ttm_resource_manager_set_used(man, false);
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  SVGA_REG_ENABLE_HIDE |
 			  SVGA_REG_ENABLE_ENABLE);
@@ -1392,6 +1238,7 @@
  */
 void vmw_svga_disable(struct vmw_private *dev_priv)
 {
+	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
 	/*
 	 * Disabling SVGA will turn off device modesetting capabilities, so
 	 * notify KMS about that so that it doesn't cache atomic state that
@@ -1407,8 +1254,8 @@
 	vmw_kms_lost_device(dev_priv->dev);
 	ttm_write_lock(&dev_priv->reservation_sem, false);
 	spin_lock(&dev_priv->svga_lock);
-	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
-		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+	if (ttm_resource_manager_used(man)) {
+		ttm_resource_manager_set_used(man, false);
 		spin_unlock(&dev_priv->svga_lock);
 		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
 			DRM_ERROR("Failed evicting VRAM buffers.\n");
@@ -1424,8 +1271,22 @@
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
+	drm_dev_unregister(dev);
+	vmw_driver_unload(dev);
+	drm_dev_put(dev);
 	pci_disable_device(pdev);
-	drm_put_dev(dev);
+}
+
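+/*
+ * Delegate mmap placement to drm_get_unmapped_area() using the
+ * driver-private VMA offset manager set up in vmw_driver_load().
+ */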
+static unsigned long
+vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
+		      unsigned long len, unsigned long pgoff,
+		      unsigned long flags)
+{
+	struct drm_file *file_priv = file->private_data;
+	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+
+	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
+				     &dev_priv->vma_manager);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1523,7 +1384,7 @@
 	vmw_execbuf_release_pinned_bo(dev_priv);
 	vmw_resource_evict_all(dev_priv);
 	vmw_release_device_early(dev_priv);
-	ttm_bo_swapout_all(&dev_priv->bdev);
+	ttm_bo_swapout_all();
 	if (dev_priv->enable_fb)
 		vmw_fifo_resource_dec(dev_priv);
 	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
@@ -1599,21 +1460,14 @@
 	.compat_ioctl = vmw_compat_ioctl,
 #endif
 	.llseek = noop_llseek,
+	.get_unmapped_area = vmw_get_unmapped_area,
 };
 
 static struct drm_driver driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
-	.load = vmw_driver_load,
-	.unload = vmw_driver_unload,
-	.lastclose = vmw_lastclose,
-	.get_vblank_counter = vmw_get_vblank_counter,
-	.enable_vblank = vmw_enable_vblank,
-	.disable_vblank = vmw_disable_vblank,
+	.driver_features = DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
-	.master_create = vmw_master_create,
-	.master_destroy = vmw_master_destroy,
 	.master_set = vmw_master_set,
 	.master_drop = vmw_master_drop,
 	.open = vmw_driver_open,
@@ -1647,7 +1501,39 @@
 
 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	return drm_get_pci_dev(pdev, ent, &driver);
+	struct drm_device *dev;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	dev = drm_dev_alloc(&driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto err_pci_disable_device;
+	}
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = vmw_driver_load(dev, ent->driver_data);
+	if (ret)
+		goto err_drm_dev_put;
+
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_vmw_driver_unload;
+
+	return 0;
+
+err_vmw_driver_unload:
+	vmw_driver_unload(dev);
+err_drm_dev_put:
+	drm_dev_put(dev);
+err_pci_disable_device:
+	pci_disable_device(pdev);
+	return ret;
 }
 
 static int __init vmwgfx_init(void)

--
Gitblit v1.6.2