forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,17 +24,23 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
-#include <linux/module.h>
-#include <linux/console.h>
 
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-#include "vmwgfx_binding.h"
-#include <drm/ttm/ttm_placement.h>
+#include <linux/console.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mem_encrypt.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_sysfs.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
-#include <linux/dma_remapping.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_object.h"
+#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -48,6 +54,8 @@
 #endif
 
 #define VMWGFX_REPO "In Tree"
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
 
 
 /**
@@ -143,6 +151,9 @@
 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
     DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
         union drm_vmw_gb_surface_reference_ext_arg)
+#define DRM_IOCTL_VMW_MSG \
+    DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
+        struct drm_vmw_msg_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -158,9 +169,9 @@
 
 static const struct drm_ioctl_desc vmw_ioctls[] = {
     VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
@@ -175,16 +186,16 @@
           DRM_MASTER),
 
     VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
-    VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+          DRM_RENDER_ALLOW),
+    VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
           DRM_RENDER_ALLOW),
@@ -194,9 +205,9 @@
     VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
 
     /* these allow direct access to the framebuffers mark as master only */
     VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
@@ -214,28 +225,31 @@
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_CREATE_SHADER,
           vmw_shader_define_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_UNREF_SHADER,
           vmw_shader_destroy_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
           vmw_gb_surface_define_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
           vmw_gb_surface_reference_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_SYNCCPU,
           vmw_user_bo_synccpu_ioctl,
           DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
           vmw_extended_context_define_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
           vmw_gb_surface_define_ext_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
     VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
           vmw_gb_surface_reference_ext_ioctl,
-          DRM_AUTH | DRM_RENDER_ALLOW),
+          DRM_RENDER_ALLOW),
+    VMW_IOCTL_DEF(VMW_MSG,
+          vmw_msg_ioctl,
+          DRM_RENDER_ALLOW),
 };
 
 static const struct pci_device_id vmw_pci_id_list[] = {
@@ -252,7 +266,6 @@
 static int vmw_assume_16bpp;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
               void *ptr);
 
@@ -277,6 +290,8 @@
         DRM_INFO(" Grow oTable.\n");
     if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
         DRM_INFO(" IntraSurface copy.\n");
+    if (capabilities2 & SVGA_CAP2_DX3)
+        DRM_INFO(" DX3.\n");
 }
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -436,7 +451,7 @@
     dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
     if (IS_ERR(dev_priv->cman)) {
         dev_priv->cman = NULL;
-        dev_priv->has_dx = false;
+        dev_priv->sm_type = VMW_SM_LEGACY;
     }
 
     ret = vmw_request_device_late(dev_priv);
@@ -549,9 +564,8 @@
  *
  * @dev_priv: Pointer to a struct vmw_private
  *
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This functions tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
  * If this function decides that DMA is not possible, it returns -EINVAL.
  * The driver may then try to disable features of the device that require
  * DMA.
@@ -561,57 +575,25 @@
     static const char *names[vmw_dma_map_max] = {
         [vmw_dma_phys] = "Using physical TTM page addresses.",
         [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
-        [vmw_dma_map_populate] = "Keeping DMA mappings.",
+        [vmw_dma_map_populate] = "Caching DMA mappings.",
         [vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
-    const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
-#ifdef CONFIG_INTEL_IOMMU
-    if (intel_iommu_enabled) {
-        dev_priv->map_mode = vmw_dma_map_populate;
-        goto out_fixup;
-    }
-#endif
-
-    if (!(vmw_force_iommu || vmw_force_coherent)) {
-        dev_priv->map_mode = vmw_dma_phys;
-        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-        return 0;
-    }
-
-    dev_priv->map_mode = vmw_dma_map_populate;
-
-    if (dma_ops->sync_single_for_cpu)
-        dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
-    if (swiotlb_nr_tbl() == 0)
-        dev_priv->map_mode = vmw_dma_map_populate;
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
-    if (dev_priv->map_mode == vmw_dma_map_populate &&
-        vmw_restrict_iommu)
-        dev_priv->map_mode = vmw_dma_map_bind;
+    /* TTM currently doesn't fully support SEV encryption. */
+    if (mem_encrypt_active())
+        return -EINVAL;
 
     if (vmw_force_coherent)
         dev_priv->map_mode = vmw_dma_alloc_coherent;
+    else if (vmw_restrict_iommu)
+        dev_priv->map_mode = vmw_dma_map_bind;
+    else
+        dev_priv->map_mode = vmw_dma_map_populate;
 
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-    /*
-     * No coherent page pool
-     */
-    if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+    if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
+        (dev_priv->map_mode == vmw_dma_alloc_coherent))
         return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
-    dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
 
     DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
     return 0;
 }
 
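Note on the vmw_dma_select_mode() hunk above: the old CONFIG_X86/CONFIG_INTEL_IOMMU probing collapses into a fixed precedence, where an SEV-encrypted guest refuses DMA outright, then vmw_force_coherent wins, then vmw_restrict_iommu, otherwise the populate default. A minimal standalone C sketch of that precedence, using stand-in booleans rather than the driver's real module parameters:

#include <stdbool.h>

/* Mirrors the order of checks in the new vmw_dma_select_mode(); the names
 * below are stand-ins, not the driver's real interfaces. */
enum dma_map_mode { MAP_COHERENT, MAP_BIND, MAP_POPULATE };

static int pick_map_mode(bool sev_active, bool force_coherent,
                         bool restrict_iommu, enum dma_map_mode *mode)
{
    if (sev_active)          /* TTM cannot yet handle SEV-encrypted pages */
        return -1;           /* caller treats this as "refuse DMA" */
    if (force_coherent)
        *mode = MAP_COHERENT;
    else if (restrict_iommu)
        *mode = MAP_BIND;
    else
        *mode = MAP_POPULATE;
    return 0;
}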
@@ -623,7 +605,6 @@
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
     struct drm_device *dev = dev_priv->dev;
@@ -638,12 +619,28 @@
 
     return ret;
 }
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
+
+static int vmw_vram_manager_init(struct vmw_private *dev_priv)
 {
-    return 0;
-}
+    int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+    ret = vmw_thp_init(dev_priv);
+#else
+    ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
+                 dev_priv->vram_size >> PAGE_SHIFT);
 #endif
+    ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
+    return ret;
+}
+
+static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+    vmw_thp_fini(dev_priv);
+#else
+    ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
+#endif
+}
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -668,10 +665,9 @@
     mutex_init(&dev_priv->cmdbuf_mutex);
     mutex_init(&dev_priv->release_mutex);
     mutex_init(&dev_priv->binding_mutex);
-    mutex_init(&dev_priv->requested_layout_mutex);
     mutex_init(&dev_priv->global_kms_state_mutex);
-    rwlock_init(&dev_priv->resource_lock);
     ttm_lock_init(&dev_priv->reservation_sem);
+    spin_lock_init(&dev_priv->resource_lock);
     spin_lock_init(&dev_priv->hw_lock);
     spin_lock_init(&dev_priv->waiter_lock);
     spin_lock_init(&dev_priv->cap_lock);
@@ -683,7 +679,6 @@
         INIT_LIST_HEAD(&dev_priv->res_lru[i]);
     }
 
-    mutex_init(&dev_priv->init_mutex);
     init_waitqueue_head(&dev_priv->fence_queue);
     init_waitqueue_head(&dev_priv->fifo_queue);
     dev_priv->fence_queue_waiters = 0;
@@ -716,8 +711,10 @@
 
     ret = vmw_dma_select_mode(dev_priv);
     if (unlikely(ret != 0)) {
-        DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+        DRM_INFO("Restricting capabilities since DMA not available.\n");
         refuse_dma = true;
+        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+            DRM_INFO("Disabling 3D acceleration.\n");
     }
 
     dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -745,9 +742,15 @@
     dev_priv->max_mob_pages = 0;
     dev_priv->max_mob_size = 0;
     if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-        uint64_t mem_size =
-            vmw_read(dev_priv,
-                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+        uint64_t mem_size;
+
+        if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
+            mem_size = vmw_read(dev_priv,
+                        SVGA_REG_GBOBJECT_MEM_SIZE_KB);
+        else
+            mem_size =
+                vmw_read(dev_priv,
+                     SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
         /*
          * Workaround for low memory 2D VMs to compensate for the
@@ -807,22 +810,13 @@
     DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
          dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 
-    ret = vmw_ttm_global_init(dev_priv);
-    if (unlikely(ret != 0))
-        goto out_err0;
-
-
-    vmw_master_init(&dev_priv->fbdev_master);
-    ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
-    dev_priv->active_master = &dev_priv->fbdev_master;
-
     dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
                        dev_priv->mmio_size, MEMREMAP_WB);
 
     if (unlikely(dev_priv->mmio_virt == NULL)) {
         ret = -ENOMEM;
         DRM_ERROR("Failed mapping MMIO.\n");
-        goto out_err3;
+        goto out_err0;
     }
 
     /* Need mmio memory to check for fifo pitchlock cap. */
@@ -834,8 +828,8 @@
         goto out_err4;
     }
 
-    dev_priv->tdev = ttm_object_device_init
-        (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+    dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+                        &vmw_prime_dmabuf_ops);
 
     if (unlikely(dev_priv->tdev == NULL)) {
         DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -875,11 +869,13 @@
         goto out_no_fman;
     }
 
+    drm_vma_offset_manager_init(&dev_priv->vma_manager,
+                    DRM_FILE_PAGE_OFFSET_START,
+                    DRM_FILE_PAGE_OFFSET_SIZE);
     ret = ttm_bo_device_init(&dev_priv->bdev,
-                 dev_priv->bo_global_ref.ref.object,
                  &vmw_bo_driver,
                  dev->anon_inode->i_mapping,
-                 VMWGFX_FILE_PAGE_OFFSET,
+                 &dev_priv->vma_manager,
                  false);
     if (unlikely(ret != 0)) {
         DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -890,40 +886,63 @@
      * Enable VRAM, but initially don't use it until SVGA is enabled and
      * unhidden.
      */
-    ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-                 (dev_priv->vram_size >> PAGE_SHIFT));
+
+    ret = vmw_vram_manager_init(dev_priv);
     if (unlikely(ret != 0)) {
         DRM_ERROR("Failed initializing memory manager for VRAM.\n");
         goto out_no_vram;
     }
-    dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
+    /*
+     * "Guest Memory Regions" is an aperture like feature with
+     * one slot per bo. There is an upper limit of the number of
+     * slots as well as the bo size.
+     */
     dev_priv->has_gmr = true;
+    /* TODO: This is most likely not correct */
     if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-        refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-                     VMW_PL_GMR) != 0) {
+        refuse_dma ||
+        vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
         DRM_INFO("No GMR memory available. "
             "Graphics memory resources are very limited.\n");
         dev_priv->has_gmr = false;
     }
 
-    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
         dev_priv->has_mob = true;
-        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-                   VMW_PL_MOB) != 0) {
+
+        if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
             DRM_INFO("No MOB memory available. "
                 "3D will be disabled.\n");
             dev_priv->has_mob = false;
         }
     }
 
-    if (dev_priv->has_mob) {
+    if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
         spin_lock(&dev_priv->cap_lock);
         vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
-        dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+        if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+            dev_priv->sm_type = VMW_SM_4;
         spin_unlock(&dev_priv->cap_lock);
     }
 
+    vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
+
+    /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
+    if (has_sm4_context(dev_priv) &&
+        (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
+        vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
+
+        if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+            dev_priv->sm_type = VMW_SM_4_1;
+
+        if (has_sm4_1_context(dev_priv) &&
+            (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
+            vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
+            if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+                dev_priv->sm_type = VMW_SM_5;
+        }
+    }
 
     ret = vmw_kms_init(dev_priv);
     if (unlikely(ret != 0))
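The capability probing above replaces the old has_dx/has_sm4_1 booleans with a single graded sm_type. The has_sm4_context() and has_sm4_1_context() helpers are defined in vmwgfx_drv.h rather than in this hunk; the sketch below shows the assumed shape of such ordered checks (the struct name is a stand-in, not the real vmw_private):

#include <stdbool.h>

/* Stand-in types: the real enum and helpers live in vmwgfx_drv.h. */
enum vmw_sm_type { VMW_SM_LEGACY = 0, VMW_SM_4, VMW_SM_4_1, VMW_SM_5 };

struct sm_state { enum vmw_sm_type sm_type; };

/* Each level implies the ones below it, so ordered comparisons suffice. */
static inline bool has_sm4_context(const struct sm_state *s)
{
    return s->sm_type >= VMW_SM_4;
}

static inline bool has_sm4_1_context(const struct sm_state *s)
{
    return s->sm_type >= VMW_SM_4_1;
}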
@@ -934,23 +953,14 @@
     if (ret)
         goto out_no_fifo;
 
-    if (dev_priv->has_dx) {
-        /*
-         * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
-         * support
-         */
-        if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
-            vmw_write(dev_priv, SVGA_REG_DEV_CAP,
-                  SVGA3D_DEVCAP_SM41);
-            dev_priv->has_sm4_1 = vmw_read(dev_priv,
-                               SVGA_REG_DEV_CAP);
-        }
-    }
-
-    DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
     DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
          ? "yes." : "no.");
-    DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+    if (dev_priv->sm_type == VMW_SM_5)
+        DRM_INFO("SM5 support available.\n");
+    if (dev_priv->sm_type == VMW_SM_4_1)
+        DRM_INFO("SM4_1 support available.\n");
+    if (dev_priv->sm_type == VMW_SM_4)
+        DRM_INFO("SM4 support available.\n");
 
     snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
          VMWGFX_REPO, VMWGFX_GIT_VERSION);
@@ -978,10 +988,10 @@
     vmw_kms_close(dev_priv);
 out_no_kms:
     if (dev_priv->has_mob)
-        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+        vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
     if (dev_priv->has_gmr)
-        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+        vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
+    vmw_vram_manager_fini(dev_priv);
 out_no_vram:
     (void)ttm_bo_device_release(&dev_priv->bdev);
 out_no_bdev:
@@ -998,8 +1008,6 @@
     ttm_object_device_release(&dev_priv->tdev);
 out_err4:
     memunmap(dev_priv->mmio_virt);
-out_err3:
-    vmw_ttm_global_release(dev_priv);
 out_err0:
     for (i = vmw_res_context; i < vmw_res_max; ++i)
         idr_destroy(&dev_priv->res_idr[i]);
@@ -1031,13 +1039,14 @@
     vmw_overlay_close(dev_priv);
 
     if (dev_priv->has_gmr)
-        (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+        vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
 
     vmw_release_device_early(dev_priv);
     if (dev_priv->has_mob)
-        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+        vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+    vmw_vram_manager_fini(dev_priv);
     (void) ttm_bo_device_release(&dev_priv->bdev);
+    drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
     vmw_release_device_late(dev_priv);
     vmw_fence_manager_takedown(dev_priv->fman);
     if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1051,7 +1060,6 @@
     memunmap(dev_priv->mmio_virt);
     if (dev_priv->ctx.staged_bindings)
         vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-    vmw_ttm_global_release(dev_priv);
 
     for (i = vmw_res_context; i < vmw_res_max; ++i)
         idr_destroy(&dev_priv->res_idr[i]);
@@ -1062,18 +1070,7 @@
 static void vmw_postclose(struct drm_device *dev,
               struct drm_file *file_priv)
 {
-    struct vmw_fpriv *vmw_fp;
-
-    vmw_fp = vmw_fpriv(file_priv);
-
-    if (vmw_fp->locked_master) {
-        struct vmw_master *vmaster =
-            vmw_master(vmw_fp->locked_master);
-
-        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
-        ttm_vt_unlock(&vmaster->lock);
-        drm_master_put(&vmw_fp->locked_master);
-    }
+    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
     ttm_object_file_release(&vmw_fp->tfile);
     kfree(vmw_fp);
@@ -1102,55 +1099,6 @@
     return ret;
 }
 
-static struct vmw_master *vmw_master_check(struct drm_device *dev,
-                       struct drm_file *file_priv,
-                       unsigned int flags)
-{
-    int ret;
-    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-    struct vmw_master *vmaster;
-
-    if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
-        return NULL;
-
-    ret = mutex_lock_interruptible(&dev->master_mutex);
-    if (unlikely(ret != 0))
-        return ERR_PTR(-ERESTARTSYS);
-
-    if (drm_is_current_master(file_priv)) {
-        mutex_unlock(&dev->master_mutex);
-        return NULL;
-    }
-
-    /*
-     * Check if we were previously master, but now dropped. In that
-     * case, allow at least render node functionality.
-     */
-    if (vmw_fp->locked_master) {
-        mutex_unlock(&dev->master_mutex);
-
-        if (flags & DRM_RENDER_ALLOW)
-            return NULL;
-
-        DRM_ERROR("Dropped master trying to access ioctl that "
-              "requires authentication.\n");
-        return ERR_PTR(-EACCES);
-    }
-    mutex_unlock(&dev->master_mutex);
-
-    /*
-     * Take the TTM lock. Possibly sleep waiting for the authenticating
-     * master to become master again, or for a SIGTERM if the
-     * authenticating master exits.
-     */
-    vmaster = vmw_master(file_priv->master);
-    ret = ttm_read_lock(&vmaster->lock, true);
-    if (unlikely(ret != 0))
-        vmaster = ERR_PTR(ret);
-
-    return vmaster;
-}
-
 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                   unsigned long arg,
                   long (*ioctl_func)(struct file *, unsigned int,
@@ -1159,9 +1107,7 @@
     struct drm_file *file_priv = filp->private_data;
     struct drm_device *dev = file_priv->minor->dev;
     unsigned int nr = DRM_IOCTL_NR(cmd);
-    struct vmw_master *vmaster;
     unsigned int flags;
-    long ret;
 
     /*
      * Do extra checking on driver private ioctls.
@@ -1173,15 +1119,7 @@
             &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
         if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
-            ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
-            if (unlikely(ret != 0))
-                return ret;
-
-            if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
-                goto out_io_encoding;
-
-            return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
-                            _IOC_SIZE(cmd));
+            return ioctl_func(filp, cmd, arg);
         } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
             if (!drm_is_current_master(file_priv) &&
                 !capable(CAP_SYS_ADMIN))
@@ -1195,21 +1133,7 @@
     } else if (!drm_ioctl_flags(nr, &flags))
         return -EINVAL;
 
-    vmaster = vmw_master_check(dev, file_priv, flags);
-    if (IS_ERR(vmaster)) {
-        ret = PTR_ERR(vmaster);
-
-        if (ret != -ERESTARTSYS)
-            DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
-                 nr, ret);
-        return ret;
-    }
-
-    ret = ioctl_func(filp, cmd, arg);
-    if (vmaster)
-        ttm_read_unlock(&vmaster->lock);
-
-    return ret;
+    return ioctl_func(filp, cmd, arg);
 
 out_io_encoding:
     DRM_ERROR("Invalid command format, ioctl %d\n",
@@ -1232,108 +1156,26 @@
 }
 #endif
 
-static void vmw_lastclose(struct drm_device *dev)
+static void vmw_master_set(struct drm_device *dev,
+               struct drm_file *file_priv,
+               bool from_open)
 {
-}
-
-static void vmw_master_init(struct vmw_master *vmaster)
-{
-    ttm_lock_init(&vmaster->lock);
-}
-
-static int vmw_master_create(struct drm_device *dev,
-                 struct drm_master *master)
-{
-    struct vmw_master *vmaster;
-
-    vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-    if (unlikely(!vmaster))
-        return -ENOMEM;
-
-    vmw_master_init(vmaster);
-    ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
-    master->driver_priv = vmaster;
-
-    return 0;
-}
-
-static void vmw_master_destroy(struct drm_device *dev,
-                   struct drm_master *master)
-{
-    struct vmw_master *vmaster = vmw_master(master);
-
-    master->driver_priv = NULL;
-    kfree(vmaster);
-}
-
-static int vmw_master_set(struct drm_device *dev,
-              struct drm_file *file_priv,
-              bool from_open)
-{
-    struct vmw_private *dev_priv = vmw_priv(dev);
-    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-    struct vmw_master *active = dev_priv->active_master;
-    struct vmw_master *vmaster = vmw_master(file_priv->master);
-    int ret = 0;
-
-    if (active) {
-        BUG_ON(active != &dev_priv->fbdev_master);
-        ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
-        if (unlikely(ret != 0))
-            return ret;
-
-        ttm_lock_set_kill(&active->lock, true, SIGTERM);
-        dev_priv->active_master = NULL;
-    }
-
-    ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-    if (!from_open) {
-        ttm_vt_unlock(&vmaster->lock);
-        BUG_ON(vmw_fp->locked_master != file_priv->master);
-        drm_master_put(&vmw_fp->locked_master);
-    }
-
-    dev_priv->active_master = vmaster;
-
     /*
      * Inform a new master that the layout may have changed while
     * it was gone.
      */
     if (!from_open)
         drm_sysfs_hotplug_event(dev);
-
-    return 0;
 }
 
 static void vmw_master_drop(struct drm_device *dev,
                 struct drm_file *file_priv)
 {
     struct vmw_private *dev_priv = vmw_priv(dev);
-    struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-    struct vmw_master *vmaster = vmw_master(file_priv->master);
-    int ret;
 
-    /**
-     * Make sure the master doesn't disappear while we have
-     * it locked.
-     */
-
-    vmw_fp->locked_master = drm_master_get(file_priv->master);
-    ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
     vmw_kms_legacy_hotspot_clear(dev_priv);
-    if (unlikely((ret != 0))) {
-        DRM_ERROR("Unable to lock TTM at VT switch.\n");
-        drm_master_put(&vmw_fp->locked_master);
-    }
-
-    ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-
     if (!dev_priv->enable_fb)
         vmw_svga_disable(dev_priv);
-
-    dev_priv->active_master = &dev_priv->fbdev_master;
-    ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
-    ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 }
 
 /**
@@ -1344,10 +1186,12 @@
  */
 static void __vmw_svga_enable(struct vmw_private *dev_priv)
 {
+    struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
     spin_lock(&dev_priv->svga_lock);
-    if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+    if (!ttm_resource_manager_used(man)) {
         vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
-        dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+        ttm_resource_manager_set_used(man, true);
     }
     spin_unlock(&dev_priv->svga_lock);
 }
@@ -1373,9 +1217,11 @@
  */
 static void __vmw_svga_disable(struct vmw_private *dev_priv)
 {
+    struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
     spin_lock(&dev_priv->svga_lock);
-    if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
-        dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+    if (ttm_resource_manager_used(man)) {
+        ttm_resource_manager_set_used(man, false);
         vmw_write(dev_priv, SVGA_REG_ENABLE,
               SVGA_REG_ENABLE_HIDE |
               SVGA_REG_ENABLE_ENABLE);
@@ -1392,6 +1238,7 @@
  */
 void vmw_svga_disable(struct vmw_private *dev_priv)
 {
+    struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
     /*
      * Disabling SVGA will turn off device modesetting capabilities, so
      * notify KMS about that so that it doesn't cache atomic state that
@@ -1407,8 +1254,8 @@
     vmw_kms_lost_device(dev_priv->dev);
     ttm_write_lock(&dev_priv->reservation_sem, false);
     spin_lock(&dev_priv->svga_lock);
-    if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
-        dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+    if (ttm_resource_manager_used(man)) {
+        ttm_resource_manager_set_used(man, false);
         spin_unlock(&dev_priv->svga_lock);
         if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
             DRM_ERROR("Failed evicting VRAM buffers.\n");
@@ -1424,8 +1271,22 @@
 {
     struct drm_device *dev = pci_get_drvdata(pdev);
 
+    drm_dev_unregister(dev);
+    vmw_driver_unload(dev);
+    drm_dev_put(dev);
     pci_disable_device(pdev);
-    drm_put_dev(dev);
+}
+
+static unsigned long
+vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
+              unsigned long len, unsigned long pgoff,
+              unsigned long flags)
+{
+    struct drm_file *file_priv = file->private_data;
+    struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+
+    return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
+                     &dev_priv->vma_manager);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
@@ -1523,7 +1384,7 @@
     vmw_execbuf_release_pinned_bo(dev_priv);
     vmw_resource_evict_all(dev_priv);
     vmw_release_device_early(dev_priv);
-    ttm_bo_swapout_all(&dev_priv->bdev);
+    ttm_bo_swapout_all();
     if (dev_priv->enable_fb)
         vmw_fifo_resource_dec(dev_priv);
     if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
@@ -1599,21 +1460,14 @@
     .compat_ioctl = vmw_compat_ioctl,
 #endif
     .llseek = noop_llseek,
+    .get_unmapped_area = vmw_get_unmapped_area,
 };
 
 static struct drm_driver driver = {
-    .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-    DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
-    .load = vmw_driver_load,
-    .unload = vmw_driver_unload,
-    .lastclose = vmw_lastclose,
-    .get_vblank_counter = vmw_get_vblank_counter,
-    .enable_vblank = vmw_enable_vblank,
-    .disable_vblank = vmw_disable_vblank,
+    .driver_features =
+        DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
     .ioctls = vmw_ioctls,
     .num_ioctls = ARRAY_SIZE(vmw_ioctls),
-    .master_create = vmw_master_create,
-    .master_destroy = vmw_master_destroy,
     .master_set = vmw_master_set,
     .master_drop = vmw_master_drop,
     .open = vmw_driver_open,
@@ -1647,7 +1501,39 @@
 
 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-    return drm_get_pci_dev(pdev, ent, &driver);
+    struct drm_device *dev;
+    int ret;
+
+    ret = pci_enable_device(pdev);
+    if (ret)
+        return ret;
+
+    dev = drm_dev_alloc(&driver, &pdev->dev);
+    if (IS_ERR(dev)) {
+        ret = PTR_ERR(dev);
+        goto err_pci_disable_device;
+    }
+
+    dev->pdev = pdev;
+    pci_set_drvdata(pdev, dev);
+
+    ret = vmw_driver_load(dev, ent->driver_data);
+    if (ret)
+        goto err_drm_dev_put;
+
+    ret = drm_dev_register(dev, ent->driver_data);
+    if (ret)
+        goto err_vmw_driver_unload;
+
+    return 0;
+
+err_vmw_driver_unload:
+    vmw_driver_unload(dev);
+err_drm_dev_put:
+    drm_dev_put(dev);
+err_pci_disable_device:
+    pci_disable_device(pdev);
+    return ret;
 }
 
 static int __init vmwgfx_init(void)
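The rewritten vmw_probe() above, together with the earlier vmw_remove() hunk, follows the usual allocate/load/register sequence with goto-based unwinding in reverse order. A compilable toy version of just that control flow, with printf stand-ins for the real PCI/DRM calls:

#include <stdio.h>

static int pci_enable(void)   { return 0; }  /* stands in for pci_enable_device() */
static int dev_alloc(void)    { return 0; }  /* stands in for drm_dev_alloc() */
static int driver_load(void)  { return 0; }  /* stands in for vmw_driver_load() */
static int dev_register(void) { return 0; }  /* stands in for drm_dev_register() */

static int probe_sketch(void)
{
    int ret;

    if ((ret = pci_enable()))
        return ret;
    if ((ret = dev_alloc()))
        goto err_pci_disable;
    if ((ret = driver_load()))
        goto err_dev_put;
    if ((ret = dev_register()))
        goto err_driver_unload;
    return 0;

err_driver_unload:
    puts("undo driver_load");   /* vmw_driver_unload() */
err_dev_put:
    puts("undo dev_alloc");     /* drm_dev_put() */
err_pci_disable:
    puts("undo pci_enable");    /* pci_disable_device() */
    return ret;
}

int main(void)
{
    return probe_sketch();
}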