| Old | New | Code |
|---|---|---|
| 25 | 25 | * |
| 26 | 26 | **************************************************************************/ |
| 27 | 27 | |
| 28 | | -#include <drm/drmP.h> |
| 29 | 28 | #include "vmwgfx_drv.h" |
| 30 | 29 | |
| 31 | 30 | int vmw_mmap(struct file *filp, struct vm_area_struct *vma) |
| 32 | 31 | { |
| 33 | | - struct drm_file *file_priv; |
| 34 | | - struct vmw_private *dev_priv; |
| | 32 | + static const struct vm_operations_struct vmw_vm_ops = { |
| | 33 | + .pfn_mkwrite = vmw_bo_vm_mkwrite, |
| | 34 | + .page_mkwrite = vmw_bo_vm_mkwrite, |
| | 35 | + .fault = vmw_bo_vm_fault, |
| | 36 | + .open = ttm_bo_vm_open, |
| | 37 | + .close = ttm_bo_vm_close, |
| | 38 | +#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| | 39 | + .huge_fault = vmw_bo_vm_huge_fault, |
| | 40 | +#endif |
| | 41 | + }; |
| | 42 | + struct drm_file *file_priv = filp->private_data; |
| | 43 | + struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); |
| | 44 | + int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev); |
| 35 | 45 | |
| 36 | | - if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) { |
| 37 | | - DRM_ERROR("Illegal attempt to mmap old fifo space.\n"); |
| 38 | | - return -EINVAL; |
| 39 | | - } |
| 40 | | - |
| 41 | | - file_priv = filp->private_data; |
| 42 | | - dev_priv = vmw_priv(file_priv->minor->dev); |
| 43 | | - return ttm_bo_mmap(filp, vma, &dev_priv->bdev); |
| 44 | | -} |
| 45 | | - |
| 46 | | -static int vmw_ttm_mem_global_init(struct drm_global_reference *ref) |
| 47 | | -{ |
| 48 | | - DRM_INFO("global init.\n"); |
| 49 | | - return ttm_mem_global_init(ref->object); |
| 50 | | -} |
| 51 | | - |
| 52 | | -static void vmw_ttm_mem_global_release(struct drm_global_reference *ref) |
| 53 | | -{ |
| 54 | | - ttm_mem_global_release(ref->object); |
| 55 | | -} |
| 56 | | - |
| 57 | | -int vmw_ttm_global_init(struct vmw_private *dev_priv) |
| 58 | | -{ |
| 59 | | - struct drm_global_reference *global_ref; |
| 60 | | - int ret; |
| 61 | | - |
| 62 | | - global_ref = &dev_priv->mem_global_ref; |
| 63 | | - global_ref->global_type = DRM_GLOBAL_TTM_MEM; |
| 64 | | - global_ref->size = sizeof(struct ttm_mem_global); |
| 65 | | - global_ref->init = &vmw_ttm_mem_global_init; |
| 66 | | - global_ref->release = &vmw_ttm_mem_global_release; |
| 67 | | - |
| 68 | | - ret = drm_global_item_ref(global_ref); |
| 69 | | - if (unlikely(ret != 0)) { |
| 70 | | - DRM_ERROR("Failed setting up TTM memory accounting.\n"); |
| | 46 | + if (ret) |
| 71 | 47 | return ret; |
| 72 | | - } |
| 73 | 48 | |
| 74 | | - dev_priv->bo_global_ref.mem_glob = |
| 75 | | - dev_priv->mem_global_ref.object; |
| 76 | | - global_ref = &dev_priv->bo_global_ref.ref; |
| 77 | | - global_ref->global_type = DRM_GLOBAL_TTM_BO; |
| 78 | | - global_ref->size = sizeof(struct ttm_bo_global); |
| 79 | | - global_ref->init = &ttm_bo_global_init; |
| 80 | | - global_ref->release = &ttm_bo_global_release; |
| 81 | | - ret = drm_global_item_ref(global_ref); |
| | 49 | + vma->vm_ops = &vmw_vm_ops; |
| 82 | 50 | |
| 83 | | - if (unlikely(ret != 0)) { |
| 84 | | - DRM_ERROR("Failed setting up TTM buffer objects.\n"); |
| 85 | | - goto out_no_bo; |
| 86 | | - } |
| | 51 | + /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */ |
| | 52 | + if ((vma->vm_flags & (VM_SHARED \| VM_MAYWRITE)) != VM_MAYWRITE) |
| | 53 | + vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) \| VM_PFNMAP; |
| 87 | 54 | |
| 88 | 55 | return 0; |
| 89 | | -out_no_bo: |
| 90 | | - drm_global_item_unref(&dev_priv->mem_global_ref); |
| 91 | | - return ret; |
| 92 | 56 | } |
| 93 | 57 | |
| 94 | | -void vmw_ttm_global_release(struct vmw_private *dev_priv) |
| | 58 | +/* struct vmw_validation_mem callback */ |
| | 59 | +static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size) |
| 95 | 60 | { |
| 96 | | - drm_global_item_unref(&dev_priv->bo_global_ref.ref); |
| 97 | | - drm_global_item_unref(&dev_priv->mem_global_ref); |
| | 61 | + static struct ttm_operation_ctx ctx = {.interruptible = false, |
| | 62 | + .no_wait_gpu = false}; |
| | 63 | + struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm); |
| | 64 | + |
| | 65 | + return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx); |
| | 66 | +} |
| | 67 | + |
| | 68 | +/* struct vmw_validation_mem callback */ |
| | 69 | +static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size) |
| | 70 | +{ |
| | 71 | + struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm); |
| | 72 | + |
| | 73 | + return ttm_mem_global_free(vmw_mem_glob(dev_priv), size); |
| | 74 | +} |
| | 75 | + |
| | 76 | +/** |
| | 77 | + * vmw_validation_mem_init_ttm - Interface the validation memory tracker |
| | 78 | + * to ttm. |
| | 79 | + * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private |
| | 80 | + * rather than a struct vmw_validation_mem is to make sure assumption in the |
| | 81 | + * callbacks that struct vmw_private derives from struct vmw_validation_mem |
| | 82 | + * holds true. |
| | 83 | + * @gran: The recommended allocation granularity |
| | 84 | + */ |
| | 85 | +void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran) |
| | 86 | +{ |
| | 87 | + struct vmw_validation_mem *vvm = &dev_priv->vvm; |
| | 88 | + |
| | 89 | + vvm->reserve_mem = vmw_vmt_reserve; |
| | 90 | + vvm->unreserve_mem = vmw_vmt_unreserve; |
| | 91 | + vvm->gran = gran; |
| 98 | 92 | } |
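The added vmw_vmt_reserve()/vmw_vmt_unreserve() callbacks only work because struct vmw_private embeds the struct vmw_validation_mem they receive (the vvm member), which lets container_of() recover the device private, exactly as the kernel-doc for vmw_validation_mem_init_ttm() explains. Below is a minimal, self-contained userspace sketch of that embed-and-container_of callback pattern; every name in it (validation_mem, device_priv, dev_reserve, and so on) is an illustrative stand-in and not the driver's real API.

```c
/*
 * Sketch of the embedding pattern used by the vmw_validation_mem callbacks:
 * a small callback interface lives inside a larger per-device structure,
 * and the callbacks recover the container with container_of().
 * All names here are illustrative, not the driver's.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct validation_mem {
	int (*reserve_mem)(struct validation_mem *m, size_t size);
	void (*unreserve_mem)(struct validation_mem *m, size_t size);
	size_t gran;	/* recommended allocation granularity */
};

struct device_priv {
	size_t accounted;		/* stands in for TTM's memory accounting */
	struct validation_mem vvm;	/* embedded interface, like vmw_private::vvm */
};

static int dev_reserve(struct validation_mem *m, size_t size)
{
	/* Recover the containing device from the embedded interface. */
	struct device_priv *dev = container_of(m, struct device_priv, vvm);

	dev->accounted += size;
	return 0;
}

static void dev_unreserve(struct validation_mem *m, size_t size)
{
	struct device_priv *dev = container_of(m, struct device_priv, vvm);

	dev->accounted -= size;
}

int main(void)
{
	struct device_priv dev = { .accounted = 0 };

	/* Mirrors the role of vmw_validation_mem_init_ttm(): install callbacks. */
	dev.vvm.reserve_mem = dev_reserve;
	dev.vvm.unreserve_mem = dev_unreserve;
	dev.vvm.gran = 16 * 4096;

	/* A consumer only ever sees the embedded interface pointer. */
	struct validation_mem *vvm = &dev.vvm;

	vvm->reserve_mem(vvm, vvm->gran);
	printf("accounted after reserve: %zu bytes\n", dev.accounted);
	vvm->unreserve_mem(vvm, vvm->gran);
	printf("accounted after unreserve: %zu bytes\n", dev.accounted);
	return 0;
}
```

In the driver itself, vmw_validation_mem_init_ttm() plays the role of the init step above, and the gran field simply advertises a recommended allocation granularity to whatever consumes the interface, per the kernel-doc in the diff.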