| .. | .. |
|---|
| 35 | 35 | */ |
|---|
| 36 | 36 | |
|---|
| 37 | 37 | #include "i915_drv.h" |
|---|
| 38 | +#include "gt/intel_ggtt_fencing.h" |
|---|
| 38 | 39 | #include "gvt.h" |
|---|
| 39 | 40 | |
|---|
| 40 | 41 | static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) |
|---|
| 41 | 42 | { |
|---|
| 42 | 43 | struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 43 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
|---|
| 44 | + struct intel_gt *gt = gvt->gt; |
|---|
| 44 | 45 | unsigned int flags; |
|---|
| 45 | 46 | u64 start, end, size; |
|---|
| 46 | 47 | struct drm_mm_node *node; |
|---|
| .. | .. |
|---|
| 60 | 61 | flags = PIN_MAPPABLE; |
|---|
| 61 | 62 | } |
|---|
| 62 | 63 | |
|---|
| 63 | | - mutex_lock(&dev_priv->drm.struct_mutex); |
|---|
| 64 | | - ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, |
|---|
| 64 | + mutex_lock(&gt->ggtt->vm.mutex); |
|---|
| 65 | + mmio_hw_access_pre(gt); |
|---|
| 66 | + ret = i915_gem_gtt_insert(&gt->ggtt->vm, node, |
|---|
| 65 | 67 | size, I915_GTT_PAGE_SIZE, |
|---|
| 66 | 68 | I915_COLOR_UNEVICTABLE, |
|---|
| 67 | 69 | start, end, flags); |
|---|
| 68 | | - mutex_unlock(&dev_priv->drm.struct_mutex); |
|---|
| 70 | + mmio_hw_access_post(gt); |
|---|
| 71 | + mutex_unlock(&gt->ggtt->vm.mutex); |
|---|
| 69 | 72 | if (ret) |
|---|
| 70 | 73 | gvt_err("fail to alloc %s gm space from host\n", |
|---|
| 71 | 74 | high_gm ? "high" : "low"); |
|---|
| .. | .. |
|---|
| 76 | 79 | static int alloc_vgpu_gm(struct intel_vgpu *vgpu) |
|---|
| 77 | 80 | { |
|---|
| 78 | 81 | struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 79 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
|---|
| 82 | + struct intel_gt *gt = gvt->gt; |
|---|
| 80 | 83 | int ret; |
|---|
| 81 | 84 | |
|---|
| 82 | 85 | ret = alloc_gm(vgpu, false); |
|---|
| .. | .. |
|---|
| 95 | 98 | |
|---|
| 96 | 99 | return 0; |
|---|
| 97 | 100 | out_free_aperture: |
|---|
| 98 | | - mutex_lock(&dev_priv->drm.struct_mutex); |
|---|
| 101 | + mutex_lock(&gt->ggtt->vm.mutex); |
|---|
| 99 | 102 | drm_mm_remove_node(&vgpu->gm.low_gm_node); |
|---|
| 100 | | - mutex_unlock(&dev_priv->drm.struct_mutex); |
|---|
| 103 | + mutex_unlock(&gt->ggtt->vm.mutex); |
|---|
| 101 | 104 | return ret; |
|---|
| 102 | 105 | } |
|---|
| 103 | 106 | |
|---|
| 104 | 107 | static void free_vgpu_gm(struct intel_vgpu *vgpu) |
|---|
| 105 | 108 | { |
|---|
| 106 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
|---|
| 109 | + struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 110 | + struct intel_gt *gt = gvt->gt; |
|---|
| 107 | 111 | |
|---|
| 108 | | - mutex_lock(&dev_priv->drm.struct_mutex); |
|---|
| 112 | + mutex_lock(&gt->ggtt->vm.mutex); |
|---|
| 109 | 113 | drm_mm_remove_node(&vgpu->gm.low_gm_node); |
|---|
| 110 | 114 | drm_mm_remove_node(&vgpu->gm.high_gm_node); |
|---|
| 111 | | - mutex_unlock(&dev_priv->drm.struct_mutex); |
|---|
| 115 | + mutex_unlock(&gt->ggtt->vm.mutex); |
|---|
| 112 | 116 | } |
|---|
| 113 | 117 | |
|---|
| 114 | 118 | /** |
|---|
| .. | .. |
|---|
| 125 | 129 | u32 fence, u64 value) |
|---|
| 126 | 130 | { |
|---|
| 127 | 131 | struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 128 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
|---|
| 129 | | - struct drm_i915_fence_reg *reg; |
|---|
| 132 | + struct drm_i915_private *i915 = gvt->gt->i915; |
|---|
| 133 | + struct intel_uncore *uncore = gvt->gt->uncore; |
|---|
| 134 | + struct i915_fence_reg *reg; |
|---|
| 130 | 135 | i915_reg_t fence_reg_lo, fence_reg_hi; |
|---|
| 131 | 136 | |
|---|
| 132 | | - assert_rpm_wakelock_held(dev_priv); |
|---|
| 137 | + assert_rpm_wakelock_held(uncore->rpm); |
|---|
| 133 | 138 | |
|---|
| 134 | | - if (WARN_ON(fence >= vgpu_fence_sz(vgpu))) |
|---|
| 139 | + if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu))) |
|---|
| 135 | 140 | return; |
|---|
| 136 | 141 | |
|---|
| 137 | 142 | reg = vgpu->fence.regs[fence]; |
|---|
| 138 | | - if (WARN_ON(!reg)) |
|---|
| 143 | + if (drm_WARN_ON(&i915->drm, !reg)) |
|---|
| 139 | 144 | return; |
|---|
| 140 | 145 | |
|---|
| 141 | 146 | fence_reg_lo = FENCE_REG_GEN6_LO(reg->id); |
|---|
| 142 | 147 | fence_reg_hi = FENCE_REG_GEN6_HI(reg->id); |
|---|
| 143 | 148 | |
|---|
| 144 | | - I915_WRITE(fence_reg_lo, 0); |
|---|
| 145 | | - POSTING_READ(fence_reg_lo); |
|---|
| 149 | + intel_uncore_write(uncore, fence_reg_lo, 0); |
|---|
| 150 | + intel_uncore_posting_read(uncore, fence_reg_lo); |
|---|
| 146 | 151 | |
|---|
| 147 | | - I915_WRITE(fence_reg_hi, upper_32_bits(value)); |
|---|
| 148 | | - I915_WRITE(fence_reg_lo, lower_32_bits(value)); |
|---|
| 149 | | - POSTING_READ(fence_reg_lo); |
|---|
| 152 | + intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value)); |
|---|
| 153 | + intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value)); |
|---|
| 154 | + intel_uncore_posting_read(uncore, fence_reg_lo); |
|---|
| 150 | 155 | } |
|---|
| 151 | 156 | |
|---|
| 152 | 157 | static void _clear_vgpu_fence(struct intel_vgpu *vgpu) |
|---|
| .. | .. |
|---|
| 160 | 165 | static void free_vgpu_fence(struct intel_vgpu *vgpu) |
|---|
| 161 | 166 | { |
|---|
| 162 | 167 | struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 163 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
|---|
| 164 | | - struct drm_i915_fence_reg *reg; |
|---|
| 168 | + struct intel_uncore *uncore = gvt->gt->uncore; |
|---|
| 169 | + struct i915_fence_reg *reg; |
|---|
| 170 | + intel_wakeref_t wakeref; |
|---|
| 165 | 171 | u32 i; |
|---|
| 166 | 172 | |
|---|
| 167 | | - if (WARN_ON(!vgpu_fence_sz(vgpu))) |
|---|
| 173 | + if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu))) |
|---|
| 168 | 174 | return; |
|---|
| 169 | 175 | |
|---|
| 170 | | - intel_runtime_pm_get(dev_priv); |
|---|
| 176 | + wakeref = intel_runtime_pm_get(uncore->rpm); |
|---|
| 171 | 177 | |
|---|
| 172 | | - mutex_lock(&dev_priv->drm.struct_mutex); |
|---|
| 178 | + mutex_lock(&gvt->gt->ggtt->vm.mutex); |
|---|
| 173 | 179 | _clear_vgpu_fence(vgpu); |
|---|
| 174 | 180 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) { |
|---|
| 175 | 181 | reg = vgpu->fence.regs[i]; |
|---|
| 176 | 182 | i915_unreserve_fence(reg); |
|---|
| 177 | 183 | vgpu->fence.regs[i] = NULL; |
|---|
| 178 | 184 | } |
|---|
| 179 | | - mutex_unlock(&dev_priv->drm.struct_mutex); |
|---|
| 185 | + mutex_unlock(&gvt->gt->ggtt->vm.mutex); |
|---|
| 180 | 186 | |
|---|
| 181 | | - intel_runtime_pm_put(dev_priv); |
|---|
| 187 | + intel_runtime_pm_put(uncore->rpm, wakeref); |
|---|
| 182 | 188 | } |
|---|
| 183 | 189 | |
|---|
| 184 | 190 | static int alloc_vgpu_fence(struct intel_vgpu *vgpu) |
|---|
| 185 | 191 | { |
|---|
| 186 | 192 | struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 187 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
|---|
| 188 | | - struct drm_i915_fence_reg *reg; |
|---|
| 193 | + struct intel_uncore *uncore = gvt->gt->uncore; |
|---|
| 194 | + struct i915_fence_reg *reg; |
|---|
| 195 | + intel_wakeref_t wakeref; |
|---|
| 189 | 196 | int i; |
|---|
| 190 | 197 | |
|---|
| 191 | | - intel_runtime_pm_get(dev_priv); |
|---|
| 198 | + wakeref = intel_runtime_pm_get(uncore->rpm); |
|---|
| 192 | 199 | |
|---|
| 193 | 200 | /* Request fences from host */ |
|---|
| 194 | | - mutex_lock(&dev_priv->drm.struct_mutex); |
|---|
| 201 | + mutex_lock(&gvt->gt->ggtt->vm.mutex); |
|---|
| 195 | 202 | |
|---|
| 196 | 203 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) { |
|---|
| 197 | | - reg = i915_reserve_fence(dev_priv); |
|---|
| 204 | + reg = i915_reserve_fence(gvt->gt->ggtt); |
|---|
| 198 | 205 | if (IS_ERR(reg)) |
|---|
| 199 | 206 | goto out_free_fence; |
|---|
| 200 | 207 | |
|---|
| .. | .. |
|---|
| 203 | 210 | |
|---|
| 204 | 211 | _clear_vgpu_fence(vgpu); |
|---|
| 205 | 212 | |
|---|
| 206 | | - mutex_unlock(&dev_priv->drm.struct_mutex); |
|---|
| 207 | | - intel_runtime_pm_put(dev_priv); |
|---|
| 213 | + mutex_unlock(&gvt->gt->ggtt->vm.mutex); |
|---|
| 214 | + intel_runtime_pm_put(uncore->rpm, wakeref); |
|---|
| 208 | 215 | return 0; |
|---|
| 216 | + |
|---|
| 209 | 217 | out_free_fence: |
|---|
| 210 | 218 | gvt_vgpu_err("Failed to alloc fences\n"); |
|---|
| 211 | 219 | /* Return fences to host, if fail */ |
|---|
| .. | .. |
|---|
| 216 | 224 | i915_unreserve_fence(reg); |
|---|
| 217 | 225 | vgpu->fence.regs[i] = NULL; |
|---|
| 218 | 226 | } |
|---|
| 219 | | - mutex_unlock(&dev_priv->drm.struct_mutex); |
|---|
| 220 | | - intel_runtime_pm_put(dev_priv); |
|---|
| 227 | + mutex_unlock(&gvt->gt->ggtt->vm.mutex); |
|---|
| 228 | + intel_runtime_pm_put_unchecked(uncore->rpm); |
|---|
| 221 | 229 | return -ENOSPC; |
|---|
| 222 | 230 | } |
|---|
| 223 | 231 | |
|---|
| .. | .. |
|---|
| 311 | 319 | */ |
|---|
| 312 | 320 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) |
|---|
| 313 | 321 | { |
|---|
| 314 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
|---|
| 322 | + struct intel_gvt *gvt = vgpu->gvt; |
|---|
| 323 | + intel_wakeref_t wakeref; |
|---|
| 315 | 324 | |
|---|
| 316 | | - intel_runtime_pm_get(dev_priv); |
|---|
| 317 | | - _clear_vgpu_fence(vgpu); |
|---|
| 318 | | - intel_runtime_pm_put(dev_priv); |
|---|
| 325 | + with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref) |
|---|
| 326 | + _clear_vgpu_fence(vgpu); |
|---|
| 319 | 327 | } |
|---|
| 320 | 328 | |
|---|
| 321 | 329 | /** |
|---|