@@ -29,7 +29,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <drm/drmP.h>
 #include <linux/vfio.h>
 
 #include "i915_drv.h"
@@ -37,47 +36,104 @@
 
 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
 
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+				unsigned long size,
+				dma_addr_t dma_addr)
+{
+	int ret = 0;
+
+	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+				   dma_addr_t dma_addr)
+{
+	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
 static int vgpu_gem_get_pages(
 		struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct intel_vgpu *vgpu;
 	struct sg_table *st;
 	struct scatterlist *sg;
-	int i, ret;
+	int i, j, ret;
 	gen8_pte_t __iomem *gtt_entries;
 	struct intel_vgpu_fb_info *fb_info;
+	u32 page_num;
 
 	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
-	if (WARN_ON(!fb_info))
+	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
+		return -ENODEV;
+
+	vgpu = fb_info->obj->vgpu;
+	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
 		return -ENODEV;
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (unlikely(!st))
 		return -ENOMEM;
 
-	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+	page_num = obj->base.size >> PAGE_SHIFT;
+	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
 	if (ret) {
 		kfree(st);
 		return ret;
 	}
 	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(fb_info->start >> PAGE_SHIFT);
-	for_each_sg(st->sgl, sg, fb_info->size, i) {
+	for_each_sg(st->sgl, sg, page_num, i) {
+		dma_addr_t dma_addr =
+			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		sg->offset = 0;
 		sg->length = PAGE_SIZE;
-		sg_dma_address(sg) =
-			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
 		sg_dma_len(sg) = PAGE_SIZE;
+		sg_dma_address(sg) = dma_addr;
 	}
 
 	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+	if (ret) {
+		dma_addr_t dma_addr;
 
-	return 0;
+		for_each_sg(st->sgl, sg, i, j) {
+			dma_addr = sg_dma_address(sg);
+			if (dma_addr)
+				vgpu_unpin_dma_address(vgpu, dma_addr);
+		}
+		sg_free_table(st);
+		kfree(st);
+	}
+
+	return ret;
+
 }
 
 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
 		struct sg_table *pages)
 {
+	struct scatterlist *sg;
+
+	if (obj->base.dma_buf) {
+		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+		struct intel_vgpu *vgpu = obj->vgpu;
+		int i;
+
+		for_each_sg(pages->sgl, sg, fb_info->size, i)
+			vgpu_unpin_dma_address(vgpu,
+					       sg_dma_address(sg));
+	}
+
 	sg_free_table(pages);
 	kfree(pages);
 }
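
In the hunk above, the new error path in vgpu_gem_get_pages() unwinds only the scatterlist entries that were actually pinned: on a pin failure at index i, the rollback loop for_each_sg(st->sgl, sg, i, j) walks the first i entries (those before the failed one) and unpins each non-zero DMA address before freeing the table. Below is a minimal standalone sketch (not part of the patch) of that unwind-on-partial-failure idiom; pin_page() and unpin_page() are hypothetical stand-ins for the hypervisor pin/unpin helpers.

/*
 * Sketch of the unwind idiom: pin entries one at a time and, when a pin
 * fails, release only the entries that were pinned before the failure.
 */
#include <stdio.h>

static int pin_page(int idx)
{
	return idx == 3 ? -1 : 0;	/* pretend the 4th pin fails */
}

static void unpin_page(int idx)
{
	printf("unpinning entry %d\n", idx);
}

static int pin_all(int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (pin_page(i)) {
			while (i--)	/* roll back entries 0 .. i-1 only */
				unpin_page(i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return pin_all(8) ? 1 : 0;
}
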
@@ -142,6 +198,7 @@
 }
 
 static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+	.name = "i915_gem_object_vgpu",
 	.flags = I915_GEM_OBJECT_IS_PROXY,
 	.get_pages = vgpu_gem_get_pages,
 	.put_pages = vgpu_gem_put_pages,
@@ -151,22 +208,22 @@
 static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		struct intel_vgpu_fb_info *info)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 
-	obj = i915_gem_object_alloc(dev_priv);
+	obj = i915_gem_object_alloc();
 	if (obj == NULL)
 		return NULL;
 
 	drm_gem_private_object_init(dev, &obj->base,
-		info->size << PAGE_SHIFT);
-	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+		roundup(info->size, PAGE_SIZE));
+	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+	i915_gem_object_set_readonly(obj);
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
 
@@ -209,10 +266,11 @@
 		struct intel_vgpu_fb_info *info,
 		int plane_id)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_vgpu_primary_plane_format p;
 	struct intel_vgpu_cursor_plane_format c;
-	int ret;
+	int ret, tile_height = 1;
+
+	memset(info, 0, sizeof(*info));
 
 	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
 		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -231,19 +289,19 @@
 			break;
 		case PLANE_CTL_TILED_X:
 			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+			tile_height = 8;
 			break;
 		case PLANE_CTL_TILED_Y:
 			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+			tile_height = 32;
 			break;
 		case PLANE_CTL_TILED_YF:
 			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+			tile_height = 32;
 			break;
 		default:
 			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
 		}
-
-		info->size = (((p.stride * p.height * p.bpp) / 8) +
-				(PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
 		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
 		if (ret)
@@ -265,14 +323,12 @@
 			info->x_hot = UINT_MAX;
 			info->y_hot = UINT_MAX;
 		}
-
-		info->size = (((info->stride * c.height * c.bpp) / 8)
-				+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else {
 		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
 		return -EINVAL;
 	}
 
+	info->size = info->stride * roundup(info->height, tile_height);
 	if (info->size == 0) {
 		gvt_vgpu_err("fb size is zero\n");
 		return -EINVAL;
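
With the two hunks above, info->size changes meaning from a page count to a byte count: the old code divided a bpp-based byte size into pages per plane type, while the new code computes stride * roundup(height, tile_height) in bytes, so tiled framebuffers are sized in whole tile rows, and leaves the page rounding to vgpu_create_gem(), which now calls roundup(info->size, PAGE_SIZE). Below is a small standalone sketch (not part of the patch) of that arithmetic; the stride, height, and tile_height values are assumptions for illustration only.

/*
 * Sketch of the new fb size arithmetic; example values are assumed
 * (7680-byte stride, 1080 visible rows, Y-tiling with 32-row tiles).
 */
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static unsigned long long roundup_to(unsigned long long x, unsigned long long step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	unsigned long long stride = 7680;	/* bytes per row (assumed) */
	unsigned long long height = 1080;	/* visible rows (assumed) */
	unsigned long long tile_height = 32;	/* rows per Y tile */

	/* fb size in bytes, padded to whole tile rows */
	unsigned long long size = stride * roundup_to(height, tile_height);

	printf("fb size: %llu bytes, %llu pages after page rounding\n",
	       size, roundup_to(size, PAGE_SIZE) / PAGE_SIZE);
	return 0;
}
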
@@ -280,11 +336,6 @@
 
 	if (info->start & (PAGE_SIZE - 1)) {
 		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
-		return -EFAULT;
-	}
-	if (((info->start >> PAGE_SHIFT) + info->size) >
-		ggtt_total_entries(&dev_priv->ggtt)) {
-		gvt_vgpu_err("Invalid GTT offset or size\n");
 		return -EFAULT;
 	}
 
@@ -367,7 +418,7 @@
 
 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 {
-	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
 	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
 	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 	struct intel_vgpu_fb_info fb_info;
@@ -473,7 +524,7 @@
 /* To associate an exposed dmabuf with the dmabuf_obj */
 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 {
-	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
 	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
@@ -498,14 +549,12 @@
 
 	obj->gvt_info = dmabuf_obj->info;
 
-	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
 	if (IS_ERR(dmabuf)) {
 		gvt_vgpu_err("export dma-buf failed\n");
 		ret = PTR_ERR(dmabuf);
 		goto out_free_gem;
 	}
-
-	i915_gem_object_put(obj);
 
 	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
 	if (ret < 0) {
@@ -531,6 +580,8 @@
 		    file_count(dmabuf->file),
 		    kref_read(&obj->base.refcount));
 
+	i915_gem_object_put(obj);
+
 	return dmabuf_fd;
 
 out_free_dmabuf: