2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -29,7 +29,6 @@
  */
 
 #include <linux/dma-buf.h>
-#include <drm/drmP.h>
 #include <linux/vfio.h>
 
 #include "i915_drv.h"
@@ -37,47 +36,104 @@
 
 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
 
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+                                unsigned long size,
+                                dma_addr_t dma_addr)
+{
+        int ret = 0;
+
+        if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+                ret = -EINVAL;
+
+        return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+                                   dma_addr_t dma_addr)
+{
+        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
 static int vgpu_gem_get_pages(
                 struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+        struct intel_vgpu *vgpu;
         struct sg_table *st;
         struct scatterlist *sg;
-        int i, ret;
+        int i, j, ret;
         gen8_pte_t __iomem *gtt_entries;
         struct intel_vgpu_fb_info *fb_info;
+        u32 page_num;
 
         fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
-        if (WARN_ON(!fb_info))
+        if (drm_WARN_ON(&dev_priv->drm, !fb_info))
+                return -ENODEV;
+
+        vgpu = fb_info->obj->vgpu;
+        if (drm_WARN_ON(&dev_priv->drm, !vgpu))
                 return -ENODEV;
 
         st = kmalloc(sizeof(*st), GFP_KERNEL);
         if (unlikely(!st))
                 return -ENOMEM;
 
-        ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+        page_num = obj->base.size >> PAGE_SHIFT;
+        ret = sg_alloc_table(st, page_num, GFP_KERNEL);
         if (ret) {
                 kfree(st);
                 return ret;
         }
         gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                 (fb_info->start >> PAGE_SHIFT);
-        for_each_sg(st->sgl, sg, fb_info->size, i) {
+        for_each_sg(st->sgl, sg, page_num, i) {
+                dma_addr_t dma_addr =
+                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+                if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+                        ret = -EINVAL;
+                        goto out;
+                }
+
                 sg->offset = 0;
                 sg->length = PAGE_SIZE;
-                sg_dma_address(sg) =
-                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                 sg_dma_len(sg) = PAGE_SIZE;
+                sg_dma_address(sg) = dma_addr;
         }
 
         __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+        if (ret) {
+                dma_addr_t dma_addr;
 
-        return 0;
+                for_each_sg(st->sgl, sg, i, j) {
+                        dma_addr = sg_dma_address(sg);
+                        if (dma_addr)
+                                vgpu_unpin_dma_address(vgpu, dma_addr);
+                }
+                sg_free_table(st);
+                kfree(st);
+        }
+
+        return ret;
+
 }
 
 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                 struct sg_table *pages)
 {
+        struct scatterlist *sg;
+
+        if (obj->base.dma_buf) {
+                struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+                struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+                struct intel_vgpu *vgpu = obj->vgpu;
+                int i;
+
+                for_each_sg(pages->sgl, sg, fb_info->size, i)
+                        vgpu_unpin_dma_address(vgpu,
+                                               sg_dma_address(sg));
+        }
+
         sg_free_table(pages);
         kfree(pages);
 }
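
Note on the unwind path in vgpu_gem_get_pages() above: on a pin failure the
code jumps to "out" with i holding the index of the failing entry, and
for_each_sg(st->sgl, sg, i, j) then walks only the first i scatterlist
entries, i.e. exactly the pages that were successfully pinned, unpinning each
before the table is freed. A minimal standalone sketch of that pattern, with
hypothetical pin_page()/unpin_page() helpers standing in for the
intel_gvt_hypervisor_dma_{pin,unmap}_guest_page() calls:

#include <stdio.h>

#define NUM_PAGES 8

static int pin_page(int idx)
{
        return idx == 5 ? -1 : 0;       /* simulate a failure at entry 5 */
}

static void unpin_page(int idx)
{
        printf("unpinned entry %d\n", idx);
}

int main(void)
{
        int i, j, ret = 0;

        for (i = 0; i < NUM_PAGES; i++) {
                if (pin_page(i)) {
                        ret = -1;
                        break;
                }
        }

        if (ret) {
                /* like for_each_sg(st->sgl, sg, i, j): unwind only the
                 * i entries pinned before the failure */
                for (j = 0; j < i; j++)
                        unpin_page(j);
        }

        return ret ? 1 : 0;
}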
@@ -142,6 +198,7 @@
 }
 
 static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+        .name = "i915_gem_object_vgpu",
         .flags = I915_GEM_OBJECT_IS_PROXY,
         .get_pages = vgpu_gem_get_pages,
         .put_pages = vgpu_gem_put_pages,
@@ -151,22 +208,22 @@
 static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                 struct intel_vgpu_fb_info *info)
 {
+        static struct lock_class_key lock_class;
         struct drm_i915_private *dev_priv = to_i915(dev);
         struct drm_i915_gem_object *obj;
 
-        obj = i915_gem_object_alloc(dev_priv);
+        obj = i915_gem_object_alloc();
         if (obj == NULL)
                 return NULL;
 
         drm_gem_private_object_init(dev, &obj->base,
-                info->size << PAGE_SHIFT);
-        i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+                roundup(info->size, PAGE_SIZE));
+        i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+        i915_gem_object_set_readonly(obj);
 
         obj->read_domains = I915_GEM_DOMAIN_GTT;
         obj->write_domain = 0;
-        if (IS_SKYLAKE(dev_priv)
-            || IS_KABYLAKE(dev_priv)
-            || IS_BROXTON(dev_priv)) {
+        if (INTEL_GEN(dev_priv) >= 9) {
                 unsigned int tiling_mode = 0;
                 unsigned int stride = 0;
 
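
Note on the sizing change in vgpu_create_gem() above: info->size used to be a
page count (hence the old info->size << PAGE_SHIFT), while after this diff it
carries the framebuffer size in bytes, so the GEM object is sized with
roundup(info->size, PAGE_SIZE) instead. A small sketch of the arithmetic,
assuming a 4 KiB page and a roundup() macro matching the kernel's:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
        unsigned long fb_bytes = 300000;        /* not page-aligned */

        /* 300000 bytes round up to 74 pages = 303104 bytes */
        printf("GEM object size: %lu bytes\n",
               roundup(fb_bytes, PAGE_SIZE));
        return 0;
}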
@@ -209,10 +266,11 @@
                 struct intel_vgpu_fb_info *info,
                 int plane_id)
 {
-        struct drm_i915_private *dev_priv = to_i915(dev);
         struct intel_vgpu_primary_plane_format p;
         struct intel_vgpu_cursor_plane_format c;
-        int ret;
+        int ret, tile_height = 1;
+
+        memset(info, 0, sizeof(*info));
 
         if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                 ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -231,19 +289,19 @@
                         break;
                 case PLANE_CTL_TILED_X:
                         info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                        tile_height = 8;
                         break;
                 case PLANE_CTL_TILED_Y:
                         info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                        tile_height = 32;
                         break;
                 case PLANE_CTL_TILED_YF:
                         info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                        tile_height = 32;
                         break;
                 default:
                         gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                 }
-
-                info->size = (((p.stride * p.height * p.bpp) / 8) +
-                                (PAGE_SIZE - 1)) >> PAGE_SHIFT;
         } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                 ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                 if (ret)
@@ -265,14 +323,12 @@
                         info->x_hot = UINT_MAX;
                         info->y_hot = UINT_MAX;
                 }
-
-                info->size = (((info->stride * c.height * c.bpp) / 8)
-                                + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
         } else {
                 gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                 return -EINVAL;
         }
 
+        info->size = info->stride * roundup(info->height, tile_height);
         if (info->size == 0) {
                 gvt_vgpu_err("fb size is zero\n");
                 return -EINVAL;
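
Worked example of the new size formula above, assuming a Y-tiled primary
plane (tile_height = 32) with a 16384-byte stride and 1080 scan lines:
roundup(1080, 32) = 1088, so info->size = 16384 * 1088 = 17825792 bytes. The
per-plane byte computations from stride, height and bpp are replaced by this
single stride-times-rounded-height formula, and vgpu_create_gem() then rounds
the byte count up to whole pages.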
@@ -280,11 +336,6 @@
 
         if (info->start & (PAGE_SIZE - 1)) {
                 gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
-                return -EFAULT;
-        }
-        if (((info->start >> PAGE_SHIFT) + info->size) >
-                ggtt_total_entries(&dev_priv->ggtt)) {
-                gvt_vgpu_err("Invalid GTT offset or size\n");
                 return -EFAULT;
         }
 
@@ -367,7 +418,7 @@
 
 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 {
-        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
         struct vfio_device_gfx_plane_info *gfx_plane_info = args;
         struct intel_vgpu_dmabuf_obj *dmabuf_obj;
         struct intel_vgpu_fb_info fb_info;
@@ -473,7 +524,7 @@
 /* To associate an exposed dmabuf with the dmabuf_obj */
 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 {
-        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
         struct intel_vgpu_dmabuf_obj *dmabuf_obj;
         struct drm_i915_gem_object *obj;
         struct dma_buf *dmabuf;
@@ -498,14 +549,12 @@
 
         obj->gvt_info = dmabuf_obj->info;
 
-        dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+        dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
         if (IS_ERR(dmabuf)) {
                 gvt_vgpu_err("export dma-buf failed\n");
                 ret = PTR_ERR(dmabuf);
                 goto out_free_gem;
         }
-
-        i915_gem_object_put(obj);
 
         ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
         if (ret < 0) {
@@ -531,6 +580,8 @@
                         file_count(dmabuf->file),
                         kref_read(&obj->base.refcount));
 
+        i915_gem_object_put(obj);
+
         return dmabuf_fd;
 
 out_free_dmabuf:
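
Note on the reference ordering in intel_vgpu_get_dmabuf(): the local GEM
reference is now dropped only after dma_buf_fd() has succeeded and the debug
message has been printed, instead of right after the export. The exported
dma-buf holds its own reference to the object, so the local one becomes
redundant once the fd exists; dropping it before dma_buf_fd() left the error
paths operating on an object whose last reference might already be gone. A
toy sketch of that ordering (the refcount type and helpers below are
hypothetical, not the GEM API):

#include <stdio.h>

struct toy_obj {
        int refs;
};

static void toy_get(struct toy_obj *o)
{
        o->refs++;
}

static void toy_put(struct toy_obj *o)
{
        if (--o->refs == 0)
                printf("object freed\n");
}

/* export takes its own reference only on success */
static int export_fd(struct toy_obj *o, int fail)
{
        if (fail)
                return -1;      /* caller's ref must still be alive here */
        toy_get(o);
        return 42;
}

int main(void)
{
        struct toy_obj o = { .refs = 1 };       /* caller's local reference */
        int fd = export_fd(&o, 0);

        if (fd < 0) {
                toy_put(&o);    /* unwind: safe, object still held */
                return 1;
        }
        toy_put(&o);    /* drop the local ref only after export succeeded */
        printf("fd %d keeps the object alive (refs=%d)\n", fd, o.refs);
        return 0;
}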