forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,7 +31,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
 #include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -57,6 +57,8 @@
 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
 #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
 #define OPREGION_SIGNATURE "IntelGraphicsMem"

 struct vfio_region;
@@ -74,6 +76,11 @@
 u32 flags;
 const struct intel_vgpu_regops *ops;
 void *data;
+};
+
+struct vfio_edid_region {
+ struct vfio_region_gfx_edid vfio_edid_regs;
+ void *edid_blob;
 };

 struct kvmgt_pgfn {
@@ -101,6 +108,37 @@
 struct kref ref;
 };

+struct kvmgt_vdev {
+ struct intel_vgpu *vgpu;
+ struct mdev_device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (eg.
+ * scratch pages). This help to reduce dma setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_device *vfio_device;
+ struct vfio_group *vfio_group;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+ return intel_vgpu_vdev(vgpu);
+}
+
 static inline bool handle_valid(unsigned long handle)
 {
 return !!(handle & ~0xff);
@@ -113,6 +151,8 @@
 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 unsigned long size)
 {
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 int total_pages;
 int npage;
 int ret;
@@ -122,8 +162,8 @@
 for (npage = 0; npage < total_pages; npage++) {
 unsigned long cur_gfn = gfn + npage;

- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
- WARN_ON(ret != 1);
+ ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
+ drm_WARN_ON(&i915->drm, ret != 1);
 }
 }

@@ -131,6 +171,7 @@
 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 unsigned long size, struct page **page)
 {
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 unsigned long base_pfn = 0;
 int total_pages;
 int npage;
@@ -145,8 +186,8 @@
 unsigned long cur_gfn = gfn + npage;
 unsigned long pfn;

- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
 if (ret != 1) {
 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
 cur_gfn, ret);
@@ -180,7 +221,7 @@
 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 dma_addr_t *dma_addr, unsigned long size)
 {
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 struct page *page = NULL;
 int ret;

@@ -203,7 +244,7 @@
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 dma_addr_t dma_addr, unsigned long size)
 {
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;

 dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 gvt_unpin_guest_page(vgpu, gfn, size);
@@ -212,7 +253,7 @@
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
 dma_addr_t dma_addr)
 {
- struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
 struct gvt_dma *itr;

 while (node) {
@@ -230,7 +271,7 @@

 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 {
- struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
 struct gvt_dma *itr;

 while (node) {
@@ -251,6 +292,7 @@
 {
 struct gvt_dma *new, *itr;
 struct rb_node **link, *parent = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

 new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
 if (!new)
@@ -263,7 +305,7 @@
 kref_init(&new->ref);

 /* gfn_cache maps gfn to struct gvt_dma. */
- link = &vgpu->vdev.gfn_cache.rb_node;
+ link = &vdev->gfn_cache.rb_node;
 while (*link) {
 parent = *link;
 itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -274,11 +316,11 @@
 link = &parent->rb_right;
 }
 rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_insert_color(&new->gfn_node, &vdev->gfn_cache);

 /* dma_addr_cache maps dma addr to struct gvt_dma. */
 parent = NULL;
- link = &vgpu->vdev.dma_addr_cache.rb_node;
+ link = &vdev->dma_addr_cache.rb_node;
 while (*link) {
 parent = *link;
 itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -289,46 +331,51 @@
 link = &parent->rb_right;
 }
 rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);

- vgpu->vdev.nr_cache_entries++;
+ vdev->nr_cache_entries++;
 return 0;
 }

 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 struct gvt_dma *entry)
 {
- rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
- rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ rb_erase(&entry->gfn_node, &vdev->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
 kfree(entry);
- vgpu->vdev.nr_cache_entries--;
+ vdev->nr_cache_entries--;
 }

 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 {
 struct gvt_dma *dma;
 struct rb_node *node = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

 for (;;) {
- mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.gfn_cache);
+ mutex_lock(&vdev->cache_lock);
+ node = rb_first(&vdev->gfn_cache);
 if (!node) {
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
 break;
 }
 dma = rb_entry(node, struct gvt_dma, gfn_node);
 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
 __gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
 }
 }

 static void gvt_cache_init(struct intel_vgpu *vgpu)
 {
- vgpu->vdev.gfn_cache = RB_ROOT;
- vgpu->vdev.dma_addr_cache = RB_ROOT;
- vgpu->vdev.nr_cache_entries = 0;
- mutex_init(&vgpu->vdev.cache_lock);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ vdev->gfn_cache = RB_ROOT;
+ vdev->dma_addr_cache = RB_ROOT;
+ vdev->nr_cache_entries = 0;
+ mutex_init(&vdev->cache_lock);
 }

 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -402,16 +449,18 @@
 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
 size_t count, loff_t *ppos, bool iswrite)
 {
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
 VFIO_PCI_NUM_REGIONS;
- void *base = vgpu->vdev.region[i].data;
+ void *base = vdev->region[i].data;
 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

- if (pos >= vgpu->vdev.region[i].size || iswrite) {
+
+ if (pos >= vdev->region[i].size || iswrite) {
 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
 return -EINVAL;
 }
- count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ count = min(count, (size_t)(vdev->region[i].size - pos));
 memcpy(buf, base + pos, count);

 return count;
@@ -427,37 +476,144 @@
 .release = intel_vgpu_reg_release_opregion,
 };

+static int handle_edid_regs(struct intel_vgpu *vgpu,
+ struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+ unsigned int data;
+
+ if (offset + count > sizeof(*regs))
+ return -EINVAL;
+
+ if (count != 4)
+ return -EINVAL;
+
+ if (is_write) {
+ data = *((unsigned int *)buf);
+ switch (offset) {
+ case offsetof(struct vfio_region_gfx_edid, link_state):
+ if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+ if (!drm_edid_block_valid(
+ (u8 *)region->edid_blob,
+ 0,
+ true,
+ NULL)) {
+ gvt_vgpu_err("invalid EDID blob\n");
+ return -EINVAL;
+ }
+ intel_gvt_ops->emulate_hotplug(vgpu, true);
+ } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+ intel_gvt_ops->emulate_hotplug(vgpu, false);
+ else {
+ gvt_vgpu_err("invalid EDID link state %d\n",
+ regs->link_state);
+ return -EINVAL;
+ }
+ regs->link_state = data;
+ break;
+ case offsetof(struct vfio_region_gfx_edid, edid_size):
+ if (data > regs->edid_max_size) {
+ gvt_vgpu_err("EDID size is bigger than %d!\n",
+ regs->edid_max_size);
+ return -EINVAL;
+ }
+ regs->edid_size = data;
+ break;
+ default:
+ /* read-only regs */
+ gvt_vgpu_err("write read-only EDID region at offset %d\n",
+ offset);
+ return -EPERM;
+ }
+ } else {
+ memcpy(buf, (char *)regs + offset, count);
+ }
+
+ return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ if (offset + count > region->vfio_edid_regs.edid_size)
+ return -EINVAL;
+
+ if (is_write)
+ memcpy(region->edid_blob + offset, buf, count);
+ else
+ memcpy(buf, region->edid_blob + offset, count);
+
+ return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ struct vfio_edid_region *region =
+ (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos < region->vfio_edid_regs.edid_offset) {
+ ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+ } else {
+ pos -= EDID_BLOB_OFFSET;
+ ret = handle_edid_blob(region, buf, count, pos, iswrite);
+ }
+
+ if (ret < 0)
+ gvt_vgpu_err("failed to access EDID region\n");
+
+ return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+ kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+ .rw = intel_vgpu_reg_rw_edid,
+ .release = intel_vgpu_reg_release_edid,
+};
+
 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
 unsigned int type, unsigned int subtype,
 const struct intel_vgpu_regops *ops,
 size_t size, u32 flags, void *data)
 {
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 struct vfio_region *region;

- region = krealloc(vgpu->vdev.region,
- (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
 GFP_KERNEL);
 if (!region)
 return -ENOMEM;

- vgpu->vdev.region = region;
- vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
- vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
- vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
- vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
- vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
- vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
- vgpu->vdev.num_regions++;
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+ vdev->num_regions++;
 return 0;
 }

 static int kvmgt_get_vfio_device(void *p_vgpu)
 {
 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);

- vgpu->vdev.vfio_device = vfio_device_get_from_dev(
- mdev_dev(vgpu->vdev.mdev));
- if (!vgpu->vdev.vfio_device) {
+ vdev->vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vdev->mdev));
+ if (!vdev->vfio_device) {
 gvt_vgpu_err("failed to get vfio device\n");
 return -ENODEV;
 }
@@ -493,12 +649,44 @@
 return ret;
 }

+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ struct vfio_edid_region *base;
+ int ret;
+
+ base = kzalloc(sizeof(*base), GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+
+ /* TODO: Add multi-port and EDID extension block support */
+ base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+ base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+ base->vfio_edid_regs.edid_size = EDID_SIZE;
+ base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+ base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+ base->edid_blob = port->edid->edid_block;
+
+ ret = intel_vgpu_register_reg(vgpu,
+ VFIO_REGION_TYPE_GFX,
+ VFIO_REGION_SUBTYPE_GFX_EDID,
+ &intel_vgpu_regops_edid, EDID_SIZE,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_CAPS, base);
+
+ return ret;
+}
+
 static void kvmgt_put_vfio_device(void *vgpu)
 {
- if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+
+ if (WARN_ON(!vdev->vfio_device))
 return;

- vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+ vfio_device_put(vdev->vfio_device);
 }

 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
@@ -527,9 +715,9 @@
 goto out;
 }

- INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+ INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);

- vgpu->vdev.mdev = mdev;
+ kvmgt_vdev(vgpu)->mdev = mdev;
 mdev_set_drvdata(mdev, vgpu);

 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -554,9 +742,10 @@
 static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 unsigned long action, void *data)
 {
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.iommu_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ iommu_notifier);
+ struct intel_vgpu *vgpu = vdev->vgpu;

 if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -566,7 +755,7 @@
 iov_pfn = unmap->iova >> PAGE_SHIFT;
 end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

- mutex_lock(&vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
 for (; iov_pfn < end_iov_pfn; iov_pfn++) {
 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
 if (!entry)
@@ -576,7 +765,7 @@
 entry->size);
 __gvt_cache_remove_entry(vgpu, entry);
 }
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
 }

 return NOTIFY_OK;
@@ -585,16 +774,16 @@
 static int intel_vgpu_group_notifier(struct notifier_block *nb,
 unsigned long action, void *data)
 {
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.group_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ group_notifier);

 /* the only action we care about */
 if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->vdev.kvm = data;
+ vdev->kvm = data;

 if (!data)
- schedule_work(&vgpu->vdev.release_work);
+ schedule_work(&vdev->release_work);
 }

 return NOTIFY_OK;
@@ -603,15 +792,17 @@
 static int intel_vgpu_open(struct mdev_device *mdev)
 {
 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 unsigned long events;
 int ret;
+ struct vfio_group *vfio_group;

- vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;

 events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
 if (ret != 0) {
 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
 ret);
@@ -620,11 +811,27 @@

 events = VFIO_GROUP_NOTIFY_SET_KVM;
 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
 if (ret != 0) {
 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
 ret);
 goto undo_iommu;
+ }
+
+ vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ if (IS_ERR_OR_NULL(vfio_group)) {
+ ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+ gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+ goto undo_register;
+ }
+ vdev->vfio_group = vfio_group;
+
+ /* Take a module reference as mdev core doesn't take
+ * a reference for vendor driver.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ ret = -ENODEV;
+ goto undo_group;
 }

 ret = kvmgt_guest_init(mdev);
@@ -633,58 +840,71 @@

 intel_gvt_ops->vgpu_activate(vgpu);

- atomic_set(&vgpu->vdev.released, 0);
+ atomic_set(&vdev->released, 0);
 return ret;

 undo_group:
+ vfio_group_put_external_user(vdev->vfio_group);
+ vdev->vfio_group = NULL;
+
+undo_register:
 vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);

 undo_iommu:
 vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
 out:
 return ret;
 }

 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 {
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 struct eventfd_ctx *trigger;

- trigger = vgpu->vdev.msi_trigger;
+ trigger = vdev->msi_trigger;
 if (trigger) {
 eventfd_ctx_put(trigger);
- vgpu->vdev.msi_trigger = NULL;
+ vdev->msi_trigger = NULL;
 }
 }

 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 struct kvmgt_guest_info *info;
 int ret;

 if (!handle_valid(vgpu->handle))
 return;

- if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ if (atomic_cmpxchg(&vdev->released, 0, 1))
 return;

 intel_gvt_ops->vgpu_release(vgpu);

- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
- WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+ &vdev->iommu_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for iommu failed: %d\n", ret);

- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
- WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+ &vdev->group_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for group failed: %d\n", ret);
+
+ /* dereference module reference taken at open */
+ module_put(THIS_MODULE);

 info = (struct kvmgt_guest_info *)vgpu->handle;
 kvmgt_guest_exit(info);

 intel_vgpu_release_msi_eventfd_ctx(vgpu);
+ vfio_group_put_external_user(vdev->vfio_group);

- vgpu->vdev.kvm = NULL;
+ vdev->kvm = NULL;
 vgpu->handle = 0;
 }

@@ -697,13 +917,13 @@

 static void intel_vgpu_release_work(struct work_struct *work)
 {
- struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
- vdev.release_work);
+ struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
+ release_work);

- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vdev->vgpu);
 }

-static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
+static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 {
 u32 start_lo, start_hi;
 u32 mem_type;
@@ -730,10 +950,10 @@
 return ((u64)start_hi << 32) | start_lo;
 }

-static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
 void *buf, unsigned int count, bool is_write)
 {
- uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+ u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
 int ret;

 if (is_write)
@@ -745,16 +965,16 @@
 return ret;
 }

-static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 {
 return off >= vgpu_aperture_offset(vgpu) &&
 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
 }

-static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 void *buf, unsigned long count, bool is_write)
 {
- void *aperture_va;
+ void __iomem *aperture_va;

 if (!intel_vgpu_in_aperture(vgpu, off) ||
 !intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -762,16 +982,16 @@
 return -EINVAL;
 }

- aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
 ALIGN_DOWN(off, PAGE_SIZE),
 count + offset_in_page(off));
 if (!aperture_va)
 return -EIO;

 if (is_write)
- memcpy(aperture_va + offset_in_page(off), buf, count);
+ memcpy_toio(aperture_va + offset_in_page(off), buf, count);
 else
- memcpy(buf, aperture_va + offset_in_page(off), count);
+ memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

 io_mapping_unmap(aperture_va);

@@ -782,12 +1002,13 @@
 size_t count, loff_t *ppos, bool is_write)
 {
 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
 int ret = -EINVAL;


- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
 gvt_vgpu_err("invalid index: %u\n", index);
 return -EINVAL;
 }
@@ -816,11 +1037,11 @@
 case VFIO_PCI_ROM_REGION_INDEX:
 break;
 default:
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
 return -EINVAL;

 index -= VFIO_PCI_NUM_REGIONS;
- return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ return vdev->region[index].ops->rw(vgpu, buf, count,
 ppos, is_write);
 }

@@ -1039,7 +1260,7 @@

 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
 unsigned int index, unsigned int start,
- unsigned int count, uint32_t flags,
+ unsigned int count, u32 flags,
 void *data)
 {
 return 0;
@@ -1047,21 +1268,21 @@

 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
 unsigned int index, unsigned int start,
- unsigned int count, uint32_t flags, void *data)
+ unsigned int count, u32 flags, void *data)
 {
 return 0;
 }

 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
 unsigned int index, unsigned int start, unsigned int count,
- uint32_t flags, void *data)
+ u32 flags, void *data)
 {
 return 0;
 }

 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
 unsigned int index, unsigned int start, unsigned int count,
- uint32_t flags, void *data)
+ u32 flags, void *data)
 {
 struct eventfd_ctx *trigger;

@@ -1073,19 +1294,19 @@
 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
 return PTR_ERR(trigger);
 }
- vgpu->vdev.msi_trigger = trigger;
+ kvmgt_vdev(vgpu)->msi_trigger = trigger;
 } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
 intel_vgpu_release_msi_eventfd_ctx(vgpu);

 return 0;
 }

-static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
 unsigned int index, unsigned int start, unsigned int count,
 void *data)
 {
 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
- unsigned int start, unsigned int count, uint32_t flags,
+ unsigned int start, unsigned int count, u32 flags,
 void *data) = NULL;

 switch (index) {
@@ -1125,6 +1346,7 @@
 unsigned long arg)
 {
 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 unsigned long minsz;

 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1143,7 +1365,7 @@
 info.flags = VFIO_DEVICE_FLAGS_PCI;
 info.flags |= VFIO_DEVICE_FLAGS_RESET;
 info.num_regions = VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions;
+ vdev->num_regions;
 info.num_irqs = VFIO_PCI_NUM_IRQS;

 return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1155,7 +1377,6 @@
 unsigned int i;
 int ret;
 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
- size_t size;
 int nr_areas = 1;
 int cap_type_id;

@@ -1198,9 +1419,8 @@
 VFIO_REGION_INFO_FLAG_WRITE;
 info.size = gvt_aperture_sz(vgpu->gvt);

- size = sizeof(*sparse) +
- (nr_areas * sizeof(*sparse->areas));
- sparse = kzalloc(size, GFP_KERNEL);
+ sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+ GFP_KERNEL);
 if (!sparse)
 return -ENOMEM;

@@ -1236,22 +1456,22 @@
 .header.version = 1 };

 if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions)
+ vdev->num_regions)
 return -EINVAL;
 info.index =
 array_index_nospec(info.index,
 VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions);
+ vdev->num_regions);

 i = info.index - VFIO_PCI_NUM_REGIONS;

 info.offset =
 VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->vdev.region[i].size;
- info.flags = vgpu->vdev.region[i].flags;
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;

- cap_type.type = vgpu->vdev.region[i].type;
- cap_type.subtype = vgpu->vdev.region[i].subtype;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;

 ret = vfio_info_add_capability(&caps,
 &cap_type.header,
@@ -1265,9 +1485,9 @@
 switch (cap_type_id) {
 case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
 ret = vfio_info_add_capability(&caps,
- &sparse->header, sizeof(*sparse) +
- (sparse->nr_areas *
- sizeof(*sparse->areas)));
+ &sparse->header,
+ struct_size(sparse, areas,
+ sparse->nr_areas));
 if (ret) {
 kfree(sparse);
 return ret;
@@ -1415,27 +1635,10 @@
 return sprintf(buf, "\n");
 }

-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%u\n",
- vgpu->submission.shadow_ctx->hw_id);
- }
- return sprintf(buf, "\n");
-}
-
 static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);

 static struct attribute *intel_vgpu_attrs[] = {
 &dev_attr_vgpu_id.attr,
- &dev_attr_hw_id.attr,
 NULL
 };

@@ -1465,19 +1668,17 @@

 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
 {
- struct attribute **kvm_type_attrs;
 struct attribute_group **kvm_vgpu_type_groups;

 intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
- &kvm_vgpu_type_groups))
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
 return -EFAULT;
 intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;

 return mdev_register_device(dev, &intel_vgpu_ops);
 }

-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void kvmgt_host_exit(struct device *dev)
 {
 mdev_unregister_device(dev);
 }
@@ -1610,13 +1811,15 @@
 {
 struct kvmgt_guest_info *info;
 struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
 struct kvm *kvm;

 vgpu = mdev_get_drvdata(mdev);
 if (handle_valid(vgpu->handle))
 return -EEXIST;

- kvm = vgpu->vdev.kvm;
+ vdev = kvmgt_vdev(vgpu);
+ kvm = vdev->kvm;
 if (!kvm || kvm->mm != current->mm) {
 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
 return -ESRCH;
@@ -1637,8 +1840,6 @@
 kvmgt_protect_table_init(info);
 gvt_cache_init(vgpu);

- init_completion(&vgpu->vblank_done);
-
 info->track_node.track_write = kvmgt_page_track_write;
 info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
 kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1646,10 +1847,7 @@
 info->debugfs_cache_entries = debugfs_create_ulong(
 "kvmgt_nr_cache_entries",
 0444, vgpu->debugfs,
- &vgpu->vdev.nr_cache_entries);
- if (!info->debugfs_cache_entries)
- gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
-
+ &vdev->nr_cache_entries);
 return 0;
 }

@@ -1666,27 +1864,52 @@
 return true;
 }

-static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
 {
- /* nothing to do here */
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
+
+ if (!vgpu->vdev)
+ return -ENOMEM;
+
+ kvmgt_vdev(vgpu)->vgpu = vgpu;
+
 return 0;
 }

-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
 {
- /* nothing to do here */
+ int i;
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ if (!vdev->region)
+ return;
+
+ for (i = 0; i < vdev->num_regions; i++)
+ if (vdev->region[i].ops->release)
+ vdev->region[i].ops->release(vgpu,
+ &vdev->region[i]);
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL;
+
+ kfree(vdev);
 }

 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 {
 struct kvmgt_guest_info *info;
 struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;

 if (!handle_valid(handle))
 return -ESRCH;

 info = (struct kvmgt_guest_info *)handle;
 vgpu = info->vgpu;
+ vdev = kvmgt_vdev(vgpu);

 /*
 * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
@@ -1697,10 +1920,10 @@
 * enabled by guest. so if msi_trigger is null, success is still
 * returned and don't inject interrupt into guest.
 */
- if (vgpu->vdev.msi_trigger == NULL)
+ if (vdev->msi_trigger == NULL)
 return 0;

- if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ if (eventfd_signal(vdev->msi_trigger, 1) == 1)
 return 0;

 return -EFAULT;
@@ -1723,29 +1946,29 @@
 return pfn;
 }

-int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 unsigned long size, dma_addr_t *dma_addr)
 {
- struct kvmgt_guest_info *info;
 struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
 struct gvt_dma *entry;
 int ret;

 if (!handle_valid(handle))
 return -EINVAL;

- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);

- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);

- entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ entry = __gvt_cache_find_gfn(vgpu, gfn);
 if (!entry) {
 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
 if (ret)
 goto err_unlock;

- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
 if (ret)
 goto err_unmap;
 } else if (entry->size != size) {
@@ -1757,7 +1980,7 @@
 if (ret)
 goto err_unlock;

- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
 if (ret)
 goto err_unmap;
 } else {
@@ -1765,13 +1988,37 @@
 *dma_addr = entry->dma_addr;
 }

- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
 return 0;

 err_unmap:
 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
 err_unlock:
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
+ return ret;
+}
+
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+ struct kvmgt_guest_info *info;
+ struct kvmgt_vdev *vdev;
+ struct gvt_dma *entry;
+ int ret = 0;
+
+ if (!handle_valid(handle))
+ return -ENODEV;
+
+ info = (struct kvmgt_guest_info *)handle;
+ vdev = kvmgt_vdev(info->vgpu);
+
+ mutex_lock(&vdev->cache_lock);
+ entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ if (entry)
+ kref_get(&entry->ref);
+ else
+ ret = -ENOMEM;
+ mutex_unlock(&vdev->cache_lock);
+
 return ret;
 }

@@ -1784,54 +2031,37 @@
 __gvt_cache_remove_entry(entry->vgpu, entry);
 }

-void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
 {
- struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
 struct gvt_dma *entry;

 if (!handle_valid(handle))
 return;

- info = (struct kvmgt_guest_info *)handle;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);

- mutex_lock(&info->vgpu->vdev.cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vdev->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
 if (entry)
 kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
 }

 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 void *buf, unsigned long len, bool write)
 {
 struct kvmgt_guest_info *info;
- struct kvm *kvm;
- int idx, ret;
- bool kthread = current->mm == NULL;

 if (!handle_valid(handle))
 return -ESRCH;

 info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;

- if (kthread) {
- if (!mmget_not_zero(kvm->mm))
- return -EFAULT;
- use_mm(kvm->mm);
- }
-
- idx = srcu_read_lock(&kvm->srcu);
- ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
- kvm_read_guest(kvm, gpa, buf, len);
- srcu_read_unlock(&kvm->srcu, idx);
-
- if (kthread) {
- unuse_mm(kvm->mm);
- mmput(kvm->mm);
- }
-
- return ret;
+ return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+ gpa, buf, len, write);
 }

 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
@@ -1871,7 +2101,8 @@
 return ret;
 }

-struct intel_gvt_mpt kvmgt_mpt = {
+static struct intel_gvt_mpt kvmgt_mpt = {
+ .type = INTEL_GVT_HYPERVISOR_KVM,
 .host_init = kvmgt_host_init,
 .host_exit = kvmgt_host_exit,
 .attach_vgpu = kvmgt_attach_vgpu,
@@ -1885,20 +2116,24 @@
 .gfn_to_mfn = kvmgt_gfn_to_pfn,
 .dma_map_guest_page = kvmgt_dma_map_guest_page,
 .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+ .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
 .set_opregion = kvmgt_set_opregion,
+ .set_edid = kvmgt_set_edid,
 .get_vfio_device = kvmgt_get_vfio_device,
 .put_vfio_device = kvmgt_put_vfio_device,
 .is_valid_gfn = kvmgt_is_valid_gfn,
 };
-EXPORT_SYMBOL_GPL(kvmgt_mpt);

 static int __init kvmgt_init(void)
 {
+ if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
+ return -ENODEV;
 return 0;
 }

 static void __exit kvmgt_exit(void)
 {
+ intel_gvt_unregister_hypervisor();
 }

 module_init(kvmgt_init);