.. | .. |
---|
31 | 31 | #include <linux/init.h> |
---|
32 | 32 | #include <linux/device.h> |
---|
33 | 33 | #include <linux/mm.h> |
---|
34 | | -#include <linux/mmu_context.h> |
---|
| 34 | +#include <linux/kthread.h> |
---|
35 | 35 | #include <linux/sched/mm.h> |
---|
36 | 36 | #include <linux/types.h> |
---|
37 | 37 | #include <linux/list.h> |
---|
.. | .. |
---|
57 | 57 | #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) |
---|
58 | 58 | #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) |
---|
59 | 59 | |
---|
| 60 | +#define EDID_BLOB_OFFSET (PAGE_SIZE/2) |
---|
| 61 | + |
---|
60 | 62 | #define OPREGION_SIGNATURE "IntelGraphicsMem" |
---|
61 | 63 | |
---|
62 | 64 | struct vfio_region; |
---|
.. | .. |
---|
74 | 76 | u32 flags; |
---|
75 | 77 | const struct intel_vgpu_regops *ops; |
---|
76 | 78 | void *data; |
---|
| 79 | +}; |
---|
| 80 | + |
---|
| 81 | +struct vfio_edid_region { |
---|
| 82 | + struct vfio_region_gfx_edid vfio_edid_regs; |
---|
| 83 | + void *edid_blob; |
---|
77 | 84 | }; |
---|
78 | 85 | |
---|
79 | 86 | struct kvmgt_pgfn { |
---|
.. | .. |
---|
101 | 108 | struct kref ref; |
---|
102 | 109 | }; |
---|
103 | 110 | |
---|
| 111 | +struct kvmgt_vdev { |
---|
| 112 | + struct intel_vgpu *vgpu; |
---|
| 113 | + struct mdev_device *mdev; |
---|
| 114 | + struct vfio_region *region; |
---|
| 115 | + int num_regions; |
---|
| 116 | + struct eventfd_ctx *intx_trigger; |
---|
| 117 | + struct eventfd_ctx *msi_trigger; |
---|
| 118 | + |
---|
| 119 | + /* |
---|
| 120 | + * Two caches are used to avoid mapping duplicated pages (e.g. |
---|
| 121 | + * scratch pages). This helps to reduce dma setup overhead. |
---|
| 122 | + */ |
---|
| 123 | + struct rb_root gfn_cache; |
---|
| 124 | + struct rb_root dma_addr_cache; |
---|
| 125 | + unsigned long nr_cache_entries; |
---|
| 126 | + struct mutex cache_lock; |
---|
| 127 | + |
---|
| 128 | + struct notifier_block iommu_notifier; |
---|
| 129 | + struct notifier_block group_notifier; |
---|
| 130 | + struct kvm *kvm; |
---|
| 131 | + struct work_struct release_work; |
---|
| 132 | + atomic_t released; |
---|
| 133 | + struct vfio_device *vfio_device; |
---|
| 134 | + struct vfio_group *vfio_group; |
---|
| 135 | +}; |
---|
| 136 | + |
---|
| 137 | +static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu) |
---|
| 138 | +{ |
---|
| 139 | + return intel_vgpu_vdev(vgpu); |
---|
| 140 | +} |
---|
| 141 | + |
---|
104 | 142 | static inline bool handle_valid(unsigned long handle) |
---|
105 | 143 | { |
---|
106 | 144 | return !!(handle & ~0xff); |
---|
.. | .. |
---|
113 | 151 | static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, |
---|
114 | 152 | unsigned long size) |
---|
115 | 153 | { |
---|
| 154 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
| 155 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
116 | 156 | int total_pages; |
---|
117 | 157 | int npage; |
---|
118 | 158 | int ret; |
---|
.. | .. |
---|
122 | 162 | for (npage = 0; npage < total_pages; npage++) { |
---|
123 | 163 | unsigned long cur_gfn = gfn + npage; |
---|
124 | 164 | |
---|
125 | | - ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1); |
---|
126 | | - WARN_ON(ret != 1); |
---|
| 165 | + ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1); |
---|
| 166 | + drm_WARN_ON(&i915->drm, ret != 1); |
---|
127 | 167 | } |
---|
128 | 168 | } |
---|
129 | 169 | |
---|
.. | .. |
---|
131 | 171 | static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, |
---|
132 | 172 | unsigned long size, struct page **page) |
---|
133 | 173 | { |
---|
| 174 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
134 | 175 | unsigned long base_pfn = 0; |
---|
135 | 176 | int total_pages; |
---|
136 | 177 | int npage; |
---|
.. | .. |
---|
145 | 186 | unsigned long cur_gfn = gfn + npage; |
---|
146 | 187 | unsigned long pfn; |
---|
147 | 188 | |
---|
148 | | - ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1, |
---|
149 | | - IOMMU_READ | IOMMU_WRITE, &pfn); |
---|
| 189 | + ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1, |
---|
| 190 | + IOMMU_READ | IOMMU_WRITE, &pfn); |
---|
150 | 191 | if (ret != 1) { |
---|
151 | 192 | gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", |
---|
152 | 193 | cur_gfn, ret); |
---|
.. | .. |
---|
180 | 221 | static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, |
---|
181 | 222 | dma_addr_t *dma_addr, unsigned long size) |
---|
182 | 223 | { |
---|
183 | | - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; |
---|
| 224 | + struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; |
---|
184 | 225 | struct page *page = NULL; |
---|
185 | 226 | int ret; |
---|
186 | 227 | |
---|
.. | .. |
---|
203 | 244 | static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, |
---|
204 | 245 | dma_addr_t dma_addr, unsigned long size) |
---|
205 | 246 | { |
---|
206 | | - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; |
---|
| 247 | + struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; |
---|
207 | 248 | |
---|
208 | 249 | dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); |
---|
209 | 250 | gvt_unpin_guest_page(vgpu, gfn, size); |
---|
.. | .. |
---|
212 | 253 | static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, |
---|
213 | 254 | dma_addr_t dma_addr) |
---|
214 | 255 | { |
---|
215 | | - struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node; |
---|
| 256 | + struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node; |
---|
216 | 257 | struct gvt_dma *itr; |
---|
217 | 258 | |
---|
218 | 259 | while (node) { |
---|
.. | .. |
---|
230 | 271 | |
---|
231 | 272 | static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) |
---|
232 | 273 | { |
---|
233 | | - struct rb_node *node = vgpu->vdev.gfn_cache.rb_node; |
---|
| 274 | + struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node; |
---|
234 | 275 | struct gvt_dma *itr; |
---|
235 | 276 | |
---|
236 | 277 | while (node) { |
---|
.. | .. |
---|
251 | 292 | { |
---|
252 | 293 | struct gvt_dma *new, *itr; |
---|
253 | 294 | struct rb_node **link, *parent = NULL; |
---|
| 295 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
254 | 296 | |
---|
255 | 297 | new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); |
---|
256 | 298 | if (!new) |
---|
.. | .. |
---|
263 | 305 | kref_init(&new->ref); |
---|
264 | 306 | |
---|
265 | 307 | /* gfn_cache maps gfn to struct gvt_dma. */ |
---|
266 | | - link = &vgpu->vdev.gfn_cache.rb_node; |
---|
| 308 | + link = &vdev->gfn_cache.rb_node; |
---|
267 | 309 | while (*link) { |
---|
268 | 310 | parent = *link; |
---|
269 | 311 | itr = rb_entry(parent, struct gvt_dma, gfn_node); |
---|
.. | .. |
---|
274 | 316 | link = &parent->rb_right; |
---|
275 | 317 | } |
---|
276 | 318 | rb_link_node(&new->gfn_node, parent, link); |
---|
277 | | - rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache); |
---|
| 319 | + rb_insert_color(&new->gfn_node, &vdev->gfn_cache); |
---|
278 | 320 | |
---|
279 | 321 | /* dma_addr_cache maps dma addr to struct gvt_dma. */ |
---|
280 | 322 | parent = NULL; |
---|
281 | | - link = &vgpu->vdev.dma_addr_cache.rb_node; |
---|
| 323 | + link = &vdev->dma_addr_cache.rb_node; |
---|
282 | 324 | while (*link) { |
---|
283 | 325 | parent = *link; |
---|
284 | 326 | itr = rb_entry(parent, struct gvt_dma, dma_addr_node); |
---|
.. | .. |
---|
289 | 331 | link = &parent->rb_right; |
---|
290 | 332 | } |
---|
291 | 333 | rb_link_node(&new->dma_addr_node, parent, link); |
---|
292 | | - rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache); |
---|
| 334 | + rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache); |
---|
293 | 335 | |
---|
294 | | - vgpu->vdev.nr_cache_entries++; |
---|
| 336 | + vdev->nr_cache_entries++; |
---|
295 | 337 | return 0; |
---|
296 | 338 | } |
---|
297 | 339 | |
---|
298 | 340 | static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, |
---|
299 | 341 | struct gvt_dma *entry) |
---|
300 | 342 | { |
---|
301 | | - rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache); |
---|
302 | | - rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache); |
---|
| 343 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
| 344 | + |
---|
| 345 | + rb_erase(&entry->gfn_node, &vdev->gfn_cache); |
---|
| 346 | + rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache); |
---|
303 | 347 | kfree(entry); |
---|
304 | | - vgpu->vdev.nr_cache_entries--; |
---|
| 348 | + vdev->nr_cache_entries--; |
---|
305 | 349 | } |
---|
306 | 350 | |
---|
307 | 351 | static void gvt_cache_destroy(struct intel_vgpu *vgpu) |
---|
308 | 352 | { |
---|
309 | 353 | struct gvt_dma *dma; |
---|
310 | 354 | struct rb_node *node = NULL; |
---|
| 355 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
311 | 356 | |
---|
312 | 357 | for (;;) { |
---|
313 | | - mutex_lock(&vgpu->vdev.cache_lock); |
---|
314 | | - node = rb_first(&vgpu->vdev.gfn_cache); |
---|
| 358 | + mutex_lock(&vdev->cache_lock); |
---|
| 359 | + node = rb_first(&vdev->gfn_cache); |
---|
315 | 360 | if (!node) { |
---|
316 | | - mutex_unlock(&vgpu->vdev.cache_lock); |
---|
| 361 | + mutex_unlock(&vdev->cache_lock); |
---|
317 | 362 | break; |
---|
318 | 363 | } |
---|
319 | 364 | dma = rb_entry(node, struct gvt_dma, gfn_node); |
---|
320 | 365 | gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); |
---|
321 | 366 | __gvt_cache_remove_entry(vgpu, dma); |
---|
322 | | - mutex_unlock(&vgpu->vdev.cache_lock); |
---|
| 367 | + mutex_unlock(&vdev->cache_lock); |
---|
323 | 368 | } |
---|
324 | 369 | } |
---|
325 | 370 | |
---|
326 | 371 | static void gvt_cache_init(struct intel_vgpu *vgpu) |
---|
327 | 372 | { |
---|
328 | | - vgpu->vdev.gfn_cache = RB_ROOT; |
---|
329 | | - vgpu->vdev.dma_addr_cache = RB_ROOT; |
---|
330 | | - vgpu->vdev.nr_cache_entries = 0; |
---|
331 | | - mutex_init(&vgpu->vdev.cache_lock); |
---|
| 373 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
| 374 | + |
---|
| 375 | + vdev->gfn_cache = RB_ROOT; |
---|
| 376 | + vdev->dma_addr_cache = RB_ROOT; |
---|
| 377 | + vdev->nr_cache_entries = 0; |
---|
| 378 | + mutex_init(&vdev->cache_lock); |
---|
332 | 379 | } |
---|
333 | 380 | |
---|
334 | 381 | static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) |
---|
.. | .. |
---|
402 | 449 | static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf, |
---|
403 | 450 | size_t count, loff_t *ppos, bool iswrite) |
---|
404 | 451 | { |
---|
| 452 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
405 | 453 | unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - |
---|
406 | 454 | VFIO_PCI_NUM_REGIONS; |
---|
407 | | - void *base = vgpu->vdev.region[i].data; |
---|
| 455 | + void *base = vdev->region[i].data; |
---|
408 | 456 | loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; |
---|
409 | 457 | |
---|
410 | | - if (pos >= vgpu->vdev.region[i].size || iswrite) { |
---|
| 458 | + |
---|
| 459 | + if (pos >= vdev->region[i].size || iswrite) { |
---|
411 | 460 | gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n"); |
---|
412 | 461 | return -EINVAL; |
---|
413 | 462 | } |
---|
414 | | - count = min(count, (size_t)(vgpu->vdev.region[i].size - pos)); |
---|
| 463 | + count = min(count, (size_t)(vdev->region[i].size - pos)); |
---|
415 | 464 | memcpy(buf, base + pos, count); |
---|
416 | 465 | |
---|
417 | 466 | return count; |
---|
.. | .. |
---|
427 | 476 | .release = intel_vgpu_reg_release_opregion, |
---|
428 | 477 | }; |
---|
429 | 478 | |
---|
| 479 | +static int handle_edid_regs(struct intel_vgpu *vgpu, |
---|
| 480 | + struct vfio_edid_region *region, char *buf, |
---|
| 481 | + size_t count, u16 offset, bool is_write) |
---|
| 482 | +{ |
---|
| 483 | + struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs; |
---|
| 484 | + unsigned int data; |
---|
| 485 | + |
---|
| 486 | + if (offset + count > sizeof(*regs)) |
---|
| 487 | + return -EINVAL; |
---|
| 488 | + |
---|
| 489 | + if (count != 4) |
---|
| 490 | + return -EINVAL; |
---|
| 491 | + |
---|
| 492 | + if (is_write) { |
---|
| 493 | + data = *((unsigned int *)buf); |
---|
| 494 | + switch (offset) { |
---|
| 495 | + case offsetof(struct vfio_region_gfx_edid, link_state): |
---|
| 496 | + if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) { |
---|
| 497 | + if (!drm_edid_block_valid( |
---|
| 498 | + (u8 *)region->edid_blob, |
---|
| 499 | + 0, |
---|
| 500 | + true, |
---|
| 501 | + NULL)) { |
---|
| 502 | + gvt_vgpu_err("invalid EDID blob\n"); |
---|
| 503 | + return -EINVAL; |
---|
| 504 | + } |
---|
| 505 | + intel_gvt_ops->emulate_hotplug(vgpu, true); |
---|
| 506 | + } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) |
---|
| 507 | + intel_gvt_ops->emulate_hotplug(vgpu, false); |
---|
| 508 | + else { |
---|
| 509 | + gvt_vgpu_err("invalid EDID link state %d\n", |
---|
| 510 | + regs->link_state); |
---|
| 511 | + return -EINVAL; |
---|
| 512 | + } |
---|
| 513 | + regs->link_state = data; |
---|
| 514 | + break; |
---|
| 515 | + case offsetof(struct vfio_region_gfx_edid, edid_size): |
---|
| 516 | + if (data > regs->edid_max_size) { |
---|
| 517 | + gvt_vgpu_err("EDID size is bigger than %d!\n", |
---|
| 518 | + regs->edid_max_size); |
---|
| 519 | + return -EINVAL; |
---|
| 520 | + } |
---|
| 521 | + regs->edid_size = data; |
---|
| 522 | + break; |
---|
| 523 | + default: |
---|
| 524 | + /* read-only regs */ |
---|
| 525 | + gvt_vgpu_err("write read-only EDID region at offset %d\n", |
---|
| 526 | + offset); |
---|
| 527 | + return -EPERM; |
---|
| 528 | + } |
---|
| 529 | + } else { |
---|
| 530 | + memcpy(buf, (char *)regs + offset, count); |
---|
| 531 | + } |
---|
| 532 | + |
---|
| 533 | + return count; |
---|
| 534 | +} |
---|
| 535 | + |
---|
| 536 | +static int handle_edid_blob(struct vfio_edid_region *region, char *buf, |
---|
| 537 | + size_t count, u16 offset, bool is_write) |
---|
| 538 | +{ |
---|
| 539 | + if (offset + count > region->vfio_edid_regs.edid_size) |
---|
| 540 | + return -EINVAL; |
---|
| 541 | + |
---|
| 542 | + if (is_write) |
---|
| 543 | + memcpy(region->edid_blob + offset, buf, count); |
---|
| 544 | + else |
---|
| 545 | + memcpy(buf, region->edid_blob + offset, count); |
---|
| 546 | + |
---|
| 547 | + return count; |
---|
| 548 | +} |
---|
| 549 | + |
---|
| 550 | +static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf, |
---|
| 551 | + size_t count, loff_t *ppos, bool iswrite) |
---|
| 552 | +{ |
---|
| 553 | + int ret; |
---|
| 554 | + unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - |
---|
| 555 | + VFIO_PCI_NUM_REGIONS; |
---|
| 556 | + struct vfio_edid_region *region = |
---|
| 557 | + (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data; |
---|
| 558 | + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; |
---|
| 559 | + |
---|
| 560 | + if (pos < region->vfio_edid_regs.edid_offset) { |
---|
| 561 | + ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite); |
---|
| 562 | + } else { |
---|
| 563 | + pos -= EDID_BLOB_OFFSET; |
---|
| 564 | + ret = handle_edid_blob(region, buf, count, pos, iswrite); |
---|
| 565 | + } |
---|
| 566 | + |
---|
| 567 | + if (ret < 0) |
---|
| 568 | + gvt_vgpu_err("failed to access EDID region\n"); |
---|
| 569 | + |
---|
| 570 | + return ret; |
---|
| 571 | +} |
---|
| 572 | + |
---|
| 573 | +static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu, |
---|
| 574 | + struct vfio_region *region) |
---|
| 575 | +{ |
---|
| 576 | + kfree(region->data); |
---|
| 577 | +} |
---|
| 578 | + |
---|
| 579 | +static const struct intel_vgpu_regops intel_vgpu_regops_edid = { |
---|
| 580 | + .rw = intel_vgpu_reg_rw_edid, |
---|
| 581 | + .release = intel_vgpu_reg_release_edid, |
---|
| 582 | +}; |
---|
| 583 | + |
---|
430 | 584 | static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, |
---|
431 | 585 | unsigned int type, unsigned int subtype, |
---|
432 | 586 | const struct intel_vgpu_regops *ops, |
---|
433 | 587 | size_t size, u32 flags, void *data) |
---|
434 | 588 | { |
---|
| 589 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
435 | 590 | struct vfio_region *region; |
---|
436 | 591 | |
---|
437 | | - region = krealloc(vgpu->vdev.region, |
---|
438 | | - (vgpu->vdev.num_regions + 1) * sizeof(*region), |
---|
| 592 | + region = krealloc(vdev->region, |
---|
| 593 | + (vdev->num_regions + 1) * sizeof(*region), |
---|
439 | 594 | GFP_KERNEL); |
---|
440 | 595 | if (!region) |
---|
441 | 596 | return -ENOMEM; |
---|
442 | 597 | |
---|
443 | | - vgpu->vdev.region = region; |
---|
444 | | - vgpu->vdev.region[vgpu->vdev.num_regions].type = type; |
---|
445 | | - vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype; |
---|
446 | | - vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops; |
---|
447 | | - vgpu->vdev.region[vgpu->vdev.num_regions].size = size; |
---|
448 | | - vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags; |
---|
449 | | - vgpu->vdev.region[vgpu->vdev.num_regions].data = data; |
---|
450 | | - vgpu->vdev.num_regions++; |
---|
| 598 | + vdev->region = region; |
---|
| 599 | + vdev->region[vdev->num_regions].type = type; |
---|
| 600 | + vdev->region[vdev->num_regions].subtype = subtype; |
---|
| 601 | + vdev->region[vdev->num_regions].ops = ops; |
---|
| 602 | + vdev->region[vdev->num_regions].size = size; |
---|
| 603 | + vdev->region[vdev->num_regions].flags = flags; |
---|
| 604 | + vdev->region[vdev->num_regions].data = data; |
---|
| 605 | + vdev->num_regions++; |
---|
451 | 606 | return 0; |
---|
452 | 607 | } |
---|
453 | 608 | |
---|
454 | 609 | static int kvmgt_get_vfio_device(void *p_vgpu) |
---|
455 | 610 | { |
---|
456 | 611 | struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; |
---|
| 612 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
457 | 613 | |
---|
458 | | - vgpu->vdev.vfio_device = vfio_device_get_from_dev( |
---|
459 | | - mdev_dev(vgpu->vdev.mdev)); |
---|
460 | | - if (!vgpu->vdev.vfio_device) { |
---|
| 614 | + vdev->vfio_device = vfio_device_get_from_dev( |
---|
| 615 | + mdev_dev(vdev->mdev)); |
---|
| 616 | + if (!vdev->vfio_device) { |
---|
461 | 617 | gvt_vgpu_err("failed to get vfio device\n"); |
---|
462 | 618 | return -ENODEV; |
---|
463 | 619 | } |
---|
.. | .. |
---|
493 | 649 | return ret; |
---|
494 | 650 | } |
---|
495 | 651 | |
---|
| 652 | +static int kvmgt_set_edid(void *p_vgpu, int port_num) |
---|
| 653 | +{ |
---|
| 654 | + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; |
---|
| 655 | + struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); |
---|
| 656 | + struct vfio_edid_region *base; |
---|
| 657 | + int ret; |
---|
| 658 | + |
---|
| 659 | + base = kzalloc(sizeof(*base), GFP_KERNEL); |
---|
| 660 | + if (!base) |
---|
| 661 | + return -ENOMEM; |
---|
| 662 | + |
---|
| 663 | + /* TODO: Add multi-port and EDID extension block support */ |
---|
| 664 | + base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET; |
---|
| 665 | + base->vfio_edid_regs.edid_max_size = EDID_SIZE; |
---|
| 666 | + base->vfio_edid_regs.edid_size = EDID_SIZE; |
---|
| 667 | + base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id); |
---|
| 668 | + base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id); |
---|
| 669 | + base->edid_blob = port->edid->edid_block; |
---|
| 670 | + |
---|
| 671 | + ret = intel_vgpu_register_reg(vgpu, |
---|
| 672 | + VFIO_REGION_TYPE_GFX, |
---|
| 673 | + VFIO_REGION_SUBTYPE_GFX_EDID, |
---|
| 674 | + &intel_vgpu_regops_edid, EDID_SIZE, |
---|
| 675 | + VFIO_REGION_INFO_FLAG_READ | |
---|
| 676 | + VFIO_REGION_INFO_FLAG_WRITE | |
---|
| 677 | + VFIO_REGION_INFO_FLAG_CAPS, base); |
---|
| 678 | + |
---|
| 679 | + return ret; |
---|
| 680 | +} |
---|
| 681 | + |
---|
496 | 682 | static void kvmgt_put_vfio_device(void *vgpu) |
---|
497 | 683 | { |
---|
498 | | - if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device)) |
---|
| 684 | + struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu); |
---|
| 685 | + |
---|
| 686 | + if (WARN_ON(!vdev->vfio_device)) |
---|
499 | 687 | return; |
---|
500 | 688 | |
---|
501 | | - vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device); |
---|
| 689 | + vfio_device_put(vdev->vfio_device); |
---|
502 | 690 | } |
---|
503 | 691 | |
---|
504 | 692 | static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) |
---|
.. | .. |
---|
527 | 715 | goto out; |
---|
528 | 716 | } |
---|
529 | 717 | |
---|
530 | | - INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); |
---|
| 718 | + INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work); |
---|
531 | 719 | |
---|
532 | | - vgpu->vdev.mdev = mdev; |
---|
| 720 | + kvmgt_vdev(vgpu)->mdev = mdev; |
---|
533 | 721 | mdev_set_drvdata(mdev, vgpu); |
---|
534 | 722 | |
---|
535 | 723 | gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", |
---|
.. | .. |
---|
554 | 742 | static int intel_vgpu_iommu_notifier(struct notifier_block *nb, |
---|
555 | 743 | unsigned long action, void *data) |
---|
556 | 744 | { |
---|
557 | | - struct intel_vgpu *vgpu = container_of(nb, |
---|
558 | | - struct intel_vgpu, |
---|
559 | | - vdev.iommu_notifier); |
---|
| 745 | + struct kvmgt_vdev *vdev = container_of(nb, |
---|
| 746 | + struct kvmgt_vdev, |
---|
| 747 | + iommu_notifier); |
---|
| 748 | + struct intel_vgpu *vgpu = vdev->vgpu; |
---|
560 | 749 | |
---|
561 | 750 | if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { |
---|
562 | 751 | struct vfio_iommu_type1_dma_unmap *unmap = data; |
---|
.. | .. |
---|
566 | 755 | iov_pfn = unmap->iova >> PAGE_SHIFT; |
---|
567 | 756 | end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE; |
---|
568 | 757 | |
---|
569 | | - mutex_lock(&vgpu->vdev.cache_lock); |
---|
| 758 | + mutex_lock(&vdev->cache_lock); |
---|
570 | 759 | for (; iov_pfn < end_iov_pfn; iov_pfn++) { |
---|
571 | 760 | entry = __gvt_cache_find_gfn(vgpu, iov_pfn); |
---|
572 | 761 | if (!entry) |
---|
.. | .. |
---|
576 | 765 | entry->size); |
---|
577 | 766 | __gvt_cache_remove_entry(vgpu, entry); |
---|
578 | 767 | } |
---|
579 | | - mutex_unlock(&vgpu->vdev.cache_lock); |
---|
| 768 | + mutex_unlock(&vdev->cache_lock); |
---|
580 | 769 | } |
---|
581 | 770 | |
---|
582 | 771 | return NOTIFY_OK; |
---|
.. | .. |
---|
585 | 774 | static int intel_vgpu_group_notifier(struct notifier_block *nb, |
---|
586 | 775 | unsigned long action, void *data) |
---|
587 | 776 | { |
---|
588 | | - struct intel_vgpu *vgpu = container_of(nb, |
---|
589 | | - struct intel_vgpu, |
---|
590 | | - vdev.group_notifier); |
---|
| 777 | + struct kvmgt_vdev *vdev = container_of(nb, |
---|
| 778 | + struct kvmgt_vdev, |
---|
| 779 | + group_notifier); |
---|
591 | 780 | |
---|
592 | 781 | /* the only action we care about */ |
---|
593 | 782 | if (action == VFIO_GROUP_NOTIFY_SET_KVM) { |
---|
594 | | - vgpu->vdev.kvm = data; |
---|
| 783 | + vdev->kvm = data; |
---|
595 | 784 | |
---|
596 | 785 | if (!data) |
---|
597 | | - schedule_work(&vgpu->vdev.release_work); |
---|
| 786 | + schedule_work(&vdev->release_work); |
---|
598 | 787 | } |
---|
599 | 788 | |
---|
600 | 789 | return NOTIFY_OK; |
---|
.. | .. |
---|
603 | 792 | static int intel_vgpu_open(struct mdev_device *mdev) |
---|
604 | 793 | { |
---|
605 | 794 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
---|
| 795 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
606 | 796 | unsigned long events; |
---|
607 | 797 | int ret; |
---|
| 798 | + struct vfio_group *vfio_group; |
---|
608 | 799 | |
---|
609 | | - vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; |
---|
610 | | - vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier; |
---|
| 800 | + vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; |
---|
| 801 | + vdev->group_notifier.notifier_call = intel_vgpu_group_notifier; |
---|
611 | 802 | |
---|
612 | 803 | events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; |
---|
613 | 804 | ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, |
---|
614 | | - &vgpu->vdev.iommu_notifier); |
---|
| 805 | + &vdev->iommu_notifier); |
---|
615 | 806 | if (ret != 0) { |
---|
616 | 807 | gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", |
---|
617 | 808 | ret); |
---|
.. | .. |
---|
620 | 811 | |
---|
621 | 812 | events = VFIO_GROUP_NOTIFY_SET_KVM; |
---|
622 | 813 | ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, |
---|
623 | | - &vgpu->vdev.group_notifier); |
---|
| 814 | + &vdev->group_notifier); |
---|
624 | 815 | if (ret != 0) { |
---|
625 | 816 | gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", |
---|
626 | 817 | ret); |
---|
627 | 818 | goto undo_iommu; |
---|
| 819 | + } |
---|
| 820 | + |
---|
| 821 | + vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev)); |
---|
| 822 | + if (IS_ERR_OR_NULL(vfio_group)) { |
---|
| 823 | + ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group); |
---|
| 824 | + gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); |
---|
| 825 | + goto undo_register; |
---|
| 826 | + } |
---|
| 827 | + vdev->vfio_group = vfio_group; |
---|
| 828 | + |
---|
| 829 | + /* Take a module reference as mdev core doesn't take |
---|
| 830 | + * a reference for vendor driver. |
---|
| 831 | + */ |
---|
| 832 | + if (!try_module_get(THIS_MODULE)) { |
---|
| 833 | + ret = -ENODEV; |
---|
| 834 | + goto undo_group; |
---|
628 | 835 | } |
---|
629 | 836 | |
---|
630 | 837 | ret = kvmgt_guest_init(mdev); |
---|
.. | .. |
---|
633 | 840 | |
---|
634 | 841 | intel_gvt_ops->vgpu_activate(vgpu); |
---|
635 | 842 | |
---|
636 | | - atomic_set(&vgpu->vdev.released, 0); |
---|
| 843 | + atomic_set(&vdev->released, 0); |
---|
637 | 844 | return ret; |
---|
638 | 845 | |
---|
639 | 846 | undo_group: |
---|
| 847 | + vfio_group_put_external_user(vdev->vfio_group); |
---|
| 848 | + vdev->vfio_group = NULL; |
---|
| 849 | + |
---|
| 850 | +undo_register: |
---|
640 | 851 | vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, |
---|
641 | | - &vgpu->vdev.group_notifier); |
---|
| 852 | + &vdev->group_notifier); |
---|
642 | 853 | |
---|
643 | 854 | undo_iommu: |
---|
644 | 855 | vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, |
---|
645 | | - &vgpu->vdev.iommu_notifier); |
---|
| 856 | + &vdev->iommu_notifier); |
---|
646 | 857 | out: |
---|
647 | 858 | return ret; |
---|
648 | 859 | } |
---|
649 | 860 | |
---|
650 | 861 | static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) |
---|
651 | 862 | { |
---|
| 863 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
652 | 864 | struct eventfd_ctx *trigger; |
---|
653 | 865 | |
---|
654 | | - trigger = vgpu->vdev.msi_trigger; |
---|
| 866 | + trigger = vdev->msi_trigger; |
---|
655 | 867 | if (trigger) { |
---|
656 | 868 | eventfd_ctx_put(trigger); |
---|
657 | | - vgpu->vdev.msi_trigger = NULL; |
---|
| 869 | + vdev->msi_trigger = NULL; |
---|
658 | 870 | } |
---|
659 | 871 | } |
---|
660 | 872 | |
---|
661 | 873 | static void __intel_vgpu_release(struct intel_vgpu *vgpu) |
---|
662 | 874 | { |
---|
| 875 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
| 876 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
663 | 877 | struct kvmgt_guest_info *info; |
---|
664 | 878 | int ret; |
---|
665 | 879 | |
---|
666 | 880 | if (!handle_valid(vgpu->handle)) |
---|
667 | 881 | return; |
---|
668 | 882 | |
---|
669 | | - if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) |
---|
| 883 | + if (atomic_cmpxchg(&vdev->released, 0, 1)) |
---|
670 | 884 | return; |
---|
671 | 885 | |
---|
672 | 886 | intel_gvt_ops->vgpu_release(vgpu); |
---|
673 | 887 | |
---|
674 | | - ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, |
---|
675 | | - &vgpu->vdev.iommu_notifier); |
---|
676 | | - WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); |
---|
| 888 | + ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, |
---|
| 889 | + &vdev->iommu_notifier); |
---|
| 890 | + drm_WARN(&i915->drm, ret, |
---|
| 891 | + "vfio_unregister_notifier for iommu failed: %d\n", ret); |
---|
677 | 892 | |
---|
678 | | - ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY, |
---|
679 | | - &vgpu->vdev.group_notifier); |
---|
680 | | - WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret); |
---|
| 893 | + ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY, |
---|
| 894 | + &vdev->group_notifier); |
---|
| 895 | + drm_WARN(&i915->drm, ret, |
---|
| 896 | + "vfio_unregister_notifier for group failed: %d\n", ret); |
---|
| 897 | + |
---|
| 898 | + /* dereference module reference taken at open */ |
---|
| 899 | + module_put(THIS_MODULE); |
---|
681 | 900 | |
---|
682 | 901 | info = (struct kvmgt_guest_info *)vgpu->handle; |
---|
683 | 902 | kvmgt_guest_exit(info); |
---|
684 | 903 | |
---|
685 | 904 | intel_vgpu_release_msi_eventfd_ctx(vgpu); |
---|
| 905 | + vfio_group_put_external_user(vdev->vfio_group); |
---|
686 | 906 | |
---|
687 | | - vgpu->vdev.kvm = NULL; |
---|
| 907 | + vdev->kvm = NULL; |
---|
688 | 908 | vgpu->handle = 0; |
---|
689 | 909 | } |
---|
690 | 910 | |
---|
.. | .. |
---|
697 | 917 | |
---|
698 | 918 | static void intel_vgpu_release_work(struct work_struct *work) |
---|
699 | 919 | { |
---|
700 | | - struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, |
---|
701 | | - vdev.release_work); |
---|
| 920 | + struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev, |
---|
| 921 | + release_work); |
---|
702 | 922 | |
---|
703 | | - __intel_vgpu_release(vgpu); |
---|
| 923 | + __intel_vgpu_release(vdev->vgpu); |
---|
704 | 924 | } |
---|
705 | 925 | |
---|
706 | | -static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) |
---|
| 926 | +static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) |
---|
707 | 927 | { |
---|
708 | 928 | u32 start_lo, start_hi; |
---|
709 | 929 | u32 mem_type; |
---|
.. | .. |
---|
730 | 950 | return ((u64)start_hi << 32) | start_lo; |
---|
731 | 951 | } |
---|
732 | 952 | |
---|
733 | | -static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off, |
---|
| 953 | +static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, |
---|
734 | 954 | void *buf, unsigned int count, bool is_write) |
---|
735 | 955 | { |
---|
736 | | - uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar); |
---|
| 956 | + u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar); |
---|
737 | 957 | int ret; |
---|
738 | 958 | |
---|
739 | 959 | if (is_write) |
---|
.. | .. |
---|
745 | 965 | return ret; |
---|
746 | 966 | } |
---|
747 | 967 | |
---|
748 | | -static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off) |
---|
| 968 | +static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) |
---|
749 | 969 | { |
---|
750 | 970 | return off >= vgpu_aperture_offset(vgpu) && |
---|
751 | 971 | off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu); |
---|
752 | 972 | } |
---|
753 | 973 | |
---|
754 | | -static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off, |
---|
| 974 | +static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, |
---|
755 | 975 | void *buf, unsigned long count, bool is_write) |
---|
756 | 976 | { |
---|
757 | | - void *aperture_va; |
---|
| 977 | + void __iomem *aperture_va; |
---|
758 | 978 | |
---|
759 | 979 | if (!intel_vgpu_in_aperture(vgpu, off) || |
---|
760 | 980 | !intel_vgpu_in_aperture(vgpu, off + count)) { |
---|
.. | .. |
---|
762 | 982 | return -EINVAL; |
---|
763 | 983 | } |
---|
764 | 984 | |
---|
765 | | - aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap, |
---|
| 985 | + aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap, |
---|
766 | 986 | ALIGN_DOWN(off, PAGE_SIZE), |
---|
767 | 987 | count + offset_in_page(off)); |
---|
768 | 988 | if (!aperture_va) |
---|
769 | 989 | return -EIO; |
---|
770 | 990 | |
---|
771 | 991 | if (is_write) |
---|
772 | | - memcpy(aperture_va + offset_in_page(off), buf, count); |
---|
| 992 | + memcpy_toio(aperture_va + offset_in_page(off), buf, count); |
---|
773 | 993 | else |
---|
774 | | - memcpy(buf, aperture_va + offset_in_page(off), count); |
---|
| 994 | + memcpy_fromio(buf, aperture_va + offset_in_page(off), count); |
---|
775 | 995 | |
---|
776 | 996 | io_mapping_unmap(aperture_va); |
---|
777 | 997 | |
---|
.. | .. |
---|
782 | 1002 | size_t count, loff_t *ppos, bool is_write) |
---|
783 | 1003 | { |
---|
784 | 1004 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
---|
| 1005 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
785 | 1006 | unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); |
---|
786 | | - uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK; |
---|
| 1007 | + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; |
---|
787 | 1008 | int ret = -EINVAL; |
---|
788 | 1009 | |
---|
789 | 1010 | |
---|
790 | | - if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) { |
---|
| 1011 | + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) { |
---|
791 | 1012 | gvt_vgpu_err("invalid index: %u\n", index); |
---|
792 | 1013 | return -EINVAL; |
---|
793 | 1014 | } |
---|
.. | .. |
---|
816 | 1037 | case VFIO_PCI_ROM_REGION_INDEX: |
---|
817 | 1038 | break; |
---|
818 | 1039 | default: |
---|
819 | | - if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) |
---|
| 1040 | + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) |
---|
820 | 1041 | return -EINVAL; |
---|
821 | 1042 | |
---|
822 | 1043 | index -= VFIO_PCI_NUM_REGIONS; |
---|
823 | | - return vgpu->vdev.region[index].ops->rw(vgpu, buf, count, |
---|
| 1044 | + return vdev->region[index].ops->rw(vgpu, buf, count, |
---|
824 | 1045 | ppos, is_write); |
---|
825 | 1046 | } |
---|
826 | 1047 | |
---|
.. | .. |
---|
1039 | 1260 | |
---|
1040 | 1261 | static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu, |
---|
1041 | 1262 | unsigned int index, unsigned int start, |
---|
1042 | | - unsigned int count, uint32_t flags, |
---|
| 1263 | + unsigned int count, u32 flags, |
---|
1043 | 1264 | void *data) |
---|
1044 | 1265 | { |
---|
1045 | 1266 | return 0; |
---|
.. | .. |
---|
1047 | 1268 | |
---|
1048 | 1269 | static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu, |
---|
1049 | 1270 | unsigned int index, unsigned int start, |
---|
1050 | | - unsigned int count, uint32_t flags, void *data) |
---|
| 1271 | + unsigned int count, u32 flags, void *data) |
---|
1051 | 1272 | { |
---|
1052 | 1273 | return 0; |
---|
1053 | 1274 | } |
---|
1054 | 1275 | |
---|
1055 | 1276 | static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu, |
---|
1056 | 1277 | unsigned int index, unsigned int start, unsigned int count, |
---|
1057 | | - uint32_t flags, void *data) |
---|
| 1278 | + u32 flags, void *data) |
---|
1058 | 1279 | { |
---|
1059 | 1280 | return 0; |
---|
1060 | 1281 | } |
---|
1061 | 1282 | |
---|
1062 | 1283 | static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, |
---|
1063 | 1284 | unsigned int index, unsigned int start, unsigned int count, |
---|
1064 | | - uint32_t flags, void *data) |
---|
| 1285 | + u32 flags, void *data) |
---|
1065 | 1286 | { |
---|
1066 | 1287 | struct eventfd_ctx *trigger; |
---|
1067 | 1288 | |
---|
.. | .. |
---|
1073 | 1294 | gvt_vgpu_err("eventfd_ctx_fdget failed\n"); |
---|
1074 | 1295 | return PTR_ERR(trigger); |
---|
1075 | 1296 | } |
---|
1076 | | - vgpu->vdev.msi_trigger = trigger; |
---|
| 1297 | + kvmgt_vdev(vgpu)->msi_trigger = trigger; |
---|
1077 | 1298 | } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count) |
---|
1078 | 1299 | intel_vgpu_release_msi_eventfd_ctx(vgpu); |
---|
1079 | 1300 | |
---|
1080 | 1301 | return 0; |
---|
1081 | 1302 | } |
---|
1082 | 1303 | |
---|
1083 | | -static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags, |
---|
| 1304 | +static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags, |
---|
1084 | 1305 | unsigned int index, unsigned int start, unsigned int count, |
---|
1085 | 1306 | void *data) |
---|
1086 | 1307 | { |
---|
1087 | 1308 | int (*func)(struct intel_vgpu *vgpu, unsigned int index, |
---|
1088 | | - unsigned int start, unsigned int count, uint32_t flags, |
---|
| 1309 | + unsigned int start, unsigned int count, u32 flags, |
---|
1089 | 1310 | void *data) = NULL; |
---|
1090 | 1311 | |
---|
1091 | 1312 | switch (index) { |
---|
.. | .. |
---|
1125 | 1346 | unsigned long arg) |
---|
1126 | 1347 | { |
---|
1127 | 1348 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
---|
| 1349 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
1128 | 1350 | unsigned long minsz; |
---|
1129 | 1351 | |
---|
1130 | 1352 | gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); |
---|
.. | .. |
---|
1143 | 1365 | info.flags = VFIO_DEVICE_FLAGS_PCI; |
---|
1144 | 1366 | info.flags |= VFIO_DEVICE_FLAGS_RESET; |
---|
1145 | 1367 | info.num_regions = VFIO_PCI_NUM_REGIONS + |
---|
1146 | | - vgpu->vdev.num_regions; |
---|
| 1368 | + vdev->num_regions; |
---|
1147 | 1369 | info.num_irqs = VFIO_PCI_NUM_IRQS; |
---|
1148 | 1370 | |
---|
1149 | 1371 | return copy_to_user((void __user *)arg, &info, minsz) ? |
---|
.. | .. |
---|
1155 | 1377 | unsigned int i; |
---|
1156 | 1378 | int ret; |
---|
1157 | 1379 | struct vfio_region_info_cap_sparse_mmap *sparse = NULL; |
---|
1158 | | - size_t size; |
---|
1159 | 1380 | int nr_areas = 1; |
---|
1160 | 1381 | int cap_type_id; |
---|
1161 | 1382 | |
---|
.. | .. |
---|
1198 | 1419 | VFIO_REGION_INFO_FLAG_WRITE; |
---|
1199 | 1420 | info.size = gvt_aperture_sz(vgpu->gvt); |
---|
1200 | 1421 | |
---|
1201 | | - size = sizeof(*sparse) + |
---|
1202 | | - (nr_areas * sizeof(*sparse->areas)); |
---|
1203 | | - sparse = kzalloc(size, GFP_KERNEL); |
---|
| 1422 | + sparse = kzalloc(struct_size(sparse, areas, nr_areas), |
---|
| 1423 | + GFP_KERNEL); |
---|
1204 | 1424 | if (!sparse) |
---|
1205 | 1425 | return -ENOMEM; |
---|
1206 | 1426 | |
---|
.. | .. |
---|
1236 | 1456 | .header.version = 1 }; |
---|
1237 | 1457 | |
---|
1238 | 1458 | if (info.index >= VFIO_PCI_NUM_REGIONS + |
---|
1239 | | - vgpu->vdev.num_regions) |
---|
| 1459 | + vdev->num_regions) |
---|
1240 | 1460 | return -EINVAL; |
---|
1241 | 1461 | info.index = |
---|
1242 | 1462 | array_index_nospec(info.index, |
---|
1243 | 1463 | VFIO_PCI_NUM_REGIONS + |
---|
1244 | | - vgpu->vdev.num_regions); |
---|
| 1464 | + vdev->num_regions); |
---|
1245 | 1465 | |
---|
1246 | 1466 | i = info.index - VFIO_PCI_NUM_REGIONS; |
---|
1247 | 1467 | |
---|
1248 | 1468 | info.offset = |
---|
1249 | 1469 | VFIO_PCI_INDEX_TO_OFFSET(info.index); |
---|
1250 | | - info.size = vgpu->vdev.region[i].size; |
---|
1251 | | - info.flags = vgpu->vdev.region[i].flags; |
---|
| 1470 | + info.size = vdev->region[i].size; |
---|
| 1471 | + info.flags = vdev->region[i].flags; |
---|
1252 | 1472 | |
---|
1253 | | - cap_type.type = vgpu->vdev.region[i].type; |
---|
1254 | | - cap_type.subtype = vgpu->vdev.region[i].subtype; |
---|
| 1473 | + cap_type.type = vdev->region[i].type; |
---|
| 1474 | + cap_type.subtype = vdev->region[i].subtype; |
---|
1255 | 1475 | |
---|
1256 | 1476 | ret = vfio_info_add_capability(&caps, |
---|
1257 | 1477 | &cap_type.header, |
---|
.. | .. |
---|
1265 | 1485 | switch (cap_type_id) { |
---|
1266 | 1486 | case VFIO_REGION_INFO_CAP_SPARSE_MMAP: |
---|
1267 | 1487 | ret = vfio_info_add_capability(&caps, |
---|
1268 | | - &sparse->header, sizeof(*sparse) + |
---|
1269 | | - (sparse->nr_areas * |
---|
1270 | | - sizeof(*sparse->areas))); |
---|
| 1488 | + &sparse->header, |
---|
| 1489 | + struct_size(sparse, areas, |
---|
| 1490 | + sparse->nr_areas)); |
---|
1271 | 1491 | if (ret) { |
---|
1272 | 1492 | kfree(sparse); |
---|
1273 | 1493 | return ret; |
---|
.. | .. |
---|
1415 | 1635 | return sprintf(buf, "\n"); |
---|
1416 | 1636 | } |
---|
1417 | 1637 | |
---|
1418 | | -static ssize_t |
---|
1419 | | -hw_id_show(struct device *dev, struct device_attribute *attr, |
---|
1420 | | - char *buf) |
---|
1421 | | -{ |
---|
1422 | | - struct mdev_device *mdev = mdev_from_dev(dev); |
---|
1423 | | - |
---|
1424 | | - if (mdev) { |
---|
1425 | | - struct intel_vgpu *vgpu = (struct intel_vgpu *) |
---|
1426 | | - mdev_get_drvdata(mdev); |
---|
1427 | | - return sprintf(buf, "%u\n", |
---|
1428 | | - vgpu->submission.shadow_ctx->hw_id); |
---|
1429 | | - } |
---|
1430 | | - return sprintf(buf, "\n"); |
---|
1431 | | -} |
---|
1432 | | - |
---|
1433 | 1638 | static DEVICE_ATTR_RO(vgpu_id); |
---|
1434 | | -static DEVICE_ATTR_RO(hw_id); |
---|
1435 | 1639 | |
---|
1436 | 1640 | static struct attribute *intel_vgpu_attrs[] = { |
---|
1437 | 1641 | &dev_attr_vgpu_id.attr, |
---|
1438 | | - &dev_attr_hw_id.attr, |
---|
1439 | 1642 | NULL |
---|
1440 | 1643 | }; |
---|
1441 | 1644 | |
---|
.. | .. |
---|
1465 | 1668 | |
---|
1466 | 1669 | static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) |
---|
1467 | 1670 | { |
---|
1468 | | - struct attribute **kvm_type_attrs; |
---|
1469 | 1671 | struct attribute_group **kvm_vgpu_type_groups; |
---|
1470 | 1672 | |
---|
1471 | 1673 | intel_gvt_ops = ops; |
---|
1472 | | - if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs, |
---|
1473 | | - &kvm_vgpu_type_groups)) |
---|
| 1674 | + if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups)) |
---|
1474 | 1675 | return -EFAULT; |
---|
1475 | 1676 | intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups; |
---|
1476 | 1677 | |
---|
1477 | 1678 | return mdev_register_device(dev, &intel_vgpu_ops); |
---|
1478 | 1679 | } |
---|
1479 | 1680 | |
---|
1480 | | -static void kvmgt_host_exit(struct device *dev, void *gvt) |
---|
| 1681 | +static void kvmgt_host_exit(struct device *dev) |
---|
1481 | 1682 | { |
---|
1482 | 1683 | mdev_unregister_device(dev); |
---|
1483 | 1684 | } |
---|
.. | .. |
---|
1610 | 1811 | { |
---|
1611 | 1812 | struct kvmgt_guest_info *info; |
---|
1612 | 1813 | struct intel_vgpu *vgpu; |
---|
| 1814 | + struct kvmgt_vdev *vdev; |
---|
1613 | 1815 | struct kvm *kvm; |
---|
1614 | 1816 | |
---|
1615 | 1817 | vgpu = mdev_get_drvdata(mdev); |
---|
1616 | 1818 | if (handle_valid(vgpu->handle)) |
---|
1617 | 1819 | return -EEXIST; |
---|
1618 | 1820 | |
---|
1619 | | - kvm = vgpu->vdev.kvm; |
---|
| 1821 | + vdev = kvmgt_vdev(vgpu); |
---|
| 1822 | + kvm = vdev->kvm; |
---|
1620 | 1823 | if (!kvm || kvm->mm != current->mm) { |
---|
1621 | 1824 | gvt_vgpu_err("KVM is required to use Intel vGPU\n"); |
---|
1622 | 1825 | return -ESRCH; |
---|
.. | .. |
---|
1637 | 1840 | kvmgt_protect_table_init(info); |
---|
1638 | 1841 | gvt_cache_init(vgpu); |
---|
1639 | 1842 | |
---|
1640 | | - init_completion(&vgpu->vblank_done); |
---|
1641 | | - |
---|
1642 | 1843 | info->track_node.track_write = kvmgt_page_track_write; |
---|
1643 | 1844 | info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; |
---|
1644 | 1845 | kvm_page_track_register_notifier(kvm, &info->track_node); |
---|
.. | .. |
---|
1646 | 1847 | info->debugfs_cache_entries = debugfs_create_ulong( |
---|
1647 | 1848 | "kvmgt_nr_cache_entries", |
---|
1648 | 1849 | 0444, vgpu->debugfs, |
---|
1649 | | - &vgpu->vdev.nr_cache_entries); |
---|
1650 | | - if (!info->debugfs_cache_entries) |
---|
1651 | | - gvt_vgpu_err("Cannot create kvmgt debugfs entry\n"); |
---|
1652 | | - |
---|
| 1850 | + &vdev->nr_cache_entries); |
---|
1653 | 1851 | return 0; |
---|
1654 | 1852 | } |
---|
1655 | 1853 | |
---|
.. | .. |
---|
1666 | 1864 | return true; |
---|
1667 | 1865 | } |
---|
1668 | 1866 | |
---|
1669 | | -static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) |
---|
| 1867 | +static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle) |
---|
1670 | 1868 | { |
---|
1671 | | - /* nothing to do here */ |
---|
| 1869 | + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; |
---|
| 1870 | + |
---|
| 1871 | + vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL); |
---|
| 1872 | + |
---|
| 1873 | + if (!vgpu->vdev) |
---|
| 1874 | + return -ENOMEM; |
---|
| 1875 | + |
---|
| 1876 | + kvmgt_vdev(vgpu)->vgpu = vgpu; |
---|
| 1877 | + |
---|
1672 | 1878 | return 0; |
---|
1673 | 1879 | } |
---|
1674 | 1880 | |
---|
1675 | | -static void kvmgt_detach_vgpu(unsigned long handle) |
---|
| 1881 | +static void kvmgt_detach_vgpu(void *p_vgpu) |
---|
1676 | 1882 | { |
---|
1677 | | - /* nothing to do here */ |
---|
| 1883 | + int i; |
---|
| 1884 | + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; |
---|
| 1885 | + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); |
---|
| 1886 | + |
---|
| 1887 | + if (!vdev->region) |
---|
| 1888 | + return; |
---|
| 1889 | + |
---|
| 1890 | + for (i = 0; i < vdev->num_regions; i++) |
---|
| 1891 | + if (vdev->region[i].ops->release) |
---|
| 1892 | + vdev->region[i].ops->release(vgpu, |
---|
| 1893 | + &vdev->region[i]); |
---|
| 1894 | + vdev->num_regions = 0; |
---|
| 1895 | + kfree(vdev->region); |
---|
| 1896 | + vdev->region = NULL; |
---|
| 1897 | + |
---|
| 1898 | + kfree(vdev); |
---|
1678 | 1899 | } |
---|
1679 | 1900 | |
---|
1680 | 1901 | static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) |
---|
1681 | 1902 | { |
---|
1682 | 1903 | struct kvmgt_guest_info *info; |
---|
1683 | 1904 | struct intel_vgpu *vgpu; |
---|
| 1905 | + struct kvmgt_vdev *vdev; |
---|
1684 | 1906 | |
---|
1685 | 1907 | if (!handle_valid(handle)) |
---|
1686 | 1908 | return -ESRCH; |
---|
1687 | 1909 | |
---|
1688 | 1910 | info = (struct kvmgt_guest_info *)handle; |
---|
1689 | 1911 | vgpu = info->vgpu; |
---|
| 1912 | + vdev = kvmgt_vdev(vgpu); |
---|
1690 | 1913 | |
---|
1691 | 1914 | /* |
---|
1692 | 1915 | * When guest is poweroff, msi_trigger is set to NULL, but vgpu's |
---|
.. | .. |
---|
1697 | 1920 | * enabled by guest. so if msi_trigger is null, success is still |
---|
1698 | 1921 | * returned and don't inject interrupt into guest. |
---|
1699 | 1922 | */ |
---|
1700 | | - if (vgpu->vdev.msi_trigger == NULL) |
---|
| 1923 | + if (vdev->msi_trigger == NULL) |
---|
1701 | 1924 | return 0; |
---|
1702 | 1925 | |
---|
1703 | | - if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1) |
---|
| 1926 | + if (eventfd_signal(vdev->msi_trigger, 1) == 1) |
---|
1704 | 1927 | return 0; |
---|
1705 | 1928 | |
---|
1706 | 1929 | return -EFAULT; |
---|
.. | .. |
---|
1723 | 1946 | return pfn; |
---|
1724 | 1947 | } |
---|
1725 | 1948 | |
---|
1726 | | -int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, |
---|
| 1949 | +static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, |
---|
1727 | 1950 | unsigned long size, dma_addr_t *dma_addr) |
---|
1728 | 1951 | { |
---|
1729 | | - struct kvmgt_guest_info *info; |
---|
1730 | 1952 | struct intel_vgpu *vgpu; |
---|
| 1953 | + struct kvmgt_vdev *vdev; |
---|
1731 | 1954 | struct gvt_dma *entry; |
---|
1732 | 1955 | int ret; |
---|
1733 | 1956 | |
---|
1734 | 1957 | if (!handle_valid(handle)) |
---|
1735 | 1958 | return -EINVAL; |
---|
1736 | 1959 | |
---|
1737 | | - info = (struct kvmgt_guest_info *)handle; |
---|
1738 | | - vgpu = info->vgpu; |
---|
| 1960 | + vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; |
---|
| 1961 | + vdev = kvmgt_vdev(vgpu); |
---|
1739 | 1962 | |
---|
1740 | | - mutex_lock(&info->vgpu->vdev.cache_lock); |
---|
| 1963 | + mutex_lock(&vdev->cache_lock); |
---|
1741 | 1964 | |
---|
1742 | | - entry = __gvt_cache_find_gfn(info->vgpu, gfn); |
---|
| 1965 | + entry = __gvt_cache_find_gfn(vgpu, gfn); |
---|
1743 | 1966 | if (!entry) { |
---|
1744 | 1967 | ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); |
---|
1745 | 1968 | if (ret) |
---|
1746 | 1969 | goto err_unlock; |
---|
1747 | 1970 | |
---|
1748 | | - ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); |
---|
| 1971 | + ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); |
---|
1749 | 1972 | if (ret) |
---|
1750 | 1973 | goto err_unmap; |
---|
1751 | 1974 | } else if (entry->size != size) { |
---|
.. | .. |
---|
1757 | 1980 | if (ret) |
---|
1758 | 1981 | goto err_unlock; |
---|
1759 | 1982 | |
---|
1760 | | - ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); |
---|
| 1983 | + ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); |
---|
1761 | 1984 | if (ret) |
---|
1762 | 1985 | goto err_unmap; |
---|
1763 | 1986 | } else { |
---|
.. | .. |
---|
1765 | 1988 | *dma_addr = entry->dma_addr; |
---|
1766 | 1989 | } |
---|
1767 | 1990 | |
---|
1768 | | - mutex_unlock(&info->vgpu->vdev.cache_lock); |
---|
| 1991 | + mutex_unlock(&vdev->cache_lock); |
---|
1769 | 1992 | return 0; |
---|
1770 | 1993 | |
---|
1771 | 1994 | err_unmap: |
---|
1772 | 1995 | gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); |
---|
1773 | 1996 | err_unlock: |
---|
1774 | | - mutex_unlock(&info->vgpu->vdev.cache_lock); |
---|
| 1997 | + mutex_unlock(&vdev->cache_lock); |
---|
| 1998 | + return ret; |
---|
| 1999 | +} |
---|
| 2000 | + |
---|
| 2001 | +static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) |
---|
| 2002 | +{ |
---|
| 2003 | + struct kvmgt_guest_info *info; |
---|
| 2004 | + struct kvmgt_vdev *vdev; |
---|
| 2005 | + struct gvt_dma *entry; |
---|
| 2006 | + int ret = 0; |
---|
| 2007 | + |
---|
| 2008 | + if (!handle_valid(handle)) |
---|
| 2009 | + return -ENODEV; |
---|
| 2010 | + |
---|
| 2011 | + info = (struct kvmgt_guest_info *)handle; |
---|
| 2012 | + vdev = kvmgt_vdev(info->vgpu); |
---|
| 2013 | + |
---|
| 2014 | + mutex_lock(&vdev->cache_lock); |
---|
| 2015 | + entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); |
---|
| 2016 | + if (entry) |
---|
| 2017 | + kref_get(&entry->ref); |
---|
| 2018 | + else |
---|
| 2019 | + ret = -ENOMEM; |
---|
| 2020 | + mutex_unlock(&vdev->cache_lock); |
---|
| 2021 | + |
---|
1775 | 2022 | return ret; |
---|
1776 | 2023 | } |
---|
1777 | 2024 | |
---|
.. | .. |
---|
1784 | 2031 | __gvt_cache_remove_entry(entry->vgpu, entry); |
---|
1785 | 2032 | } |
---|
1786 | 2033 | |
---|
1787 | | -void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) |
---|
| 2034 | +static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) |
---|
1788 | 2035 | { |
---|
1789 | | - struct kvmgt_guest_info *info; |
---|
| 2036 | + struct intel_vgpu *vgpu; |
---|
| 2037 | + struct kvmgt_vdev *vdev; |
---|
1790 | 2038 | struct gvt_dma *entry; |
---|
1791 | 2039 | |
---|
1792 | 2040 | if (!handle_valid(handle)) |
---|
1793 | 2041 | return; |
---|
1794 | 2042 | |
---|
1795 | | - info = (struct kvmgt_guest_info *)handle; |
---|
| 2043 | + vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; |
---|
| 2044 | + vdev = kvmgt_vdev(vgpu); |
---|
1796 | 2045 | |
---|
1797 | | - mutex_lock(&info->vgpu->vdev.cache_lock); |
---|
1798 | | - entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); |
---|
| 2046 | + mutex_lock(&vdev->cache_lock); |
---|
| 2047 | + entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); |
---|
1799 | 2048 | if (entry) |
---|
1800 | 2049 | kref_put(&entry->ref, __gvt_dma_release); |
---|
1801 | | - mutex_unlock(&info->vgpu->vdev.cache_lock); |
---|
| 2050 | + mutex_unlock(&vdev->cache_lock); |
---|
1802 | 2051 | } |
---|
1803 | 2052 | |
---|
1804 | 2053 | static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, |
---|
1805 | 2054 | void *buf, unsigned long len, bool write) |
---|
1806 | 2055 | { |
---|
1807 | 2056 | struct kvmgt_guest_info *info; |
---|
1808 | | - struct kvm *kvm; |
---|
1809 | | - int idx, ret; |
---|
1810 | | - bool kthread = current->mm == NULL; |
---|
1811 | 2057 | |
---|
1812 | 2058 | if (!handle_valid(handle)) |
---|
1813 | 2059 | return -ESRCH; |
---|
1814 | 2060 | |
---|
1815 | 2061 | info = (struct kvmgt_guest_info *)handle; |
---|
1816 | | - kvm = info->kvm; |
---|
1817 | 2062 | |
---|
1818 | | - if (kthread) { |
---|
1819 | | - if (!mmget_not_zero(kvm->mm)) |
---|
1820 | | - return -EFAULT; |
---|
1821 | | - use_mm(kvm->mm); |
---|
1822 | | - } |
---|
1823 | | - |
---|
1824 | | - idx = srcu_read_lock(&kvm->srcu); |
---|
1825 | | - ret = write ? kvm_write_guest(kvm, gpa, buf, len) : |
---|
1826 | | - kvm_read_guest(kvm, gpa, buf, len); |
---|
1827 | | - srcu_read_unlock(&kvm->srcu, idx); |
---|
1828 | | - |
---|
1829 | | - if (kthread) { |
---|
1830 | | - unuse_mm(kvm->mm); |
---|
1831 | | - mmput(kvm->mm); |
---|
1832 | | - } |
---|
1833 | | - |
---|
1834 | | - return ret; |
---|
| 2063 | + return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, |
---|
| 2064 | + gpa, buf, len, write); |
---|
1835 | 2065 | } |
---|
1836 | 2066 | |
---|
1837 | 2067 | static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, |
---|
.. | .. |
---|
1871 | 2101 | return ret; |
---|
1872 | 2102 | } |
---|
1873 | 2103 | |
---|
1874 | | -struct intel_gvt_mpt kvmgt_mpt = { |
---|
| 2104 | +static struct intel_gvt_mpt kvmgt_mpt = { |
---|
| 2105 | + .type = INTEL_GVT_HYPERVISOR_KVM, |
---|
1875 | 2106 | .host_init = kvmgt_host_init, |
---|
1876 | 2107 | .host_exit = kvmgt_host_exit, |
---|
1877 | 2108 | .attach_vgpu = kvmgt_attach_vgpu, |
---|
.. | .. |
---|
1885 | 2116 | .gfn_to_mfn = kvmgt_gfn_to_pfn, |
---|
1886 | 2117 | .dma_map_guest_page = kvmgt_dma_map_guest_page, |
---|
1887 | 2118 | .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, |
---|
| 2119 | + .dma_pin_guest_page = kvmgt_dma_pin_guest_page, |
---|
1888 | 2120 | .set_opregion = kvmgt_set_opregion, |
---|
| 2121 | + .set_edid = kvmgt_set_edid, |
---|
1889 | 2122 | .get_vfio_device = kvmgt_get_vfio_device, |
---|
1890 | 2123 | .put_vfio_device = kvmgt_put_vfio_device, |
---|
1891 | 2124 | .is_valid_gfn = kvmgt_is_valid_gfn, |
---|
1892 | 2125 | }; |
---|
1893 | | -EXPORT_SYMBOL_GPL(kvmgt_mpt); |
---|
1894 | 2126 | |
---|
1895 | 2127 | static int __init kvmgt_init(void) |
---|
1896 | 2128 | { |
---|
| 2129 | + if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) |
---|
| 2130 | + return -ENODEV; |
---|
1897 | 2131 | return 0; |
---|
1898 | 2132 | } |
---|
1899 | 2133 | |
---|
1900 | 2134 | static void __exit kvmgt_exit(void) |
---|
1901 | 2135 | { |
---|
| 2136 | + intel_gvt_unregister_hypervisor(); |
---|
1902 | 2137 | } |
---|
1903 | 2138 | |
---|
1904 | 2139 | module_init(kvmgt_init); |
---|