@@ -52,12 +52,8 @@
 
 #define GVT_MAX_VGPU 8
 
-enum {
-	INTEL_GVT_HYPERVISOR_XEN = 0,
-	INTEL_GVT_HYPERVISOR_KVM,
-};
-
 struct intel_gvt_host {
+	struct device *dev;
 	bool initialized;
 	int hypervisor_type;
 	struct intel_gvt_mpt *mpt;
@@ -91,14 +87,13 @@
 
 /* Fences owned by a vGPU */
 struct intel_vgpu_fence {
-	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
 	u32 base;
 	u32 size;
 };
 
 struct intel_vgpu_mmio {
 	void *vreg;
-	void *sreg;
 };
 
 #define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,15 +106,14 @@
 struct intel_vgpu_cfg_space {
 	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
 	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+	u32 pmcsr_off;
 };
 
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
 
-#define INTEL_GVT_MAX_PIPE 4
-
 struct intel_vgpu_irq {
 	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
-	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
 		       INTEL_GVT_EVENT_MAX);
 };
 
@@ -148,17 +142,21 @@
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 };
 
 struct intel_vgpu_submission {
 	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
 	struct list_head workload_q_head[I915_NUM_ENGINES];
+	struct intel_context *shadow[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
-	struct i915_gem_context *shadow_ctx;
+	union {
+		u64 i915_context_pml4;
+		u64 i915_context_pdps[GEN8_3LVL_PDPES];
+	};
 	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	void *ring_scan_buffer[I915_NUM_ENGINES];
@@ -166,6 +164,11 @@
 	const struct intel_vgpu_submission_ops *ops;
 	int virtual_submission_interface;
 	bool active;
+	struct {
+		u32 lrca;
+		bool valid;
+		u64 ring_context_gpa;
+	} last_ctx[I915_NUM_ENGINES];
 };
 
 struct intel_vgpu {
@@ -196,43 +199,25 @@
 	struct intel_vgpu_submission submission;
 	struct radix_tree_root page_track_tree;
 	u32 hws_pga[I915_NUM_ENGINES];
+	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+	bool d3_entered;
 
 	struct dentry *debugfs;
 
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-	struct {
-		struct mdev_device *mdev;
-		struct vfio_region *region;
-		int num_regions;
-		struct eventfd_ctx *intx_trigger;
-		struct eventfd_ctx *msi_trigger;
-
-		/*
-		 * Two caches are used to avoid mapping duplicated pages (eg.
-		 * scratch pages). This help to reduce dma setup overhead.
-		 */
-		struct rb_root gfn_cache;
-		struct rb_root dma_addr_cache;
-		unsigned long nr_cache_entries;
-		struct mutex cache_lock;
-
-		struct notifier_block iommu_notifier;
-		struct notifier_block group_notifier;
-		struct kvm *kvm;
-		struct work_struct release_work;
-		atomic_t released;
-		struct vfio_device *vfio_device;
-	} vdev;
-#endif
+	/* Hypervisor-specific device state. */
+	void *vdev;
 
 	struct list_head dmabuf_obj_list_head;
 	struct mutex dmabuf_lock;
 	struct idr object_idr;
 
-	struct completion vblank_done;
-
 	u32 scan_nonprivbb;
 };
+
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+	return vgpu->vdev;
+}
 
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
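
Note: with the KVMGT-specific block gone, each hypervisor backend now keeps its private per-vGPU state behind the opaque vdev pointer and reaches it through intel_vgpu_vdev(). A minimal sketch of that pattern follows; the sample_vdev type and the sample_attach()/sample_use() helpers are illustrative only, not part of the patch:

/* Backend-private state hung off vgpu->vdev (hypothetical layout). */
struct sample_vdev {
	struct rb_root gfn_cache;
	struct mutex cache_lock;
};

static int sample_attach(struct intel_vgpu *vgpu)
{
	struct sample_vdev *vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);

	if (!vdev)
		return -ENOMEM;

	mutex_init(&vdev->cache_lock);
	vgpu->vdev = vdev;
	return 0;
}

static void sample_use(struct intel_vgpu *vgpu)
{
	/* All later accesses go through the inline accessor. */
	struct sample_vdev *vdev = intel_vgpu_vdev(vgpu);

	mutex_lock(&vdev->cache_lock);
	/* ... look up or insert cache entries ... */
	mutex_unlock(&vdev->cache_lock);
}
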
@@ -270,12 +255,14 @@
 #define F_CMD_ACCESS (1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED (1 << 4)
-/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED (1 << 5)
+/* This reg requires save & restore during host PM suspend/resume */
+#define F_PM_SAVE (1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN (1 << 6)
-/* This reg is saved/restored in context */
-#define F_IN_CTX (1 << 7)
+/* This reg is in GVT's mmio save-restor list and in hardware
+ * logical context image
+ */
+#define F_SR_IN_CTX (1 << 7)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
@@ -309,7 +296,7 @@
 	/* scheduler scope lock, protect gvt and vgpu schedule related data */
 	struct mutex sched_lock;
 
-	struct drm_i915_private *dev_priv;
+	struct intel_gt *gt;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
 
 	struct intel_gvt_device_info device_info;
@@ -337,6 +324,10 @@
 	struct {
 		struct engine_mmio *mmio;
 		int ctx_mmio_count[I915_NUM_ENGINES];
+		u32 *tlb_mmio_offset_list;
+		u32 tlb_mmio_offset_list_cnt;
+		u32 *mocs_mmio_offset_list;
+		u32 mocs_mmio_offset_list_cnt;
 	} engine_mmio_list;
 
 	struct dentry *debugfs_root;
@@ -375,14 +366,15 @@
 #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
 #define HOST_FENCE 4
 
-/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
 
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
-	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
+
+#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 
 #define gvt_aperture_gmadr_base(gvt) (0)
 #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -393,7 +385,7 @@
 #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
 				   + gvt_hidden_sz(gvt) - 1)
 
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
 
 /* Aperture/GM space definitions for vGPU */
 #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
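
Note: the GM-layout macros above now all go through gvt_to_ggtt(); gvt_ggtt_sz() is the size of the GGTT page table itself (one 8-byte PTE per 4 KiB page). A small illustrative helper, assuming only the macros defined above:

/* Illustrative only: print how the host GGTT splits into vGPU GM resources. */
static void sample_dump_gm_layout(struct intel_gvt *gvt)
{
	u64 low = gvt_aperture_sz(gvt);		/* CPU-mappable (low) GM */
	u64 high = gvt_hidden_sz(gvt);		/* non-mappable (high) GM */
	u64 pte_bytes = gvt_ggtt_sz(gvt);	/* GGTT PTE array, 8 bytes per 4K page */

	pr_debug("GM: low %llu bytes, high %llu bytes, GGTT entries %llu bytes\n",
		 low, high, pte_bytes);
}
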
@@ -449,10 +441,6 @@
 	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
 #define vgpu_vreg64(vgpu, offset) \
 	(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
-	(*(u32 *)(vgpu->mmio.sreg + (offset)))
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +476,7 @@
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask);
+				 intel_engine_mask_t engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
@@ -536,6 +524,8 @@
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 				 void *p_data, unsigned int bytes);
 
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 {
 	/* We are 64bit bar. */
@@ -571,12 +561,12 @@
 	void (*vgpu_deactivate)(struct intel_vgpu *);
 	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
 						      const char *name);
-	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
-			      struct attribute_group ***intel_vgpu_type_groups);
+	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
 	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
 				     unsigned int);
+	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
 };
 
 
@@ -586,14 +576,14 @@
 	GVT_FAILSAFE_GUEST_ERR,
 };
 
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
 {
-	intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(gt->uncore->rpm);
 }
 
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
 {
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
 }
 
 /**
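
Note: callers bracket any passthrough hardware access with these helpers so the device is runtime-resumed around the MMIO touch; the runtime PM reference now comes from the GT's uncore rather than dev_priv. A hedged sketch of the pattern, using a hypothetical sample_read_hw_reg() wrapper:

static u32 sample_read_hw_reg(struct intel_gvt *gvt, i915_reg_t reg)
{
	u32 val;

	mmio_hw_access_pre(gvt->gt);
	val = intel_uncore_read(gvt->gt->uncore, reg);
	mmio_hw_access_post(gvt->gt);

	return val;
}
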
@@ -609,15 +599,30 @@
 }
 
 /**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
+ * intel_gvt_mmio_is_cmd_accessible - if a MMIO could be accessed by command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if an MMIO is able to be accessed by GPU commands
+ */
+static inline bool intel_gvt_mmio_is_cmd_accessible(
+		struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessible -
+ *		mark a MMIO could be accessible by command
  * @gvt: a GVT device
 * @offset: register offset
 *
 */
-static inline bool intel_gvt_mmio_is_cmd_access(
+static inline void intel_gvt_mmio_set_cmd_accessible(
 		struct intel_gvt *gvt, unsigned int offset)
 {
-	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
 }
 
 /**
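
Note: the old "accessed by command" bookkeeping is replaced by a query/set pair over the same F_CMD_ACCESS attribute bit. A hedged sketch of how a command-scan path might consult it; sample_check_lri_target() is illustrative only:

static int sample_check_lri_target(struct intel_gvt *gvt, unsigned int offset)
{
	/* Reject register writes from the ring that were never flagged
	 * as command-accessible.
	 */
	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset))
		return -EINVAL;

	return 0;
}
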
@@ -630,18 +635,6 @@
 		struct intel_gvt *gvt, unsigned int offset)
 {
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
-}
-
-/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-static inline void intel_gvt_mmio_set_cmd_accessed(
-		struct intel_gvt *gvt, unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
 }
 
 /**
@@ -660,37 +653,41 @@
 }
 
 /**
- * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * intel_gvt_mmio_is_sr_in_ctx -
+ *		check if an MMIO has F_SR_IN_CTX mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
- * True if a MMIO has a in-context mask, false if it isn't.
+ * True if an MMIO has an F_SR_IN_CTX mask, false if it isn't.
 *
 */
-static inline bool intel_gvt_mmio_is_in_ctx(
+static inline bool intel_gvt_mmio_is_sr_in_ctx(
 		struct intel_gvt *gvt, unsigned int offset)
 {
-	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
 }
 
 /**
- * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * intel_gvt_mmio_set_sr_in_ctx -
+ *		mask an MMIO in GVT's mmio save-restore list and also
+ *		in hardware logical context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
-static inline void intel_gvt_mmio_set_in_ctx(
+static inline void intel_gvt_mmio_set_sr_in_ctx(
 		struct intel_gvt *gvt, unsigned int offset)
 {
-	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
 }
 
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 
+int intel_gvt_pm_resume(struct intel_gvt *gvt);
 
 #include "trace.h"
 #include "mpt.h"