2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/gpu/drm/i915/gvt/gvt.h
@@ -52,12 +52,8 @@
 
 #define GVT_MAX_VGPU 8
 
-enum {
-	INTEL_GVT_HYPERVISOR_XEN = 0,
-	INTEL_GVT_HYPERVISOR_KVM,
-};
-
 struct intel_gvt_host {
+	struct device *dev;
 	bool initialized;
 	int hypervisor_type;
 	struct intel_gvt_mpt *mpt;
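
The hypervisor-type enum leaves gvt.h (it moves next to the MPT hooks in hypercall.h), and intel_gvt_host gains a cached struct device pointer. A minimal sketch of what the cached device plausibly enables, assuming the hypervisor backend uses it for DMA mapping of guest pages; map_guest_page() is a hypothetical helper:

    /* Sketch only: map a guest page for device DMA via the cached device. */
    static dma_addr_t map_guest_page(struct page *page)
    {
        return dma_map_page(intel_gvt_host.dev, page, 0, PAGE_SIZE,
                            DMA_BIDIRECTIONAL);
    }
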
@@ -91,14 +87,13 @@
 
 /* Fences owned by a vGPU */
 struct intel_vgpu_fence {
-	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
 	u32 base;
 	u32 size;
 };
 
 struct intel_vgpu_mmio {
 	void *vreg;
-	void *sreg;
 };
 
 #define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,15 +106,14 @@
 struct intel_vgpu_cfg_space {
 	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
 	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+	u32 pmcsr_off;
 };
 
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
 
-#define INTEL_GVT_MAX_PIPE 4
-
 struct intel_vgpu_irq {
 	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
-	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
 		       INTEL_GVT_EVENT_MAX);
 };
 
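
pmcsr_off records where the PCI power-management control/status register sits in the virtual config space, and the flip_done_event bitmap is now sized by i915's own I915_MAX_PIPES instead of a private INTEL_GVT_MAX_PIPE. A hypothetical fragment of a config-write handler using the cached offset to spot a guest D3 entry (it pairs with the d3_entered flag added later in this patch):

    /* Illustrative only; the constants are from <linux/pci_regs.h>. */
    static void sample_pmcsr_write(struct intel_vgpu *vgpu,
                                   unsigned int off, u32 val)
    {
        if (off == vgpu->cfg_space.pmcsr_off &&
            (val & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot)
            vgpu->d3_entered = true;
    }
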
@@ -148,17 +142,21 @@
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 };
 
 struct intel_vgpu_submission {
 	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
 	struct list_head workload_q_head[I915_NUM_ENGINES];
+	struct intel_context *shadow[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
-	struct i915_gem_context *shadow_ctx;
+	union {
+		u64 i915_context_pml4;
+		u64 i915_context_pdps[GEN8_3LVL_PDPES];
+	};
 	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	void *ring_scan_buffer[I915_NUM_ENGINES];
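
Three things change in the submission state: engine masks adopt i915's intel_engine_mask_t type, one pinned struct intel_context per engine replaces the single shadow i915_gem_context, and the anonymous union snapshots the host context's PML4 root (or the three-level PDPs) for later restore. A sketch of how a typed mask is consumed, assuming the for_each_engine_masked() iterator of this i915 vintage:

    /* Illustrative: visit only the engines named in the mask. */
    static void sample_per_engine_reset(struct intel_vgpu *vgpu,
                                        intel_engine_mask_t engine_mask)
    {
        struct intel_engine_cs *engine;
        intel_engine_mask_t tmp;

        for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
            /* per-engine vGPU software reset would go here */
        }
    }
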
@@ -166,6 +164,11 @@
 	const struct intel_vgpu_submission_ops *ops;
 	int virtual_submission_interface;
 	bool active;
+	struct {
+		u32 lrca;
+		bool valid;
+		u64 ring_context_gpa;
+	} last_ctx[I915_NUM_ENGINES];
 };
 
 struct intel_vgpu {
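
last_ctx caches, per engine, the LRCA and ring-context GPA of the most recently shadowed guest context, so an unchanged context image need not be re-read from guest memory on every submission. A hedged sketch of the lookup this enables (field names are from the patch; the helper is illustrative):

    static bool sample_ctx_changed(struct intel_vgpu_submission *s,
                                   int ring_id, u32 lrca)
    {
        if (s->last_ctx[ring_id].valid && s->last_ctx[ring_id].lrca == lrca)
            return false;   /* same image: reuse cached ring_context_gpa */

        s->last_ctx[ring_id].lrca = lrca;
        s->last_ctx[ring_id].valid = false; /* revalidated after the copy */
        return true;
    }
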
@@ -196,43 +199,25 @@
 	struct intel_vgpu_submission submission;
 	struct radix_tree_root page_track_tree;
 	u32 hws_pga[I915_NUM_ENGINES];
+	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+	bool d3_entered;
 
 	struct dentry *debugfs;
 
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-	struct {
-		struct mdev_device *mdev;
-		struct vfio_region *region;
-		int num_regions;
-		struct eventfd_ctx *intx_trigger;
-		struct eventfd_ctx *msi_trigger;
-
-		/*
-		 * Two caches are used to avoid mapping duplicated pages (eg.
-		 * scratch pages). This help to reduce dma setup overhead.
-		 */
-		struct rb_root gfn_cache;
-		struct rb_root dma_addr_cache;
-		unsigned long nr_cache_entries;
-		struct mutex cache_lock;
-
-		struct notifier_block iommu_notifier;
-		struct notifier_block group_notifier;
-		struct kvm *kvm;
-		struct work_struct release_work;
-		atomic_t released;
-		struct vfio_device *vfio_device;
-	} vdev;
-#endif
+	/* Hypervisor-specific device state. */
+	void *vdev;
 
 	struct list_head dmabuf_obj_list_head;
 	struct mutex dmabuf_lock;
 	struct idr object_idr;
 
-	struct completion vblank_done;
-
 	u32 scan_nonprivbb;
 };
+
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+	return vgpu->vdev;
+}
 
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
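
The KVMGT-only vdev block becomes an opaque pointer behind the intel_vgpu_vdev() accessor, so gvt.h stops leaking one backend's state into the core; d3_entered (set on a PCI_D3 write, cleared on device-model-level reset, deliberately not a live PM state) arrives in the same structure, and the unused vblank_done completion goes away. A sketch of how a backend consumes the opaque pointer, modeled on the KVMGT pattern:

    /* Backend-private state; this layout is illustrative. */
    struct kvmgt_vdev {
        struct intel_vgpu *vgpu;
        struct mdev_device *mdev;
        struct eventfd_ctx *msi_trigger;
        /* gfn/dma caches, notifiers, struct kvm *, ... */
    };

    static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
    {
        return intel_vgpu_vdev(vgpu);
    }
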
@@ -270,12 +255,14 @@
 #define F_CMD_ACCESS	(1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED	(1 << 4)
-/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED	(1 << 5)
+/* This reg requires save & restore during host PM suspend/resume */
+#define F_PM_SAVE	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
-/* This reg is saved/restored in context */
-#define F_IN_CTX	(1 << 7)
+/* This reg is in GVT's mmio save-restore list and in hardware
+ * logical context image
+ */
+#define F_SR_IN_CTX	(1 << 7)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
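
Bit 5 is repurposed: the write-only bookkeeping flag F_CMD_ACCESSED gives way to F_PM_SAVE, marking registers that must be saved and restored across host suspend/resume, and F_IN_CTX is renamed F_SR_IN_CTX to say exactly what it asserts. A hypothetical sketch of the suspend-side walk F_PM_SAVE enables (save_mmio_snapshot() is invented for illustration):

    static void sample_save_pm_regs(struct intel_gvt *gvt)
    {
        unsigned int offset;

        for (offset = 0; offset < gvt->device_info.mmio_size; offset += 4) {
            if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
                save_mmio_snapshot(gvt, offset); /* hypothetical helper */
        }
    }
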
@@ -309,7 +296,7 @@
 	/* scheduler scope lock, protect gvt and vgpu schedule related data */
 	struct mutex sched_lock;
 
-	struct drm_i915_private *dev_priv;
+	struct intel_gt *gt;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
 
 	struct intel_gvt_device_info device_info;
@@ -337,6 +324,10 @@
 	struct {
 		struct engine_mmio *mmio;
 		int ctx_mmio_count[I915_NUM_ENGINES];
+		u32 *tlb_mmio_offset_list;
+		u32 tlb_mmio_offset_list_cnt;
+		u32 *mocs_mmio_offset_list;
+		u32 mocs_mmio_offset_list_cnt;
 	} engine_mmio_list;
 
 	struct dentry *debugfs_root;
@@ -375,14 +366,15 @@
 #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
 #define HOST_FENCE 4
 
-/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)
 
-#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
-	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
+
+#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 
 #define gvt_aperture_gmadr_base(gvt) (0)
 #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -393,7 +385,7 @@
 #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
 				   + gvt_hidden_sz(gvt) - 1)
 
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
 
 /* Aperture/GM space definitions for vGPU */
 #define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
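
All aperture/GM geometry now funnels through one gvt_to_ggtt() accessor on the intel_gt instead of poking dev_priv->ggtt directly, and the fence count comes from the GGTT's num_fences. The arithmetic is unchanged; a small sketch of what the macros yield:

    static void sample_geometry(struct intel_gvt *gvt)
    {
        /* One 8-byte PTE per 4 KiB page of total GTT address space. */
        u64 pte_bytes = gvt_ggtt_sz(gvt);
        /* Hidden (high) GM is whatever the mappable aperture doesn't cover. */
        u64 hidden = gvt_hidden_sz(gvt);

        gvt_dbg_core("pte array %llu bytes, hidden gm %llu bytes\n",
                     pte_bytes, hidden);
    }
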
@@ -449,10 +441,6 @@
 	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
 #define vgpu_vreg64(vgpu, offset) \
 	(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
-	(*(u32 *)(vgpu->mmio.sreg + (offset)))
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +476,7 @@
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask);
+				 intel_engine_mask_t engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
@@ -536,6 +524,8 @@
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes);
 
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
 static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
 {
 	/* We are 64bit bar. */
@@ -571,12 +561,12 @@
 	void (*vgpu_deactivate)(struct intel_vgpu *);
 	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
 			const char *name);
-	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
-			struct attribute_group ***intel_vgpu_type_groups);
+	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
 	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
 				     unsigned int);
+	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
 };
 
 
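
get_gvt_attrs() drops its unused type_attrs argument, and the new emulate_hotplug() hook pairs with the intel_vgpu_emulate_hotplug() declaration above, so a hypervisor backend can report virtual connector state changes. A sketch of the call direction, assuming the backend caches the ops table the way KVMGT does:

    static void sample_report_hotplug(const struct intel_gvt_ops *ops,
                                      struct intel_vgpu *vgpu, bool connected)
    {
        if (ops->emulate_hotplug)
            ops->emulate_hotplug(vgpu, connected);
    }
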
@@ -586,14 +576,14 @@
 	GVT_FAILSAFE_GUEST_ERR,
 };
 
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
 {
-	intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(gt->uncore->rpm);
 }
 
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
 {
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
 }
 
 /**
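
The runtime-PM brackets are retargeted from drm_i915_private to intel_gt, taking and dropping the reference through gt->uncore->rpm; the put side becomes the _unchecked variant since no wakeref cookie is threaded through these helpers. Typical usage, sketched:

    static u32 sample_hw_read(struct intel_gvt *gvt, i915_reg_t reg)
    {
        u32 val;

        mmio_hw_access_pre(gvt->gt);            /* wake the device */
        val = intel_uncore_read(gvt->gt->uncore, reg);
        mmio_hw_access_post(gvt->gt);           /* drop the wakeref */
        return val;
    }
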
@@ -609,15 +599,30 @@
 }
 
 /**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
+ * intel_gvt_mmio_is_cmd_accessible - if a MMIO could be accessed by command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if an MMIO is able to be accessed by GPU commands
+ */
+static inline bool intel_gvt_mmio_is_cmd_accessible(
+		struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessible -
+ *		mark a MMIO could be accessible by command
  * @gvt: a GVT device
  * @offset: register offset
  *
 */
-static inline bool intel_gvt_mmio_is_cmd_access(
+static inline void intel_gvt_mmio_set_cmd_accessible(
 		struct intel_gvt *gvt, unsigned int offset)
 {
-	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
 }
 
 /**
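
The misnamed intel_gvt_mmio_is_cmd_access() is split into an honest predicate/setter pair over the same F_CMD_ACCESS bit: one tests whether GPU commands may touch a register, the other grants it. A sketch of the intended division of labor in the command parser (the error code is illustrative):

    static int sample_check_cmd_reg(struct intel_vgpu *vgpu,
                                    unsigned int offset)
    {
        /* Reject an LRI/SRM that targets a register guests must not
         * reach from the ring. */
        if (!intel_gvt_mmio_is_cmd_accessible(vgpu->gvt, offset))
            return -EBADRQC;
        return 0;
    }
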
@@ -630,18 +635,6 @@
 		struct intel_gvt *gvt, unsigned int offset)
 {
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
-}
-
-/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-static inline void intel_gvt_mmio_set_cmd_accessed(
-		struct intel_gvt *gvt, unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
 }
 
 /**
647640 /**
....@@ -660,37 +653,41 @@
660653 }
661654
662655 /**
663
- * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
656
+ * intel_gvt_mmio_is_sr_in_ctx -
657
+ * check if an MMIO has F_SR_IN_CTX mask
664658 * @gvt: a GVT device
665659 * @offset: register offset
666660 *
667661 * Returns:
668
- * True if a MMIO has a in-context mask, false if it isn't.
662
+ * True if an MMIO has an F_SR_IN_CTX mask, false if it isn't.
669663 *
670664 */
671
-static inline bool intel_gvt_mmio_is_in_ctx(
665
+static inline bool intel_gvt_mmio_is_sr_in_ctx(
672666 struct intel_gvt *gvt, unsigned int offset)
673667 {
674
- return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
668
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
675669 }
676670
677671 /**
678
- * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
672
+ * intel_gvt_mmio_set_sr_in_ctx -
673
+ * mask an MMIO in GVT's mmio save-restore list and also
674
+ * in hardware logical context image
679675 * @gvt: a GVT device
680676 * @offset: register offset
681677 *
682678 */
683
-static inline void intel_gvt_mmio_set_in_ctx(
679
+static inline void intel_gvt_mmio_set_sr_in_ctx(
684680 struct intel_gvt *gvt, unsigned int offset)
685681 {
686
- gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
682
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
687683 }
688684
689
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
685
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
690686 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
691
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
687
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
692688 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
693689
690
+int intel_gvt_pm_resume(struct intel_gvt *gvt);
694691
695692 #include "trace.h"
696693 #include "mpt.h"
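
Finally, the in-context helpers are renamed to match F_SR_IN_CTX, the debugfs setup functions stop returning codes nobody checked, and intel_gvt_pm_resume() gives the host resume path an entry point, presumably to restore the F_PM_SAVE register set flagged earlier in this patch. A sketch of where the hook would sit, assuming i915 reaches GVT through dev_priv->gvt:

    /* Illustrative caller; the surrounding resume plumbing is assumed. */
    static int sample_gvt_resume(struct drm_i915_private *i915)
    {
        return i915->gvt ? intel_gvt_pm_resume(i915->gvt) : 0;
    }
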