2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -37,6 +37,7 @@
 
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	/* setup the ballooning information */
 	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
 	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -44,7 +45,7 @@
 	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
 
-	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
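The capability rename above is worth a note: VGT_CAPS_FULL_48BIT_PPGTT advertised one specific address width, while VGT_CAPS_FULL_PPGTT only tells the guest that full PPGTT is available and leaves the width to normal GTT probing. A minimal sketch of the guest-side consumer, assuming illustrative names (pvinfo_caps, has_full_ppgtt) that are not part of this patch:

	/* guest i915 reads the caps word from the PVINFO page once at probe
	 * time and feature-checks individual bits afterwards. */
	u32 pvinfo_caps = readl(pvinfo_mmio + offsetof(struct vgt_if, vgt_caps));
	bool has_full_ppgtt = pvinfo_caps & VGT_CAPS_FULL_PPGTT;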
@@ -69,7 +70,7 @@
 		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
 	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
 
-	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
 #define VGPU_MAX_WEIGHT 16
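drm_WARN_ON() is why the i915 pointer was introduced at the top of populate_pvinfo_page(): the device-aware variant tags the warning with the driver and device name, so on multi-GPU hosts the splat is attributable. Roughly, from include/drm/drm_print.h of this era:

	#define drm_WARN(drm, condition, format, arg...) \
		WARN(condition, "%s %s: " format, \
		     dev_driver_string((drm)->dev), dev_name((drm)->dev), ## arg)

	#define drm_WARN_ON(drm, x) \
		drm_WARN((drm), (x), "%s", "WARN_ON(" __stringify(x) ")")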
@@ -123,7 +124,7 @@
 	 */
 	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
 	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
+	num_types = ARRAY_SIZE(vgpu_types);
 
 	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
 			     GFP_KERNEL);
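ARRAY_SIZE() is more than shorthand for the open-coded division: it refuses to compile when its argument has decayed to a pointer, a case sizeof(arr) / sizeof(arr[0]) would silently miscount. Paraphrasing the kernel helper:

	/* include/linux/kernel.h, paraphrased: __must_be_array() evaluates
	 * to 0 for a true array and breaks the build for a pointer. */
	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))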
@@ -148,12 +149,12 @@
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
 						   high_avail / vgpu_types[i].high_mm);
 
-		if (IS_GEN8(gvt->dev_priv))
+		if (IS_GEN(gvt->gt->i915, 8))
 			sprintf(gvt->types[i].name, "GVTg_V4_%s",
-					vgpu_types[i].name);
-		else if (IS_GEN9(gvt->dev_priv))
+				vgpu_types[i].name);
+		else if (IS_GEN(gvt->gt->i915, 9))
 			sprintf(gvt->types[i].name, "GVTg_V5_%s",
-					vgpu_types[i].name);
+				vgpu_types[i].name);
 
 		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
 			     i, gvt->types[i].name,
@@ -212,9 +213,9 @@
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	vgpu->active = true;
-	mutex_unlock(&vgpu->gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
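Switching from the global gvt->lock to the per-vGPU vgpu_lock narrows the critical section to the one instance whose ->active flag changes, so activating or deactivating a vGPU no longer serializes unrelated device-model work. A sketch of the fields involved, with the struct trimmed to the relevant members:

	struct intel_vgpu {
		struct intel_gvt *gvt;
		struct mutex vgpu_lock;	/* guards per-vGPU state such as ->active */
		bool active;
		/* ... */
	};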
@@ -256,6 +257,7 @@
 	intel_gvt_deactivate_vgpu(vgpu);
 
 	mutex_lock(&vgpu->vgpu_lock);
+	vgpu->d3_entered = false;
 	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
 	intel_vgpu_dmabuf_cleanup(vgpu);
 	mutex_unlock(&vgpu->vgpu_lock);
@@ -271,8 +273,9 @@
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 
-	WARN(vgpu->active, "vGPU is still active!\n");
+	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
 
 	/*
 	 * remove idr first so later clean can judge if need to stop
@@ -365,6 +368,7 @@
 static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 		struct intel_vgpu_creation_params *param)
 {
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	struct intel_vgpu *vgpu;
 	int ret;
 
@@ -391,6 +395,7 @@
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
+	vgpu->d3_entered = false;
 
 	ret = intel_vgpu_init_mmio(vgpu);
 	if (ret)
@@ -426,11 +431,16 @@
 	if (ret)
 		goto out_clean_submission;
 
-	ret = intel_gvt_debugfs_add_vgpu(vgpu);
+	intel_gvt_debugfs_add_vgpu(vgpu);
+
+	ret = intel_gvt_hypervisor_set_opregion(vgpu);
 	if (ret)
 		goto out_clean_sched_policy;
 
-	ret = intel_gvt_hypervisor_set_opregion(vgpu);
+	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
+		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
+	else
+		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 	if (ret)
 		goto out_clean_sched_policy;
 
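Two behavioral changes ride along in this hunk: the intel_gvt_debugfs_add_vgpu() return value is no longer treated as fatal, and the create path now provisions a virtual EDID on the port where GVT-g models its virtual display, PORT_B on Broadwell/Broxton and PORT_D elsewhere. The new hook presumably follows the usual MPT wrapper shape in gvt/mpt.h; a sketch under that assumption:

	/* MPT-style wrapper (sketch): the hypervisor hook is optional and is
	 * dispatched with the vGPU's hypervisor handle. */
	static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
							int port_num)
	{
		if (!intel_gvt_host.mpt->set_edid)
			return 0;

		return intel_gvt_host.mpt->set_edid(vgpu->handle, port_num);
	}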
@@ -526,11 +536,11 @@
  * GPU engines. For FLR, engine_mask is ignored.
  */
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
@@ -553,10 +563,15 @@
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
 		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-		intel_vgpu_invalidate_ppgtt(vgpu);
+		if (engine_mask == ALL_ENGINES)
+			intel_vgpu_invalidate_ppgtt(vgpu);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
-			intel_vgpu_reset_gtt(vgpu);
+			if (!vgpu->d3_entered) {
+				intel_vgpu_invalidate_ppgtt(vgpu);
+				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+			}
+			intel_vgpu_reset_ggtt(vgpu, true);
 			intel_vgpu_reset_resource(vgpu);
 		}
 
@@ -568,7 +583,14 @@
 		intel_vgpu_reset_cfg_space(vgpu);
 		/* only reset the failsafe mode when dmlr reset */
 		vgpu->failsafe = false;
-		vgpu->pv_notified = false;
+		/*
+		 * PCI_D0 is set before dmlr, so reset d3_entered here
+		 * after done using.
+		 */
+		if (vgpu->d3_entered)
+			vgpu->d3_entered = false;
+		else
+			vgpu->pv_notified = false;
 	}
 }
 
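The d3_entered plumbing across these hunks addresses guest runtime power management: when a guest resumes from D3 it re-enters D0 and GVT performs a device-model-level reset (dmlr), but the guest expects its PPGTT contents to survive suspend. The flag marks a guest D3 entry so that the next dmlr reset skips invalidating and destroying the shadow PPGTT, then clears itself; a genuine device-model reset (d3_entered false) still tears everything down, and only that path re-arms pv_notified. The old catch-all intel_vgpu_reset_gtt() is replaced by an explicit intel_vgpu_reset_ggtt(vgpu, true) plus the conditional PPGTT teardown, which is what makes the D3-aware split possible. A sketch of the producer side, whose exact location and register handling are assumptions not shown in this diff:

	/* somewhere in the vGPU cfg-space write handler (sketch): record a
	 * guest-initiated D3hot entry via the PCI PM control register. */
	if ((pm_ctrl & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot)
		vgpu->d3_entered = true;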