@@ -37,6 +37,7 @@
 
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	/* setup the ballooning information */
 	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
 	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -44,7 +45,7 @@
 	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
 
-	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
 	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
@@ -69,7 +70,7 @@
 		     vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
 	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
 
-	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
 #define VGPU_MAX_WEIGHT 16
@@ -123,7 +124,7 @@
 	 */
 	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
 	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
+	num_types = ARRAY_SIZE(vgpu_types);
 
 	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
			     GFP_KERNEL);
@@ -148,12 +149,12 @@
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
						   high_avail / vgpu_types[i].high_mm);
 
-		if (IS_GEN8(gvt->dev_priv))
+		if (IS_GEN(gvt->gt->i915, 8))
 			sprintf(gvt->types[i].name, "GVTg_V4_%s",
-					vgpu_types[i].name);
-		else if (IS_GEN9(gvt->dev_priv))
+				vgpu_types[i].name);
+		else if (IS_GEN(gvt->gt->i915, 9))
 			sprintf(gvt->types[i].name, "GVTg_V5_%s",
-					vgpu_types[i].name);
+				vgpu_types[i].name);
 
 		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
			     i, gvt->types[i].name,
@@ -212,9 +213,9 @@
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	vgpu->active = true;
-	mutex_unlock(&vgpu->gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -256,6 +257,7 @@
 	intel_gvt_deactivate_vgpu(vgpu);
 
 	mutex_lock(&vgpu->vgpu_lock);
+	vgpu->d3_entered = false;
 	intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
 	intel_vgpu_dmabuf_cleanup(vgpu);
 	mutex_unlock(&vgpu->vgpu_lock);
@@ -271,8 +273,9 @@
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 
-	WARN(vgpu->active, "vGPU is still active!\n");
+	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
 
 	/*
 	 * remove idr first so later clean can judge if need to stop
@@ -365,6 +368,7 @@
 static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
 {
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	struct intel_vgpu *vgpu;
 	int ret;
 
@@ -391,6 +395,7 @@
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
+	vgpu->d3_entered = false;
 
 	ret = intel_vgpu_init_mmio(vgpu);
 	if (ret)
@@ -426,11 +431,16 @@
 	if (ret)
 		goto out_clean_submission;
 
-	ret = intel_gvt_debugfs_add_vgpu(vgpu);
+	intel_gvt_debugfs_add_vgpu(vgpu);
+
+	ret = intel_gvt_hypervisor_set_opregion(vgpu);
 	if (ret)
 		goto out_clean_sched_policy;
 
-	ret = intel_gvt_hypervisor_set_opregion(vgpu);
+	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
+		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
+	else
+		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 	if (ret)
 		goto out_clean_sched_policy;
 
@@ -526,11 +536,11 @@
  * GPU engines. For FLR, engine_mask is ignored.
  */
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
@@ -553,10 +563,15 @@
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
 		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-		intel_vgpu_invalidate_ppgtt(vgpu);
+		if (engine_mask == ALL_ENGINES)
+			intel_vgpu_invalidate_ppgtt(vgpu);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
-			intel_vgpu_reset_gtt(vgpu);
+			if(!vgpu->d3_entered) {
+				intel_vgpu_invalidate_ppgtt(vgpu);
+				intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+			}
+			intel_vgpu_reset_ggtt(vgpu, true);
 			intel_vgpu_reset_resource(vgpu);
 		}
 
@@ -568,7 +583,14 @@
 		intel_vgpu_reset_cfg_space(vgpu);
 		/* only reset the failsafe mode when dmlr reset */
 		vgpu->failsafe = false;
-		vgpu->pv_notified = false;
+		/*
+		 * PCI_D0 is set before dmlr, so reset d3_entered here
+		 * after done using.
+		 */
+		if(vgpu->d3_entered)
+			vgpu->d3_entered = false;
+		else
+			vgpu->pv_notified = false;
 	}
 }
 