.. | .. |
---|
49 | 49 | |
---|
50 | 50 | unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) |
---|
51 | 51 | { |
---|
52 | | - if (IS_BROADWELL(gvt->dev_priv)) |
---|
| 52 | + struct drm_i915_private *i915 = gvt->gt->i915; |
---|
| 53 | + |
---|
| 54 | + if (IS_BROADWELL(i915)) |
---|
53 | 55 | return D_BDW; |
---|
54 | | - else if (IS_SKYLAKE(gvt->dev_priv)) |
---|
| 56 | + else if (IS_SKYLAKE(i915)) |
---|
55 | 57 | return D_SKL; |
---|
56 | | - else if (IS_KABYLAKE(gvt->dev_priv)) |
---|
| 58 | + else if (IS_KABYLAKE(i915)) |
---|
57 | 59 | return D_KBL; |
---|
58 | | - else if (IS_BROXTON(gvt->dev_priv)) |
---|
| 60 | + else if (IS_BROXTON(i915)) |
---|
59 | 61 | return D_BXT; |
---|
| 62 | + else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) |
---|
| 63 | + return D_CFL; |
---|
60 | 64 | |
---|
61 | 65 | return 0; |
---|
62 | 66 | } |
---|
.. | .. |
---|
140 | 144 | } |
---|
141 | 145 | |
---|
142 | 146 | /** |
---|
143 | | - * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id |
---|
| 147 | + * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine |
---|
144 | 148 | * @gvt: a GVT device |
---|
145 | 149 | * @offset: register offset |
---|
146 | 150 | * |
---|
147 | 151 | * Returns: |
---|
148 | | - * Ring ID on success, negative error code if failed. |
---|
| 152 | + * The engine containing the offset within its mmio page. |
---|
149 | 153 | */ |
---|
150 | | -int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, |
---|
151 | | - unsigned int offset) |
---|
| 154 | +const struct intel_engine_cs * |
---|
| 155 | +intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset) |
---|
152 | 156 | { |
---|
153 | | - enum intel_engine_id id; |
---|
154 | 157 | struct intel_engine_cs *engine; |
---|
| 158 | + enum intel_engine_id id; |
---|
155 | 159 | |
---|
156 | 160 | offset &= ~GENMASK(11, 0); |
---|
157 | | - for_each_engine(engine, gvt->dev_priv, id) { |
---|
| 161 | + for_each_engine(engine, gvt->gt, id) |
---|
158 | 162 | if (engine->mmio_base == offset) |
---|
159 | | - return id; |
---|
160 | | - } |
---|
161 | | - return -ENODEV; |
---|
| 163 | + return engine; |
---|
| 164 | + |
---|
| 165 | + return NULL; |
---|
162 | 166 | } |
---|
163 | 167 | |
---|
164 | 168 | #define offset_to_fence_num(offset) \ |
---|
.. | .. |
---|
215 | 219 | { |
---|
216 | 220 | u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD; |
---|
217 | 221 | |
---|
218 | | - if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) { |
---|
| 222 | + if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) { |
---|
219 | 223 | if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD) |
---|
220 | 224 | gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id); |
---|
221 | 225 | else if (!ips) |
---|
.. | .. |
---|
251 | 255 | static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, |
---|
252 | 256 | void *p_data, unsigned int bytes) |
---|
253 | 257 | { |
---|
254 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
| 258 | + struct intel_gvt *gvt = vgpu->gvt; |
---|
255 | 259 | unsigned int fence_num = offset_to_fence_num(off); |
---|
256 | 260 | int ret; |
---|
257 | 261 | |
---|
.. | .. |
---|
260 | 264 | return ret; |
---|
261 | 265 | write_vreg(vgpu, off, p_data, bytes); |
---|
262 | 266 | |
---|
263 | | - mmio_hw_access_pre(dev_priv); |
---|
| 267 | + mmio_hw_access_pre(gvt->gt); |
---|
264 | 268 | intel_vgpu_write_fence(vgpu, fence_num, |
---|
265 | 269 | vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); |
---|
266 | | - mmio_hw_access_post(dev_priv); |
---|
| 270 | + mmio_hw_access_post(gvt->gt); |
---|
267 | 271 | return 0; |
---|
268 | 272 | } |
---|
269 | 273 | |
---|
.. | .. |
---|
276 | 280 | unsigned int offset, void *p_data, unsigned int bytes) |
---|
277 | 281 | { |
---|
278 | 282 | u32 old, new; |
---|
279 | | - uint32_t ack_reg_offset; |
---|
| 283 | + u32 ack_reg_offset; |
---|
280 | 284 | |
---|
281 | 285 | old = vgpu_vreg(vgpu, offset); |
---|
282 | 286 | new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); |
---|
283 | 287 | |
---|
284 | | - if (IS_SKYLAKE(vgpu->gvt->dev_priv) |
---|
285 | | - || IS_KABYLAKE(vgpu->gvt->dev_priv) |
---|
286 | | - || IS_BROXTON(vgpu->gvt->dev_priv)) { |
---|
| 288 | + if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) { |
---|
287 | 289 | switch (offset) { |
---|
288 | 290 | case FORCEWAKE_RENDER_GEN9_REG: |
---|
289 | 291 | ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; |
---|
.. | .. |
---|
311 | 313 | static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
312 | 314 | void *p_data, unsigned int bytes) |
---|
313 | 315 | { |
---|
314 | | - unsigned int engine_mask = 0; |
---|
| 316 | + intel_engine_mask_t engine_mask = 0; |
---|
315 | 317 | u32 data; |
---|
316 | 318 | |
---|
317 | 319 | write_vreg(vgpu, offset, p_data, bytes); |
---|
.. | .. |
---|
323 | 325 | } else { |
---|
324 | 326 | if (data & GEN6_GRDOM_RENDER) { |
---|
325 | 327 | gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); |
---|
326 | | - engine_mask |= (1 << RCS); |
---|
| 328 | + engine_mask |= BIT(RCS0); |
---|
327 | 329 | } |
---|
328 | 330 | if (data & GEN6_GRDOM_MEDIA) { |
---|
329 | 331 | gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); |
---|
330 | | - engine_mask |= (1 << VCS); |
---|
| 332 | + engine_mask |= BIT(VCS0); |
---|
331 | 333 | } |
---|
332 | 334 | if (data & GEN6_GRDOM_BLT) { |
---|
333 | 335 | gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); |
---|
334 | | - engine_mask |= (1 << BCS); |
---|
| 336 | + engine_mask |= BIT(BCS0); |
---|
335 | 337 | } |
---|
336 | 338 | if (data & GEN6_GRDOM_VECS) { |
---|
337 | 339 | gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); |
---|
338 | | - engine_mask |= (1 << VECS); |
---|
| 340 | + engine_mask |= BIT(VECS0); |
---|
339 | 341 | } |
---|
340 | 342 | if (data & GEN8_GRDOM_MEDIA2) { |
---|
341 | 343 | gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); |
---|
342 | | - if (HAS_BSD2(vgpu->gvt->dev_priv)) |
---|
343 | | - engine_mask |= (1 << VCS2); |
---|
| 344 | + engine_mask |= BIT(VCS1); |
---|
344 | 345 | } |
---|
| 346 | + if (data & GEN9_GRDOM_GUC) { |
---|
| 347 | + gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); |
---|
| 348 | + vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; |
---|
| 349 | + } |
---|
| 350 | + engine_mask &= vgpu->gvt->gt->info.engine_mask; |
---|
345 | 351 | } |
---|
346 | 352 | |
---|
347 | 353 | /* vgpu_lock already hold by emulate mmio r/w */ |
---|
.. | .. |
---|
456 | 462 | return 0; |
---|
457 | 463 | } |
---|
458 | 464 | |
---|
459 | | -/* ascendingly sorted */ |
---|
| 465 | +/* sorted in ascending order */ |
---|
460 | 466 | static i915_reg_t force_nonpriv_white_list[] = { |
---|
| 467 | + _MMIO(0xd80), |
---|
461 | 468 | GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) |
---|
462 | 469 | GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) |
---|
| 470 | + CL_PRIMITIVES_COUNT, //_MMIO(0x2340) |
---|
| 471 | + PS_INVOCATION_COUNT, //_MMIO(0x2348) |
---|
| 472 | + PS_DEPTH_COUNT, //_MMIO(0x2350) |
---|
463 | 473 | GEN8_CS_CHICKEN1,//_MMIO(0x2580) |
---|
464 | 474 | _MMIO(0x2690), |
---|
465 | 475 | _MMIO(0x2694), |
---|
466 | 476 | _MMIO(0x2698), |
---|
| 477 | + _MMIO(0x2754), |
---|
| 478 | + _MMIO(0x28a0), |
---|
467 | 479 | _MMIO(0x4de0), |
---|
468 | 480 | _MMIO(0x4de4), |
---|
469 | 481 | _MMIO(0x4dfc), |
---|
.. | .. |
---|
475 | 487 | _MMIO(0x7704), |
---|
476 | 488 | _MMIO(0x7708), |
---|
477 | 489 | _MMIO(0x770c), |
---|
| 490 | + _MMIO(0x83a8), |
---|
478 | 491 | _MMIO(0xb110), |
---|
479 | 492 | GEN8_L3SQCREG4,//_MMIO(0xb118) |
---|
480 | 493 | _MMIO(0xe100), |
---|
481 | 494 | _MMIO(0xe18c), |
---|
482 | 495 | _MMIO(0xe48c), |
---|
483 | 496 | _MMIO(0xe5f4), |
---|
| 497 | + _MMIO(0x64844), |
---|
484 | 498 | }; |
---|
485 | 499 | |
---|
486 | 500 | /* a simple bsearch */ |
---|
487 | | -static inline bool in_whitelist(unsigned int reg) |
---|
| 501 | +static inline bool in_whitelist(u32 reg) |
---|
488 | 502 | { |
---|
489 | 503 | int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); |
---|
490 | 504 | i915_reg_t *array = force_nonpriv_white_list; |
---|
.. | .. |
---|
505 | 519 | static int force_nonpriv_write(struct intel_vgpu *vgpu, |
---|
506 | 520 | unsigned int offset, void *p_data, unsigned int bytes) |
---|
507 | 521 | { |
---|
508 | | - u32 reg_nonpriv = *(u32 *)p_data; |
---|
509 | | - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); |
---|
510 | | - u32 ring_base; |
---|
511 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
512 | | - int ret = -EINVAL; |
---|
| 522 | + u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2); |
---|
| 523 | + const struct intel_engine_cs *engine = |
---|
| 524 | + intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); |
---|
513 | 525 | |
---|
514 | | - if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) { |
---|
515 | | - gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n", |
---|
516 | | - vgpu->id, ring_id, offset, bytes); |
---|
517 | | - return ret; |
---|
| 526 | + if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) { |
---|
| 527 | + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", |
---|
| 528 | + vgpu->id, offset, bytes); |
---|
| 529 | + return -EINVAL; |
---|
518 | 530 | } |
---|
519 | 531 | |
---|
520 | | - ring_base = dev_priv->engine[ring_id]->mmio_base; |
---|
521 | | - |
---|
522 | | - if (in_whitelist(reg_nonpriv) || |
---|
523 | | - reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) { |
---|
524 | | - ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data, |
---|
525 | | - bytes); |
---|
526 | | - } else |
---|
| 532 | + if (!in_whitelist(reg_nonpriv) && |
---|
| 533 | + reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) { |
---|
527 | 534 | gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n", |
---|
528 | 535 | vgpu->id, reg_nonpriv, offset); |
---|
| 536 | + } else |
---|
| 537 | + intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); |
---|
529 | 538 | |
---|
530 | 539 | return 0; |
---|
531 | 540 | } |
---|
.. | .. |
---|
651 | 660 | else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) |
---|
652 | 661 | index = FDI_RX_IMR_TO_PIPE(offset); |
---|
653 | 662 | else { |
---|
654 | | - gvt_vgpu_err("Unsupport registers %x\n", offset); |
---|
| 663 | + gvt_vgpu_err("Unsupported registers %x\n", offset); |
---|
655 | 664 | return -EINVAL; |
---|
656 | 665 | } |
---|
657 | 666 | |
---|
.. | .. |
---|
748 | 757 | static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
749 | 758 | void *p_data, unsigned int bytes) |
---|
750 | 759 | { |
---|
751 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
752 | | - unsigned int index = DSPSURF_TO_PIPE(offset); |
---|
753 | | - i915_reg_t surflive_reg = DSPSURFLIVE(index); |
---|
754 | | - int flip_event[] = { |
---|
755 | | - [PIPE_A] = PRIMARY_A_FLIP_DONE, |
---|
756 | | - [PIPE_B] = PRIMARY_B_FLIP_DONE, |
---|
757 | | - [PIPE_C] = PRIMARY_C_FLIP_DONE, |
---|
758 | | - }; |
---|
| 760 | + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; |
---|
| 761 | + u32 pipe = DSPSURF_TO_PIPE(offset); |
---|
| 762 | + int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); |
---|
759 | 763 | |
---|
760 | 764 | write_vreg(vgpu, offset, p_data, bytes); |
---|
761 | | - vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); |
---|
| 765 | + vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); |
---|
762 | 766 | |
---|
763 | | - set_bit(flip_event[index], vgpu->irq.flip_done_event[index]); |
---|
| 767 | + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; |
---|
| 768 | + |
---|
| 769 | + if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP) |
---|
| 770 | + intel_vgpu_trigger_virtual_event(vgpu, event); |
---|
| 771 | + else |
---|
| 772 | + set_bit(event, vgpu->irq.flip_done_event[pipe]); |
---|
| 773 | + |
---|
764 | 774 | return 0; |
---|
765 | 775 | } |
---|
766 | 776 | |
---|
.. | .. |
---|
770 | 780 | static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
771 | 781 | void *p_data, unsigned int bytes) |
---|
772 | 782 | { |
---|
773 | | - unsigned int index = SPRSURF_TO_PIPE(offset); |
---|
774 | | - i915_reg_t surflive_reg = SPRSURFLIVE(index); |
---|
775 | | - int flip_event[] = { |
---|
776 | | - [PIPE_A] = SPRITE_A_FLIP_DONE, |
---|
777 | | - [PIPE_B] = SPRITE_B_FLIP_DONE, |
---|
778 | | - [PIPE_C] = SPRITE_C_FLIP_DONE, |
---|
779 | | - }; |
---|
| 783 | + u32 pipe = SPRSURF_TO_PIPE(offset); |
---|
| 784 | + int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0); |
---|
780 | 785 | |
---|
781 | 786 | write_vreg(vgpu, offset, p_data, bytes); |
---|
782 | | - vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); |
---|
| 787 | + vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); |
---|
783 | 788 | |
---|
784 | | - set_bit(flip_event[index], vgpu->irq.flip_done_event[index]); |
---|
| 789 | + if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP) |
---|
| 790 | + intel_vgpu_trigger_virtual_event(vgpu, event); |
---|
| 791 | + else |
---|
| 792 | + set_bit(event, vgpu->irq.flip_done_event[pipe]); |
---|
| 793 | + |
---|
| 794 | + return 0; |
---|
| 795 | +} |
---|
| 796 | + |
---|
| 797 | +static int reg50080_mmio_write(struct intel_vgpu *vgpu, |
---|
| 798 | + unsigned int offset, void *p_data, |
---|
| 799 | + unsigned int bytes) |
---|
| 800 | +{ |
---|
| 801 | + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; |
---|
| 802 | + enum pipe pipe = REG_50080_TO_PIPE(offset); |
---|
| 803 | + enum plane_id plane = REG_50080_TO_PLANE(offset); |
---|
| 804 | + int event = SKL_FLIP_EVENT(pipe, plane); |
---|
| 805 | + |
---|
| 806 | + write_vreg(vgpu, offset, p_data, bytes); |
---|
| 807 | + if (plane == PLANE_PRIMARY) { |
---|
| 808 | + vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); |
---|
| 809 | + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; |
---|
| 810 | + } else { |
---|
| 811 | + vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset); |
---|
| 812 | + } |
---|
| 813 | + |
---|
| 814 | + if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC) |
---|
| 815 | + intel_vgpu_trigger_virtual_event(vgpu, event); |
---|
| 816 | + else |
---|
| 817 | + set_bit(event, vgpu->irq.flip_done_event[pipe]); |
---|
| 818 | + |
---|
785 | 819 | return 0; |
---|
786 | 820 | } |
---|
787 | 821 | |
---|
788 | 822 | static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, |
---|
789 | 823 | unsigned int reg) |
---|
790 | 824 | { |
---|
791 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
| 825 | + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; |
---|
792 | 826 | enum intel_gvt_event_type event; |
---|
793 | 827 | |
---|
794 | | - if (reg == _DPA_AUX_CH_CTL) |
---|
| 828 | + if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) |
---|
795 | 829 | event = AUX_CHANNEL_A; |
---|
796 | | - else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL) |
---|
| 830 | + else if (reg == _PCH_DPB_AUX_CH_CTL || |
---|
| 831 | + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B))) |
---|
797 | 832 | event = AUX_CHANNEL_B; |
---|
798 | | - else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL) |
---|
| 833 | + else if (reg == _PCH_DPC_AUX_CH_CTL || |
---|
| 834 | + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C))) |
---|
799 | 835 | event = AUX_CHANNEL_C; |
---|
800 | | - else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL) |
---|
| 836 | + else if (reg == _PCH_DPD_AUX_CH_CTL || |
---|
| 837 | + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D))) |
---|
801 | 838 | event = AUX_CHANNEL_D; |
---|
802 | 839 | else { |
---|
803 | | - WARN_ON(true); |
---|
| 840 | + drm_WARN_ON(&dev_priv->drm, true); |
---|
804 | 841 | return -EINVAL; |
---|
805 | 842 | } |
---|
806 | 843 | |
---|
.. | .. |
---|
832 | 869 | } |
---|
833 | 870 | |
---|
834 | 871 | static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd, |
---|
835 | | - uint8_t t) |
---|
| 872 | + u8 t) |
---|
836 | 873 | { |
---|
837 | 874 | if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) { |
---|
838 | 875 | /* training pattern 1 for CR */ |
---|
.. | .. |
---|
888 | 925 | write_vreg(vgpu, offset, p_data, bytes); |
---|
889 | 926 | data = vgpu_vreg(vgpu, offset); |
---|
890 | 927 | |
---|
891 | | - if ((IS_SKYLAKE(vgpu->gvt->dev_priv) |
---|
892 | | - || IS_KABYLAKE(vgpu->gvt->dev_priv) |
---|
893 | | - || IS_BROXTON(vgpu->gvt->dev_priv)) |
---|
| 928 | + if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9) |
---|
894 | 929 | && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { |
---|
895 | 930 | /* SKL DPB/C/D aux ctl register changed */ |
---|
896 | 931 | return 0; |
---|
897 | | - } else if (IS_BROADWELL(vgpu->gvt->dev_priv) && |
---|
| 932 | + } else if (IS_BROADWELL(vgpu->gvt->gt->i915) && |
---|
898 | 933 | offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) { |
---|
899 | 934 | /* write to the data registers */ |
---|
900 | 935 | return 0; |
---|
.. | .. |
---|
918 | 953 | |
---|
919 | 954 | if (op == GVT_AUX_NATIVE_WRITE) { |
---|
920 | 955 | int t; |
---|
921 | | - uint8_t buf[16]; |
---|
| 956 | + u8 buf[16]; |
---|
922 | 957 | |
---|
923 | 958 | if ((addr + len + 1) >= DPCD_SIZE) { |
---|
924 | 959 | /* |
---|
.. | .. |
---|
1182 | 1217 | |
---|
1183 | 1218 | static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) |
---|
1184 | 1219 | { |
---|
1185 | | - intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; |
---|
| 1220 | + enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; |
---|
1186 | 1221 | struct intel_vgpu_mm *mm; |
---|
1187 | 1222 | u64 *pdps; |
---|
1188 | 1223 | |
---|
.. | .. |
---|
1191 | 1226 | switch (notification) { |
---|
1192 | 1227 | case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: |
---|
1193 | 1228 | root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; |
---|
1194 | | - /* fall through */ |
---|
| 1229 | + fallthrough; |
---|
1195 | 1230 | case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: |
---|
1196 | 1231 | mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); |
---|
1197 | 1232 | return PTR_ERR_OR_ZERO(mm); |
---|
.. | .. |
---|
1210 | 1245 | |
---|
1211 | 1246 | static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) |
---|
1212 | 1247 | { |
---|
1213 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
1214 | | - struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; |
---|
| 1248 | + struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj; |
---|
1215 | 1249 | char *env[3] = {NULL, NULL, NULL}; |
---|
1216 | 1250 | char vmid_str[20]; |
---|
1217 | 1251 | char display_ready_str[20]; |
---|
.. | .. |
---|
1228 | 1262 | static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
1229 | 1263 | void *p_data, unsigned int bytes) |
---|
1230 | 1264 | { |
---|
1231 | | - u32 data; |
---|
1232 | | - int ret; |
---|
1233 | | - |
---|
1234 | | - write_vreg(vgpu, offset, p_data, bytes); |
---|
1235 | | - data = vgpu_vreg(vgpu, offset); |
---|
| 1265 | + u32 data = *(u32 *)p_data; |
---|
| 1266 | + bool invalid_write = false; |
---|
1236 | 1267 | |
---|
1237 | 1268 | switch (offset) { |
---|
1238 | 1269 | case _vgtif_reg(display_ready): |
---|
1239 | 1270 | send_display_ready_uevent(vgpu, data ? 1 : 0); |
---|
1240 | 1271 | break; |
---|
1241 | 1272 | case _vgtif_reg(g2v_notify): |
---|
1242 | | - ret = handle_g2v_notification(vgpu, data); |
---|
| 1273 | + handle_g2v_notification(vgpu, data); |
---|
1243 | 1274 | break; |
---|
1244 | 1275 | /* add xhot and yhot to handled list to avoid error log */ |
---|
1245 | 1276 | case _vgtif_reg(cursor_x_hot): |
---|
.. | .. |
---|
1256 | 1287 | case _vgtif_reg(execlist_context_descriptor_hi): |
---|
1257 | 1288 | break; |
---|
1258 | 1289 | case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): |
---|
| 1290 | + invalid_write = true; |
---|
1259 | 1291 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); |
---|
1260 | 1292 | break; |
---|
1261 | 1293 | default: |
---|
| 1294 | + invalid_write = true; |
---|
1262 | 1295 | gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", |
---|
1263 | 1296 | offset, bytes, data); |
---|
1264 | 1297 | break; |
---|
1265 | 1298 | } |
---|
| 1299 | + |
---|
| 1300 | + if (!invalid_write) |
---|
| 1301 | + write_vreg(vgpu, offset, p_data, bytes); |
---|
| 1302 | + |
---|
1266 | 1303 | return 0; |
---|
1267 | 1304 | } |
---|
1268 | 1305 | |
---|
1269 | 1306 | static int pf_write(struct intel_vgpu *vgpu, |
---|
1270 | 1307 | unsigned int offset, void *p_data, unsigned int bytes) |
---|
1271 | 1308 | { |
---|
| 1309 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
1272 | 1310 | u32 val = *(u32 *)p_data; |
---|
1273 | 1311 | |
---|
1274 | 1312 | if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || |
---|
1275 | 1313 | offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || |
---|
1276 | 1314 | offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) { |
---|
1277 | | - WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n", |
---|
1278 | | - vgpu->id); |
---|
| 1315 | + drm_WARN_ONCE(&i915->drm, true, |
---|
| 1316 | + "VM(%d): guest is trying to scaling a plane\n", |
---|
| 1317 | + vgpu->id); |
---|
1279 | 1318 | return 0; |
---|
1280 | 1319 | } |
---|
1281 | 1320 | |
---|
.. | .. |
---|
1287 | 1326 | { |
---|
1288 | 1327 | write_vreg(vgpu, offset, p_data, bytes); |
---|
1289 | 1328 | |
---|
1290 | | - if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL)) |
---|
| 1329 | + if (vgpu_vreg(vgpu, offset) & |
---|
| 1330 | + HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL)) |
---|
1291 | 1331 | vgpu_vreg(vgpu, offset) |= |
---|
1292 | | - HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL); |
---|
| 1332 | + HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); |
---|
1293 | 1333 | else |
---|
1294 | 1334 | vgpu_vreg(vgpu, offset) &= |
---|
1295 | | - ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL); |
---|
| 1335 | + ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); |
---|
1296 | 1336 | return 0; |
---|
1297 | 1337 | } |
---|
1298 | 1338 | |
---|
.. | .. |
---|
1322 | 1362 | static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
1323 | 1363 | void *p_data, unsigned int bytes) |
---|
1324 | 1364 | { |
---|
| 1365 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
1325 | 1366 | u32 mode; |
---|
1326 | 1367 | |
---|
1327 | 1368 | write_vreg(vgpu, offset, p_data, bytes); |
---|
1328 | 1369 | mode = vgpu_vreg(vgpu, offset); |
---|
1329 | 1370 | |
---|
1330 | 1371 | if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { |
---|
1331 | | - WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n", |
---|
| 1372 | + drm_WARN_ONCE(&i915->drm, 1, |
---|
| 1373 | + "VM(%d): iGVT-g doesn't support GuC\n", |
---|
1332 | 1374 | vgpu->id); |
---|
1333 | 1375 | return 0; |
---|
1334 | 1376 | } |
---|
.. | .. |
---|
1339 | 1381 | static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
1340 | 1382 | void *p_data, unsigned int bytes) |
---|
1341 | 1383 | { |
---|
1342 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
| 1384 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
1343 | 1385 | u32 trtte = *(u32 *)p_data; |
---|
1344 | 1386 | |
---|
1345 | 1387 | if ((trtte & 1) && (trtte & (1 << 1)) == 0) { |
---|
1346 | | - WARN(1, "VM(%d): Use physical address for TRTT!\n", |
---|
| 1388 | + drm_WARN(&i915->drm, 1, |
---|
| 1389 | + "VM(%d): Use physical address for TRTT!\n", |
---|
1347 | 1390 | vgpu->id); |
---|
1348 | 1391 | return -EINVAL; |
---|
1349 | 1392 | } |
---|
1350 | 1393 | write_vreg(vgpu, offset, p_data, bytes); |
---|
1351 | | - /* TRTTE is not per-context */ |
---|
1352 | | - |
---|
1353 | | - mmio_hw_access_pre(dev_priv); |
---|
1354 | | - I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); |
---|
1355 | | - mmio_hw_access_post(dev_priv); |
---|
1356 | 1394 | |
---|
1357 | 1395 | return 0; |
---|
1358 | 1396 | } |
---|
.. | .. |
---|
1360 | 1398 | static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
1361 | 1399 | void *p_data, unsigned int bytes) |
---|
1362 | 1400 | { |
---|
1363 | | - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
---|
1364 | | - u32 val = *(u32 *)p_data; |
---|
1365 | | - |
---|
1366 | | - if (val & 1) { |
---|
1367 | | - /* unblock hw logic */ |
---|
1368 | | - mmio_hw_access_pre(dev_priv); |
---|
1369 | | - I915_WRITE(_MMIO(offset), val); |
---|
1370 | | - mmio_hw_access_post(dev_priv); |
---|
1371 | | - } |
---|
1372 | 1401 | write_vreg(vgpu, offset, p_data, bytes); |
---|
1373 | 1402 | return 0; |
---|
1374 | 1403 | } |
---|
.. | .. |
---|
1404 | 1433 | |
---|
1405 | 1434 | switch (cmd) { |
---|
1406 | 1435 | case GEN9_PCODE_READ_MEM_LATENCY: |
---|
1407 | | - if (IS_SKYLAKE(vgpu->gvt->dev_priv) |
---|
1408 | | - || IS_KABYLAKE(vgpu->gvt->dev_priv)) { |
---|
| 1436 | + if (IS_SKYLAKE(vgpu->gvt->gt->i915) || |
---|
| 1437 | + IS_KABYLAKE(vgpu->gvt->gt->i915) || |
---|
| 1438 | + IS_COFFEELAKE(vgpu->gvt->gt->i915) || |
---|
| 1439 | + IS_COMETLAKE(vgpu->gvt->gt->i915)) { |
---|
1409 | 1440 | /** |
---|
1410 | 1441 | * "Read memory latency" command on gen9. |
---|
1411 | 1442 | * Below memory latency values are read |
---|
.. | .. |
---|
1415 | 1446 | *data0 = 0x1e1a1100; |
---|
1416 | 1447 | else |
---|
1417 | 1448 | *data0 = 0x61514b3d; |
---|
1418 | | - } else if (IS_BROXTON(vgpu->gvt->dev_priv)) { |
---|
| 1449 | + } else if (IS_BROXTON(vgpu->gvt->gt->i915)) { |
---|
1419 | 1450 | /** |
---|
1420 | 1451 | * "Read memory latency" command on gen9. |
---|
1421 | 1452 | * Below memory latency values are read |
---|
.. | .. |
---|
1428 | 1459 | } |
---|
1429 | 1460 | break; |
---|
1430 | 1461 | case SKL_PCODE_CDCLK_CONTROL: |
---|
1431 | | - if (IS_SKYLAKE(vgpu->gvt->dev_priv) |
---|
1432 | | - || IS_KABYLAKE(vgpu->gvt->dev_priv)) |
---|
| 1462 | + if (IS_SKYLAKE(vgpu->gvt->gt->i915) || |
---|
| 1463 | + IS_KABYLAKE(vgpu->gvt->gt->i915) || |
---|
| 1464 | + IS_COFFEELAKE(vgpu->gvt->gt->i915) || |
---|
| 1465 | + IS_COMETLAKE(vgpu->gvt->gt->i915)) |
---|
1433 | 1466 | *data0 = SKL_CDCLK_READY_FOR_CHANGE; |
---|
1434 | 1467 | break; |
---|
1435 | 1468 | case GEN6_PCODE_READ_RC6VIDS: |
---|
.. | .. |
---|
1453 | 1486 | void *p_data, unsigned int bytes) |
---|
1454 | 1487 | { |
---|
1455 | 1488 | u32 value = *(u32 *)p_data; |
---|
1456 | | - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); |
---|
| 1489 | + const struct intel_engine_cs *engine = |
---|
| 1490 | + intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); |
---|
1457 | 1491 | |
---|
1458 | | - if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { |
---|
| 1492 | + if (value != 0 && |
---|
| 1493 | + !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { |
---|
1459 | 1494 | gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n", |
---|
1460 | 1495 | offset, value); |
---|
1461 | 1496 | return -EINVAL; |
---|
1462 | 1497 | } |
---|
| 1498 | + |
---|
1463 | 1499 | /* |
---|
1464 | 1500 | * Need to emulate all the HWSP register write to ensure host can |
---|
1465 | 1501 | * update the VM CSB status correctly. Here listed registers can |
---|
1466 | 1502 | * support BDW, SKL or other platforms with same HWSP registers. |
---|
1467 | 1503 | */ |
---|
1468 | | - if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) { |
---|
| 1504 | + if (unlikely(!engine)) { |
---|
1469 | 1505 | gvt_vgpu_err("access unknown hardware status page register:0x%x\n", |
---|
1470 | 1506 | offset); |
---|
1471 | 1507 | return -EINVAL; |
---|
1472 | 1508 | } |
---|
1473 | | - vgpu->hws_pga[ring_id] = value; |
---|
| 1509 | + vgpu->hws_pga[engine->id] = value; |
---|
1474 | 1510 | gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n", |
---|
1475 | 1511 | vgpu->id, value, offset); |
---|
1476 | 1512 | |
---|
.. | .. |
---|
1482 | 1518 | { |
---|
1483 | 1519 | u32 v = *(u32 *)p_data; |
---|
1484 | 1520 | |
---|
1485 | | - if (IS_BROXTON(vgpu->gvt->dev_priv)) |
---|
| 1521 | + if (IS_BROXTON(vgpu->gvt->gt->i915)) |
---|
1486 | 1522 | v &= (1 << 31) | (1 << 29); |
---|
1487 | 1523 | else |
---|
1488 | 1524 | v &= (1 << 31) | (1 << 29) | (1 << 9) | |
---|
.. | .. |
---|
1608 | 1644 | return 0; |
---|
1609 | 1645 | } |
---|
1610 | 1646 | |
---|
1611 | | -static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, |
---|
| 1647 | +static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, |
---|
1612 | 1648 | unsigned int offset, void *p_data, unsigned int bytes) |
---|
1613 | 1649 | { |
---|
1614 | 1650 | vgpu_vreg(vgpu, offset) = 0; |
---|
| 1651 | + return 0; |
---|
| 1652 | +} |
---|
| 1653 | + |
---|
| 1654 | +/** |
---|
| 1655 | + * FixMe: |
---|
| 1656 | + * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did: |
---|
| 1657 | + * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.) |
---|
| 1658 | + * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing |
---|
| 1659 | + * these MI_BATCH_BUFFER. |
---|
| 1660 | + * Temporarily work around this by setting SNOOP bit for PAT3 used by PPGTT |
---|
| 1661 | + * PML4 PTE: PAT(0) PCD(1) PWT(1). |
---|
| 1662 | + * The performance is still expected to be low, will need further improvement. |
---|
| 1663 | + */ |
---|
| 1664 | +static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
| 1665 | + void *p_data, unsigned int bytes) |
---|
| 1666 | +{ |
---|
| 1667 | + u64 pat = |
---|
| 1668 | + GEN8_PPAT(0, CHV_PPAT_SNOOP) | |
---|
| 1669 | + GEN8_PPAT(1, 0) | |
---|
| 1670 | + GEN8_PPAT(2, 0) | |
---|
| 1671 | + GEN8_PPAT(3, CHV_PPAT_SNOOP) | |
---|
| 1672 | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | |
---|
| 1673 | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | |
---|
| 1674 | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | |
---|
| 1675 | + GEN8_PPAT(7, CHV_PPAT_SNOOP); |
---|
| 1676 | + |
---|
| 1677 | + vgpu_vreg(vgpu, offset) = lower_32_bits(pat); |
---|
| 1678 | + |
---|
| 1679 | + return 0; |
---|
| 1680 | +} |
---|
| 1681 | + |
---|
| 1682 | +static int guc_status_read(struct intel_vgpu *vgpu, |
---|
| 1683 | + unsigned int offset, void *p_data, |
---|
| 1684 | + unsigned int bytes) |
---|
| 1685 | +{ |
---|
| 1686 | + /* keep MIA_IN_RESET before clearing */ |
---|
| 1687 | + read_vreg(vgpu, offset, p_data, bytes); |
---|
| 1688 | + vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET; |
---|
1615 | 1689 | return 0; |
---|
1616 | 1690 | } |
---|
1617 | 1691 | |
---|
.. | .. |
---|
1619 | 1693 | unsigned int offset, void *p_data, unsigned int bytes) |
---|
1620 | 1694 | { |
---|
1621 | 1695 | struct intel_gvt *gvt = vgpu->gvt; |
---|
1622 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
---|
1623 | | - int ring_id; |
---|
1624 | | - u32 ring_base; |
---|
| 1696 | + const struct intel_engine_cs *engine = |
---|
| 1697 | + intel_gvt_render_mmio_to_engine(gvt, offset); |
---|
1625 | 1698 | |
---|
1626 | | - ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset); |
---|
1627 | 1699 | /** |
---|
1628 | 1700 | * Read HW reg in following case |
---|
1629 | 1701 | * a. the offset isn't a ring mmio |
---|
1630 | 1702 | * b. the offset's ring is running on hw. |
---|
1631 | 1703 | * c. the offset is ring time stamp mmio |
---|
1632 | 1704 | */ |
---|
1633 | | - if (ring_id >= 0) |
---|
1634 | | - ring_base = dev_priv->engine[ring_id]->mmio_base; |
---|
1635 | 1705 | |
---|
1636 | | - if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] || |
---|
1637 | | - offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) || |
---|
1638 | | - offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) { |
---|
1639 | | - mmio_hw_access_pre(dev_priv); |
---|
1640 | | - vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); |
---|
1641 | | - mmio_hw_access_post(dev_priv); |
---|
| 1706 | + if (!engine || |
---|
| 1707 | + vgpu == gvt->scheduler.engine_owner[engine->id] || |
---|
| 1708 | + offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) || |
---|
| 1709 | + offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) { |
---|
| 1710 | + mmio_hw_access_pre(gvt->gt); |
---|
| 1711 | + vgpu_vreg(vgpu, offset) = |
---|
| 1712 | + intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); |
---|
| 1713 | + mmio_hw_access_post(gvt->gt); |
---|
1642 | 1714 | } |
---|
1643 | 1715 | |
---|
1644 | 1716 | return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); |
---|
.. | .. |
---|
1647 | 1719 | static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
---|
1648 | 1720 | void *p_data, unsigned int bytes) |
---|
1649 | 1721 | { |
---|
1650 | | - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); |
---|
| 1722 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
| 1723 | + const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); |
---|
1651 | 1724 | struct intel_vgpu_execlist *execlist; |
---|
1652 | 1725 | u32 data = *(u32 *)p_data; |
---|
1653 | 1726 | int ret = 0; |
---|
1654 | 1727 | |
---|
1655 | | - if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) |
---|
| 1728 | + if (drm_WARN_ON(&i915->drm, !engine)) |
---|
1656 | 1729 | return -EINVAL; |
---|
1657 | 1730 | |
---|
1658 | | - execlist = &vgpu->submission.execlist[ring_id]; |
---|
| 1731 | + /* |
---|
| 1732 | + * Due to d3_entered is used to indicate skipping PPGTT invalidation on |
---|
| 1733 | + * vGPU reset, it's set on D0->D3 on PCI config write, and cleared after |
---|
| 1734 | + * vGPU reset if in resuming. |
---|
| 1735 | + * In S0ix exit, the device power state also transite from D3 to D0 as |
---|
| 1736 | + * S3 resume, but no vGPU reset (triggered by QEMU devic model). After |
---|
| 1737 | + * S0ix exit, all engines continue to work. However the d3_entered |
---|
| 1738 | + * remains set which will break next vGPU reset logic (miss the expected |
---|
| 1739 | + * PPGTT invalidation). |
---|
| 1740 | + * Engines can only work in D0. Thus the 1st elsp write gives GVT a |
---|
| 1741 | + * chance to clear d3_entered. |
---|
| 1742 | + */ |
---|
| 1743 | + if (vgpu->d3_entered) |
---|
| 1744 | + vgpu->d3_entered = false; |
---|
| 1745 | + |
---|
| 1746 | + execlist = &vgpu->submission.execlist[engine->id]; |
---|
1659 | 1747 | |
---|
1660 | 1748 | execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; |
---|
1661 | 1749 | if (execlist->elsp_dwords.index == 3) { |
---|
1662 | | - ret = intel_vgpu_submit_execlist(vgpu, ring_id); |
---|
| 1750 | + ret = intel_vgpu_submit_execlist(vgpu, engine); |
---|
1663 | 1751 | if(ret) |
---|
1664 | | - gvt_vgpu_err("fail submit workload on ring %d\n", |
---|
1665 | | - ring_id); |
---|
| 1752 | + gvt_vgpu_err("fail submit workload on ring %s\n", |
---|
| 1753 | + engine->name); |
---|
1666 | 1754 | } |
---|
1667 | 1755 | |
---|
1668 | 1756 | ++execlist->elsp_dwords.index; |
---|
.. | .. |
---|
1674 | 1762 | void *p_data, unsigned int bytes) |
---|
1675 | 1763 | { |
---|
1676 | 1764 | u32 data = *(u32 *)p_data; |
---|
1677 | | - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); |
---|
| 1765 | + const struct intel_engine_cs *engine = |
---|
| 1766 | + intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); |
---|
1678 | 1767 | bool enable_execlist; |
---|
1679 | 1768 | int ret; |
---|
1680 | 1769 | |
---|
| 1770 | + (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1); |
---|
| 1771 | + if (IS_COFFEELAKE(vgpu->gvt->gt->i915) || |
---|
| 1772 | + IS_COMETLAKE(vgpu->gvt->gt->i915)) |
---|
| 1773 | + (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); |
---|
1681 | 1774 | write_vreg(vgpu, offset, p_data, bytes); |
---|
| 1775 | + |
---|
| 1776 | + if (IS_MASKED_BITS_ENABLED(data, 1)) { |
---|
| 1777 | + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); |
---|
| 1778 | + return 0; |
---|
| 1779 | + } |
---|
| 1780 | + |
---|
| 1781 | + if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) || |
---|
| 1782 | + IS_COMETLAKE(vgpu->gvt->gt->i915)) && |
---|
| 1783 | + IS_MASKED_BITS_ENABLED(data, 2)) { |
---|
| 1784 | + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); |
---|
| 1785 | + return 0; |
---|
| 1786 | + } |
---|
1682 | 1787 | |
---|
1683 | 1788 | /* when PPGTT mode enabled, we will check if guest has called |
---|
1684 | 1789 | * pvinfo, if not, we will treat this guest as non-gvtg-aware |
---|
1685 | 1790 | * guest, and stop emulating its cfg space, mmio, gtt, etc. |
---|
1686 | 1791 | */ |
---|
1687 | | - if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || |
---|
1688 | | - (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) |
---|
1689 | | - && !vgpu->pv_notified) { |
---|
| 1792 | + if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) || |
---|
| 1793 | + IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) && |
---|
| 1794 | + !vgpu->pv_notified) { |
---|
1690 | 1795 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); |
---|
1691 | 1796 | return 0; |
---|
1692 | 1797 | } |
---|
1693 | | - if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) |
---|
1694 | | - || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { |
---|
| 1798 | + if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) || |
---|
| 1799 | + IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) { |
---|
1695 | 1800 | enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); |
---|
1696 | 1801 | |
---|
1697 | | - gvt_dbg_core("EXECLIST %s on ring %d\n", |
---|
1698 | | - (enable_execlist ? "enabling" : "disabling"), |
---|
1699 | | - ring_id); |
---|
| 1802 | + gvt_dbg_core("EXECLIST %s on ring %s\n", |
---|
| 1803 | + (enable_execlist ? "enabling" : "disabling"), |
---|
| 1804 | + engine->name); |
---|
1700 | 1805 | |
---|
1701 | 1806 | if (!enable_execlist) |
---|
1702 | 1807 | return 0; |
---|
1703 | 1808 | |
---|
1704 | 1809 | ret = intel_vgpu_select_submission_ops(vgpu, |
---|
1705 | | - ENGINE_MASK(ring_id), |
---|
1706 | | - INTEL_VGPU_EXECLIST_SUBMISSION); |
---|
| 1810 | + engine->mask, |
---|
| 1811 | + INTEL_VGPU_EXECLIST_SUBMISSION); |
---|
1707 | 1812 | if (ret) |
---|
1708 | 1813 | return ret; |
---|
1709 | 1814 | |
---|
.. | .. |
---|
1722 | 1827 | |
---|
1723 | 1828 | switch (offset) { |
---|
1724 | 1829 | case 0x4260: |
---|
1725 | | - id = RCS; |
---|
| 1830 | + id = RCS0; |
---|
1726 | 1831 | break; |
---|
1727 | 1832 | case 0x4264: |
---|
1728 | | - id = VCS; |
---|
| 1833 | + id = VCS0; |
---|
1729 | 1834 | break; |
---|
1730 | 1835 | case 0x4268: |
---|
1731 | | - id = VCS2; |
---|
| 1836 | + id = VCS1; |
---|
1732 | 1837 | break; |
---|
1733 | 1838 | case 0x426c: |
---|
1734 | | - id = BCS; |
---|
| 1839 | + id = BCS0; |
---|
1735 | 1840 | break; |
---|
1736 | 1841 | case 0x4270: |
---|
1737 | | - id = VECS; |
---|
| 1842 | + id = VECS0; |
---|
1738 | 1843 | break; |
---|
1739 | 1844 | default: |
---|
1740 | 1845 | return -EINVAL; |
---|
.. | .. |
---|
1752 | 1857 | write_vreg(vgpu, offset, p_data, bytes); |
---|
1753 | 1858 | data = vgpu_vreg(vgpu, offset); |
---|
1754 | 1859 | |
---|
1755 | | - if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)) |
---|
| 1860 | + if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET)) |
---|
1756 | 1861 | data |= RESET_CTL_READY_TO_RESET; |
---|
1757 | 1862 | else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)) |
---|
1758 | 1863 | data &= ~RESET_CTL_READY_TO_RESET; |
---|
1759 | 1864 | |
---|
1760 | 1865 | vgpu_vreg(vgpu, offset) = data; |
---|
| 1866 | + return 0; |
---|
| 1867 | +} |
---|
| 1868 | + |
---|
| 1869 | +static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, |
---|
| 1870 | + unsigned int offset, void *p_data, |
---|
| 1871 | + unsigned int bytes) |
---|
| 1872 | +{ |
---|
| 1873 | + u32 data = *(u32 *)p_data; |
---|
| 1874 | + |
---|
| 1875 | + (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18); |
---|
| 1876 | + write_vreg(vgpu, offset, p_data, bytes); |
---|
| 1877 | + |
---|
| 1878 | + if (IS_MASKED_BITS_ENABLED(data, 0x10) || |
---|
| 1879 | + IS_MASKED_BITS_ENABLED(data, 0x8)) |
---|
| 1880 | + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); |
---|
| 1881 | + |
---|
1761 | 1882 | return 0; |
---|
1762 | 1883 | } |
---|
1763 | 1884 | |
---|
.. | .. |
---|
1791 | 1912 | MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ |
---|
1792 | 1913 | MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ |
---|
1793 | 1914 | MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ |
---|
1794 | | - if (HAS_BSD2(dev_priv)) \ |
---|
| 1915 | + if (HAS_ENGINE(gvt->gt, VCS1)) \ |
---|
1795 | 1916 | MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ |
---|
1796 | 1917 | } while (0) |
---|
1797 | 1918 | |
---|
.. | .. |
---|
1812 | 1933 | |
---|
1813 | 1934 | static int init_generic_mmio_info(struct intel_gvt *gvt) |
---|
1814 | 1935 | { |
---|
1815 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
---|
| 1936 | + struct drm_i915_private *dev_priv = gvt->gt->i915; |
---|
1816 | 1937 | int ret; |
---|
1817 | 1938 | |
---|
1818 | | - MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, |
---|
| 1939 | + MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, |
---|
1819 | 1940 | intel_vgpu_reg_imr_handler); |
---|
1820 | 1941 | |
---|
1821 | 1942 | MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); |
---|
.. | .. |
---|
1823 | 1944 | MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); |
---|
1824 | 1945 | MMIO_D(SDEISR, D_ALL); |
---|
1825 | 1946 | |
---|
1826 | | - MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
| 1947 | + MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); |
---|
| 1948 | + |
---|
1827 | 1949 | |
---|
1828 | 1950 | MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL, |
---|
1829 | 1951 | gamw_echo_dev_rw_ia_write); |
---|
.. | .. |
---|
1846 | 1968 | MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL); |
---|
1847 | 1969 | |
---|
1848 | 1970 | MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); |
---|
1849 | | - MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); |
---|
| 1971 | + MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); |
---|
1850 | 1972 | MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); |
---|
1851 | 1973 | MMIO_D(GEN7_CXT_SIZE, D_ALL); |
---|
1852 | 1974 | |
---|
1853 | | - MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
1854 | | - MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
1855 | | - MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
1856 | | - MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL); |
---|
1857 | | - MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); |
---|
| 1975 | + MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); |
---|
| 1976 | + MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); |
---|
| 1977 | + MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL); |
---|
| 1978 | + MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL); |
---|
| 1979 | + MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); |
---|
1858 | 1980 | |
---|
1859 | 1981 | /* RING MODE */ |
---|
1860 | 1982 | #define RING_REG(base) _MMIO((base) + 0x29c) |
---|
.. | .. |
---|
1881 | 2003 | MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
1882 | 2004 | MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
1883 | 2005 | MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
1884 | | - MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
| 2006 | + MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL, |
---|
| 2007 | + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
1885 | 2008 | MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
1886 | 2009 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
1887 | 2010 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
---|
.. | .. |
---|
1967 | 2090 | MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); |
---|
1968 | 2091 | MMIO_D(DSPOFFSET(PIPE_A), D_ALL); |
---|
1969 | 2092 | MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); |
---|
| 2093 | + MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, |
---|
| 2094 | + reg50080_mmio_write); |
---|
1970 | 2095 | |
---|
1971 | 2096 | MMIO_D(DSPCNTR(PIPE_B), D_ALL); |
---|
1972 | 2097 | MMIO_D(DSPADDR(PIPE_B), D_ALL); |
---|
.. | .. |
---|
1976 | 2101 | MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); |
---|
1977 | 2102 | MMIO_D(DSPOFFSET(PIPE_B), D_ALL); |
---|
1978 | 2103 | MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); |
---|
| 2104 | + MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, |
---|
| 2105 | + reg50080_mmio_write); |
---|
1979 | 2106 | |
---|
1980 | 2107 | MMIO_D(DSPCNTR(PIPE_C), D_ALL); |
---|
1981 | 2108 | MMIO_D(DSPADDR(PIPE_C), D_ALL); |
---|
.. | .. |
---|
1985 | 2112 | MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); |
---|
1986 | 2113 | MMIO_D(DSPOFFSET(PIPE_C), D_ALL); |
---|
1987 | 2114 | MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); |
---|
| 2115 | + MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL, |
---|
| 2116 | + reg50080_mmio_write); |
---|
1988 | 2117 | |
---|
1989 | 2118 | MMIO_D(SPRCTL(PIPE_A), D_ALL); |
---|
1990 | 2119 | MMIO_D(SPRLINOFF(PIPE_A), D_ALL); |
---|
.. | .. |
---|
1998 | 2127 | MMIO_D(SPROFFSET(PIPE_A), D_ALL); |
---|
1999 | 2128 | MMIO_D(SPRSCALE(PIPE_A), D_ALL); |
---|
2000 | 2129 | MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); |
---|
| 2130 | + MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL, |
---|
| 2131 | + reg50080_mmio_write); |
---|
2001 | 2132 | |
---|
2002 | 2133 | MMIO_D(SPRCTL(PIPE_B), D_ALL); |
---|
2003 | 2134 | MMIO_D(SPRLINOFF(PIPE_B), D_ALL); |
---|
.. | .. |
---|
2011 | 2142 | MMIO_D(SPROFFSET(PIPE_B), D_ALL); |
---|
2012 | 2143 | MMIO_D(SPRSCALE(PIPE_B), D_ALL); |
---|
2013 | 2144 | MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); |
---|
| 2145 | + MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL, |
---|
| 2146 | + reg50080_mmio_write); |
---|
2014 | 2147 | |
---|
2015 | 2148 | MMIO_D(SPRCTL(PIPE_C), D_ALL); |
---|
2016 | 2149 | MMIO_D(SPRLINOFF(PIPE_C), D_ALL); |
---|
.. | .. |
---|
2024 | 2157 | MMIO_D(SPROFFSET(PIPE_C), D_ALL); |
---|
2025 | 2158 | MMIO_D(SPRSCALE(PIPE_C), D_ALL); |
---|
2026 | 2159 | MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); |
---|
| 2160 | + MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL, |
---|
| 2161 | + reg50080_mmio_write); |
---|
2027 | 2162 | |
---|
2028 | 2163 | MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); |
---|
2029 | 2164 | MMIO_D(HBLANK(TRANSCODER_A), D_ALL); |
---|
.. | .. |
---|
2137 | 2272 | |
---|
2138 | 2273 | MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, |
---|
2139 | 2274 | gmbus_mmio_write); |
---|
2140 | | - MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); |
---|
| 2275 | + MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); |
---|
2141 | 2276 | MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL); |
---|
2142 | 2277 | |
---|
2143 | 2278 | MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, |
---|
.. | .. |
---|
2338 | 2473 | MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); |
---|
2339 | 2474 | MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); |
---|
2340 | 2475 | |
---|
2341 | | - MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL); |
---|
2342 | | - MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL); |
---|
2343 | | - MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL); |
---|
| 2476 | + MMIO_D(WM_LINETIME(PIPE_A), D_ALL); |
---|
| 2477 | + MMIO_D(WM_LINETIME(PIPE_B), D_ALL); |
---|
| 2478 | + MMIO_D(WM_LINETIME(PIPE_C), D_ALL); |
---|
2344 | 2479 | MMIO_D(SPLL_CTL, D_ALL); |
---|
2345 | 2480 | MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL); |
---|
2346 | 2481 | MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL); |
---|
.. | .. |
---|
2462 | 2597 | MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); |
---|
2463 | 2598 | MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); |
---|
2464 | 2599 | MMIO_D(GEN6_PMINTRMSK, D_ALL); |
---|
2465 | | - /* |
---|
2466 | | - * Use an arbitrary power well controlled by the PWR_WELL_CTL |
---|
2467 | | - * register. |
---|
2468 | | - */ |
---|
2469 | | - MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL, |
---|
2470 | | - power_well_ctl_mmio_write); |
---|
2471 | | - MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL, |
---|
2472 | | - power_well_ctl_mmio_write); |
---|
2473 | | - MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
2474 | | - MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL, |
---|
2475 | | - power_well_ctl_mmio_write); |
---|
| 2600 | + MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
| 2601 | + MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
| 2602 | + MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
| 2603 | + MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
2476 | 2604 | MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
2477 | 2605 | MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); |
---|
2478 | 2606 | |
---|
.. | .. |
---|
2603 | 2731 | MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2604 | 2732 | |
---|
2605 | 2733 | MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
2606 | | - MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); |
---|
| 2734 | + MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL); |
---|
2607 | 2735 | MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
2608 | 2736 | MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
2609 | 2737 | MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL); |
---|
.. | .. |
---|
2613 | 2741 | MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2614 | 2742 | MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2615 | 2743 | MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 2744 | + |
---|
| 2745 | + MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); |
---|
| 2746 | + MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); |
---|
| 2747 | + MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL); |
---|
| 2748 | + |
---|
2616 | 2749 | return 0; |
---|
2617 | 2750 | } |
---|
2618 | 2751 | |
---|
2619 | | -static int init_broadwell_mmio_info(struct intel_gvt *gvt) |
---|
| 2752 | +static int init_bdw_mmio_info(struct intel_gvt *gvt) |
---|
2620 | 2753 | { |
---|
2621 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
---|
| 2754 | + struct drm_i915_private *dev_priv = gvt->gt->i915; |
---|
2622 | 2755 | int ret; |
---|
2623 | 2756 | |
---|
2624 | 2757 | MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); |
---|
.. | .. |
---|
2683 | 2816 | MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, |
---|
2684 | 2817 | intel_vgpu_reg_master_irq_handler); |
---|
2685 | 2818 | |
---|
2686 | | - MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, |
---|
| 2819 | + MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0, |
---|
2687 | 2820 | mmio_read_from_hw, NULL); |
---|
2688 | 2821 | |
---|
2689 | 2822 | #define RING_REG(base) _MMIO((base) + 0xd0) |
---|
.. | .. |
---|
2697 | 2830 | #undef RING_REG |
---|
2698 | 2831 | |
---|
2699 | 2832 | #define RING_REG(base) _MMIO((base) + 0x234) |
---|
2700 | | - MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, |
---|
| 2833 | + MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, |
---|
2701 | 2834 | NULL, NULL); |
---|
2702 | 2835 | #undef RING_REG |
---|
2703 | 2836 | |
---|
.. | .. |
---|
2723 | 2856 | |
---|
2724 | 2857 | MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); |
---|
2725 | 2858 | |
---|
2726 | | - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); |
---|
| 2859 | + MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); |
---|
2727 | 2860 | MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); |
---|
2728 | 2861 | |
---|
2729 | 2862 | MMIO_D(GAMTARBMODE, D_BDW_PLUS); |
---|
2730 | 2863 | |
---|
2731 | 2864 | #define RING_REG(base) _MMIO((base) + 0x270) |
---|
2732 | | - MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); |
---|
| 2865 | + MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); |
---|
2733 | 2866 | #undef RING_REG |
---|
2734 | 2867 | |
---|
2735 | | - MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); |
---|
| 2868 | + MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); |
---|
2736 | 2869 | |
---|
2737 | 2870 | MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
2738 | 2871 | |
---|
.. | .. |
---|
2741 | 2874 | MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); |
---|
2742 | 2875 | |
---|
2743 | 2876 | MMIO_D(WM_MISC, D_BDW); |
---|
2744 | | - MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW); |
---|
| 2877 | + MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); |
---|
2745 | 2878 | |
---|
2746 | 2879 | MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); |
---|
2747 | 2880 | MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); |
---|
.. | .. |
---|
2801 | 2934 | MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2802 | 2935 | MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2803 | 2936 | MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 2937 | + MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2804 | 2938 | return 0; |
---|
2805 | 2939 | } |
---|
2806 | 2940 | |
---|
2807 | 2941 | static int init_skl_mmio_info(struct intel_gvt *gvt) |
---|
2808 | 2942 | { |
---|
2809 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
---|
| 2943 | + struct drm_i915_private *dev_priv = gvt->gt->i915; |
---|
2810 | 2944 | int ret; |
---|
2811 | 2945 | |
---|
2812 | 2946 | MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); |
---|
.. | .. |
---|
2816 | 2950 | MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); |
---|
2817 | 2951 | MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); |
---|
2818 | 2952 | |
---|
2819 | | - MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, |
---|
| 2953 | + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, |
---|
2820 | 2954 | dp_aux_ch_ctl_mmio_write); |
---|
2821 | | - MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, |
---|
| 2955 | + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, |
---|
2822 | 2956 | dp_aux_ch_ctl_mmio_write); |
---|
2823 | | - MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, |
---|
| 2957 | + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, |
---|
2824 | 2958 | dp_aux_ch_ctl_mmio_write); |
---|
2825 | 2959 | |
---|
2826 | | - /* |
---|
2827 | | - * Use an arbitrary power well controlled by the PWR_WELL_CTL |
---|
2828 | | - * register. |
---|
2829 | | - */ |
---|
2830 | | - MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS); |
---|
2831 | | - MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, |
---|
2832 | | - skl_power_well_ctl_write); |
---|
| 2960 | + MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); |
---|
| 2961 | + MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); |
---|
2833 | 2962 | |
---|
2834 | | - MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); |
---|
| 2963 | + MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); |
---|
2835 | 2964 | |
---|
2836 | | - MMIO_D(_MMIO(0xa210), D_SKL_PLUS); |
---|
| 2965 | + MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS); |
---|
2837 | 2966 | MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
---|
2838 | 2967 | MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); |
---|
2839 | 2968 | MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2840 | | - MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL); |
---|
2841 | | - MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL); |
---|
2842 | | - MMIO_D(_MMIO(0x45504), D_SKL_PLUS); |
---|
2843 | | - MMIO_D(_MMIO(0x45520), D_SKL_PLUS); |
---|
2844 | | - MMIO_D(_MMIO(0x46000), D_SKL_PLUS); |
---|
2845 | | - MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write); |
---|
2846 | | - MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write); |
---|
2847 | | - MMIO_D(_MMIO(0x6C040), D_SKL_PLUS); |
---|
2848 | | - MMIO_D(_MMIO(0x6C048), D_SKL_PLUS); |
---|
2849 | | - MMIO_D(_MMIO(0x6C050), D_SKL_PLUS); |
---|
2850 | | - MMIO_D(_MMIO(0x6C044), D_SKL_PLUS); |
---|
2851 | | - MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS); |
---|
2852 | | - MMIO_D(_MMIO(0x6C054), D_SKL_PLUS); |
---|
2853 | | - MMIO_D(_MMIO(0x6c058), D_SKL_PLUS); |
---|
2854 | | - MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS); |
---|
2855 | | - MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL); |
---|
| 2969 | + MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 2970 | + MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); |
---|
| 2971 | + MMIO_D(DC_STATE_EN, D_SKL_PLUS); |
---|
| 2972 | + MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); |
---|
| 2973 | + MMIO_D(CDCLK_CTL, D_SKL_PLUS); |
---|
| 2974 | + MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); |
---|
| 2975 | + MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); |
---|
| 2976 | + MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS); |
---|
| 2977 | + MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS); |
---|
| 2978 | + MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS); |
---|
| 2979 | + MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS); |
---|
| 2980 | + MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS); |
---|
| 2981 | + MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS); |
---|
| 2982 | + MMIO_D(DPLL_CTRL1, D_SKL_PLUS); |
---|
| 2983 | + MMIO_D(DPLL_CTRL2, D_SKL_PLUS); |
---|
| 2984 | + MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL); |
---|
2856 | 2985 | |
---|
2857 | 2986 | MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); |
---|
2858 | 2987 | MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); |
---|
.. | .. |
---|
2971 | 3100 | MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); |
---|
2972 | 3101 | MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); |
---|
2973 | 3102 | |
---|
2974 | | - MMIO_D(_MMIO(0x70380), D_SKL_PLUS); |
---|
2975 | | - MMIO_D(_MMIO(0x71380), D_SKL_PLUS); |
---|
| 3103 | + MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS); |
---|
| 3104 | + MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS); |
---|
2976 | 3105 | MMIO_D(_MMIO(0x72380), D_SKL_PLUS); |
---|
2977 | 3106 | MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); |
---|
2978 | | - MMIO_D(_MMIO(0x7039c), D_SKL_PLUS); |
---|
| 3107 | + MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); |
---|
| 3108 | + MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS); |
---|
2979 | 3109 | |
---|
2980 | | - MMIO_D(_MMIO(0x8f074), D_SKL_PLUS); |
---|
2981 | | - MMIO_D(_MMIO(0x8f004), D_SKL_PLUS); |
---|
2982 | | - MMIO_D(_MMIO(0x8f034), D_SKL_PLUS); |
---|
| 3110 | + MMIO_D(CSR_SSP_BASE, D_SKL_PLUS); |
---|
| 3111 | + MMIO_D(CSR_HTP_SKL, D_SKL_PLUS); |
---|
| 3112 | + MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS); |
---|
2983 | 3113 | |
---|
2984 | | - MMIO_D(_MMIO(0xb11c), D_SKL_PLUS); |
---|
| 3114 | + MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
2985 | 3115 | |
---|
2986 | | - MMIO_D(_MMIO(0x51000), D_SKL_PLUS); |
---|
2987 | | - MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS); |
---|
| 3116 | + MMIO_D(SKL_DFSM, D_SKL_PLUS); |
---|
| 3117 | + MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS); |
---|
2988 | 3118 | |
---|
2989 | | - MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, |
---|
| 3119 | + MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, |
---|
2990 | 3120 | NULL, NULL); |
---|
2991 | | - MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, |
---|
| 3121 | + MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, |
---|
2992 | 3122 | NULL, NULL); |
---|
2993 | 3123 | |
---|
2994 | 3124 | MMIO_D(RPM_CONFIG0, D_SKL_PLUS); |
---|
2995 | 3125 | MMIO_D(_MMIO(0xd08), D_SKL_PLUS); |
---|
2996 | 3126 | MMIO_D(RC6_LOCATION, D_SKL_PLUS); |
---|
2997 | | - MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL); |
---|
2998 | | - MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
---|
| 3127 | + MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, |
---|
| 3128 | + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
---|
| 3129 | + MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
---|
2999 | 3130 | NULL, NULL); |
---|
3000 | 3131 | |
---|
3001 | 3132 | /* TRTT */ |
---|
3002 | | - MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
3003 | | - MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
3004 | | - MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
3005 | | - MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
3006 | | - MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
3007 | | - MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS, |
---|
3008 | | - NULL, gen9_trtte_write); |
---|
3009 | | - MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); |
---|
| 3133 | + MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 3134 | + MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 3135 | + MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 3136 | + MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 3137 | + MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 3138 | + MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE, |
---|
| 3139 | + NULL, gen9_trtte_write); |
---|
| 3140 | + MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, |
---|
| 3141 | + NULL, gen9_trtt_chicken_write); |
---|
3010 | 3142 | |
---|
3011 | 3143 | MMIO_D(_MMIO(0x46430), D_SKL_PLUS); |
---|
3012 | 3144 | |
---|
3013 | 3145 | MMIO_D(_MMIO(0x46520), D_SKL_PLUS); |
---|
3014 | 3146 | |
---|
3015 | 3147 | MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); |
---|
3016 | | - MMIO_D(_MMIO(0xb004), D_SKL_PLUS); |
---|
| 3148 | + MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
3017 | 3149 | MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); |
---|
3018 | 3150 | |
---|
3019 | 3151 | MMIO_D(_MMIO(0x65900), D_SKL_PLUS); |
---|
3020 | | - MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS); |
---|
| 3152 | + MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS); |
---|
3021 | 3153 | MMIO_D(_MMIO(0x4068), D_SKL_PLUS); |
---|
3022 | 3154 | MMIO_D(_MMIO(0x67054), D_SKL_PLUS); |
---|
3023 | 3155 | MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); |
---|
.. | .. |
---|
3042 | 3174 | MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); |
---|
3043 | 3175 | |
---|
3044 | 3176 | MMIO_D(_MMIO(0x44500), D_SKL_PLUS); |
---|
3045 | | - MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); |
---|
| 3177 | +#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) |
---|
| 3178 | + MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
---|
| 3179 | + NULL, csfe_chicken1_mmio_write); |
---|
| 3180 | +#undef CSFE_CHICKEN1_REG |
---|
3046 | 3181 | MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
---|
3047 | 3182 | NULL, NULL); |
---|
3048 | 3183 | MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, |
---|
3049 | 3184 | NULL, NULL); |
---|
3050 | 3185 | |
---|
3051 | | - MMIO_D(_MMIO(0x4ab8), D_KBL); |
---|
3052 | | - MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); |
---|
| 3186 | + MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); |
---|
| 3187 | + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); |
---|
3053 | 3188 | |
---|
3054 | 3189 | return 0; |
---|
3055 | 3190 | } |
---|
3056 | 3191 | |
---|
3057 | 3192 | static int init_bxt_mmio_info(struct intel_gvt *gvt) |
---|
3058 | 3193 | { |
---|
3059 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
---|
| 3194 | + struct drm_i915_private *dev_priv = gvt->gt->i915; |
---|
3060 | 3195 | int ret; |
---|
3061 | 3196 | |
---|
3062 | 3197 | MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); |
---|
.. | .. |
---|
3216 | 3351 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); |
---|
3217 | 3352 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); |
---|
3218 | 3353 | |
---|
3219 | | - MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); |
---|
3220 | | - MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); |
---|
3221 | | - |
---|
3222 | 3354 | MMIO_D(RC6_CTX_BASE, D_BXT); |
---|
3223 | 3355 | |
---|
3224 | 3356 | MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); |
---|
3225 | 3357 | MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); |
---|
3226 | 3358 | MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); |
---|
3227 | 3359 | MMIO_D(GEN6_GFXPAUSE, D_BXT); |
---|
3228 | | - MMIO_D(GEN8_L3SQCREG1, D_BXT); |
---|
| 3360 | + MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); |
---|
| 3361 | + MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); |
---|
| 3362 | + MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); |
---|
| 3363 | + MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS, |
---|
| 3364 | + 0, 0, D_BXT, NULL, NULL); |
---|
| 3365 | + MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS, |
---|
| 3366 | + 0, 0, D_BXT, NULL, NULL); |
---|
| 3367 | + MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS, |
---|
| 3368 | + 0, 0, D_BXT, NULL, NULL); |
---|
| 3369 | + MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS, |
---|
| 3370 | + 0, 0, D_BXT, NULL, NULL); |
---|
3229 | 3371 | |
---|
3230 | 3372 | MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); |
---|
| 3373 | + |
---|
| 3374 | + MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write); |
---|
3231 | 3375 | |
---|
3232 | 3376 | return 0; |
---|
3233 | 3377 | } |
---|
.. | .. |
---|
3271 | 3415 | gvt->mmio.mmio_attribute = NULL; |
---|
3272 | 3416 | } |
---|
3273 | 3417 | |
---|
3274 | | -/* Special MMIO blocks. */ |
---|
| 3418 | +/* Special MMIO blocks. registers in MMIO block ranges should not be command |
---|
| 3419 | + * accessible (should have no F_CMD_ACCESS flag). |
---|
| 3420 | + * otherwise, need to update cmd_reg_handler in cmd_parser.c |
---|
| 3421 | + */ |
---|
3275 | 3422 | static struct gvt_mmio_block mmio_blocks[] = { |
---|
3276 | 3423 | {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, |
---|
3277 | 3424 | {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, |
---|
.. | .. |
---|
3295 | 3442 | int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) |
---|
3296 | 3443 | { |
---|
3297 | 3444 | struct intel_gvt_device_info *info = &gvt->device_info; |
---|
3298 | | - struct drm_i915_private *dev_priv = gvt->dev_priv; |
---|
| 3445 | + struct drm_i915_private *i915 = gvt->gt->i915; |
---|
3299 | 3446 | int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute); |
---|
3300 | 3447 | int ret; |
---|
3301 | 3448 | |
---|
.. | .. |
---|
3307 | 3454 | if (ret) |
---|
3308 | 3455 | goto err; |
---|
3309 | 3456 | |
---|
3310 | | - if (IS_BROADWELL(dev_priv)) { |
---|
3311 | | - ret = init_broadwell_mmio_info(gvt); |
---|
| 3457 | + if (IS_BROADWELL(i915)) { |
---|
| 3458 | + ret = init_bdw_mmio_info(gvt); |
---|
3312 | 3459 | if (ret) |
---|
3313 | 3460 | goto err; |
---|
3314 | | - } else if (IS_SKYLAKE(dev_priv) |
---|
3315 | | - || IS_KABYLAKE(dev_priv)) { |
---|
3316 | | - ret = init_broadwell_mmio_info(gvt); |
---|
| 3461 | + } else if (IS_SKYLAKE(i915) || |
---|
| 3462 | + IS_KABYLAKE(i915) || |
---|
| 3463 | + IS_COFFEELAKE(i915) || |
---|
| 3464 | + IS_COMETLAKE(i915)) { |
---|
| 3465 | + ret = init_bdw_mmio_info(gvt); |
---|
3317 | 3466 | if (ret) |
---|
3318 | 3467 | goto err; |
---|
3319 | 3468 | ret = init_skl_mmio_info(gvt); |
---|
3320 | 3469 | if (ret) |
---|
3321 | 3470 | goto err; |
---|
3322 | | - } else if (IS_BROXTON(dev_priv)) { |
---|
3323 | | - ret = init_broadwell_mmio_info(gvt); |
---|
| 3471 | + } else if (IS_BROXTON(i915)) { |
---|
| 3472 | + ret = init_bdw_mmio_info(gvt); |
---|
3324 | 3473 | if (ret) |
---|
3325 | 3474 | goto err; |
---|
3326 | 3475 | ret = init_skl_mmio_info(gvt); |
---|
.. | .. |
---|
3364 | 3513 | } |
---|
3365 | 3514 | |
---|
3366 | 3515 | for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { |
---|
| 3516 | + /* pvinfo data doesn't come from hw mmio */ |
---|
| 3517 | + if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE) |
---|
| 3518 | + continue; |
---|
| 3519 | + |
---|
3367 | 3520 | for (j = 0; j < block->size; j += 4) { |
---|
3368 | 3521 | ret = handler(gvt, |
---|
3369 | 3522 | i915_mmio_reg_offset(block->offset) + j, |
---|
.. | .. |
---|
3456 | 3609 | * @offset: register offset |
---|
3457 | 3610 | * @pdata: data buffer |
---|
3458 | 3611 | * @bytes: data length |
---|
| 3612 | + * @is_read: read or write |
---|
3459 | 3613 | * |
---|
3460 | 3614 | * Returns: |
---|
3461 | 3615 | * Zero on success, negative error code if failed. |
---|
.. | .. |
---|
3463 | 3617 | int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, |
---|
3464 | 3618 | void *pdata, unsigned int bytes, bool is_read) |
---|
3465 | 3619 | { |
---|
| 3620 | + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; |
---|
3466 | 3621 | struct intel_gvt *gvt = vgpu->gvt; |
---|
3467 | 3622 | struct intel_gvt_mmio_info *mmio_info; |
---|
3468 | 3623 | struct gvt_mmio_block *mmio_block; |
---|
3469 | 3624 | gvt_mmio_func func; |
---|
3470 | 3625 | int ret; |
---|
3471 | 3626 | |
---|
3472 | | - if (WARN_ON(bytes > 8)) |
---|
| 3627 | + if (drm_WARN_ON(&i915->drm, bytes > 8)) |
---|
3473 | 3628 | return -EINVAL; |
---|
3474 | 3629 | |
---|
3475 | 3630 | /* |
---|
.. | .. |
---|
3496 | 3651 | return mmio_info->read(vgpu, offset, pdata, bytes); |
---|
3497 | 3652 | else { |
---|
3498 | 3653 | u64 ro_mask = mmio_info->ro_mask; |
---|
3499 | | - u32 old_vreg = 0, old_sreg = 0; |
---|
| 3654 | + u32 old_vreg = 0; |
---|
3500 | 3655 | u64 data = 0; |
---|
3501 | 3656 | |
---|
3502 | 3657 | if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { |
---|
3503 | 3658 | old_vreg = vgpu_vreg(vgpu, offset); |
---|
3504 | | - old_sreg = vgpu_sreg(vgpu, offset); |
---|
3505 | 3659 | } |
---|
3506 | 3660 | |
---|
3507 | 3661 | if (likely(!ro_mask)) |
---|
.. | .. |
---|
3523 | 3677 | |
---|
3524 | 3678 | vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
---|
3525 | 3679 | | (vgpu_vreg(vgpu, offset) & mask); |
---|
3526 | | - vgpu_sreg(vgpu, offset) = (old_sreg & ~mask) |
---|
3527 | | - | (vgpu_sreg(vgpu, offset) & mask); |
---|
3528 | 3680 | } |
---|
3529 | 3681 | } |
---|
3530 | 3682 | |
---|
.. | .. |
---|
3535 | 3687 | intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) : |
---|
3536 | 3688 | intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes); |
---|
3537 | 3689 | } |
---|
| 3690 | + |
---|
| 3691 | +void intel_gvt_restore_fence(struct intel_gvt *gvt) |
---|
| 3692 | +{ |
---|
| 3693 | + struct intel_vgpu *vgpu; |
---|
| 3694 | + int i, id; |
---|
| 3695 | + |
---|
| 3696 | + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { |
---|
| 3697 | + mmio_hw_access_pre(gvt->gt); |
---|
| 3698 | + for (i = 0; i < vgpu_fence_sz(vgpu); i++) |
---|
| 3699 | + intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); |
---|
| 3700 | + mmio_hw_access_post(gvt->gt); |
---|
| 3701 | + } |
---|
| 3702 | +} |
---|
| 3703 | + |
---|
| 3704 | +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt, |
---|
| 3705 | + u32 offset, void *data) |
---|
| 3706 | +{ |
---|
| 3707 | + struct intel_vgpu *vgpu = data; |
---|
| 3708 | + struct drm_i915_private *dev_priv = gvt->gt->i915; |
---|
| 3709 | + |
---|
| 3710 | + if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) |
---|
| 3711 | + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); |
---|
| 3712 | + |
---|
| 3713 | + return 0; |
---|
| 3714 | +} |
---|
| 3715 | + |
---|
| 3716 | +void intel_gvt_restore_mmio(struct intel_gvt *gvt) |
---|
| 3717 | +{ |
---|
| 3718 | + struct intel_vgpu *vgpu; |
---|
| 3719 | + int id; |
---|
| 3720 | + |
---|
| 3721 | + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { |
---|
| 3722 | + mmio_hw_access_pre(gvt->gt); |
---|
| 3723 | + intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); |
---|
| 3724 | + mmio_hw_access_post(gvt->gt); |
---|
| 3725 | + } |
---|
| 3726 | +} |
---|