forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/i915/gvt/handlers.c
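
Summary: this change reworks the GVT-g MMIO handlers. gvt->dev_priv accesses become gvt->gt->i915, ring-id based lookups (intel_gvt_render_mmio_to_ring_id) are replaced by engine-pointer lookups (intel_gvt_render_mmio_to_engine), bare WARN*() calls become device-scoped drm_WARN*(), and Coffee Lake/Comet Lake (D_CFL) join the supported platforms. It also adds async-flip handling for surface writes (reg50080_mmio_write), GuC reset-status emulation (guc_status_read), a Broxton PPAT snoop workaround (bxt_ppat_low_write), and PM restore helpers (intel_gvt_restore_fence/intel_gvt_restore_mmio).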
@@ -49,14 +49,18 @@
 
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 {
-	if (IS_BROADWELL(gvt->dev_priv))
+	struct drm_i915_private *i915 = gvt->gt->i915;
+
+	if (IS_BROADWELL(i915))
 		return D_BDW;
-	else if (IS_SKYLAKE(gvt->dev_priv))
+	else if (IS_SKYLAKE(i915))
 		return D_SKL;
-	else if (IS_KABYLAKE(gvt->dev_priv))
+	else if (IS_KABYLAKE(i915))
 		return D_KBL;
-	else if (IS_BROXTON(gvt->dev_priv))
+	else if (IS_BROXTON(i915))
 		return D_BXT;
+	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
+		return D_CFL;
 
 	return 0;
 }
@@ -140,25 +144,25 @@
 }
 
 /**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
  * @gvt: a GVT device
  * @offset: register offset
  *
  * Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine containing the offset within its mmio page.
  */
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
-		unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
 {
-	enum intel_engine_id id;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	offset &= ~GENMASK(11, 0);
-	for_each_engine(engine, gvt->dev_priv, id) {
+	for_each_engine(engine, gvt->gt, id)
 		if (engine->mmio_base == offset)
-			return id;
-	}
-	return -ENODEV;
+			return engine;
+
+	return NULL;
 }
 
 #define offset_to_fence_num(offset) \
@@ -215,7 +219,7 @@
 {
 	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
 
-	if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+	if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
 		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
 			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
 		else if (!ips)
@@ -251,7 +255,7 @@
 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
 	unsigned int fence_num = offset_to_fence_num(off);
 	int ret;
 
@@ -260,10 +264,10 @@
 		return ret;
 	write_vreg(vgpu, off, p_data, bytes);
 
-	mmio_hw_access_pre(dev_priv);
+	mmio_hw_access_pre(gvt->gt);
 	intel_vgpu_write_fence(vgpu, fence_num,
 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
-	mmio_hw_access_post(dev_priv);
+	mmio_hw_access_post(gvt->gt);
 	return 0;
 }
 
@@ -276,14 +280,12 @@
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
 	u32 old, new;
-	uint32_t ack_reg_offset;
+	u32 ack_reg_offset;
 
 	old = vgpu_vreg(vgpu, offset);
 	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
-	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
-		|| IS_BROXTON(vgpu->gvt->dev_priv)) {
+	if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
 		switch (offset) {
 		case FORCEWAKE_RENDER_GEN9_REG:
 			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -311,7 +313,7 @@
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	unsigned int engine_mask = 0;
+	intel_engine_mask_t engine_mask = 0;
 	u32 data;
 
 	write_vreg(vgpu, offset, p_data, bytes);
@@ -323,25 +325,29 @@
 	} else {
 		if (data & GEN6_GRDOM_RENDER) {
 			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-			engine_mask |= (1 << RCS);
+			engine_mask |= BIT(RCS0);
 		}
 		if (data & GEN6_GRDOM_MEDIA) {
 			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-			engine_mask |= (1 << VCS);
+			engine_mask |= BIT(VCS0);
 		}
 		if (data & GEN6_GRDOM_BLT) {
 			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-			engine_mask |= (1 << BCS);
+			engine_mask |= BIT(BCS0);
 		}
 		if (data & GEN6_GRDOM_VECS) {
 			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-			engine_mask |= (1 << VECS);
+			engine_mask |= BIT(VECS0);
 		}
 		if (data & GEN8_GRDOM_MEDIA2) {
 			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-			if (HAS_BSD2(vgpu->gvt->dev_priv))
-				engine_mask |= (1 << VCS2);
+			engine_mask |= BIT(VCS1);
 		}
+		if (data & GEN9_GRDOM_GUC) {
+			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+		}
+		engine_mask &= vgpu->gvt->gt->info.engine_mask;
 	}
 
 	/* vgpu_lock already hold by emulate mmio r/w */
@@ -456,14 +462,20 @@
 	return 0;
 }
 
-/* ascendingly sorted */
+/* sorted in ascending order */
 static i915_reg_t force_nonpriv_white_list[] = {
+	_MMIO(0xd80),
 	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
 	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+	CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
+	PS_INVOCATION_COUNT, //_MMIO(0x2348)
+	PS_DEPTH_COUNT, //_MMIO(0x2350)
 	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
 	_MMIO(0x2690),
 	_MMIO(0x2694),
 	_MMIO(0x2698),
+	_MMIO(0x2754),
+	_MMIO(0x28a0),
 	_MMIO(0x4de0),
 	_MMIO(0x4de4),
 	_MMIO(0x4dfc),
@@ -475,16 +487,18 @@
 	_MMIO(0x7704),
 	_MMIO(0x7708),
 	_MMIO(0x770c),
+	_MMIO(0x83a8),
 	_MMIO(0xb110),
 	GEN8_L3SQCREG4,//_MMIO(0xb118)
 	_MMIO(0xe100),
 	_MMIO(0xe18c),
 	_MMIO(0xe48c),
 	_MMIO(0xe5f4),
+	_MMIO(0x64844),
 };
 
 /* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
 {
 	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
 	i915_reg_t *array = force_nonpriv_white_list;
@@ -505,27 +519,22 @@
 static int force_nonpriv_write(struct intel_vgpu *vgpu,
 	unsigned int offset, void *p_data, unsigned int bytes)
 {
-	u32 reg_nonpriv = *(u32 *)p_data;
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
-	u32 ring_base;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	int ret = -EINVAL;
+	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 
-	if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
-		gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
-			vgpu->id, ring_id, offset, bytes);
-		return ret;
+	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+			vgpu->id, offset, bytes);
+		return -EINVAL;
 	}
 
-	ring_base = dev_priv->engine[ring_id]->mmio_base;
-
-	if (in_whitelist(reg_nonpriv) ||
-		reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
-		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
-			bytes);
-	} else
+	if (!in_whitelist(reg_nonpriv) &&
+	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
 		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
 			vgpu->id, reg_nonpriv, offset);
+	} else
+		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
 
 	return 0;
 }
@@ -651,7 +660,7 @@
 	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
 		index = FDI_RX_IMR_TO_PIPE(offset);
 	else {
-		gvt_vgpu_err("Unsupport registers %x\n", offset);
+		gvt_vgpu_err("Unsupported registers %x\n", offset);
 		return -EINVAL;
 	}
 
@@ -748,19 +757,20 @@
 static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	unsigned int index = DSPSURF_TO_PIPE(offset);
-	i915_reg_t surflive_reg = DSPSURFLIVE(index);
-	int flip_event[] = {
-		[PIPE_A] = PRIMARY_A_FLIP_DONE,
-		[PIPE_B] = PRIMARY_B_FLIP_DONE,
-		[PIPE_C] = PRIMARY_C_FLIP_DONE,
-	};
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+	u32 pipe = DSPSURF_TO_PIPE(offset);
+	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
 
 	write_vreg(vgpu, offset, p_data, bytes);
-	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
 
-	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	else
+		set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
 	return 0;
 }
 
@@ -770,37 +780,64 @@
 static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	unsigned int index = SPRSURF_TO_PIPE(offset);
-	i915_reg_t surflive_reg = SPRSURFLIVE(index);
-	int flip_event[] = {
-		[PIPE_A] = SPRITE_A_FLIP_DONE,
-		[PIPE_B] = SPRITE_B_FLIP_DONE,
-		[PIPE_C] = SPRITE_C_FLIP_DONE,
-	};
+	u32 pipe = SPRSURF_TO_PIPE(offset);
+	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
 
 	write_vreg(vgpu, offset, p_data, bytes);
-	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
 
-	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
+	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	else
+		set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+	return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+			       unsigned int offset, void *p_data,
+			       unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	enum pipe pipe = REG_50080_TO_PIPE(offset);
+	enum plane_id plane = REG_50080_TO_PLANE(offset);
+	int event = SKL_FLIP_EVENT(pipe, plane);
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	if (plane == PLANE_PRIMARY) {
+		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+	} else {
+		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+	}
+
+	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	else
+		set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
 	return 0;
 }
 
 static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
 		unsigned int reg)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	enum intel_gvt_event_type event;
 
-	if (reg == _DPA_AUX_CH_CTL)
+	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
 		event = AUX_CHANNEL_A;
-	else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+	else if (reg == _PCH_DPB_AUX_CH_CTL ||
+		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
 		event = AUX_CHANNEL_B;
-	else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+	else if (reg == _PCH_DPC_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
 		event = AUX_CHANNEL_C;
-	else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+	else if (reg == _PCH_DPD_AUX_CH_CTL ||
		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
 		event = AUX_CHANNEL_D;
 	else {
-		WARN_ON(true);
+		drm_WARN_ON(&dev_priv->drm, true);
 		return -EINVAL;
 	}
 
@@ -832,7 +869,7 @@
 }
 
 static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
-		uint8_t t)
+		u8 t)
 {
 	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
 		/* training pattern 1 for CR */
@@ -888,13 +925,11 @@
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);
 
-	if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
-		|| IS_BROXTON(vgpu->gvt->dev_priv))
+	if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
 		/* SKL DPB/C/D aux ctl register changed */
 		return 0;
-	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
 		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
 		/* write to the data registers */
 		return 0;
@@ -918,7 +953,7 @@
 
 	if (op == GVT_AUX_NATIVE_WRITE) {
 		int t;
-		uint8_t buf[16];
+		u8 buf[16];
 
 		if ((addr + len + 1) >= DPCD_SIZE) {
 			/*
@@ -1182,7 +1217,7 @@
 
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
-	intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
 	struct intel_vgpu_mm *mm;
 	u64 *pdps;
 
@@ -1191,7 +1226,7 @@
 	switch (notification) {
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
-		/* fall through */
+		fallthrough;
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
 		return PTR_ERR_OR_ZERO(mm);
@@ -1210,8 +1245,7 @@
 
 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
 	char *env[3] = {NULL, NULL, NULL};
 	char vmid_str[20];
 	char display_ready_str[20];
@@ -1228,18 +1262,15 @@
 static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	u32 data;
-	int ret;
-
-	write_vreg(vgpu, offset, p_data, bytes);
-	data = vgpu_vreg(vgpu, offset);
+	u32 data = *(u32 *)p_data;
+	bool invalid_write = false;
 
 	switch (offset) {
 	case _vgtif_reg(display_ready):
 		send_display_ready_uevent(vgpu, data ? 1 : 0);
 		break;
 	case _vgtif_reg(g2v_notify):
-		ret = handle_g2v_notification(vgpu, data);
+		handle_g2v_notification(vgpu, data);
 		break;
 	/* add xhot and yhot to handled list to avoid error log */
 	case _vgtif_reg(cursor_x_hot):
@@ -1256,26 +1287,34 @@
 	case _vgtif_reg(execlist_context_descriptor_hi):
 		break;
 	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+		invalid_write = true;
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
 		break;
 	default:
+		invalid_write = true;
 		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
 				offset, bytes, data);
 		break;
 	}
+
+	if (!invalid_write)
+		write_vreg(vgpu, offset, p_data, bytes);
+
 	return 0;
 }
 
 static int pf_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	u32 val = *(u32 *)p_data;
 
 	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
 	    offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
 	    offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
-		WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
-			  vgpu->id);
+		drm_WARN_ONCE(&i915->drm, true,
			      "VM(%d): guest is trying to scaling a plane\n",
			      vgpu->id);
 		return 0;
 	}
 
@@ -1287,12 +1326,13 @@
 {
 	write_vreg(vgpu, offset, p_data, bytes);
 
-	if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
+	if (vgpu_vreg(vgpu, offset) &
+	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
 		vgpu_vreg(vgpu, offset) |=
-			HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
 	else
 		vgpu_vreg(vgpu, offset) &=
-			~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
 	return 0;
 }
 
@@ -1322,13 +1362,15 @@
 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	u32 mode;
 
 	write_vreg(vgpu, offset, p_data, bytes);
 	mode = vgpu_vreg(vgpu, offset);
 
 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
-		WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+		drm_WARN_ONCE(&i915->drm, 1,
+			      "VM(%d): iGVT-g doesn't support GuC\n",
			  vgpu->id);
 		return 0;
 	}
@@ -1339,20 +1381,16 @@
 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	u32 trtte = *(u32 *)p_data;
 
 	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
-		WARN(1, "VM(%d): Use physical address for TRTT!\n",
+		drm_WARN(&i915->drm, 1,
+			 "VM(%d): Use physical address for TRTT!\n",
			 vgpu->id);
 		return -EINVAL;
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
-	/* TRTTE is not per-context */
-
-	mmio_hw_access_pre(dev_priv);
-	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
-	mmio_hw_access_post(dev_priv);
 
 	return 0;
 }
@@ -1360,15 +1398,6 @@
 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 val = *(u32 *)p_data;
-
-	if (val & 1) {
-		/* unblock hw logic */
-		mmio_hw_access_pre(dev_priv);
-		I915_WRITE(_MMIO(offset), val);
-		mmio_hw_access_post(dev_priv);
-	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	return 0;
 }
@@ -1404,8 +1433,10 @@
 
 	switch (cmd) {
 	case GEN9_PCODE_READ_MEM_LATENCY:
-		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+		    IS_COMETLAKE(vgpu->gvt->gt->i915)) {
 			/**
 			 * "Read memory latency" command on gen9.
 			 * Below memory latency values are read
@@ -1415,7 +1446,7 @@
 				*data0 = 0x1e1a1100;
 			else
 				*data0 = 0x61514b3d;
-		} else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
 			/**
 			 * "Read memory latency" command on gen9.
 			 * Below memory latency values are read
@@ -1428,8 +1459,10 @@
 		}
 		break;
 	case SKL_PCODE_CDCLK_CONTROL:
-		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv))
+		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+		    IS_COMETLAKE(vgpu->gvt->gt->i915))
 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
 	case GEN6_PCODE_READ_RC6VIDS:
@@ -1453,24 +1486,27 @@
 		void *p_data, unsigned int bytes)
 {
 	u32 value = *(u32 *)p_data;
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 
-	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+	if (value != 0 &&
+	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
 		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
			     offset, value);
 		return -EINVAL;
 	}
+
 	/*
	 * Need to emulate all the HWSP register write to ensure host can
	 * update the VM CSB status correctly. Here listed registers can
	 * support BDW, SKL or other platforms with same HWSP registers.
	 */
-	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+	if (unlikely(!engine)) {
 		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
			     offset);
 		return -EINVAL;
 	}
-	vgpu->hws_pga[ring_id] = value;
+	vgpu->hws_pga[engine->id] = value;
 	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
		     vgpu->id, value, offset);
 
@@ -1482,7 +1518,7 @@
 {
 	u32 v = *(u32 *)p_data;
 
-	if (IS_BROXTON(vgpu->gvt->dev_priv))
+	if (IS_BROXTON(vgpu->gvt->gt->i915))
 		v &= (1 << 31) | (1 << 29);
 	else
 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1608,10 +1644,48 @@
 	return 0;
 }
 
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
 	vgpu_vreg(vgpu, offset) = 0;
+	return 0;
+}
+
+/**
+ * FixMe:
+ * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
+ * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
+ * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing
+ * these MI_BATCH_BUFFER.
+ * Temporarily work around this by setting SNOOP bit for PAT3 used by PPGTT
+ * PML4 PTE: PAT(0) PCD(1) PWT(1).
+ * The performance is still expected to be low, will need further improvement.
+ */
+static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u64 pat =
+		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(1, 0) |
+		GEN8_PPAT(2, 0) |
+		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
+
+	return 0;
+}
+
+static int guc_status_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data,
+		unsigned int bytes)
+{
+	/* keep MIA_IN_RESET before clearing */
+	read_vreg(vgpu, offset, p_data, bytes);
+	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
 	return 0;
 }
 
@@ -1619,26 +1693,24 @@
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	int ring_id;
-	u32 ring_base;
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(gvt, offset);
 
-	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
 	/**
	 * Read HW reg in following case
	 * a. the offset isn't a ring mmio
	 * b. the offset's ring is running on hw.
	 * c. the offset is ring time stamp mmio
	 */
-	if (ring_id >= 0)
-		ring_base = dev_priv->engine[ring_id]->mmio_base;
 
-	if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
-	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
-	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
-		mmio_hw_access_pre(dev_priv);
-		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-		mmio_hw_access_post(dev_priv);
+	if (!engine ||
+	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
+		mmio_hw_access_pre(gvt->gt);
+		vgpu_vreg(vgpu, offset) =
+			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
+		mmio_hw_access_post(gvt->gt);
 	}
 
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1647,22 +1719,38 @@
 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 	struct intel_vgpu_execlist *execlist;
 	u32 data = *(u32 *)p_data;
 	int ret = 0;
 
-	if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
+	if (drm_WARN_ON(&i915->drm, !engine))
 		return -EINVAL;
 
-	execlist = &vgpu->submission.execlist[ring_id];
+	/*
	 * Because d3_entered is used to indicate skipping PPGTT invalidation on
	 * vGPU reset, it's set on the D0->D3 PCI config write and cleared after
	 * vGPU reset when resuming.
	 * On S0ix exit, the device power state also transits from D3 to D0, as
	 * in S3 resume, but with no vGPU reset (triggered by the QEMU device
	 * model). After S0ix exit, all engines continue to work. However,
	 * d3_entered remains set, which will break the next vGPU reset logic
	 * (missing the expected PPGTT invalidation).
	 * Engines can only work in D0. Thus the 1st elsp write gives GVT a
	 * chance to clear d3_entered.
	 */
+	if (vgpu->d3_entered)
+		vgpu->d3_entered = false;
+
+	execlist = &vgpu->submission.execlist[engine->id];
 
 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
-		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+		ret = intel_vgpu_submit_execlist(vgpu, engine);
 		if(ret)
-			gvt_vgpu_err("fail submit workload on ring %d\n",
-				ring_id);
+			gvt_vgpu_err("fail submit workload on ring %s\n",
+				     engine->name);
 	}
 
 	++execlist->elsp_dwords.index;
@@ -1674,36 +1762,53 @@
 		void *p_data, unsigned int bytes)
 {
 	u32 data = *(u32 *)p_data;
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 	bool enable_execlist;
 	int ret;
 
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+	    IS_COMETLAKE(vgpu->gvt->gt->i915))
+		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
 	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (IS_MASKED_BITS_ENABLED(data, 1)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
+
+	if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
+	     IS_COMETLAKE(vgpu->gvt->gt->i915)) &&
+	    IS_MASKED_BITS_ENABLED(data, 2)) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
 
 	/* when PPGTT mode enabled, we will check if guest has called
	 * pvinfo, if not, we will treat this guest as non-gvtg-aware
	 * guest, and stop emulating its cfg space, mmio, gtt, etc.
	 */
-	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
-			(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
-			&& !vgpu->pv_notified) {
+	if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+	     IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+	    !vgpu->pv_notified) {
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 		return 0;
 	}
-	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
-			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+	if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+	    IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
-		gvt_dbg_core("EXECLIST %s on ring %d\n",
-				(enable_execlist ? "enabling" : "disabling"),
-				ring_id);
+		gvt_dbg_core("EXECLIST %s on ring %s\n",
			     (enable_execlist ? "enabling" : "disabling"),
			     engine->name);
 
 		if (!enable_execlist)
 			return 0;
 
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				ENGINE_MASK(ring_id),
-				INTEL_VGPU_EXECLIST_SUBMISSION);
+						       engine->mask,
						       INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
 
@@ -1722,19 +1827,19 @@
 
 	switch (offset) {
 	case 0x4260:
-		id = RCS;
+		id = RCS0;
 		break;
 	case 0x4264:
-		id = VCS;
+		id = VCS0;
 		break;
 	case 0x4268:
-		id = VCS2;
+		id = VCS1;
 		break;
 	case 0x426c:
-		id = BCS;
+		id = BCS0;
 		break;
 	case 0x4270:
-		id = VECS;
+		id = VECS0;
 		break;
 	default:
 		return -EINVAL;
@@ -1752,12 +1857,28 @@
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);
 
-	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+	if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
 		data |= RESET_CTL_READY_TO_RESET;
 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
 		data &= ~RESET_CTL_READY_TO_RESET;
 
 	vgpu_vreg(vgpu, offset) = data;
+	return 0;
+}
+
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+				    unsigned int offset, void *p_data,
+				    unsigned int bytes)
+{
+	u32 data = *(u32 *)p_data;
+
+	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+	    IS_MASKED_BITS_ENABLED(data, 0x8))
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
 	return 0;
 }
 
@@ -1791,7 +1912,7 @@
 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
-	if (HAS_BSD2(dev_priv)) \
+	if (HAS_ENGINE(gvt->gt, VCS1)) \
 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
 } while (0)
 
@@ -1812,10 +1933,10 @@
 
 static int init_generic_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
-	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
 		intel_vgpu_reg_imr_handler);
 
 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
@@ -1823,7 +1944,8 @@
 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
 	MMIO_D(SDEISR, D_ALL);
 
-	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
+
 
 	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
 		gamw_echo_dev_rw_ia_write);
@@ -1846,15 +1968,15 @@
 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
 
 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
-	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
-	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
-	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
+	MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
+	MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
+	MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
+	MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
+	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
 
 	/* RING MODE */
 #define RING_REG(base) _MMIO((base) + 0x29c)
@@ -1881,7 +2003,8 @@
 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -1967,6 +2090,8 @@
 	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
 	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
 	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
 	MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1976,6 +2101,8 @@
 	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
 	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
 	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
 	MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1985,6 +2112,8 @@
 	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
 	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
 	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(SPRCTL(PIPE_A), D_ALL);
 	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -1998,6 +2127,8 @@
 	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(SPRCTL(PIPE_B), D_ALL);
 	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2011,6 +2142,8 @@
 	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(SPRCTL(PIPE_C), D_ALL);
 	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2024,6 +2157,8 @@
 	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2137,7 +2272,7 @@
 
 	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
 		gmbus_mmio_write);
-	MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
 	MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
 
 	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
@@ -2338,9 +2473,9 @@
 	MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
 	MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
 
-	MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
-	MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
-	MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+	MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
+	MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
+	MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
 	MMIO_D(SPLL_CTL, D_ALL);
 	MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
 	MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
@@ -2462,17 +2597,10 @@
 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
-	/*
-	 * Use an arbitrary power well controlled by the PWR_WELL_CTL
-	 * register.
-	 */
-	MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
-		power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
-		power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
-		power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
 	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
 	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
 
@@ -2603,7 +2731,7 @@
 	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+	MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2613,12 +2741,17 @@
 	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
 	return 0;
 }
 
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2683,7 +2816,7 @@
 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
 		intel_vgpu_reg_master_irq_handler);
 
-	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
 		mmio_read_from_hw, NULL);
 
 #define RING_REG(base) _MMIO((base) + 0xd0)
@@ -2697,7 +2830,7 @@
 #undef RING_REG
 
 #define RING_REG(base) _MMIO((base) + 0x234)
-	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
 		NULL, NULL);
#undef RING_REG
 
@@ -2723,16 +2856,16 @@
 
 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
 
-	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
 
 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
 
#define RING_REG(base) _MMIO((base) + 0x270)
-	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
 
-	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
+	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
 
 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2741,7 +2874,7 @@
 	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
 
 	MMIO_D(WM_MISC, D_BDW);
-	MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+	MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
 
 	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
@@ -2801,12 +2934,13 @@
 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
 
 static int init_skl_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -2816,43 +2950,38 @@
 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
 
-	MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
-	MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
-	MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
 
-	/*
-	 * Use an arbitrary power well controlled by the PWR_WELL_CTL
-	 * register.
-	 */
-	MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
-	MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
-		skl_power_well_ctl_write);
+	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
+	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
 
-	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
 
-	MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
-	MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
-	MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
-	MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
-	MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
-	MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
-	MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+	MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+	MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+	MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+	MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+	MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+	MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+	MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
 
 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2971,53 +3100,56 @@
 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+	MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+	MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+	MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+	MMIO_D(SKL_DFSM, D_SKL_PLUS);
+	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
 
-	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
		NULL, NULL);
-	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
		NULL, NULL);
 
 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
-	MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
 
 	/* TRTT */
-	MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
-		NULL, gen9_trtte_write);
-	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
+	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
+		 NULL, gen9_trtte_write);
+	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
+		 NULL, gen9_trtt_chicken_write);
 
 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
-	MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+	MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
 	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3042,21 +3174,24 @@
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
-	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		      NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
 
-	MMIO_D(_MMIO(0x4ab8), D_KBL);
-	MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
 
 	return 0;
 }
 
 static int init_bxt_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3216,18 +3351,27 @@
 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
 
-	MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-	MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
 	MMIO_D(RC6_CTX_BASE, D_BXT);
 
 	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
 	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
-	MMIO_D(GEN8_L3SQCREG1, D_BXT);
+	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
 
 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
 
 	return 0;
 }
@@ -3271,7 +3415,10 @@
 	gvt->mmio.mmio_attribute = NULL;
 }
 
-/* Special MMIO blocks. */
+/* Special MMIO blocks. Registers in MMIO block ranges should not be command
 * accessible (should have no F_CMD_ACCESS flag);
 * otherwise, cmd_reg_handler in cmd_parser.c needs to be updated.
 */
 static struct gvt_mmio_block mmio_blocks[] = {
 	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
 	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
@@ -3295,7 +3442,7 @@
 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
 	int ret;
 
@@ -3307,20 +3454,22 @@
 	if (ret)
 		goto err;
 
-	if (IS_BROADWELL(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+	if (IS_BROADWELL(i915)) {
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
-	} else if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+	} else if (IS_SKYLAKE(i915) ||
		   IS_KABYLAKE(i915) ||
		   IS_COFFEELAKE(i915) ||
		   IS_COMETLAKE(i915)) {
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
 		if (ret)
 			goto err;
-	} else if (IS_BROXTON(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+	} else if (IS_BROXTON(i915)) {
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
@@ -3364,6 +3513,10 @@
 	}
 
 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+		/* pvinfo data doesn't come from hw mmio */
+		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+			continue;
+
 		for (j = 0; j < block->size; j += 4) {
 			ret = handler(gvt,
				      i915_mmio_reg_offset(block->offset) + j,
@@ -3456,6 +3609,7 @@
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
+ * @is_read: read or write
 *
 * Returns:
 * Zero on success, negative error code if failed.
@@ -3463,13 +3617,14 @@
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 		void *pdata, unsigned int bytes, bool is_read)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_mmio_info *mmio_info;
 	struct gvt_mmio_block *mmio_block;
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 8))
+	if (drm_WARN_ON(&i915->drm, bytes > 8))
 		return -EINVAL;
 
 	/*
@@ -3496,12 +3651,11 @@
 		return mmio_info->read(vgpu, offset, pdata, bytes);
 	else {
 		u64 ro_mask = mmio_info->ro_mask;
-		u32 old_vreg = 0, old_sreg = 0;
+		u32 old_vreg = 0;
 		u64 data = 0;
 
 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
 			old_vreg = vgpu_vreg(vgpu, offset);
-			old_sreg = vgpu_sreg(vgpu, offset);
 		}
 
 		if (likely(!ro_mask))
@@ -3523,8 +3677,6 @@
 
 			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
-			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
-				| (vgpu_sreg(vgpu, offset) & mask);
 		}
 	}
 
@@ -3535,3 +3687,40 @@
 		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
 		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
 }
+
+void intel_gvt_restore_fence(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	int i, id;
+
+	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+		mmio_hw_access_pre(gvt->gt);
+		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+			intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
+		mmio_hw_access_post(gvt->gt);
+	}
+}
+
+static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
+					  u32 offset, void *data)
+{
+	struct intel_vgpu *vgpu = data;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
+
+	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
+		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+	return 0;
+}
+
+void intel_gvt_restore_mmio(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	int id;
+
+	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+		mmio_hw_access_pre(gvt->gt);
+		intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
+		mmio_hw_access_post(gvt->gt);
+	}
+}
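
Note: the central refactor above replaces integer ring ids with engine pointers. The following is a standalone user-space model (an illustrative sketch, not kernel code; the engine names and mmio bases below are assumptions for demonstration only) of the lookup that intel_gvt_render_mmio_to_engine() performs: mask off the low 12 bits of a register offset to get its 4KB mmio page, then match that page base against each engine's mmio_base, returning NULL (rather than the old -ENODEV) when nothing matches.

#include <stdio.h>
#include <stddef.h>

struct engine { const char *name; unsigned int mmio_base; };

/* illustrative engine list; the real bases come from the engine info tables */
static const struct engine engines[] = {
	{ "rcs0",  0x02000 },	/* render */
	{ "vcs0",  0x12000 },	/* video decode */
	{ "vecs0", 0x1a000 },	/* video enhancement */
	{ "bcs0",  0x22000 },	/* blitter */
};

static const struct engine *mmio_to_engine(unsigned int offset)
{
	offset &= ~0xfffu;	/* ~GENMASK(11, 0): keep the 4KB page base */
	for (size_t i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
		if (engines[i].mmio_base == offset)
			return &engines[i];
	return NULL;		/* not a ring register */
}

int main(void)
{
	/* 0x2244 lies in the render engine's mmio page (0x2000..0x2fff) */
	const struct engine *e = mmio_to_engine(0x2244);
	printf("%s\n", e ? e->name : "not a ring register");
	return 0;
}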