hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/gpu/drm/i915/intel_sideband.c
@@ -22,8 +22,10 @@
  *
  */
 
+#include <asm/iosf_mbi.h>
+
 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_sideband.h"
 
 /*
  * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
@@ -39,257 +41,532 @@
 /* Private register write, double-word addressing, non-posted */
 #define SB_CRWRDA_NP 0x07
 
-static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
-                           u32 port, u32 opcode, u32 addr, u32 *val)
+static void ping(void *info)
 {
-        u32 cmd, be = 0xf, bar = 0;
-        bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+}
 
-        cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
-              (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
-              (bar << IOSF_BAR_SHIFT);
+static void __vlv_punit_get(struct drm_i915_private *i915)
+{
+        iosf_mbi_punit_acquire();
 
-        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+        /*
+         * Prevent the cpu from sleeping while we use this sideband, otherwise
+         * the punit may cause a machine hang. The issue appears to be isolated
+         * to changing the power state of the CPU package while changing
+         * the power state via the punit, and we have only observed it
+         * reliably on 4-core Baytrail systems, suggesting the issue is in the
+         * power delivery mechanism and likely to be board/function
+         * specific. Hence we presume the workaround need only be applied
+         * to the Valleyview P-unit and not all sideband communications.
+         */
+        if (IS_VALLEYVIEW(i915)) {
+                cpu_latency_qos_update_request(&i915->sb_qos, 0);
+                on_each_cpu(ping, NULL, 1);
+        }
+}
 
-        if (intel_wait_for_register(dev_priv,
+static void __vlv_punit_put(struct drm_i915_private *i915)
+{
+        if (IS_VALLEYVIEW(i915))
+                cpu_latency_qos_update_request(&i915->sb_qos,
+                                               PM_QOS_DEFAULT_VALUE);
+
+        iosf_mbi_punit_release();
+}
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+{
+        if (ports & BIT(VLV_IOSF_SB_PUNIT))
+                __vlv_punit_get(i915);
+
+        mutex_lock(&i915->sb_lock);
+}
+
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+{
+        mutex_unlock(&i915->sb_lock);
+
+        if (ports & BIT(VLV_IOSF_SB_PUNIT))
+                __vlv_punit_put(i915);
+}
+
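
The vlv_iosf_sb_get/put pair above defines the new calling convention: a caller names every sideband port it is about to touch, takes the lock (and, for the punit, the pm_qos workaround) once, and releases it after the whole batch of accesses. A minimal caller sketch, illustrative only and not part of this change (PUNIT_REG_GPU_FREQ_STS stands in for whichever register the caller needs):

    /* Illustrative caller: one get/put bracket around punit accesses. */
    static u32 example_punit_status(struct drm_i915_private *i915)
    {
            u32 val;

            vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
            val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); /* example register */
            vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));

            return val;
    }
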
+static int vlv_sideband_rw(struct drm_i915_private *i915,
+                           u32 devfn, u32 port, u32 opcode,
+                           u32 addr, u32 *val)
+{
+        struct intel_uncore *uncore = &i915->uncore;
+        const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+        int err;
+
+        lockdep_assert_held(&i915->sb_lock);
+        if (port == IOSF_PORT_PUNIT)
+                iosf_mbi_assert_punit_acquired();
+
+        /* Flush the previous comms, just in case it failed last time. */
+        if (intel_wait_for_register(uncore,
                                     VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
                                     5)) {
-                DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
-                                 is_read ? "read" : "write");
+                drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+                        is_read ? "read" : "write");
                 return -EAGAIN;
         }
 
-        I915_WRITE(VLV_IOSF_ADDR, addr);
-        I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
-        I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+        preempt_disable();
 
-        if (intel_wait_for_register(dev_priv,
-                                    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
-                                    5)) {
-                DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
-                                 is_read ? "read" : "write");
-                return -ETIMEDOUT;
+        intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
+        intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
+        intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
+                              (devfn << IOSF_DEVFN_SHIFT) |
+                              (opcode << IOSF_OPCODE_SHIFT) |
+                              (port << IOSF_PORT_SHIFT) |
+                              (0xf << IOSF_BYTE_ENABLES_SHIFT) |
+                              (0 << IOSF_BAR_SHIFT) |
+                              IOSF_SB_BUSY);
+
+        if (__intel_wait_for_register_fw(uncore,
+                                         VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                         10000, 0, NULL) == 0) {
+                if (is_read)
+                        *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
+                err = 0;
+        } else {
+                drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+                        is_read ? "read" : "write");
+                err = -ETIMEDOUT;
         }
 
-        if (is_read)
-                *val = I915_READ(VLV_IOSF_DATA);
-
-        return 0;
-}
-
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
-{
-        u32 val = 0;
-
-        WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
-        mutex_lock(&dev_priv->sb_lock);
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
-                        SB_CRRDDA_NP, addr, &val);
-        mutex_unlock(&dev_priv->sb_lock);
-
-        return val;
-}
-
-int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
-{
-        int err;
-
-        WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
-        mutex_lock(&dev_priv->sb_lock);
-        err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
-                              SB_CRWRDA_NP, addr, &val);
-        mutex_unlock(&dev_priv->sb_lock);
+        preempt_enable();
 
         return err;
 }
 
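
The doorbell write in vlv_sideband_rw() packs the whole request into a single register before polling IOSF_SB_BUSY. As a worked example, a punit register read (SB_CRRDDA_NP aimed at IOSF_PORT_PUNIT) composes a word like the sketch below; the IOSF_*_SHIFT names come from i915_reg.h, and the exact bit positions should be treated as illustrative:

    /* Sketch of the doorbell encoding used above (bit layout illustrative). */
    u32 cmd = (PCI_DEVFN(0, 0) << IOSF_DEVFN_SHIFT) |  /* device 0, function 0 */
              (SB_CRRDDA_NP << IOSF_OPCODE_SHIFT) |    /* private register read */
              (IOSF_PORT_PUNIT << IOSF_PORT_SHIFT) |   /* destination unit */
              (0xf << IOSF_BYTE_ENABLES_SHIFT) |       /* enable all four bytes */
              (0 << IOSF_BAR_SHIFT) |                  /* BAR 0 */
              IOSF_SB_BUSY;                            /* ring the doorbell */
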
-u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
 {
         u32 val = 0;
 
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+                        SB_CRRDDA_NP, addr, &val);
+
+        return val;
+}
+
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
+{
+        return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+                               SB_CRWRDA_NP, addr, &val);
+}
+
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
+{
+        u32 val = 0;
+
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
                         SB_CRRDDA_NP, reg, &val);
 
         return val;
 }
 
-void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
                         SB_CRWRDA_NP, reg, &val);
 }
 
-u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
 {
         u32 val = 0;
 
-        WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
-
-        mutex_lock(&dev_priv->sb_lock);
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
                         SB_CRRDDA_NP, addr, &val);
-        mutex_unlock(&dev_priv->sb_lock);
 
         return val;
 }
 
-u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
+
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
                         SB_CRRDDA_NP, reg, &val);
+
         return val;
 }
 
-void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
                        u8 port, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
                         SB_CRWRDA_NP, reg, &val);
 }
 
-u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
                         SB_CRRDDA_NP, reg, &val);
+
         return val;
 }
 
-void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
                         SB_CRWRDA_NP, reg, &val);
 }
 
-u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
 {
         u32 val = 0;
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
                         SB_CRRDDA_NP, reg, &val);
+
         return val;
 }
 
-void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+        vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
                         SB_CRWRDA_NP, reg, &val);
 }
 
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
 {
+        /*
+         * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+         * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+         */
+        if (IS_CHERRYVIEW(i915))
+                return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
+        else
+                return IOSF_PORT_DPIO;
+}
+
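
Spelled out, the PHY-to-port mapping vlv_dpio_phy_iosf_port() encodes (a tabular restatement of its comment, nothing new):

                 PHY0 (DP/HDMI B and C)    PHY1 (DP/HDMI D)
    VLV          IOSF_PORT_DPIO            -
    CHV          IOSF_PORT_DPIO_2          IOSF_PORT_DPIO
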
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
+{
+        u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
         u32 val = 0;
 
-        vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
-                        SB_MRD_NP, reg, &val);
+        vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
 
         /*
          * FIXME: There might be some registers where all 1's is a valid value,
          * so ideally we should check the register offset instead...
          */
-        WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
-             pipe_name(pipe), reg, val);
+        drm_WARN(&i915->drm, val == 0xffffffff,
+                 "DPIO read pipe %c reg 0x%x == 0x%x\n",
+                 pipe_name(pipe), reg, val);
 
         return val;
 }
 
-void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+void vlv_dpio_write(struct drm_i915_private *i915,
+                    enum pipe pipe, int reg, u32 val)
 {
-        vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
-                        SB_MWR_NP, reg, &val);
+        u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+
+        vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
+{
+        u32 val = 0;
+
+        vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+                        reg, &val);
+        return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+        vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+                        reg, &val);
 }
 
 /* SBI access */
-u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
+                        enum intel_sbi_destination destination,
+                        u32 *val, bool is_read)
+{
+        struct intel_uncore *uncore = &i915->uncore;
+        u32 cmd;
+
+        lockdep_assert_held(&i915->sb_lock);
+
+        if (intel_wait_for_register_fw(uncore,
+                                       SBI_CTL_STAT, SBI_BUSY, 0,
+                                       100)) {
+                drm_err(&i915->drm,
+                        "timeout waiting for SBI to become ready\n");
+                return -EBUSY;
+        }
+
+        intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
+        intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
+
+        if (destination == SBI_ICLK)
+                cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+        else
+                cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+        if (!is_read)
+                cmd |= BIT(8);
+        intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
+
+        if (__intel_wait_for_register_fw(uncore,
+                                         SBI_CTL_STAT, SBI_BUSY, 0,
+                                         100, 100, &cmd)) {
+                drm_err(&i915->drm, "timeout waiting for SBI to complete %s\n",
+                        is_read ? "read" : "write");
+                return -ETIMEDOUT;
+        }
+
+        if (cmd & SBI_RESPONSE_FAIL) {
+                drm_err(&i915->drm, "error during SBI %s of reg %x\n",
+                        is_read ? "read" : "write", reg);
+                return -ENXIO;
+        }
+
+        if (is_read)
+                *val = intel_uncore_read_fw(uncore, SBI_DATA);
+
+        return 0;
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
                    enum intel_sbi_destination destination)
 {
-        u32 value = 0;
-        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
+        u32 result = 0;
 
-        if (intel_wait_for_register(dev_priv,
-                                    SBI_CTL_STAT, SBI_BUSY, 0,
-                                    100)) {
-                DRM_ERROR("timeout waiting for SBI to become ready\n");
-                return 0;
-        }
+        intel_sbi_rw(i915, reg, destination, &result, true);
 
-        I915_WRITE(SBI_ADDR, (reg << 16));
-        I915_WRITE(SBI_DATA, 0);
-
-        if (destination == SBI_ICLK)
-                value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
-        else
-                value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
-        I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
-
-        if (intel_wait_for_register(dev_priv,
-                                    SBI_CTL_STAT,
-                                    SBI_BUSY,
-                                    0,
-                                    100)) {
-                DRM_ERROR("timeout waiting for SBI to complete read\n");
-                return 0;
-        }
-
-        if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
-                DRM_ERROR("error during SBI read of reg %x\n", reg);
-                return 0;
-        }
-
-        return I915_READ(SBI_DATA);
+        return result;
 }
 
-void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
                      enum intel_sbi_destination destination)
 {
-        u32 tmp;
+        intel_sbi_rw(i915, reg, destination, &value, false);
+}
 
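
The reworked SBI accessors assume the caller already holds sb_lock; the lockdep assert in intel_sbi_rw() enforces it. A sketch of the expected caller-side pattern, modeled on the iCLK programming elsewhere in the driver (the SBI_SSCCTL6/SBI_SSCCTL_DISABLE names are quoted for illustration):

    /* Illustrative: read-modify-write of an iCLK register over SBI. */
    u32 tmp;

    mutex_lock(&i915->sb_lock);
    tmp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
    intel_sbi_write(i915, SBI_SSCCTL6, tmp | SBI_SSCCTL_DISABLE, SBI_ICLK);
    mutex_unlock(&i915->sb_lock);
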
-        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
-
-        if (intel_wait_for_register(dev_priv,
-                                    SBI_CTL_STAT, SBI_BUSY, 0,
-                                    100)) {
-                DRM_ERROR("timeout waiting for SBI to become ready\n");
-                return;
+static int gen6_check_mailbox_status(u32 mbox)
+{
+        switch (mbox & GEN6_PCODE_ERROR_MASK) {
+        case GEN6_PCODE_SUCCESS:
+                return 0;
+        case GEN6_PCODE_UNIMPLEMENTED_CMD:
+                return -ENODEV;
+        case GEN6_PCODE_ILLEGAL_CMD:
+                return -ENXIO;
+        case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+                return -EOVERFLOW;
+        case GEN6_PCODE_TIMEOUT:
+                return -ETIMEDOUT;
+        default:
+                MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+                return 0;
         }
+}
 
-        I915_WRITE(SBI_ADDR, (reg << 16));
-        I915_WRITE(SBI_DATA, value);
+static int gen7_check_mailbox_status(u32 mbox)
+{
+        switch (mbox & GEN6_PCODE_ERROR_MASK) {
+        case GEN6_PCODE_SUCCESS:
+                return 0;
+        case GEN6_PCODE_ILLEGAL_CMD:
+                return -ENXIO;
+        case GEN7_PCODE_TIMEOUT:
+                return -ETIMEDOUT;
+        case GEN7_PCODE_ILLEGAL_DATA:
+                return -EINVAL;
+        case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+                return -ENXIO;
+        case GEN11_PCODE_LOCKED:
+                return -EBUSY;
+        case GEN11_PCODE_REJECTED:
+                return -EACCES;
+        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+                return -EOVERFLOW;
+        default:
+                MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+                return 0;
+        }
+}
 
-        if (destination == SBI_ICLK)
-                tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+                                  u32 mbox, u32 *val, u32 *val1,
+                                  int fast_timeout_us,
+                                  int slow_timeout_ms,
+                                  bool is_read)
+{
+        struct intel_uncore *uncore = &i915->uncore;
+
+        lockdep_assert_held(&i915->sb_lock);
+
+        /*
+         * GEN6_PCODE_* are outside of the forcewake domain, so we can
+         * use the _fw register accessors to reduce the amount of work
+         * required when reading/writing.
+         */
+
+        if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+                return -EAGAIN;
+
+        intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+        intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
+        intel_uncore_write_fw(uncore,
+                              GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+        if (__intel_wait_for_register_fw(uncore,
+                                         GEN6_PCODE_MAILBOX,
+                                         GEN6_PCODE_READY, 0,
+                                         fast_timeout_us,
+                                         slow_timeout_ms,
+                                         &mbox))
+                return -ETIMEDOUT;
+
+        if (is_read)
+                *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+        if (is_read && val1)
+                *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
+
+        if (INTEL_GEN(i915) > 6)
+                return gen7_check_mailbox_status(mbox);
         else
-                tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
-        I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
-
-        if (intel_wait_for_register(dev_priv,
-                                    SBI_CTL_STAT,
-                                    SBI_BUSY,
-                                    0,
-                                    100)) {
-                DRM_ERROR("timeout waiting for SBI to complete write\n");
-                return;
-        }
-
-        if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
-                DRM_ERROR("error during SBI write of %x to reg %x\n",
-                          value, reg);
-                return;
-        }
+                return gen6_check_mailbox_status(mbox);
 }
 
-u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+                           u32 *val, u32 *val1)
 {
-        u32 val = 0;
-        vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
-                        reg, &val);
-        return val;
+        int err;
+
+        mutex_lock(&i915->sb_lock);
+        err = __sandybridge_pcode_rw(i915, mbox, val, val1,
+                                     500, 20,
+                                     true);
+        mutex_unlock(&i915->sb_lock);
+
+        if (err) {
+                drm_dbg(&i915->drm,
+                        "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+                        mbox, __builtin_return_address(0), err);
+        }
+
+        return err;
 }
 
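
A sketch of a typical sandybridge_pcode_read() call site; the mailbox name mirrors usage elsewhere in i915 and is illustrative here:

    /* Illustrative: a failed mailbox access surfaces as a -errno. */
    u32 val = 0;
    int err;

    err = sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, &val, NULL);
    if (err)
            return err;     /* pcode rejected the command or timed out */
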
-void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+                                    u32 mbox, u32 val,
+                                    int fast_timeout_us,
+                                    int slow_timeout_ms)
 {
-        vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
-                        reg, &val);
+        int err;
+
+        mutex_lock(&i915->sb_lock);
+        err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
+                                     fast_timeout_us, slow_timeout_ms,
+                                     false);
+        mutex_unlock(&i915->sb_lock);
+
+        if (err) {
+                drm_dbg(&i915->drm,
+                        "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+                        val, mbox, __builtin_return_address(0), err);
+        }
+
+        return err;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+                                  u32 request, u32 reply_mask, u32 reply,
+                                  u32 *status)
+{
+        *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
+                                         500, 0,
+                                         true);
+
+        return *status || ((request & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and, if this times out, for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+                      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+        u32 status;
+        int ret;
+
+        mutex_lock(&i915->sb_lock);
+
+#define COND \
+        skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+        /*
+         * Prime the PCODE by doing a request first. Normally it guarantees
+         * that a subsequent request, at most @timeout_base_ms later, succeeds.
+         * _wait_for() doesn't guarantee when its passed condition is evaluated
+         * the first time, so send the first request explicitly.
+         */
+        if (COND) {
+                ret = 0;
+                goto out;
+        }
+        ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+        if (!ret)
+                goto out;
+
+        /*
+         * The above can time out if the number of requests was low (2 in the
+         * worst case) _and_ PCODE was busy for some reason even after a
+         * (queued) request and @timeout_base_ms delay. As a workaround retry
+         * the poll with preemption disabled to maximize the number of
+         * requests. Increase the timeout from @timeout_base_ms to 50 ms to
+         * account for interrupts that could reduce the number of these
+         * requests, and for any quirks of the PCODE firmware that delay
+         * the request completion.
+         */
+        drm_dbg_kms(&i915->drm,
+                    "PCODE timeout, retrying with preemption disabled\n");
+        drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
+        preempt_disable();
+        ret = wait_for_atomic(COND, 50);
+        preempt_enable();
+
+out:
+        mutex_unlock(&i915->sb_lock);
+        return ret ? ret : status;
+#undef COND
+}
+
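
intel_pcode_init() below is one user of skl_pcode_request(); another typical shape, modeled on how the display code asks pcode to prepare for a cdclk change (mailbox and flag names quoted for illustration):

    /* Illustrative: resend the request until pcode acks the change. */
    int ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
                                SKL_CDCLK_PREPARE_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE,
                                SKL_CDCLK_READY_FOR_CHANGE, 3);
    if (ret)
            drm_err(&i915->drm, "failed to inform PCU about cdclk change\n");
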
+void intel_pcode_init(struct drm_i915_private *i915)
+{
+        int ret;
+
+        if (!IS_DGFX(i915))
+                return;
+
+        ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
+                                DG1_UNCORE_GET_INIT_STATUS,
+                                DG1_UNCORE_INIT_STATUS_COMPLETE,
+                                DG1_UNCORE_INIT_STATUS_COMPLETE, 50);
+        if (ret)
+                drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
 