forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/gpu/drm/i915/i915_irq.c
....@@ -28,14 +28,29 @@
2828
2929 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3030
31
-#include <linux/sysrq.h>
32
-#include <linux/slab.h>
3331 #include <linux/circ_buf.h>
34
-#include <drm/drmP.h>
35
-#include <drm/i915_drm.h>
32
+#include <linux/slab.h>
33
+#include <linux/sysrq.h>
34
+
35
+#include <drm/drm_drv.h>
36
+#include <drm/drm_irq.h>
37
+
38
+#include "display/intel_display_types.h"
39
+#include "display/intel_fifo_underrun.h"
40
+#include "display/intel_hotplug.h"
41
+#include "display/intel_lpe_audio.h"
42
+#include "display/intel_psr.h"
43
+
44
+#include "gt/intel_breadcrumbs.h"
45
+#include "gt/intel_gt.h"
46
+#include "gt/intel_gt_irq.h"
47
+#include "gt/intel_gt_pm_irq.h"
48
+#include "gt/intel_rps.h"
49
+
3650 #include "i915_drv.h"
51
+#include "i915_irq.h"
3752 #include "i915_trace.h"
38
-#include "intel_drv.h"
53
+#include "intel_pm.h"
3954
4055 /**
4156 * DOC: interrupt handling
....@@ -44,6 +59,8 @@
4459 * interrupt handling support. There's a lot more functionality in i915_irq.c
4560 * and related files, but that will be described in separate chapters.
4661 */
62
+
63
+typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
4764
4865 static const u32 hpd_ilk[HPD_NUM_PINS] = {
4966 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
....@@ -62,7 +79,7 @@
6279 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
6380 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
6481 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG
82
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
6683 };
6784
6885 static const u32 hpd_cpt[HPD_NUM_PINS] = {
....@@ -70,7 +87,7 @@
7087 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
7188 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
7289 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
90
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
7491 };
7592
7693 static const u32 hpd_spt[HPD_NUM_PINS] = {
....@@ -78,7 +95,7 @@
7895 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
7996 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
8097 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81
- [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
98
+ [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
8299 };
83100
84101 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
....@@ -87,7 +104,7 @@
87104 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88105 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89106 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
107
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
91108 };
92109
93110 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
....@@ -96,7 +113,7 @@
96113 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97114 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98115 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
116
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
100117 };
101118
102119 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
....@@ -105,132 +122,180 @@
105122 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106123 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107124 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
125
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
109126 };
110127
111
-/* BXT hpd list */
112128 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113129 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114130 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115
- [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
131
+ [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
116132 };
117133
118134 static const u32 hpd_gen11[HPD_NUM_PINS] = {
119
- [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
120
- [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
121
- [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
122
- [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
135
+ [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1),
136
+ [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2),
137
+ [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3),
138
+ [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4),
139
+ [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5),
140
+ [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6),
123141 };
124142
125143 static const u32 hpd_icp[HPD_NUM_PINS] = {
126
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
127
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
128
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
129
- [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
130
- [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
131
- [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
144
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
145
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
146
+ [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
147
+ [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
148
+ [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
149
+ [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
150
+ [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
151
+ [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
152
+ [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
132153 };
133154
134
-/* IIR can theoretically queue up two events. Be paranoid. */
135
-#define GEN8_IRQ_RESET_NDX(type, which) do { \
136
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
137
- POSTING_READ(GEN8_##type##_IMR(which)); \
138
- I915_WRITE(GEN8_##type##_IER(which), 0); \
139
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
140
- POSTING_READ(GEN8_##type##_IIR(which)); \
141
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
142
- POSTING_READ(GEN8_##type##_IIR(which)); \
143
-} while (0)
155
+static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
156
+{
157
+ struct i915_hotplug *hpd = &dev_priv->hotplug;
144158
145
-#define GEN3_IRQ_RESET(type) do { \
146
- I915_WRITE(type##IMR, 0xffffffff); \
147
- POSTING_READ(type##IMR); \
148
- I915_WRITE(type##IER, 0); \
149
- I915_WRITE(type##IIR, 0xffffffff); \
150
- POSTING_READ(type##IIR); \
151
- I915_WRITE(type##IIR, 0xffffffff); \
152
- POSTING_READ(type##IIR); \
153
-} while (0)
159
+ if (HAS_GMCH(dev_priv)) {
160
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
161
+ IS_CHERRYVIEW(dev_priv))
162
+ hpd->hpd = hpd_status_g4x;
163
+ else
164
+ hpd->hpd = hpd_status_i915;
165
+ return;
166
+ }
154167
155
-#define GEN2_IRQ_RESET(type) do { \
156
- I915_WRITE16(type##IMR, 0xffff); \
157
- POSTING_READ16(type##IMR); \
158
- I915_WRITE16(type##IER, 0); \
159
- I915_WRITE16(type##IIR, 0xffff); \
160
- POSTING_READ16(type##IIR); \
161
- I915_WRITE16(type##IIR, 0xffff); \
162
- POSTING_READ16(type##IIR); \
163
-} while (0)
168
+ if (INTEL_GEN(dev_priv) >= 11)
169
+ hpd->hpd = hpd_gen11;
170
+ else if (IS_GEN9_LP(dev_priv))
171
+ hpd->hpd = hpd_bxt;
172
+ else if (INTEL_GEN(dev_priv) >= 8)
173
+ hpd->hpd = hpd_bdw;
174
+ else if (INTEL_GEN(dev_priv) >= 7)
175
+ hpd->hpd = hpd_ivb;
176
+ else
177
+ hpd->hpd = hpd_ilk;
178
+
179
+ if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
180
+ return;
181
+
182
+ if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
183
+ HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
184
+ hpd->pch_hpd = hpd_icp;
185
+ else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
186
+ hpd->pch_hpd = hpd_spt;
187
+ else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
188
+ hpd->pch_hpd = hpd_cpt;
189
+ else if (HAS_PCH_IBX(dev_priv))
190
+ hpd->pch_hpd = hpd_ibx;
191
+ else
192
+ MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
193
+}
194
+
195
+static void
196
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
197
+{
198
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
199
+
200
+ drm_crtc_handle_vblank(&crtc->base);
201
+}
202
+
203
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
204
+ i915_reg_t iir, i915_reg_t ier)
205
+{
206
+ intel_uncore_write(uncore, imr, 0xffffffff);
207
+ intel_uncore_posting_read(uncore, imr);
208
+
209
+ intel_uncore_write(uncore, ier, 0);
210
+
211
+ /* IIR can theoretically queue up two events. Be paranoid. */
212
+ intel_uncore_write(uncore, iir, 0xffffffff);
213
+ intel_uncore_posting_read(uncore, iir);
214
+ intel_uncore_write(uncore, iir, 0xffffffff);
215
+ intel_uncore_posting_read(uncore, iir);
216
+}
217
+
218
+void gen2_irq_reset(struct intel_uncore *uncore)
219
+{
220
+ intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
221
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
222
+
223
+ intel_uncore_write16(uncore, GEN2_IER, 0);
224
+
225
+ /* IIR can theoretically queue up two events. Be paranoid. */
226
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
227
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
228
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
229
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
230
+}
164231
165232 /*
166233 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
167234 */
168
-static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
169
- i915_reg_t reg)
235
+static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
170236 {
171
- u32 val = I915_READ(reg);
237
+ u32 val = intel_uncore_read(uncore, reg);
172238
173239 if (val == 0)
174240 return;
175241
176
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
177
- i915_mmio_reg_offset(reg), val);
178
- I915_WRITE(reg, 0xffffffff);
179
- POSTING_READ(reg);
180
- I915_WRITE(reg, 0xffffffff);
181
- POSTING_READ(reg);
242
+ drm_WARN(&uncore->i915->drm, 1,
243
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
244
+ i915_mmio_reg_offset(reg), val);
245
+ intel_uncore_write(uncore, reg, 0xffffffff);
246
+ intel_uncore_posting_read(uncore, reg);
247
+ intel_uncore_write(uncore, reg, 0xffffffff);
248
+ intel_uncore_posting_read(uncore, reg);
182249 }
183250
184
-static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
185
- i915_reg_t reg)
251
+static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
186252 {
187
- u16 val = I915_READ16(reg);
253
+ u16 val = intel_uncore_read16(uncore, GEN2_IIR);
188254
189255 if (val == 0)
190256 return;
191257
192
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
193
- i915_mmio_reg_offset(reg), val);
194
- I915_WRITE16(reg, 0xffff);
195
- POSTING_READ16(reg);
196
- I915_WRITE16(reg, 0xffff);
197
- POSTING_READ16(reg);
258
+ drm_WARN(&uncore->i915->drm, 1,
259
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
260
+ i915_mmio_reg_offset(GEN2_IIR), val);
261
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
262
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
263
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
264
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
198265 }
199266
200
-#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
201
- gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
202
- I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
203
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
204
- POSTING_READ(GEN8_##type##_IMR(which)); \
205
-} while (0)
267
+void gen3_irq_init(struct intel_uncore *uncore,
268
+ i915_reg_t imr, u32 imr_val,
269
+ i915_reg_t ier, u32 ier_val,
270
+ i915_reg_t iir)
271
+{
272
+ gen3_assert_iir_is_zero(uncore, iir);
206273
207
-#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
208
- gen3_assert_iir_is_zero(dev_priv, type##IIR); \
209
- I915_WRITE(type##IER, (ier_val)); \
210
- I915_WRITE(type##IMR, (imr_val)); \
211
- POSTING_READ(type##IMR); \
212
-} while (0)
274
+ intel_uncore_write(uncore, ier, ier_val);
275
+ intel_uncore_write(uncore, imr, imr_val);
276
+ intel_uncore_posting_read(uncore, imr);
277
+}
213278
214
-#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
215
- gen2_assert_iir_is_zero(dev_priv, type##IIR); \
216
- I915_WRITE16(type##IER, (ier_val)); \
217
- I915_WRITE16(type##IMR, (imr_val)); \
218
- POSTING_READ16(type##IMR); \
219
-} while (0)
279
+void gen2_irq_init(struct intel_uncore *uncore,
280
+ u32 imr_val, u32 ier_val)
281
+{
282
+ gen2_assert_iir_is_zero(uncore);
220283
221
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
222
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
284
+ intel_uncore_write16(uncore, GEN2_IER, ier_val);
285
+ intel_uncore_write16(uncore, GEN2_IMR, imr_val);
286
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
287
+}
223288
224289 /* For display hotplug interrupt */
225290 static inline void
226291 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
227
- uint32_t mask,
228
- uint32_t bits)
292
+ u32 mask,
293
+ u32 bits)
229294 {
230
- uint32_t val;
295
+ u32 val;
231296
232297 lockdep_assert_held(&dev_priv->irq_lock);
233
- WARN_ON(bits & ~mask);
298
+ drm_WARN_ON(&dev_priv->drm, bits & ~mask);
234299
235300 val = I915_READ(PORT_HOTPLUG_EN);
236301 val &= ~mask;
....@@ -251,47 +316,12 @@
251316 * version is also available.
252317 */
253318 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
254
- uint32_t mask,
255
- uint32_t bits)
319
+ u32 mask,
320
+ u32 bits)
256321 {
257322 spin_lock_irq(&dev_priv->irq_lock);
258323 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
259324 spin_unlock_irq(&dev_priv->irq_lock);
260
-}
261
-
262
-static u32
263
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
264
- const unsigned int bank, const unsigned int bit);
265
-
266
-static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
267
- const unsigned int bank,
268
- const unsigned int bit)
269
-{
270
- void __iomem * const regs = i915->regs;
271
- u32 dw;
272
-
273
- lockdep_assert_held(&i915->irq_lock);
274
-
275
- dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
276
- if (dw & BIT(bit)) {
277
- /*
278
- * According to the BSpec, DW_IIR bits cannot be cleared without
279
- * first servicing the Selector & Shared IIR registers.
280
- */
281
- gen11_gt_engine_identity(i915, bank, bit);
282
-
283
- /*
284
- * We locked GT INT DW by reading it. If we want to (try
285
- * to) recover from this succesfully, we need to clear
286
- * our bit, otherwise we are locking the register for
287
- * everybody.
288
- */
289
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
290
-
291
- return true;
292
- }
293
-
294
- return false;
295325 }
296326
297327 /**
....@@ -301,16 +331,16 @@
301331 * @enabled_irq_mask: mask of interrupt bits to enable
302332 */
303333 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
304
- uint32_t interrupt_mask,
305
- uint32_t enabled_irq_mask)
334
+ u32 interrupt_mask,
335
+ u32 enabled_irq_mask)
306336 {
307
- uint32_t new_val;
337
+ u32 new_val;
308338
309339 lockdep_assert_held(&dev_priv->irq_lock);
310340
311
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
341
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
312342
313
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
343
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
314344 return;
315345
316346 new_val = dev_priv->irq_mask;
....@@ -325,270 +355,23 @@
325355 }
326356
327357 /**
328
- * ilk_update_gt_irq - update GTIMR
329
- * @dev_priv: driver private
330
- * @interrupt_mask: mask of interrupt bits to update
331
- * @enabled_irq_mask: mask of interrupt bits to enable
332
- */
333
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
334
- uint32_t interrupt_mask,
335
- uint32_t enabled_irq_mask)
336
-{
337
- lockdep_assert_held(&dev_priv->irq_lock);
338
-
339
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
340
-
341
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
342
- return;
343
-
344
- dev_priv->gt_irq_mask &= ~interrupt_mask;
345
- dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
346
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
347
-}
348
-
349
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
350
-{
351
- ilk_update_gt_irq(dev_priv, mask, mask);
352
- POSTING_READ_FW(GTIMR);
353
-}
354
-
355
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
356
-{
357
- ilk_update_gt_irq(dev_priv, mask, 0);
358
-}
359
-
360
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
361
-{
362
- WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
363
-
364
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
365
-}
366
-
367
-static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
368
-{
369
- if (INTEL_GEN(dev_priv) >= 11)
370
- return GEN11_GPM_WGBOXPERF_INTR_MASK;
371
- else if (INTEL_GEN(dev_priv) >= 8)
372
- return GEN8_GT_IMR(2);
373
- else
374
- return GEN6_PMIMR;
375
-}
376
-
377
-static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
378
-{
379
- if (INTEL_GEN(dev_priv) >= 11)
380
- return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
381
- else if (INTEL_GEN(dev_priv) >= 8)
382
- return GEN8_GT_IER(2);
383
- else
384
- return GEN6_PMIER;
385
-}
386
-
387
-/**
388
- * snb_update_pm_irq - update GEN6_PMIMR
389
- * @dev_priv: driver private
390
- * @interrupt_mask: mask of interrupt bits to update
391
- * @enabled_irq_mask: mask of interrupt bits to enable
392
- */
393
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
394
- uint32_t interrupt_mask,
395
- uint32_t enabled_irq_mask)
396
-{
397
- uint32_t new_val;
398
-
399
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
400
-
401
- lockdep_assert_held(&dev_priv->irq_lock);
402
-
403
- new_val = dev_priv->pm_imr;
404
- new_val &= ~interrupt_mask;
405
- new_val |= (~enabled_irq_mask & interrupt_mask);
406
-
407
- if (new_val != dev_priv->pm_imr) {
408
- dev_priv->pm_imr = new_val;
409
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
410
- POSTING_READ(gen6_pm_imr(dev_priv));
411
- }
412
-}
413
-
414
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
415
-{
416
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
417
- return;
418
-
419
- snb_update_pm_irq(dev_priv, mask, mask);
420
-}
421
-
422
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
423
-{
424
- snb_update_pm_irq(dev_priv, mask, 0);
425
-}
426
-
427
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
428
-{
429
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
430
- return;
431
-
432
- __gen6_mask_pm_irq(dev_priv, mask);
433
-}
434
-
435
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
436
-{
437
- i915_reg_t reg = gen6_pm_iir(dev_priv);
438
-
439
- lockdep_assert_held(&dev_priv->irq_lock);
440
-
441
- I915_WRITE(reg, reset_mask);
442
- I915_WRITE(reg, reset_mask);
443
- POSTING_READ(reg);
444
-}
445
-
446
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
447
-{
448
- lockdep_assert_held(&dev_priv->irq_lock);
449
-
450
- dev_priv->pm_ier |= enable_mask;
451
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
452
- gen6_unmask_pm_irq(dev_priv, enable_mask);
453
- /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
454
-}
455
-
456
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
457
-{
458
- lockdep_assert_held(&dev_priv->irq_lock);
459
-
460
- dev_priv->pm_ier &= ~disable_mask;
461
- __gen6_mask_pm_irq(dev_priv, disable_mask);
462
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
463
- /* though a barrier is missing here, but don't really need a one */
464
-}
465
-
466
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
467
-{
468
- spin_lock_irq(&dev_priv->irq_lock);
469
-
470
- while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
471
- ;
472
-
473
- dev_priv->gt_pm.rps.pm_iir = 0;
474
-
475
- spin_unlock_irq(&dev_priv->irq_lock);
476
-}
477
-
478
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
479
-{
480
- spin_lock_irq(&dev_priv->irq_lock);
481
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
482
- dev_priv->gt_pm.rps.pm_iir = 0;
483
- spin_unlock_irq(&dev_priv->irq_lock);
484
-}
485
-
486
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
487
-{
488
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
489
-
490
- if (READ_ONCE(rps->interrupts_enabled))
491
- return;
492
-
493
- spin_lock_irq(&dev_priv->irq_lock);
494
- WARN_ON_ONCE(rps->pm_iir);
495
-
496
- if (INTEL_GEN(dev_priv) >= 11)
497
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
498
- else
499
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
500
-
501
- rps->interrupts_enabled = true;
502
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
503
-
504
- spin_unlock_irq(&dev_priv->irq_lock);
505
-}
506
-
507
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
508
-{
509
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
510
-
511
- if (!READ_ONCE(rps->interrupts_enabled))
512
- return;
513
-
514
- spin_lock_irq(&dev_priv->irq_lock);
515
- rps->interrupts_enabled = false;
516
-
517
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
518
-
519
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
520
-
521
- spin_unlock_irq(&dev_priv->irq_lock);
522
- synchronize_irq(dev_priv->drm.irq);
523
-
524
- /* Now that we will not be generating any more work, flush any
525
- * outstanding tasks. As we are called on the RPS idle path,
526
- * we will reset the GPU to minimum frequencies, so the current
527
- * state of the worker can be discarded.
528
- */
529
- cancel_work_sync(&rps->work);
530
- if (INTEL_GEN(dev_priv) >= 11)
531
- gen11_reset_rps_interrupts(dev_priv);
532
- else
533
- gen6_reset_rps_interrupts(dev_priv);
534
-}
535
-
536
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
537
-{
538
- assert_rpm_wakelock_held(dev_priv);
539
-
540
- spin_lock_irq(&dev_priv->irq_lock);
541
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
542
- spin_unlock_irq(&dev_priv->irq_lock);
543
-}
544
-
545
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
546
-{
547
- assert_rpm_wakelock_held(dev_priv);
548
-
549
- spin_lock_irq(&dev_priv->irq_lock);
550
- if (!dev_priv->guc.interrupts_enabled) {
551
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
552
- dev_priv->pm_guc_events);
553
- dev_priv->guc.interrupts_enabled = true;
554
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
555
- }
556
- spin_unlock_irq(&dev_priv->irq_lock);
557
-}
558
-
559
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
560
-{
561
- assert_rpm_wakelock_held(dev_priv);
562
-
563
- spin_lock_irq(&dev_priv->irq_lock);
564
- dev_priv->guc.interrupts_enabled = false;
565
-
566
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
567
-
568
- spin_unlock_irq(&dev_priv->irq_lock);
569
- synchronize_irq(dev_priv->drm.irq);
570
-
571
- gen9_reset_guc_interrupts(dev_priv);
572
-}
573
-
574
-/**
575358 * bdw_update_port_irq - update DE port interrupt
576359 * @dev_priv: driver private
577360 * @interrupt_mask: mask of interrupt bits to update
578361 * @enabled_irq_mask: mask of interrupt bits to enable
579362 */
580363 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
581
- uint32_t interrupt_mask,
582
- uint32_t enabled_irq_mask)
364
+ u32 interrupt_mask,
365
+ u32 enabled_irq_mask)
583366 {
584
- uint32_t new_val;
585
- uint32_t old_val;
367
+ u32 new_val;
368
+ u32 old_val;
586369
587370 lockdep_assert_held(&dev_priv->irq_lock);
588371
589
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
372
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
590373
591
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
374
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
592375 return;
593376
594377 old_val = I915_READ(GEN8_DE_PORT_IMR);
....@@ -612,16 +395,16 @@
612395 */
613396 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
614397 enum pipe pipe,
615
- uint32_t interrupt_mask,
616
- uint32_t enabled_irq_mask)
398
+ u32 interrupt_mask,
399
+ u32 enabled_irq_mask)
617400 {
618
- uint32_t new_val;
401
+ u32 new_val;
619402
620403 lockdep_assert_held(&dev_priv->irq_lock);
621404
622
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
405
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
623406
624
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
407
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
625408 return;
626409
627410 new_val = dev_priv->de_irq_mask[pipe];
....@@ -642,18 +425,18 @@
642425 * @enabled_irq_mask: mask of interrupt bits to enable
643426 */
644427 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
645
- uint32_t interrupt_mask,
646
- uint32_t enabled_irq_mask)
428
+ u32 interrupt_mask,
429
+ u32 enabled_irq_mask)
647430 {
648
- uint32_t sdeimr = I915_READ(SDEIMR);
431
+ u32 sdeimr = I915_READ(SDEIMR);
649432 sdeimr &= ~interrupt_mask;
650433 sdeimr |= (~enabled_irq_mask & interrupt_mask);
651434
652
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
435
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
653436
654437 lockdep_assert_held(&dev_priv->irq_lock);
655438
656
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
439
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
657440 return;
658441
659442 I915_WRITE(SDEIMR, sdeimr);
....@@ -675,13 +458,15 @@
675458 * On pipe A we don't support the PSR interrupt yet,
676459 * on pipe B and C the same bit MBZ.
677460 */
678
- if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
461
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
462
+ status_mask & PIPE_A_PSR_STATUS_VLV))
679463 return 0;
680464 /*
681465 * On pipe B and C we don't support the PSR interrupt yet, on pipe
682466 * A the same bit is for perf counters which we don't use either.
683467 */
684
- if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
468
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
469
+ status_mask & PIPE_B_PSR_STATUS_VLV))
685470 return 0;
686471
687472 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
....@@ -693,10 +478,11 @@
693478 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
694479
695480 out:
696
- WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
697
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
698
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
699
- pipe_name(pipe), enable_mask, status_mask);
481
+ drm_WARN_ONCE(&dev_priv->drm,
482
+ enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
483
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
484
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
485
+ pipe_name(pipe), enable_mask, status_mask);
700486
701487 return enable_mask;
702488 }
....@@ -707,12 +493,12 @@
707493 i915_reg_t reg = PIPESTAT(pipe);
708494 u32 enable_mask;
709495
710
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
711
- "pipe %c: status_mask=0x%x\n",
712
- pipe_name(pipe), status_mask);
496
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
497
+ "pipe %c: status_mask=0x%x\n",
498
+ pipe_name(pipe), status_mask);
713499
714500 lockdep_assert_held(&dev_priv->irq_lock);
715
- WARN_ON(!intel_irqs_enabled(dev_priv));
501
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
716502
717503 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
718504 return;
....@@ -730,12 +516,12 @@
730516 i915_reg_t reg = PIPESTAT(pipe);
731517 u32 enable_mask;
732518
733
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
734
- "pipe %c: status_mask=0x%x\n",
735
- pipe_name(pipe), status_mask);
519
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
520
+ "pipe %c: status_mask=0x%x\n",
521
+ pipe_name(pipe), status_mask);
736522
737523 lockdep_assert_held(&dev_priv->irq_lock);
738
- WARN_ON(!intel_irqs_enabled(dev_priv));
524
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
739525
740526 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
741527 return;
....@@ -747,13 +533,21 @@
747533 POSTING_READ(reg);
748534 }
749535
536
+static bool i915_has_asle(struct drm_i915_private *dev_priv)
537
+{
538
+ if (!dev_priv->opregion.asle)
539
+ return false;
540
+
541
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
542
+}
543
+
750544 /**
751545 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
752546 * @dev_priv: i915 device private
753547 */
754548 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
755549 {
756
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
550
+ if (!i915_has_asle(dev_priv))
757551 return;
758552
759553 spin_lock_irq(&dev_priv->irq_lock);
....@@ -819,13 +613,29 @@
819613 /* Called from drm generic code, passed a 'crtc', which
820614 * we use as a pipe index
821615 */
822
-static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
616
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
823617 {
824
- struct drm_i915_private *dev_priv = to_i915(dev);
618
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
619
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
620
+ const struct drm_display_mode *mode = &vblank->hwmode;
621
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
825622 i915_reg_t high_frame, low_frame;
826623 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
827
- const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
828624 unsigned long irqflags;
625
+
626
+ /*
627
+ * On i965gm TV output the frame counter only works up to
628
+ * the point when we enable the TV encoder. After that the
629
+ * frame counter ceases to work and reads zero. We need a
630
+ * vblank wait before enabling the TV encoder and so we
631
+ * have to enable vblank interrupts while the frame counter
632
+ * is still in a working state. However the core vblank code
633
+ * does not like us returning non-zero frame counter values
634
+ * when we've told it that we don't have a working frame
635
+ * counter. Thus we must stop non-zero values leaking out.
636
+ */
637
+ if (!vblank->max_vblank_count)
638
+ return 0;
829639
830640 htotal = mode->crtc_htotal;
831641 hsync_start = mode->crtc_hsync_start;
....@@ -850,9 +660,9 @@
850660 * register.
851661 */
852662 do {
853
- high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
854
- low = I915_READ_FW(low_frame);
855
- high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
663
+ high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
664
+ low = intel_de_read_fw(dev_priv, low_frame);
665
+ high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
856666 } while (high1 != high2);
857667
858668 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
....@@ -869,9 +679,10 @@
869679 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
870680 }
871681
872
-static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
682
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
873683 {
874
- struct drm_i915_private *dev_priv = to_i915(dev);
684
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
685
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
875686
876687 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
877688 }
....@@ -908,15 +719,17 @@
908719 * pipe frame time stamp. The time stamp value
909720 * is sampled at every start of vertical blank.
910721 */
911
- scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
722
+ scan_prev_time = intel_de_read_fw(dev_priv,
723
+ PIPE_FRMTMSTMP(crtc->pipe));
912724
913725 /*
914726 * The TIMESTAMP_CTR register has the current
915727 * time stamp value.
916728 */
917
- scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
729
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
918730
919
- scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
731
+ scan_post_time = intel_de_read_fw(dev_priv,
732
+ PIPE_FRMTMSTMP(crtc->pipe));
920733 } while (scan_post_time != scan_prev_time);
921734
922735 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
....@@ -927,7 +740,10 @@
927740 return scanline;
928741 }
929742
930
-/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
743
+/*
744
+ * intel_de_read_fw(), only for fast reads of display block, no need for
745
+ * forcewake etc.
746
+ */
931747 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
932748 {
933749 struct drm_device *dev = crtc->base.dev;
....@@ -943,17 +759,17 @@
943759 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
944760 mode = &vblank->hwmode;
945761
946
- if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
762
+ if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
947763 return __intel_get_crtc_scanline_from_timestamp(crtc);
948764
949765 vtotal = mode->crtc_vtotal;
950766 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
951767 vtotal /= 2;
952768
953
- if (IS_GEN2(dev_priv))
954
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
769
+ if (IS_GEN(dev_priv, 2))
770
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
955771 else
956
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
772
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
957773
958774 /*
959775 * On HSW, the DSL reg (0x70000) appears to return 0 if we
....@@ -972,7 +788,7 @@
972788
973789 for (i = 0; i < 100; i++) {
974790 udelay(1);
975
- temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
791
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
976792 if (temp != position) {
977793 position = temp;
978794 break;
....@@ -987,21 +803,27 @@
987803 return (position + crtc->scanline_offset) % vtotal;
988804 }
989805
990
-static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
991
- bool in_vblank_irq, int *vpos, int *hpos,
806
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
807
+ bool in_vblank_irq,
808
+ int *vpos, int *hpos,
992809 ktime_t *stime, ktime_t *etime,
993810 const struct drm_display_mode *mode)
994811 {
812
+ struct drm_device *dev = _crtc->dev;
995813 struct drm_i915_private *dev_priv = to_i915(dev);
996
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
997
- pipe);
814
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
815
+ enum pipe pipe = crtc->pipe;
998816 int position;
999817 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1000818 unsigned long irqflags;
819
+ bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
820
+ IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
821
+ crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
1001822
1002
- if (WARN_ON(!mode->crtc_clock)) {
1003
- DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
1004
- "pipe %c\n", pipe_name(pipe));
823
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
824
+ drm_dbg(&dev_priv->drm,
825
+ "trying to get scanoutpos for disabled "
826
+ "pipe %c\n", pipe_name(pipe));
1005827 return false;
1006828 }
1007829
....@@ -1030,17 +852,17 @@
1030852 if (stime)
1031853 *stime = ktime_get();
1032854
1033
- if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
855
+ if (use_scanline_counter) {
1034856 /* No obvious pixelcount register. Only query vertical
1035857 * scanout position from Display scan line register.
1036858 */
1037
- position = __intel_get_crtc_scanline(intel_crtc);
859
+ position = __intel_get_crtc_scanline(crtc);
1038860 } else {
1039861 /* Have access to pixelcount since start of frame.
1040862 * We can split this into vertical and horizontal
1041863 * scanout position.
1042864 */
1043
- position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
865
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
1044866
1045867 /* convert to pixel counts */
1046868 vbl_start *= htotal;
....@@ -1090,7 +912,7 @@
1090912 else
1091913 position += vtotal - vbl_end;
1092914
1093
- if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
915
+ if (use_scanline_counter) {
1094916 *vpos = position;
1095917 *hpos = 0;
1096918 } else {
....@@ -1099,6 +921,14 @@
1099921 }
1100922
1101923 return true;
924
+}
925
+
926
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
927
+ ktime_t *vblank_time, bool in_vblank_irq)
928
+{
929
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
930
+ crtc, max_error, vblank_time, in_vblank_irq,
931
+ i915_get_crtc_scanoutpos);
1102932 }
1103933
1104934 int intel_get_crtc_scanline(struct intel_crtc *crtc)
....@@ -1114,255 +944,8 @@
1114944 return position;
1115945 }
1116946
1117
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1118
-{
1119
- u32 busy_up, busy_down, max_avg, min_avg;
1120
- u8 new_delay;
1121
-
1122
- spin_lock(&mchdev_lock);
1123
-
1124
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1125
-
1126
- new_delay = dev_priv->ips.cur_delay;
1127
-
1128
- I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1129
- busy_up = I915_READ(RCPREVBSYTUPAVG);
1130
- busy_down = I915_READ(RCPREVBSYTDNAVG);
1131
- max_avg = I915_READ(RCBMAXAVG);
1132
- min_avg = I915_READ(RCBMINAVG);
1133
-
1134
- /* Handle RCS change request from hw */
1135
- if (busy_up > max_avg) {
1136
- if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1137
- new_delay = dev_priv->ips.cur_delay - 1;
1138
- if (new_delay < dev_priv->ips.max_delay)
1139
- new_delay = dev_priv->ips.max_delay;
1140
- } else if (busy_down < min_avg) {
1141
- if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1142
- new_delay = dev_priv->ips.cur_delay + 1;
1143
- if (new_delay > dev_priv->ips.min_delay)
1144
- new_delay = dev_priv->ips.min_delay;
1145
- }
1146
-
1147
- if (ironlake_set_drps(dev_priv, new_delay))
1148
- dev_priv->ips.cur_delay = new_delay;
1149
-
1150
- spin_unlock(&mchdev_lock);
1151
-
1152
- return;
1153
-}
1154
-
1155
-static void notify_ring(struct intel_engine_cs *engine)
1156
-{
1157
- const u32 seqno = intel_engine_get_seqno(engine);
1158
- struct i915_request *rq = NULL;
1159
- struct task_struct *tsk = NULL;
1160
- struct intel_wait *wait;
1161
-
1162
- if (unlikely(!engine->breadcrumbs.irq_armed))
1163
- return;
1164
-
1165
- rcu_read_lock();
1166
-
1167
- spin_lock(&engine->breadcrumbs.irq_lock);
1168
- wait = engine->breadcrumbs.irq_wait;
1169
- if (wait) {
1170
- /*
1171
- * We use a callback from the dma-fence to submit
1172
- * requests after waiting on our own requests. To
1173
- * ensure minimum delay in queuing the next request to
1174
- * hardware, signal the fence now rather than wait for
1175
- * the signaler to be woken up. We still wake up the
1176
- * waiter in order to handle the irq-seqno coherency
1177
- * issues (we may receive the interrupt before the
1178
- * seqno is written, see __i915_request_irq_complete())
1179
- * and to handle coalescing of multiple seqno updates
1180
- * and many waiters.
1181
- */
1182
- if (i915_seqno_passed(seqno, wait->seqno)) {
1183
- struct i915_request *waiter = wait->request;
1184
-
1185
- if (waiter &&
1186
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1187
- &waiter->fence.flags) &&
1188
- intel_wait_check_request(wait, waiter))
1189
- rq = i915_request_get(waiter);
1190
-
1191
- tsk = wait->tsk;
1192
- } else {
1193
- if (engine->irq_seqno_barrier &&
1194
- i915_seqno_passed(seqno, wait->seqno - 1)) {
1195
- set_bit(ENGINE_IRQ_BREADCRUMB,
1196
- &engine->irq_posted);
1197
- tsk = wait->tsk;
1198
- }
1199
- }
1200
-
1201
- engine->breadcrumbs.irq_count++;
1202
- } else {
1203
- if (engine->breadcrumbs.irq_armed)
1204
- __intel_engine_disarm_breadcrumbs(engine);
1205
- }
1206
- spin_unlock(&engine->breadcrumbs.irq_lock);
1207
-
1208
- if (rq) {
1209
- spin_lock(&rq->lock);
1210
- dma_fence_signal_locked(&rq->fence);
1211
- GEM_BUG_ON(!i915_request_completed(rq));
1212
- spin_unlock(&rq->lock);
1213
-
1214
- i915_request_put(rq);
1215
- }
1216
-
1217
- if (tsk && tsk->state & TASK_NORMAL)
1218
- wake_up_process(tsk);
1219
-
1220
- rcu_read_unlock();
1221
-
1222
- trace_intel_engine_notify(engine, wait);
1223
-}
1224
-
1225
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
1226
- struct intel_rps_ei *ei)
1227
-{
1228
- ei->ktime = ktime_get_raw();
1229
- ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1230
- ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1231
-}
1232
-
1233
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1234
-{
1235
- memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
1236
-}
1237
-
1238
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1239
-{
1240
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
1241
- const struct intel_rps_ei *prev = &rps->ei;
1242
- struct intel_rps_ei now;
1243
- u32 events = 0;
1244
-
1245
- if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1246
- return 0;
1247
-
1248
- vlv_c0_read(dev_priv, &now);
1249
-
1250
- if (prev->ktime) {
1251
- u64 time, c0;
1252
- u32 render, media;
1253
-
1254
- time = ktime_us_delta(now.ktime, prev->ktime);
1255
-
1256
- time *= dev_priv->czclk_freq;
1257
-
1258
- /* Workload can be split between render + media,
1259
- * e.g. SwapBuffers being blitted in X after being rendered in
1260
- * mesa. To account for this we need to combine both engines
1261
- * into our activity counter.
1262
- */
1263
- render = now.render_c0 - prev->render_c0;
1264
- media = now.media_c0 - prev->media_c0;
1265
- c0 = max(render, media);
1266
- c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1267
-
1268
- if (c0 > time * rps->power.up_threshold)
1269
- events = GEN6_PM_RP_UP_THRESHOLD;
1270
- else if (c0 < time * rps->power.down_threshold)
1271
- events = GEN6_PM_RP_DOWN_THRESHOLD;
1272
- }
1273
-
1274
- rps->ei = now;
1275
- return events;
1276
-}
1277
-
1278
-static void gen6_pm_rps_work(struct work_struct *work)
1279
-{
1280
- struct drm_i915_private *dev_priv =
1281
- container_of(work, struct drm_i915_private, gt_pm.rps.work);
1282
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
1283
- bool client_boost = false;
1284
- int new_delay, adj, min, max;
1285
- u32 pm_iir = 0;
1286
-
1287
- spin_lock_irq(&dev_priv->irq_lock);
1288
- if (rps->interrupts_enabled) {
1289
- pm_iir = fetch_and_zero(&rps->pm_iir);
1290
- client_boost = atomic_read(&rps->num_waiters);
1291
- }
1292
- spin_unlock_irq(&dev_priv->irq_lock);
1293
-
1294
- /* Make sure we didn't queue anything we're not going to process. */
1295
- WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1296
- if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1297
- goto out;
1298
-
1299
- mutex_lock(&dev_priv->pcu_lock);
1300
-
1301
- pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1302
-
1303
- adj = rps->last_adj;
1304
- new_delay = rps->cur_freq;
1305
- min = rps->min_freq_softlimit;
1306
- max = rps->max_freq_softlimit;
1307
- if (client_boost)
1308
- max = rps->max_freq;
1309
- if (client_boost && new_delay < rps->boost_freq) {
1310
- new_delay = rps->boost_freq;
1311
- adj = 0;
1312
- } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1313
- if (adj > 0)
1314
- adj *= 2;
1315
- else /* CHV needs even encode values */
1316
- adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1317
-
1318
- if (new_delay >= rps->max_freq_softlimit)
1319
- adj = 0;
1320
- } else if (client_boost) {
1321
- adj = 0;
1322
- } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1323
- if (rps->cur_freq > rps->efficient_freq)
1324
- new_delay = rps->efficient_freq;
1325
- else if (rps->cur_freq > rps->min_freq_softlimit)
1326
- new_delay = rps->min_freq_softlimit;
1327
- adj = 0;
1328
- } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1329
- if (adj < 0)
1330
- adj *= 2;
1331
- else /* CHV needs even encode values */
1332
- adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1333
-
1334
- if (new_delay <= rps->min_freq_softlimit)
1335
- adj = 0;
1336
- } else { /* unknown event */
1337
- adj = 0;
1338
- }
1339
-
1340
- rps->last_adj = adj;
1341
-
1342
- /* sysfs frequency interfaces may have snuck in while servicing the
1343
- * interrupt
1344
- */
1345
- new_delay += adj;
1346
- new_delay = clamp_t(int, new_delay, min, max);
1347
-
1348
- if (intel_set_rps(dev_priv, new_delay)) {
1349
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1350
- rps->last_adj = 0;
1351
- }
1352
-
1353
- mutex_unlock(&dev_priv->pcu_lock);
1354
-
1355
-out:
1356
- /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1357
- spin_lock_irq(&dev_priv->irq_lock);
1358
- if (rps->interrupts_enabled)
1359
- gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
1360
- spin_unlock_irq(&dev_priv->irq_lock);
1361
-}
1362
-
1363
-
1364947 /**
1365
- * ivybridge_parity_work - Workqueue called when a parity error interrupt
948
+ * ivb_parity_work - Workqueue called when a parity error interrupt
1366949 * occurred.
1367950 * @work: workqueue struct
1368951 *
....@@ -1370,14 +953,15 @@
1370953 * this event, userspace should try to remap the bad rows since statistically
1371954 * it is likely the same row is more likely to go bad again.
1372955 */
1373
-static void ivybridge_parity_work(struct work_struct *work)
956
+static void ivb_parity_work(struct work_struct *work)
1374957 {
1375958 struct drm_i915_private *dev_priv =
1376959 container_of(work, typeof(*dev_priv), l3_parity.error_work);
960
+ struct intel_gt *gt = &dev_priv->gt;
1377961 u32 error_status, row, bank, subbank;
1378962 char *parity_event[6];
1379
- uint32_t misccpctl;
1380
- uint8_t slice = 0;
963
+ u32 misccpctl;
964
+ u8 slice = 0;
1381965
1382966 /* We must turn off DOP level clock gating to access the L3 registers.
1383967 * In order to prevent a get/put style interface, acquire struct mutex
....@@ -1386,7 +970,7 @@
1386970 mutex_lock(&dev_priv->drm.struct_mutex);
1387971
1388972 /* If we've screwed up tracking, just let the interrupt fire again */
1389
- if (WARN_ON(!dev_priv->l3_parity.which_slice))
973
+ if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1390974 goto out;
1391975
1392976 misccpctl = I915_READ(GEN7_MISCCPCTL);
....@@ -1397,7 +981,8 @@
1397981 i915_reg_t reg;
1398982
1399983 slice--;
1400
- if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
984
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
985
+ slice >= NUM_L3_SLICES(dev_priv)))
1401986 break;
1402987
1403988 dev_priv->l3_parity.which_slice &= ~(1<<slice);
....@@ -1434,159 +1019,29 @@
14341019 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
14351020
14361021 out:
1437
- WARN_ON(dev_priv->l3_parity.which_slice);
1438
- spin_lock_irq(&dev_priv->irq_lock);
1439
- gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1440
- spin_unlock_irq(&dev_priv->irq_lock);
1022
+ drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1023
+ spin_lock_irq(&gt->irq_lock);
1024
+ gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1025
+ spin_unlock_irq(&gt->irq_lock);
14411026
14421027 mutex_unlock(&dev_priv->drm.struct_mutex);
1443
-}
1444
-
1445
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1446
- u32 iir)
1447
-{
1448
- if (!HAS_L3_DPF(dev_priv))
1449
- return;
1450
-
1451
- spin_lock(&dev_priv->irq_lock);
1452
- gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1453
- spin_unlock(&dev_priv->irq_lock);
1454
-
1455
- iir &= GT_PARITY_ERROR(dev_priv);
1456
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1457
- dev_priv->l3_parity.which_slice |= 1 << 1;
1458
-
1459
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1460
- dev_priv->l3_parity.which_slice |= 1 << 0;
1461
-
1462
- queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1463
-}
1464
-
1465
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1466
- u32 gt_iir)
1467
-{
1468
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
1469
- notify_ring(dev_priv->engine[RCS]);
1470
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
1471
- notify_ring(dev_priv->engine[VCS]);
1472
-}
1473
-
1474
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1475
- u32 gt_iir)
1476
-{
1477
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
1478
- notify_ring(dev_priv->engine[RCS]);
1479
- if (gt_iir & GT_BSD_USER_INTERRUPT)
1480
- notify_ring(dev_priv->engine[VCS]);
1481
- if (gt_iir & GT_BLT_USER_INTERRUPT)
1482
- notify_ring(dev_priv->engine[BCS]);
1483
-
1484
- if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1485
- GT_BSD_CS_ERROR_INTERRUPT |
1486
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1487
- DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1488
-
1489
- if (gt_iir & GT_PARITY_ERROR(dev_priv))
1490
- ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1491
-}
1492
-
1493
-static void
1494
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1495
-{
1496
- bool tasklet = false;
1497
-
1498
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1499
- tasklet = true;
1500
-
1501
- if (iir & GT_RENDER_USER_INTERRUPT) {
1502
- notify_ring(engine);
1503
- tasklet |= USES_GUC_SUBMISSION(engine->i915);
1504
- }
1505
-
1506
- if (tasklet)
1507
- tasklet_hi_schedule(&engine->execlists.tasklet);
1508
-}
1509
-
1510
-static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1511
- u32 master_ctl, u32 gt_iir[4])
1512
-{
1513
- void __iomem * const regs = i915->regs;
1514
-
1515
-#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1516
- GEN8_GT_BCS_IRQ | \
1517
- GEN8_GT_VCS1_IRQ | \
1518
- GEN8_GT_VCS2_IRQ | \
1519
- GEN8_GT_VECS_IRQ | \
1520
- GEN8_GT_PM_IRQ | \
1521
- GEN8_GT_GUC_IRQ)
1522
-
1523
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1524
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1525
- if (likely(gt_iir[0]))
1526
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1527
- }
1528
-
1529
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1530
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1531
- if (likely(gt_iir[1]))
1532
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1533
- }
1534
-
1535
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1536
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1537
- if (likely(gt_iir[2] & (i915->pm_rps_events |
1538
- i915->pm_guc_events)))
1539
- raw_reg_write(regs, GEN8_GT_IIR(2),
1540
- gt_iir[2] & (i915->pm_rps_events |
1541
- i915->pm_guc_events));
1542
- }
1543
-
1544
- if (master_ctl & GEN8_GT_VECS_IRQ) {
1545
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1546
- if (likely(gt_iir[3]))
1547
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1548
- }
1549
-}
1550
-
1551
-static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1552
- u32 master_ctl, u32 gt_iir[4])
1553
-{
1554
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1555
- gen8_cs_irq_handler(i915->engine[RCS],
1556
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1557
- gen8_cs_irq_handler(i915->engine[BCS],
1558
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1559
- }
1560
-
1561
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1562
- gen8_cs_irq_handler(i915->engine[VCS],
1563
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1564
- gen8_cs_irq_handler(i915->engine[VCS2],
1565
- gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
1566
- }
1567
-
1568
- if (master_ctl & GEN8_GT_VECS_IRQ) {
1569
- gen8_cs_irq_handler(i915->engine[VECS],
1570
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1571
- }
1572
-
1573
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1574
- gen6_rps_irq_handler(i915, gt_iir[2]);
1575
- gen9_guc_irq_handler(i915, gt_iir[2]);
1576
- }
15771028 }
15781029
15791030 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
15801031 {
15811032 switch (pin) {
1582
- case HPD_PORT_C:
1033
+ case HPD_PORT_TC1:
15831034 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1584
- case HPD_PORT_D:
1035
+ case HPD_PORT_TC2:
15851036 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1586
- case HPD_PORT_E:
1037
+ case HPD_PORT_TC3:
15871038 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1588
- case HPD_PORT_F:
1039
+ case HPD_PORT_TC4:
15891040 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1041
+ case HPD_PORT_TC5:
1042
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1043
+ case HPD_PORT_TC6:
1044
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
15901045 default:
15911046 return false;
15921047 }
....@@ -1610,9 +1065,11 @@
16101065 {
16111066 switch (pin) {
16121067 case HPD_PORT_A:
1613
- return val & ICP_DDIA_HPD_LONG_DETECT;
1068
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
16141069 case HPD_PORT_B:
1615
- return val & ICP_DDIB_HPD_LONG_DETECT;
1070
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
1071
+ case HPD_PORT_C:
1072
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
16161073 default:
16171074 return false;
16181075 }
....@@ -1621,14 +1078,18 @@
16211078 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
16221079 {
16231080 switch (pin) {
1624
- case HPD_PORT_C:
1081
+ case HPD_PORT_TC1:
16251082 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1626
- case HPD_PORT_D:
1083
+ case HPD_PORT_TC2:
16271084 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1628
- case HPD_PORT_E:
1085
+ case HPD_PORT_TC3:
16291086 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1630
- case HPD_PORT_F:
1087
+ case HPD_PORT_TC4:
16311088 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1089
+ case HPD_PORT_TC5:
1090
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1091
+ case HPD_PORT_TC6:
1092
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
16321093 default:
16331094 return false;
16341095 }
....@@ -1713,6 +1174,8 @@
17131174 {
17141175 enum hpd_pin pin;
17151176
1177
+ BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1178
+
17161179 for_each_hpd_pin(pin) {
17171180 if ((hpd[pin] & hotplug_trigger) == 0)
17181181 continue;
....@@ -1723,8 +1186,9 @@
17231186 *long_mask |= BIT(pin);
17241187 }
17251188
1726
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1727
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1189
+ drm_dbg(&dev_priv->drm,
1190
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1191
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
17281192
17291193 }
17301194
....@@ -1741,13 +1205,15 @@
17411205 #if defined(CONFIG_DEBUG_FS)
17421206 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
17431207 enum pipe pipe,
1744
- uint32_t crc0, uint32_t crc1,
1745
- uint32_t crc2, uint32_t crc3,
1746
- uint32_t crc4)
1208
+ u32 crc0, u32 crc1,
1209
+ u32 crc2, u32 crc3,
1210
+ u32 crc4)
17471211 {
1748
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
17491212 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1750
- uint32_t crcs[5];
1213
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1214
+ u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1215
+
1216
+ trace_intel_pipe_crc(crtc, crcs);
17511217
17521218 spin_lock(&pipe_crc->lock);
17531219 /*
....@@ -1766,11 +1232,6 @@
17661232 }
17671233 spin_unlock(&pipe_crc->lock);
17681234
1769
- crcs[0] = crc0;
1770
- crcs[1] = crc1;
1771
- crcs[2] = crc2;
1772
- crcs[3] = crc3;
1773
- crcs[4] = crc4;
17741235 drm_crtc_add_crc_entry(&crtc->base, true,
17751236 drm_crtc_accurate_vblank_count(&crtc->base),
17761237 crcs);
....@@ -1779,9 +1240,9 @@
17791240 static inline void
17801241 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
17811242 enum pipe pipe,
1782
- uint32_t crc0, uint32_t crc1,
1783
- uint32_t crc2, uint32_t crc3,
1784
- uint32_t crc4) {}
1243
+ u32 crc0, u32 crc1,
1244
+ u32 crc2, u32 crc3,
1245
+ u32 crc4) {}
17851246 #endif
17861247
17871248
....@@ -1807,7 +1268,7 @@
18071268 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
18081269 enum pipe pipe)
18091270 {
1810
- uint32_t res1, res2;
1271
+ u32 res1, res2;
18111272
18121273 if (INTEL_GEN(dev_priv) >= 3)
18131274 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
....@@ -1826,41 +1287,6 @@
18261287 res1, res2);
18271288 }
18281289
1829
-/* The RPS events need forcewake, so we add them to a work queue and mask their
1830
- * IMR bits until the work is done. Other interrupts can be processed without
1831
- * the work queue. */
1832
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1833
-{
1834
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
1835
-
1836
- if (pm_iir & dev_priv->pm_rps_events) {
1837
- spin_lock(&dev_priv->irq_lock);
1838
- gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1839
- if (rps->interrupts_enabled) {
1840
- rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1841
- schedule_work(&rps->work);
1842
- }
1843
- spin_unlock(&dev_priv->irq_lock);
1844
- }
1845
-
1846
- if (INTEL_GEN(dev_priv) >= 8)
1847
- return;
1848
-
1849
- if (HAS_VEBOX(dev_priv)) {
1850
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1851
- notify_ring(dev_priv->engine[VECS]);
1852
-
1853
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1854
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1855
- }
1856
-}
1857
-
1858
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1859
-{
1860
- if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1861
- intel_guc_to_host_event_handler(&dev_priv->guc);
1862
-}
1863
-
18641290 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
18651291 {
18661292 enum pipe pipe;
....@@ -1877,7 +1303,7 @@
18771303 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
18781304 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
18791305 {
1880
- int pipe;
1306
+ enum pipe pipe;
18811307
18821308 spin_lock(&dev_priv->irq_lock);
18831309
....@@ -1902,6 +1328,7 @@
19021328 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
19031329
19041330 switch (pipe) {
1331
+ default:
19051332 case PIPE_A:
19061333 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
19071334 break;
....@@ -1946,7 +1373,7 @@
19461373
19471374 for_each_pipe(dev_priv, pipe) {
19481375 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1949
- drm_handle_vblank(&dev_priv->drm, pipe);
1376
+ intel_handle_vblank(dev_priv, pipe);
19501377
19511378 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
19521379 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
....@@ -1964,7 +1391,7 @@
19641391
19651392 for_each_pipe(dev_priv, pipe) {
19661393 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1967
- drm_handle_vblank(&dev_priv->drm, pipe);
1394
+ intel_handle_vblank(dev_priv, pipe);
19681395
19691396 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
19701397 blc_event = true;
....@@ -1988,7 +1415,7 @@
19881415
19891416 for_each_pipe(dev_priv, pipe) {
19901417 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1991
- drm_handle_vblank(&dev_priv->drm, pipe);
1418
+ intel_handle_vblank(dev_priv, pipe);
19921419
19931420 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
19941421 blc_event = true;
....@@ -2014,7 +1441,7 @@
20141441
20151442 for_each_pipe(dev_priv, pipe) {
20161443 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2017
- drm_handle_vblank(&dev_priv->drm, pipe);
1444
+ intel_handle_vblank(dev_priv, pipe);
20181445
20191446 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
20201447 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
....@@ -2058,9 +1485,9 @@
20581485 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
20591486 }
20601487
2061
- WARN_ONCE(1,
2062
- "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
2063
- I915_READ(PORT_HOTPLUG_STAT));
1488
+ drm_WARN_ONCE(&dev_priv->drm, 1,
1489
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1490
+ I915_READ(PORT_HOTPLUG_STAT));
20641491
20651492 return hotplug_status;
20661493 }
....@@ -2069,46 +1496,39 @@
20691496 u32 hotplug_status)
20701497 {
20711498 u32 pin_mask = 0, long_mask = 0;
1499
+ u32 hotplug_trigger;
20721500
2073
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2074
- IS_CHERRYVIEW(dev_priv)) {
2075
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1501
+ if (IS_G4X(dev_priv) ||
1502
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1503
+ hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1504
+ else
1505
+ hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
20761506
2077
- if (hotplug_trigger) {
2078
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2079
- hotplug_trigger, hotplug_trigger,
2080
- hpd_status_g4x,
2081
- i9xx_port_hotplug_long_detect);
1507
+ if (hotplug_trigger) {
1508
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1509
+ hotplug_trigger, hotplug_trigger,
1510
+ dev_priv->hotplug.hpd,
1511
+ i9xx_port_hotplug_long_detect);
20821512
2083
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2084
- }
2085
-
2086
- if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2087
- dp_aux_irq_handler(dev_priv);
2088
- } else {
2089
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2090
-
2091
- if (hotplug_trigger) {
2092
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2093
- hotplug_trigger, hotplug_trigger,
2094
- hpd_status_i915,
2095
- i9xx_port_hotplug_long_detect);
2096
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2097
- }
1513
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
20981514 }
1515
+
1516
+ if ((IS_G4X(dev_priv) ||
1517
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1518
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1519
+ dp_aux_irq_handler(dev_priv);
20991520 }
21001521
21011522 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
21021523 {
2103
- struct drm_device *dev = arg;
2104
- struct drm_i915_private *dev_priv = to_i915(dev);
1524
+ struct drm_i915_private *dev_priv = arg;
21051525 irqreturn_t ret = IRQ_NONE;
21061526
21071527 if (!intel_irqs_enabled(dev_priv))
21081528 return IRQ_NONE;
21091529
21101530 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2111
- disable_rpm_wakeref_asserts(dev_priv);
1531
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
21121532
21131533 do {
21141534 u32 iir, gt_iir, pm_iir;
....@@ -2169,9 +1589,9 @@
21691589 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
21701590
21711591 if (gt_iir)
2172
- snb_gt_irq_handler(dev_priv, gt_iir);
1592
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
21731593 if (pm_iir)
2174
- gen6_rps_irq_handler(dev_priv, pm_iir);
1594
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
21751595
21761596 if (hotplug_status)
21771597 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
....@@ -2179,28 +1599,26 @@
21791599 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
21801600 } while (0);
21811601
2182
- enable_rpm_wakeref_asserts(dev_priv);
1602
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
21831603
21841604 return ret;
21851605 }
21861606
21871607 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
21881608 {
2189
- struct drm_device *dev = arg;
2190
- struct drm_i915_private *dev_priv = to_i915(dev);
1609
+ struct drm_i915_private *dev_priv = arg;
21911610 irqreturn_t ret = IRQ_NONE;
21921611
21931612 if (!intel_irqs_enabled(dev_priv))
21941613 return IRQ_NONE;
21951614
21961615 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2197
- disable_rpm_wakeref_asserts(dev_priv);
1616
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
21981617
21991618 do {
22001619 u32 master_ctl, iir;
22011620 u32 pipe_stats[I915_MAX_PIPES] = {};
22021621 u32 hotplug_status = 0;
2203
- u32 gt_iir[4];
22041622 u32 ier = 0;
22051623
22061624 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
....@@ -2228,7 +1646,7 @@
22281646 ier = I915_READ(VLV_IER);
22291647 I915_WRITE(VLV_IER, 0);
22301648
2231
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1649
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
22321650
22331651 if (iir & I915_DISPLAY_PORT_INTERRUPT)
22341652 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
....@@ -2252,22 +1670,19 @@
22521670 I915_WRITE(VLV_IER, ier);
22531671 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
22541672
2255
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2256
-
22571673 if (hotplug_status)
22581674 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
22591675
22601676 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
22611677 } while (0);
22621678
2263
- enable_rpm_wakeref_asserts(dev_priv);
1679
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22641680
22651681 return ret;
22661682 }
22671683
22681684 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2269
- u32 hotplug_trigger,
2270
- const u32 hpd[HPD_NUM_PINS])
1685
+ u32 hotplug_trigger)
22711686 {
22721687 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
22731688
....@@ -2290,8 +1705,9 @@
22901705 if (!hotplug_trigger)
22911706 return;
22921707
2293
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2294
- dig_hotplug_reg, hpd,
1708
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1709
+ hotplug_trigger, dig_hotplug_reg,
1710
+ dev_priv->hotplug.pch_hpd,
22951711 pch_port_hotplug_long_detect);
22961712
22971713 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
....@@ -2299,16 +1715,16 @@
22991715
23001716 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23011717 {
2302
- int pipe;
1718
+ enum pipe pipe;
23031719 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
23041720
2305
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1721
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
23061722
23071723 if (pch_iir & SDE_AUDIO_POWER_MASK) {
23081724 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
23091725 SDE_AUDIO_POWER_SHIFT);
2310
- DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2311
- port_name(port));
1726
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1727
+ port_name(port));
23121728 }
23131729
23141730 if (pch_iir & SDE_AUX_MASK)
....@@ -2318,25 +1734,27 @@
23181734 gmbus_irq_handler(dev_priv);
23191735
23201736 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2321
- DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1737
+ drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
23221738
23231739 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2324
- DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1740
+ drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
23251741
23261742 if (pch_iir & SDE_POISON)
2327
- DRM_ERROR("PCH poison interrupt\n");
1743
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
23281744
2329
- if (pch_iir & SDE_FDI_MASK)
1745
+ if (pch_iir & SDE_FDI_MASK) {
23301746 for_each_pipe(dev_priv, pipe)
2331
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2332
- pipe_name(pipe),
2333
- I915_READ(FDI_RX_IIR(pipe)));
1747
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1748
+ pipe_name(pipe),
1749
+ I915_READ(FDI_RX_IIR(pipe)));
1750
+ }
23341751
23351752 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2336
- DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1753
+ drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
23371754
23381755 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2339
- DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1756
+ drm_dbg(&dev_priv->drm,
1757
+ "PCH transcoder CRC error interrupt\n");
23401758
23411759 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
23421760 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
....@@ -2351,7 +1769,7 @@
23511769 enum pipe pipe;
23521770
23531771 if (err_int & ERR_INT_POISON)
2354
- DRM_ERROR("Poison interrupt\n");
1772
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
23551773
23561774 for_each_pipe(dev_priv, pipe) {
23571775 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
....@@ -2374,7 +1792,7 @@
23741792 enum pipe pipe;
23751793
23761794 if (serr_int & SERR_INT_POISON)
2377
- DRM_ERROR("PCH poison interrupt\n");
1795
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
23781796
23791797 for_each_pipe(dev_priv, pipe)
23801798 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
....@@ -2385,16 +1803,16 @@
23851803
23861804 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23871805 {
2388
- int pipe;
1806
+ enum pipe pipe;
23891807 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
23901808
2391
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
1809
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
23921810
23931811 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
23941812 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
23951813 SDE_AUDIO_POWER_SHIFT_CPT);
2396
- DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2397
- port_name(port));
1814
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1815
+ port_name(port));
23981816 }
23991817
24001818 if (pch_iir & SDE_AUX_MASK_CPT)
....@@ -2404,16 +1822,17 @@
24041822 gmbus_irq_handler(dev_priv);
24051823
24061824 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2407
- DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1825
+ drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
24081826
24091827 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2410
- DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1828
+ drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
24111829
2412
- if (pch_iir & SDE_FDI_MASK_CPT)
1830
+ if (pch_iir & SDE_FDI_MASK_CPT) {
24131831 for_each_pipe(dev_priv, pipe)
2414
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2415
- pipe_name(pipe),
2416
- I915_READ(FDI_RX_IIR(pipe)));
1832
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1833
+ pipe_name(pipe),
1834
+ I915_READ(FDI_RX_IIR(pipe)));
1835
+ }
24171836
24181837 if (pch_iir & SDE_ERROR_CPT)
24191838 cpt_serr_int_handler(dev_priv);
....@@ -2421,9 +1840,26 @@
24211840
24221841 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
24231842 {
2424
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2425
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1843
+ u32 ddi_hotplug_trigger, tc_hotplug_trigger;
24261844 u32 pin_mask = 0, long_mask = 0;
1845
+
1846
+ if (HAS_PCH_TGP(dev_priv)) {
1847
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1848
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1849
+ } else if (HAS_PCH_JSP(dev_priv)) {
1850
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1851
+ tc_hotplug_trigger = 0;
1852
+ } else if (HAS_PCH_MCC(dev_priv)) {
1853
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1854
+ tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
1855
+ } else {
1856
+ drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
1857
+ "Unrecognized PCH type 0x%x\n",
1858
+ INTEL_PCH_TYPE(dev_priv));
1859
+
1860
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1861
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1862
+ }
24271863
24281864 if (ddi_hotplug_trigger) {
24291865 u32 dig_hotplug_reg;
....@@ -2432,8 +1868,8 @@
24321868 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
24331869
24341870 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2435
- ddi_hotplug_trigger,
2436
- dig_hotplug_reg, hpd_icp,
1871
+ ddi_hotplug_trigger, dig_hotplug_reg,
1872
+ dev_priv->hotplug.pch_hpd,
24371873 icp_ddi_port_hotplug_long_detect);
24381874 }
24391875
....@@ -2444,8 +1880,8 @@
24441880 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
24451881
24461882 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2447
- tc_hotplug_trigger,
2448
- dig_hotplug_reg, hpd_icp,
1883
+ tc_hotplug_trigger, dig_hotplug_reg,
1884
+ dev_priv->hotplug.pch_hpd,
24491885 icp_tc_port_hotplug_long_detect);
24501886 }
24511887
....@@ -2470,7 +1906,8 @@
24701906 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
24711907
24721908 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2473
- hotplug_trigger, dig_hotplug_reg, hpd_spt,
1909
+ hotplug_trigger, dig_hotplug_reg,
1910
+ dev_priv->hotplug.pch_hpd,
24741911 spt_port_hotplug_long_detect);
24751912 }
24761913
....@@ -2481,7 +1918,8 @@
24811918 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
24821919
24831920 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2484
- hotplug2_trigger, dig_hotplug_reg, hpd_spt,
1921
+ hotplug2_trigger, dig_hotplug_reg,
1922
+ dev_priv->hotplug.pch_hpd,
24851923 spt_port_hotplug2_long_detect);
24861924 }
24871925
....@@ -2493,16 +1931,16 @@
24931931 }
24941932
24951933 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2496
- u32 hotplug_trigger,
2497
- const u32 hpd[HPD_NUM_PINS])
1934
+ u32 hotplug_trigger)
24981935 {
24991936 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
25001937
25011938 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
25021939 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
25031940
2504
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2505
- dig_hotplug_reg, hpd,
1941
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1942
+ hotplug_trigger, dig_hotplug_reg,
1943
+ dev_priv->hotplug.hpd,
25061944 ilk_port_hotplug_long_detect);
25071945
25081946 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
....@@ -2515,7 +1953,7 @@
25151953 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
25161954
25171955 if (hotplug_trigger)
2518
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
1956
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
25191957
25201958 if (de_iir & DE_AUX_CHANNEL_A)
25211959 dp_aux_irq_handler(dev_priv);
....@@ -2524,11 +1962,11 @@
25241962 intel_opregion_asle_intr(dev_priv);
25251963
25261964 if (de_iir & DE_POISON)
2527
- DRM_ERROR("Poison interrupt\n");
1965
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
25281966
25291967 for_each_pipe(dev_priv, pipe) {
25301968 if (de_iir & DE_PIPE_VBLANK(pipe))
2531
- drm_handle_vblank(&dev_priv->drm, pipe);
1969
+ intel_handle_vblank(dev_priv, pipe);
25321970
25331971 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
25341972 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
....@@ -2550,8 +1988,8 @@
25501988 I915_WRITE(SDEIIR, pch_iir);
25511989 }
25521990
2553
- if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2554
- ironlake_rps_change_irq_handler(dev_priv);
1991
+ if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
1992
+ gen5_rps_irq_handler(&dev_priv->gt.rps);
25551993 }
25561994
25571995 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
....@@ -2561,7 +1999,7 @@
25611999 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
25622000
25632001 if (hotplug_trigger)
2564
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2002
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
25652003
25662004 if (de_iir & DE_ERR_INT_IVB)
25672005 ivb_err_int_handler(dev_priv);
....@@ -2581,7 +2019,7 @@
25812019
25822020 for_each_pipe(dev_priv, pipe) {
25832021 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2584
- drm_handle_vblank(&dev_priv->drm, pipe);
2022
+ intel_handle_vblank(dev_priv, pipe);
25852023 }
25862024
25872025 /* check event from PCH */
....@@ -2603,85 +2041,85 @@
26032041 * 4 - Process the interrupt(s) that had bits set in the IIRs.
26042042 * 5 - Re-enable Master Interrupt Control.
26052043 */
2606
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2044
+static irqreturn_t ilk_irq_handler(int irq, void *arg)
26072045 {
2608
- struct drm_device *dev = arg;
2609
- struct drm_i915_private *dev_priv = to_i915(dev);
2046
+ struct drm_i915_private *i915 = arg;
2047
+ void __iomem * const regs = i915->uncore.regs;
26102048 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
26112049 irqreturn_t ret = IRQ_NONE;
26122050
2613
- if (!intel_irqs_enabled(dev_priv))
2051
+ if (unlikely(!intel_irqs_enabled(i915)))
26142052 return IRQ_NONE;
26152053
26162054 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2617
- disable_rpm_wakeref_asserts(dev_priv);
2055
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
26182056
26192057 /* disable master interrupt before clearing iir */
2620
- de_ier = I915_READ(DEIER);
2621
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2058
+ de_ier = raw_reg_read(regs, DEIER);
2059
+ raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
26222060
26232061 /* Disable south interrupts. We'll only write to SDEIIR once, so further
26242062 * interrupts will will be stored on its back queue, and then we'll be
26252063 * able to process them after we restore SDEIER (as soon as we restore
26262064 * it, we'll get an interrupt if SDEIIR still has something to process
26272065 * due to its back queue). */
2628
- if (!HAS_PCH_NOP(dev_priv)) {
2629
- sde_ier = I915_READ(SDEIER);
2630
- I915_WRITE(SDEIER, 0);
2066
+ if (!HAS_PCH_NOP(i915)) {
2067
+ sde_ier = raw_reg_read(regs, SDEIER);
2068
+ raw_reg_write(regs, SDEIER, 0);
26312069 }
26322070
26332071 /* Find, clear, then process each source of interrupt */
26342072
2635
- gt_iir = I915_READ(GTIIR);
2073
+ gt_iir = raw_reg_read(regs, GTIIR);
26362074 if (gt_iir) {
2637
- I915_WRITE(GTIIR, gt_iir);
2638
- ret = IRQ_HANDLED;
2639
- if (INTEL_GEN(dev_priv) >= 6)
2640
- snb_gt_irq_handler(dev_priv, gt_iir);
2075
+ raw_reg_write(regs, GTIIR, gt_iir);
2076
+ if (INTEL_GEN(i915) >= 6)
2077
+ gen6_gt_irq_handler(&i915->gt, gt_iir);
26412078 else
2642
- ilk_gt_irq_handler(dev_priv, gt_iir);
2079
+ gen5_gt_irq_handler(&i915->gt, gt_iir);
2080
+ ret = IRQ_HANDLED;
26432081 }
26442082
2645
- de_iir = I915_READ(DEIIR);
2083
+ de_iir = raw_reg_read(regs, DEIIR);
26462084 if (de_iir) {
2647
- I915_WRITE(DEIIR, de_iir);
2648
- ret = IRQ_HANDLED;
2649
- if (INTEL_GEN(dev_priv) >= 7)
2650
- ivb_display_irq_handler(dev_priv, de_iir);
2085
+ raw_reg_write(regs, DEIIR, de_iir);
2086
+ if (INTEL_GEN(i915) >= 7)
2087
+ ivb_display_irq_handler(i915, de_iir);
26512088 else
2652
- ilk_display_irq_handler(dev_priv, de_iir);
2089
+ ilk_display_irq_handler(i915, de_iir);
2090
+ ret = IRQ_HANDLED;
26532091 }
26542092
2655
- if (INTEL_GEN(dev_priv) >= 6) {
2656
- u32 pm_iir = I915_READ(GEN6_PMIIR);
2093
+ if (INTEL_GEN(i915) >= 6) {
2094
+ u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
26572095 if (pm_iir) {
2658
- I915_WRITE(GEN6_PMIIR, pm_iir);
2096
+ raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2097
+ gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
26592098 ret = IRQ_HANDLED;
2660
- gen6_rps_irq_handler(dev_priv, pm_iir);
26612099 }
26622100 }
26632101
2664
- I915_WRITE(DEIER, de_ier);
2665
- if (!HAS_PCH_NOP(dev_priv))
2666
- I915_WRITE(SDEIER, sde_ier);
2102
+ raw_reg_write(regs, DEIER, de_ier);
2103
+ if (sde_ier)
2104
+ raw_reg_write(regs, SDEIER, sde_ier);
26672105
26682106 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2669
- enable_rpm_wakeref_asserts(dev_priv);
2107
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
26702108
26712109 return ret;
26722110 }
26732111
26742112 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2675
- u32 hotplug_trigger,
2676
- const u32 hpd[HPD_NUM_PINS])
2113
+ u32 hotplug_trigger)
26772114 {
26782115 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
26792116
26802117 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
26812118 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
26822119
2683
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2684
- dig_hotplug_reg, hpd,
2120
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2121
+ hotplug_trigger, dig_hotplug_reg,
2122
+ dev_priv->hotplug.hpd,
26852123 bxt_port_hotplug_long_detect);
26862124
26872125 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
....@@ -2699,8 +2137,9 @@
26992137 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
27002138 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
27012139
2702
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2703
- dig_hotplug_reg, hpd_gen11,
2140
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2141
+ trigger_tc, dig_hotplug_reg,
2142
+ dev_priv->hotplug.hpd,
27042143 gen11_port_hotplug_long_detect);
27052144 }
27062145
....@@ -2710,15 +2149,92 @@
27102149 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
27112150 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
27122151
2713
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2714
- dig_hotplug_reg, hpd_gen11,
2152
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2153
+ trigger_tbt, dig_hotplug_reg,
2154
+ dev_priv->hotplug.hpd,
27152155 gen11_port_hotplug_long_detect);
27162156 }
27172157
27182158 if (pin_mask)
27192159 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
27202160 else
2721
- DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2161
+ drm_err(&dev_priv->drm,
2162
+ "Unexpected DE HPD interrupt 0x%08x\n", iir);
2163
+}
2164
+
2165
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2166
+{
2167
+ u32 mask;
2168
+
2169
+ if (INTEL_GEN(dev_priv) >= 12)
2170
+ return TGL_DE_PORT_AUX_DDIA |
2171
+ TGL_DE_PORT_AUX_DDIB |
2172
+ TGL_DE_PORT_AUX_DDIC |
2173
+ TGL_DE_PORT_AUX_USBC1 |
2174
+ TGL_DE_PORT_AUX_USBC2 |
2175
+ TGL_DE_PORT_AUX_USBC3 |
2176
+ TGL_DE_PORT_AUX_USBC4 |
2177
+ TGL_DE_PORT_AUX_USBC5 |
2178
+ TGL_DE_PORT_AUX_USBC6;
2179
+
2180
+
2181
+ mask = GEN8_AUX_CHANNEL_A;
2182
+ if (INTEL_GEN(dev_priv) >= 9)
2183
+ mask |= GEN9_AUX_CHANNEL_B |
2184
+ GEN9_AUX_CHANNEL_C |
2185
+ GEN9_AUX_CHANNEL_D;
2186
+
2187
+ if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2188
+ mask |= CNL_AUX_CHANNEL_F;
2189
+
2190
+ if (IS_GEN(dev_priv, 11))
2191
+ mask |= ICL_AUX_CHANNEL_E;
2192
+
2193
+ return mask;
2194
+}
2195
+
2196
+static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2197
+{
2198
+ if (IS_ROCKETLAKE(dev_priv))
2199
+ return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2200
+ else if (INTEL_GEN(dev_priv) >= 11)
2201
+ return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2202
+ else if (INTEL_GEN(dev_priv) >= 9)
2203
+ return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2204
+ else
2205
+ return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2206
+}
2207
+
2208
+static void
2209
+gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2210
+{
2211
+ bool found = false;
2212
+
2213
+ if (iir & GEN8_DE_MISC_GSE) {
2214
+ intel_opregion_asle_intr(dev_priv);
2215
+ found = true;
2216
+ }
2217
+
2218
+ if (iir & GEN8_DE_EDP_PSR) {
2219
+ u32 psr_iir;
2220
+ i915_reg_t iir_reg;
2221
+
2222
+ if (INTEL_GEN(dev_priv) >= 12)
2223
+ iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2224
+ else
2225
+ iir_reg = EDP_PSR_IIR;
2226
+
2227
+ psr_iir = I915_READ(iir_reg);
2228
+ I915_WRITE(iir_reg, psr_iir);
2229
+
2230
+ if (psr_iir)
2231
+ found = true;
2232
+
2233
+ intel_psr_irq_handler(dev_priv, psr_iir);
2234
+ }
2235
+
2236
+ if (!found)
2237
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
27222238 }
27232239
27242240 static irqreturn_t
....@@ -2731,29 +2247,13 @@
27312247 if (master_ctl & GEN8_DE_MISC_IRQ) {
27322248 iir = I915_READ(GEN8_DE_MISC_IIR);
27332249 if (iir) {
2734
- bool found = false;
2735
-
27362250 I915_WRITE(GEN8_DE_MISC_IIR, iir);
27372251 ret = IRQ_HANDLED;
2738
-
2739
- if (iir & GEN8_DE_MISC_GSE) {
2740
- intel_opregion_asle_intr(dev_priv);
2741
- found = true;
2742
- }
2743
-
2744
- if (iir & GEN8_DE_EDP_PSR) {
2745
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
2746
-
2747
- intel_psr_irq_handler(dev_priv, psr_iir);
2748
- I915_WRITE(EDP_PSR_IIR, psr_iir);
2749
- found = true;
2750
- }
2751
-
2752
- if (!found)
2753
- DRM_ERROR("Unexpected DE Misc interrupt\n");
2252
+ gen8_de_misc_irq_handler(dev_priv, iir);
2253
+ } else {
2254
+ drm_err(&dev_priv->drm,
2255
+ "The master control interrupt lied (DE MISC)!\n");
27542256 }
2755
- else
2756
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
27572257 }
27582258
27592259 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
....@@ -2763,7 +2263,8 @@
27632263 ret = IRQ_HANDLED;
27642264 gen11_hpd_irq_handler(dev_priv, iir);
27652265 } else {
2766
- DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
2266
+ drm_err(&dev_priv->drm,
2267
+ "The master control interrupt lied, (DE HPD)!\n");
27672268 }
27682269 }
27692270
....@@ -2776,20 +2277,7 @@
27762277 I915_WRITE(GEN8_DE_PORT_IIR, iir);
27772278 ret = IRQ_HANDLED;
27782279
2779
- tmp_mask = GEN8_AUX_CHANNEL_A;
2780
- if (INTEL_GEN(dev_priv) >= 9)
2781
- tmp_mask |= GEN9_AUX_CHANNEL_B |
2782
- GEN9_AUX_CHANNEL_C |
2783
- GEN9_AUX_CHANNEL_D;
2784
-
2785
- if (INTEL_GEN(dev_priv) >= 11)
2786
- tmp_mask |= ICL_AUX_CHANNEL_E;
2787
-
2788
- if (IS_CNL_WITH_PORT_F(dev_priv) ||
2789
- INTEL_GEN(dev_priv) >= 11)
2790
- tmp_mask |= CNL_AUX_CHANNEL_F;
2791
-
2792
- if (iir & tmp_mask) {
2280
+ if (iir & gen8_de_port_aux_mask(dev_priv)) {
27932281 dp_aux_irq_handler(dev_priv);
27942282 found = true;
27952283 }
....@@ -2797,15 +2285,13 @@
27972285 if (IS_GEN9_LP(dev_priv)) {
27982286 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
27992287 if (tmp_mask) {
2800
- bxt_hpd_irq_handler(dev_priv, tmp_mask,
2801
- hpd_bxt);
2288
+ bxt_hpd_irq_handler(dev_priv, tmp_mask);
28022289 found = true;
28032290 }
28042291 } else if (IS_BROADWELL(dev_priv)) {
28052292 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
28062293 if (tmp_mask) {
2807
- ilk_hpd_irq_handler(dev_priv,
2808
- tmp_mask, hpd_bdw);
2294
+ ilk_hpd_irq_handler(dev_priv, tmp_mask);
28092295 found = true;
28102296 }
28112297 }
....@@ -2816,10 +2302,12 @@
28162302 }
28172303
28182304 if (!found)
2819
- DRM_ERROR("Unexpected DE Port interrupt\n");
2305
+ drm_err(&dev_priv->drm,
2306
+ "Unexpected DE Port interrupt\n");
28202307 }
28212308 else
2822
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2309
+ drm_err(&dev_priv->drm,
2310
+ "The master control interrupt lied (DE PORT)!\n");
28232311 }
28242312
28252313 for_each_pipe(dev_priv, pipe) {
....@@ -2830,7 +2318,8 @@
28302318
28312319 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
28322320 if (!iir) {
2833
- DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2321
+ drm_err(&dev_priv->drm,
2322
+ "The master control interrupt lied (DE PIPE)!\n");
28342323 continue;
28352324 }
28362325
....@@ -2838,7 +2327,7 @@
28382327 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
28392328
28402329 if (iir & GEN8_PIPE_VBLANK)
2841
- drm_handle_vblank(&dev_priv->drm, pipe);
2330
+ intel_handle_vblank(dev_priv, pipe);
28422331
28432332 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
28442333 hsw_pipe_crc_irq_handler(dev_priv, pipe);
....@@ -2846,16 +2335,12 @@
28462335 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
28472336 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
28482337
2849
- fault_errors = iir;
2850
- if (INTEL_GEN(dev_priv) >= 9)
2851
- fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2852
- else
2853
- fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2854
-
2338
+ fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
28552339 if (fault_errors)
2856
- DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2857
- pipe_name(pipe),
2858
- fault_errors);
2340
+ drm_err(&dev_priv->drm,
2341
+ "Fault errors on pipe %c: 0x%08x\n",
2342
+ pipe_name(pipe),
2343
+ fault_errors);
28592344 }
28602345
28612346 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
....@@ -2870,11 +2355,9 @@
28702355 I915_WRITE(SDEIIR, iir);
28712356 ret = IRQ_HANDLED;
28722357
2873
- if (HAS_PCH_ICP(dev_priv))
2358
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
28742359 icp_irq_handler(dev_priv, iir);
2875
- else if (HAS_PCH_SPT(dev_priv) ||
2876
- HAS_PCH_KBP(dev_priv) ||
2877
- HAS_PCH_CNP(dev_priv))
2360
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
28782361 spt_irq_handler(dev_priv, iir);
28792362 else
28802363 cpt_irq_handler(dev_priv, iir);
....@@ -2883,218 +2366,66 @@
28832366 * Like on previous PCH there seems to be something
28842367 * fishy going on with forwarding PCH interrupts.
28852368 */
2886
- DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2369
+ drm_dbg(&dev_priv->drm,
2370
+ "The master control interrupt lied (SDE)!\n");
28872371 }
28882372 }
28892373
28902374 return ret;
28912375 }
28922376
2377
+static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2378
+{
2379
+ raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2380
+
2381
+ /*
2382
+ * Now with master disabled, get a sample of level indications
2383
+ * for this interrupt. Indications will be cleared on related acks.
2384
+ * New indications can and will light up during processing,
2385
+ * and will generate new interrupt after enabling master.
2386
+ */
2387
+ return raw_reg_read(regs, GEN8_MASTER_IRQ);
2388
+}
2389
+
2390
+static inline void gen8_master_intr_enable(void __iomem * const regs)
2391
+{
2392
+ raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2393
+}
2394
+
28932395 static irqreturn_t gen8_irq_handler(int irq, void *arg)
28942396 {
2895
- struct drm_i915_private *dev_priv = to_i915(arg);
2397
+ struct drm_i915_private *dev_priv = arg;
2398
+ void __iomem * const regs = dev_priv->uncore.regs;
28962399 u32 master_ctl;
2897
- u32 gt_iir[4];
28982400
28992401 if (!intel_irqs_enabled(dev_priv))
29002402 return IRQ_NONE;
29012403
2902
- master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2903
- master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2904
- if (!master_ctl)
2404
+ master_ctl = gen8_master_intr_disable(regs);
2405
+ if (!master_ctl) {
2406
+ gen8_master_intr_enable(regs);
29052407 return IRQ_NONE;
2408
+ }
29062409
2907
- I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2908
-
2909
- /* Find, clear, then process each source of interrupt */
2910
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2410
+ /* Find, queue (onto bottom-halves), then clear each source */
2411
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
29112412
29122413 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
29132414 if (master_ctl & ~GEN8_GT_IRQS) {
2914
- disable_rpm_wakeref_asserts(dev_priv);
2415
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
29152416 gen8_de_irq_handler(dev_priv, master_ctl);
2916
- enable_rpm_wakeref_asserts(dev_priv);
2417
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
29172418 }
29182419
2919
- I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2920
-
2921
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2420
+ gen8_master_intr_enable(regs);
29222421
29232422 return IRQ_HANDLED;
29242423 }
29252424
2926
-struct wedge_me {
2927
- struct delayed_work work;
2928
- struct drm_i915_private *i915;
2929
- const char *name;
2930
-};
2931
-
2932
-static void wedge_me(struct work_struct *work)
2933
-{
2934
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
2935
-
2936
- dev_err(w->i915->drm.dev,
2937
- "%s timed out, cancelling all in-flight rendering.\n",
2938
- w->name);
2939
- i915_gem_set_wedged(w->i915);
2940
-}
2941
-
2942
-static void __init_wedge(struct wedge_me *w,
2943
- struct drm_i915_private *i915,
2944
- long timeout,
2945
- const char *name)
2946
-{
2947
- w->i915 = i915;
2948
- w->name = name;
2949
-
2950
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
2951
- schedule_delayed_work(&w->work, timeout);
2952
-}
2953
-
2954
-static void __fini_wedge(struct wedge_me *w)
2955
-{
2956
- cancel_delayed_work_sync(&w->work);
2957
- destroy_delayed_work_on_stack(&w->work);
2958
- w->i915 = NULL;
2959
-}
2960
-
2961
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
2962
- for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
2963
- (W)->i915; \
2964
- __fini_wedge((W)))
2965
-
29662425 static u32
2967
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
2968
- const unsigned int bank, const unsigned int bit)
2426
+gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
29692427 {
2970
- void __iomem * const regs = i915->regs;
2971
- u32 timeout_ts;
2972
- u32 ident;
2973
-
2974
- lockdep_assert_held(&i915->irq_lock);
2975
-
2976
- raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2977
-
2978
- /*
2979
- * NB: Specs do not specify how long to spin wait,
2980
- * so we do ~100us as an educated guess.
2981
- */
2982
- timeout_ts = (local_clock() >> 10) + 100;
2983
- do {
2984
- ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2985
- } while (!(ident & GEN11_INTR_DATA_VALID) &&
2986
- !time_after32(local_clock() >> 10, timeout_ts));
2987
-
2988
- if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2989
- DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2990
- bank, bit, ident);
2991
- return 0;
2992
- }
2993
-
2994
- raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2995
- GEN11_INTR_DATA_VALID);
2996
-
2997
- return ident;
2998
-}
2999
-
3000
-static void
3001
-gen11_other_irq_handler(struct drm_i915_private * const i915,
3002
- const u8 instance, const u16 iir)
3003
-{
3004
- if (instance == OTHER_GTPM_INSTANCE)
3005
- return gen6_rps_irq_handler(i915, iir);
3006
-
3007
- WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3008
- instance, iir);
3009
-}
3010
-
3011
-static void
3012
-gen11_engine_irq_handler(struct drm_i915_private * const i915,
3013
- const u8 class, const u8 instance, const u16 iir)
3014
-{
3015
- struct intel_engine_cs *engine;
3016
-
3017
- if (instance <= MAX_ENGINE_INSTANCE)
3018
- engine = i915->engine_class[class][instance];
3019
- else
3020
- engine = NULL;
3021
-
3022
- if (likely(engine))
3023
- return gen8_cs_irq_handler(engine, iir);
3024
-
3025
- WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3026
- class, instance);
3027
-}
3028
-
3029
-static void
3030
-gen11_gt_identity_handler(struct drm_i915_private * const i915,
3031
- const u32 identity)
3032
-{
3033
- const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3034
- const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3035
- const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3036
-
3037
- if (unlikely(!intr))
3038
- return;
3039
-
3040
- if (class <= COPY_ENGINE_CLASS)
3041
- return gen11_engine_irq_handler(i915, class, instance, intr);
3042
-
3043
- if (class == OTHER_CLASS)
3044
- return gen11_other_irq_handler(i915, instance, intr);
3045
-
3046
- WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3047
- class, instance, intr);
3048
-}
3049
-
3050
-static void
3051
-gen11_gt_bank_handler(struct drm_i915_private * const i915,
3052
- const unsigned int bank)
3053
-{
3054
- void __iomem * const regs = i915->regs;
3055
- unsigned long intr_dw;
3056
- unsigned int bit;
3057
-
3058
- lockdep_assert_held(&i915->irq_lock);
3059
-
3060
- intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3061
-
3062
- if (unlikely(!intr_dw)) {
3063
- DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3064
- return;
3065
- }
3066
-
3067
- for_each_set_bit(bit, &intr_dw, 32) {
3068
- const u32 ident = gen11_gt_engine_identity(i915,
3069
- bank, bit);
3070
-
3071
- gen11_gt_identity_handler(i915, ident);
3072
- }
3073
-
3074
- /* Clear must be after shared has been served for engine */
3075
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3076
-}
3077
-
3078
-static void
3079
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
3080
- const u32 master_ctl)
3081
-{
3082
- unsigned int bank;
3083
-
3084
- spin_lock(&i915->irq_lock);
3085
-
3086
- for (bank = 0; bank < 2; bank++) {
3087
- if (master_ctl & GEN11_GT_DW_IRQ(bank))
3088
- gen11_gt_bank_handler(i915, bank);
3089
- }
3090
-
3091
- spin_unlock(&i915->irq_lock);
3092
-}
3093
-
3094
-static u32
3095
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3096
-{
3097
- void __iomem * const regs = dev_priv->regs;
2428
+ void __iomem * const regs = gt->uncore->regs;
30982429 u32 iir;
30992430
31002431 if (!(master_ctl & GEN11_GU_MISC_IRQ))
....@@ -3108,242 +2439,138 @@
31082439 }
31092440
31102441 static void
3111
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
2442
+gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
31122443 {
31132444 if (iir & GEN11_GU_MISC_GSE)
3114
- intel_opregion_asle_intr(dev_priv);
2445
+ intel_opregion_asle_intr(gt->i915);
31152446 }
31162447
3117
-static irqreturn_t gen11_irq_handler(int irq, void *arg)
2448
+static inline u32 gen11_master_intr_disable(void __iomem * const regs)
31182449 {
3119
- struct drm_i915_private * const i915 = to_i915(arg);
3120
- void __iomem * const regs = i915->regs;
2450
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2451
+
2452
+ /*
2453
+ * Now with master disabled, get a sample of level indications
2454
+ * for this interrupt. Indications will be cleared on related acks.
2455
+ * New indications can and will light up during processing,
2456
+ * and will generate new interrupt after enabling master.
2457
+ */
2458
+ return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2459
+}
2460
+
2461
+static inline void gen11_master_intr_enable(void __iomem * const regs)
2462
+{
2463
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2464
+}
2465
+
2466
+static void
2467
+gen11_display_irq_handler(struct drm_i915_private *i915)
2468
+{
2469
+ void __iomem * const regs = i915->uncore.regs;
2470
+ const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2471
+
2472
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
2473
+ /*
2474
+ * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2475
+ * for the display related bits.
2476
+ */
2477
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2478
+ gen8_de_irq_handler(i915, disp_ctl);
2479
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2480
+ GEN11_DISPLAY_IRQ_ENABLE);
2481
+
2482
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
2483
+}
2484
+
2485
+static __always_inline irqreturn_t
2486
+__gen11_irq_handler(struct drm_i915_private * const i915,
2487
+ u32 (*intr_disable)(void __iomem * const regs),
2488
+ void (*intr_enable)(void __iomem * const regs))
2489
+{
2490
+ void __iomem * const regs = i915->uncore.regs;
2491
+ struct intel_gt *gt = &i915->gt;
31212492 u32 master_ctl;
31222493 u32 gu_misc_iir;
31232494
31242495 if (!intel_irqs_enabled(i915))
31252496 return IRQ_NONE;
31262497
3127
- master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3128
- master_ctl &= ~GEN11_MASTER_IRQ;
3129
- if (!master_ctl)
2498
+ master_ctl = intr_disable(regs);
2499
+ if (!master_ctl) {
2500
+ intr_enable(regs);
31302501 return IRQ_NONE;
3131
-
3132
- /* Disable interrupts. */
3133
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3134
-
3135
- /* Find, clear, then process each source of interrupt. */
3136
- gen11_gt_irq_handler(i915, master_ctl);
3137
-
3138
- /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3139
- if (master_ctl & GEN11_DISPLAY_IRQ) {
3140
- const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3141
-
3142
- disable_rpm_wakeref_asserts(i915);
3143
- /*
3144
- * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3145
- * for the display related bits.
3146
- */
3147
- gen8_de_irq_handler(i915, disp_ctl);
3148
- enable_rpm_wakeref_asserts(i915);
31492502 }
31502503
3151
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2504
+ /* Find, queue (onto bottom-halves), then clear each source */
2505
+ gen11_gt_irq_handler(gt, master_ctl);
31522506
3153
- /* Acknowledge and enable interrupts. */
3154
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
2507
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2508
+ if (master_ctl & GEN11_DISPLAY_IRQ)
2509
+ gen11_display_irq_handler(i915);
31552510
3156
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2511
+ gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2512
+
2513
+ intr_enable(regs);
2514
+
2515
+ gen11_gu_misc_irq_handler(gt, gu_misc_iir);
31572516
31582517 return IRQ_HANDLED;
31592518 }
31602519
3161
-static void i915_reset_device(struct drm_i915_private *dev_priv,
3162
- u32 engine_mask,
3163
- const char *reason)
2520
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
31642521 {
3165
- struct i915_gpu_error *error = &dev_priv->gpu_error;
3166
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
3167
- char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
3168
- char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
3169
- char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
3170
- struct wedge_me w;
3171
-
3172
- kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
3173
-
3174
- DRM_DEBUG_DRIVER("resetting chip\n");
3175
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
3176
-
3177
- /* Use a watchdog to ensure that our reset completes */
3178
- i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
3179
- intel_prepare_reset(dev_priv);
3180
-
3181
- error->reason = reason;
3182
- error->stalled_mask = engine_mask;
3183
-
3184
- /* Signal that locked waiters should reset the GPU */
3185
- smp_mb__before_atomic();
3186
- set_bit(I915_RESET_HANDOFF, &error->flags);
3187
- wake_up_all(&error->wait_queue);
3188
-
3189
- /* Wait for anyone holding the lock to wakeup, without
3190
- * blocking indefinitely on struct_mutex.
3191
- */
3192
- do {
3193
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
3194
- i915_reset(dev_priv, engine_mask, reason);
3195
- mutex_unlock(&dev_priv->drm.struct_mutex);
3196
- }
3197
- } while (wait_on_bit_timeout(&error->flags,
3198
- I915_RESET_HANDOFF,
3199
- TASK_UNINTERRUPTIBLE,
3200
- 1));
3201
-
3202
- error->stalled_mask = 0;
3203
- error->reason = NULL;
3204
-
3205
- intel_finish_reset(dev_priv);
3206
- }
3207
-
3208
- if (!test_bit(I915_WEDGED, &error->flags))
3209
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
2522
+ return __gen11_irq_handler(arg,
2523
+ gen11_master_intr_disable,
2524
+ gen11_master_intr_enable);
32102525 }
32112526
3212
-static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
2527
+static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
32132528 {
3214
- u32 eir;
2529
+ u32 val;
32152530
3216
- if (!IS_GEN2(dev_priv))
3217
- I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
2531
+ /* First disable interrupts */
2532
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
32182533
3219
- if (INTEL_GEN(dev_priv) < 4)
3220
- I915_WRITE(IPEIR, I915_READ(IPEIR));
3221
- else
3222
- I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
2534
+ /* Get the indication levels and ack the master unit */
2535
+ val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
2536
+ if (unlikely(!val))
2537
+ return 0;
32232538
3224
- I915_WRITE(EIR, I915_READ(EIR));
3225
- eir = I915_READ(EIR);
3226
- if (eir) {
3227
- /*
3228
- * some errors might have become stuck,
3229
- * mask them.
3230
- */
3231
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
3232
- I915_WRITE(EMR, I915_READ(EMR) | eir);
3233
- I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
3234
- }
2539
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
2540
+
2541
+ /*
2542
+ * Now with master disabled, get a sample of level indications
2543
+ * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
2544
+ * out as this bit doesn't exist anymore for DG1
2545
+ */
2546
+ val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
2547
+ if (unlikely(!val))
2548
+ return 0;
2549
+
2550
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
2551
+
2552
+ return val;
32352553 }
32362554
3237
-/**
3238
- * i915_handle_error - handle a gpu error
3239
- * @dev_priv: i915 device private
3240
- * @engine_mask: mask representing engines that are hung
3241
- * @flags: control flags
3242
- * @fmt: Error message format string
3243
- *
3244
- * Do some basic checking of register state at error time and
3245
- * dump it to the syslog. Also call i915_capture_error_state() to make
3246
- * sure we get a record and make it available in debugfs. Fire a uevent
3247
- * so userspace knows something bad happened (should trigger collection
3248
- * of a ring dump etc.).
3249
- */
3250
-void i915_handle_error(struct drm_i915_private *dev_priv,
3251
- u32 engine_mask,
3252
- unsigned long flags,
3253
- const char *fmt, ...)
2555
+static inline void dg1_master_intr_enable(void __iomem * const regs)
32542556 {
3255
- struct intel_engine_cs *engine;
3256
- unsigned int tmp;
3257
- char error_msg[80];
3258
- char *msg = NULL;
2557
+ raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
2558
+}
32592559
3260
- if (fmt) {
3261
- va_list args;
3262
-
3263
- va_start(args, fmt);
3264
- vscnprintf(error_msg, sizeof(error_msg), fmt, args);
3265
- va_end(args);
3266
-
3267
- msg = error_msg;
3268
- }
3269
-
3270
- /*
3271
- * In most cases it's guaranteed that we get here with an RPM
3272
- * reference held, for example because there is a pending GPU
3273
- * request that won't finish until the reset is done. This
3274
- * isn't the case at least when we get here by doing a
3275
- * simulated reset via debugfs, so get an RPM reference.
3276
- */
3277
- intel_runtime_pm_get(dev_priv);
3278
-
3279
- engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
3280
-
3281
- if (flags & I915_ERROR_CAPTURE) {
3282
- i915_capture_error_state(dev_priv, engine_mask, msg);
3283
- i915_clear_error_registers(dev_priv);
3284
- }
3285
-
3286
- /*
3287
- * Try engine reset when available. We fall back to full reset if
3288
- * single reset fails.
3289
- */
3290
- if (intel_has_reset_engine(dev_priv)) {
3291
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
3292
- BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
3293
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3294
- &dev_priv->gpu_error.flags))
3295
- continue;
3296
-
3297
- if (i915_reset_engine(engine, msg) == 0)
3298
- engine_mask &= ~intel_engine_flag(engine);
3299
-
3300
- clear_bit(I915_RESET_ENGINE + engine->id,
3301
- &dev_priv->gpu_error.flags);
3302
- wake_up_bit(&dev_priv->gpu_error.flags,
3303
- I915_RESET_ENGINE + engine->id);
3304
- }
3305
- }
3306
-
3307
- if (!engine_mask)
3308
- goto out;
3309
-
3310
- /* Full reset needs the mutex, stop any other user trying to do so. */
3311
- if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
3312
- wait_event(dev_priv->gpu_error.reset_queue,
3313
- !test_bit(I915_RESET_BACKOFF,
3314
- &dev_priv->gpu_error.flags));
3315
- goto out;
3316
- }
3317
-
3318
- /* Prevent any other reset-engine attempt. */
3319
- for_each_engine(engine, dev_priv, tmp) {
3320
- while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3321
- &dev_priv->gpu_error.flags))
3322
- wait_on_bit(&dev_priv->gpu_error.flags,
3323
- I915_RESET_ENGINE + engine->id,
3324
- TASK_UNINTERRUPTIBLE);
3325
- }
3326
-
3327
- i915_reset_device(dev_priv, engine_mask, msg);
3328
-
3329
- for_each_engine(engine, dev_priv, tmp) {
3330
- clear_bit(I915_RESET_ENGINE + engine->id,
3331
- &dev_priv->gpu_error.flags);
3332
- }
3333
-
3334
- clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
3335
- wake_up_all(&dev_priv->gpu_error.reset_queue);
3336
-
3337
-out:
3338
- intel_runtime_pm_put(dev_priv);
2560
+static irqreturn_t dg1_irq_handler(int irq, void *arg)
2561
+{
2562
+ return __gen11_irq_handler(arg,
2563
+ dg1_master_intr_disable_and_ack,
2564
+ dg1_master_intr_enable);
33392565 }
33402566
33412567 /* Called from drm generic code, passed 'crtc' which
33422568 * we use as a pipe index
33432569 */
3344
-static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
2570
+int i8xx_enable_vblank(struct drm_crtc *crtc)
33452571 {
3346
- struct drm_i915_private *dev_priv = to_i915(dev);
2572
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2573
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
33472574 unsigned long irqflags;
33482575
33492576 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3353,9 +2580,26 @@
33532580 return 0;
33542581 }
33552582
3356
-static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2583
+int i915gm_enable_vblank(struct drm_crtc *crtc)
33572584 {
3358
- struct drm_i915_private *dev_priv = to_i915(dev);
2585
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2586
+
2587
+ /*
2588
+ * Vblank interrupts fail to wake the device up from C2+.
2589
+ * Disabling render clock gating during C-states avoids
2590
+ * the problem. There is a small power cost so we do this
2591
+ * only when vblank interrupts are actually enabled.
2592
+ */
2593
+ if (dev_priv->vblank_enabled++ == 0)
2594
+ I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2595
+
2596
+ return i8xx_enable_vblank(crtc);
2597
+}
2598
+
2599
+int i965_enable_vblank(struct drm_crtc *crtc)
2600
+{
2601
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2602
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
33592603 unsigned long irqflags;
33602604
33612605 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3366,11 +2610,12 @@
33662610 return 0;
33672611 }
33682612
3369
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2613
+int ilk_enable_vblank(struct drm_crtc *crtc)
33702614 {
3371
- struct drm_i915_private *dev_priv = to_i915(dev);
2615
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2616
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
33722617 unsigned long irqflags;
3373
- uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2618
+ u32 bit = INTEL_GEN(dev_priv) >= 7 ?
33742619 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
33752620
33762621 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3381,14 +2626,15 @@
33812626 * PSR is active as no frames are generated.
33822627 */
33832628 if (HAS_PSR(dev_priv))
3384
- drm_vblank_restore(dev, pipe);
2629
+ drm_crtc_vblank_restore(crtc);
33852630
33862631 return 0;
33872632 }
33882633
3389
-static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2634
+int bdw_enable_vblank(struct drm_crtc *crtc)
33902635 {
3391
- struct drm_i915_private *dev_priv = to_i915(dev);
2636
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2637
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
33922638 unsigned long irqflags;
33932639
33942640 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3399,7 +2645,7 @@
33992645 * PSR is active as no frames are generated, so check only for PSR.
34002646 */
34012647 if (HAS_PSR(dev_priv))
3402
- drm_vblank_restore(dev, pipe);
2648
+ drm_crtc_vblank_restore(crtc);
34032649
34042650 return 0;
34052651 }
....@@ -3407,9 +2653,10 @@
34072653 /* Called from drm generic code, passed 'crtc' which
34082654 * we use as a pipe index
34092655 */
3410
-static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2656
+void i8xx_disable_vblank(struct drm_crtc *crtc)
34112657 {
3412
- struct drm_i915_private *dev_priv = to_i915(dev);
2658
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2659
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
34132660 unsigned long irqflags;
34142661
34152662 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3417,9 +2664,20 @@
34172664 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
34182665 }
34192666
3420
-static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
2667
+void i915gm_disable_vblank(struct drm_crtc *crtc)
34212668 {
3422
- struct drm_i915_private *dev_priv = to_i915(dev);
2669
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2670
+
2671
+ i8xx_disable_vblank(crtc);
2672
+
2673
+ if (--dev_priv->vblank_enabled == 0)
2674
+ I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2675
+}
2676
+
2677
+void i965_disable_vblank(struct drm_crtc *crtc)
2678
+{
2679
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2680
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
34232681 unsigned long irqflags;
34242682
34252683 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3428,11 +2686,12 @@
34282686 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
34292687 }
34302688
3431
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2689
+void ilk_disable_vblank(struct drm_crtc *crtc)
34322690 {
3433
- struct drm_i915_private *dev_priv = to_i915(dev);
2691
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2692
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
34342693 unsigned long irqflags;
3435
- uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2694
+ u32 bit = INTEL_GEN(dev_priv) >= 7 ?
34362695 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
34372696
34382697 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3440,9 +2699,10 @@
34402699 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
34412700 }
34422701
3443
-static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2702
+void bdw_disable_vblank(struct drm_crtc *crtc)
34442703 {
3445
- struct drm_i915_private *dev_priv = to_i915(dev);
2704
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2705
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
34462706 unsigned long irqflags;
34472707
34482708 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
....@@ -3452,10 +2712,12 @@
34522712
34532713 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
34542714 {
2715
+ struct intel_uncore *uncore = &dev_priv->uncore;
2716
+
34552717 if (HAS_PCH_NOP(dev_priv))
34562718 return;
34572719
3458
- GEN3_IRQ_RESET(SDE);
2720
+ GEN3_IRQ_RESET(uncore, SDE);
34592721
34602722 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
34612723 I915_WRITE(SERR_INT, 0xffffffff);
....@@ -3469,43 +2731,38 @@
34692731 *
34702732 * This function needs to be called before interrupts are enabled.
34712733 */
3472
-static void ibx_irq_pre_postinstall(struct drm_device *dev)
2734
+static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
34732735 {
3474
- struct drm_i915_private *dev_priv = to_i915(dev);
3475
-
34762736 if (HAS_PCH_NOP(dev_priv))
34772737 return;
34782738
3479
- WARN_ON(I915_READ(SDEIER) != 0);
2739
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
34802740 I915_WRITE(SDEIER, 0xffffffff);
34812741 POSTING_READ(SDEIER);
34822742 }
34832743
3484
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3485
-{
3486
- GEN3_IRQ_RESET(GT);
3487
- if (INTEL_GEN(dev_priv) >= 6)
3488
- GEN3_IRQ_RESET(GEN6_PM);
3489
-}
3490
-
34912744 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
34922745 {
2746
+ struct intel_uncore *uncore = &dev_priv->uncore;
2747
+
34932748 if (IS_CHERRYVIEW(dev_priv))
3494
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2749
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
34952750 else
3496
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2751
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
34972752
34982753 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3499
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2754
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35002755
35012756 i9xx_pipestat_irq_reset(dev_priv);
35022757
3503
- GEN3_IRQ_RESET(VLV_);
2758
+ GEN3_IRQ_RESET(uncore, VLV_);
35042759 dev_priv->irq_mask = ~0u;
35052760 }
35062761
35072762 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
35082763 {
2764
+ struct intel_uncore *uncore = &dev_priv->uncore;
2765
+
35092766 u32 pipestat_mask;
35102767 u32 enable_mask;
35112768 enum pipe pipe;
....@@ -3526,44 +2783,39 @@
35262783 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
35272784 I915_LPE_PIPE_C_INTERRUPT;
35282785
3529
- WARN_ON(dev_priv->irq_mask != ~0u);
2786
+ drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
35302787
35312788 dev_priv->irq_mask = ~enable_mask;
35322789
3533
- GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
2790
+ GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
35342791 }
35352792
35362793 /* drm_dma.h hooks
35372794 */
3538
-static void ironlake_irq_reset(struct drm_device *dev)
2795
+static void ilk_irq_reset(struct drm_i915_private *dev_priv)
35392796 {
3540
- struct drm_i915_private *dev_priv = to_i915(dev);
2797
+ struct intel_uncore *uncore = &dev_priv->uncore;
35412798
3542
- if (IS_GEN5(dev_priv))
3543
- I915_WRITE(HWSTAM, 0xffffffff);
3544
-
3545
- GEN3_IRQ_RESET(DE);
3546
- if (IS_GEN7(dev_priv))
3547
- I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2799
+ GEN3_IRQ_RESET(uncore, DE);
2800
+ if (IS_GEN(dev_priv, 7))
2801
+ intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
35482802
35492803 if (IS_HASWELL(dev_priv)) {
3550
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3551
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
2804
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2805
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
35522806 }
35532807
3554
- gen5_gt_irq_reset(dev_priv);
2808
+ gen5_gt_irq_reset(&dev_priv->gt);
35552809
35562810 ibx_irq_reset(dev_priv);
35572811 }
35582812
3559
-static void valleyview_irq_reset(struct drm_device *dev)
2813
+static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
35602814 {
3561
- struct drm_i915_private *dev_priv = to_i915(dev);
3562
-
35632815 I915_WRITE(VLV_MASTER_IER, 0);
35642816 POSTING_READ(VLV_MASTER_IER);
35652817
3566
- gen5_gt_irq_reset(dev_priv);
2818
+ gen5_gt_irq_reset(&dev_priv->gt);
35672819
35682820 spin_lock_irq(&dev_priv->irq_lock);
35692821 if (dev_priv->display_irqs_enabled)
....@@ -3571,88 +2823,101 @@
35712823 spin_unlock_irq(&dev_priv->irq_lock);
35722824 }
35732825
3574
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2826
+static void gen8_irq_reset(struct drm_i915_private *dev_priv)
35752827 {
3576
- GEN8_IRQ_RESET_NDX(GT, 0);
3577
- GEN8_IRQ_RESET_NDX(GT, 1);
3578
- GEN8_IRQ_RESET_NDX(GT, 2);
3579
- GEN8_IRQ_RESET_NDX(GT, 3);
3580
-}
2828
+ struct intel_uncore *uncore = &dev_priv->uncore;
2829
+ enum pipe pipe;
35812830
3582
-static void gen8_irq_reset(struct drm_device *dev)
3583
-{
3584
- struct drm_i915_private *dev_priv = to_i915(dev);
3585
- int pipe;
2831
+ gen8_master_intr_disable(dev_priv->uncore.regs);
35862832
3587
- I915_WRITE(GEN8_MASTER_IRQ, 0);
3588
- POSTING_READ(GEN8_MASTER_IRQ);
2833
+ gen8_gt_irq_reset(&dev_priv->gt);
35892834
3590
- gen8_gt_irq_reset(dev_priv);
3591
-
3592
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3593
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
2835
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2836
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
35942837
35952838 for_each_pipe(dev_priv, pipe)
35962839 if (intel_display_power_is_enabled(dev_priv,
35972840 POWER_DOMAIN_PIPE(pipe)))
3598
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2841
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
35992842
3600
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
3601
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
3602
- GEN3_IRQ_RESET(GEN8_PCU_);
2843
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2844
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2845
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
36032846
36042847 if (HAS_PCH_SPLIT(dev_priv))
36052848 ibx_irq_reset(dev_priv);
36062849 }
36072850
3608
-static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
2851
+static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
36092852 {
3610
- /* Disable RCS, BCS, VCS and VECS class engines. */
3611
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3612
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
2853
+ struct intel_uncore *uncore = &dev_priv->uncore;
2854
+ enum pipe pipe;
2855
+ u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2856
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
36132857
3614
- /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
3615
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
3616
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
3617
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
3618
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
3619
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
2858
+ intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
36202859
3621
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3622
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
3623
-}
2860
+ if (INTEL_GEN(dev_priv) >= 12) {
2861
+ enum transcoder trans;
36242862
3625
-static void gen11_irq_reset(struct drm_device *dev)
3626
-{
3627
- struct drm_i915_private *dev_priv = dev->dev_private;
3628
- int pipe;
2863
+ for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2864
+ enum intel_display_power_domain domain;
36292865
3630
- I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
3631
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
2866
+ domain = POWER_DOMAIN_TRANSCODER(trans);
2867
+ if (!intel_display_power_is_enabled(dev_priv, domain))
2868
+ continue;
36322869
3633
- gen11_gt_irq_reset(dev_priv);
3634
-
3635
- I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
2870
+ intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2871
+ intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2872
+ }
2873
+ } else {
2874
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2875
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2876
+ }
36362877
36372878 for_each_pipe(dev_priv, pipe)
36382879 if (intel_display_power_is_enabled(dev_priv,
36392880 POWER_DOMAIN_PIPE(pipe)))
3640
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2881
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
36412882
3642
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
3643
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
3644
- GEN3_IRQ_RESET(GEN11_DE_HPD_);
3645
- GEN3_IRQ_RESET(GEN11_GU_MISC_);
3646
- GEN3_IRQ_RESET(GEN8_PCU_);
2883
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2884
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2885
+ GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
36472886
3648
- if (HAS_PCH_ICP(dev_priv))
3649
- GEN3_IRQ_RESET(SDE);
2887
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2888
+ GEN3_IRQ_RESET(uncore, SDE);
2889
+
2890
+ /* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
2891
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
2892
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
2893
+ SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2894
+ intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
2895
+ SBCLK_RUN_REFCLK_DIS, 0);
2896
+ }
2897
+}
2898
+
2899
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2900
+{
2901
+ struct intel_uncore *uncore = &dev_priv->uncore;
2902
+
2903
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
2904
+ dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
2905
+ else
2906
+ gen11_master_intr_disable(dev_priv->uncore.regs);
2907
+
2908
+ gen11_gt_irq_reset(&dev_priv->gt);
2909
+ gen11_display_irq_reset(dev_priv);
2910
+
2911
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2912
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
36502913 }
36512914
36522915 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
36532916 u8 pipe_mask)
36542917 {
3655
- uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2918
+ struct intel_uncore *uncore = &dev_priv->uncore;
2919
+
2920
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
36562921 enum pipe pipe;
36572922
36582923 spin_lock_irq(&dev_priv->irq_lock);
....@@ -3663,7 +2928,7 @@
36632928 }
36642929
36652930 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3666
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
2931
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
36672932 dev_priv->de_irq_mask[pipe],
36682933 ~dev_priv->de_irq_mask[pipe] | extra_ier);
36692934
....@@ -3673,6 +2938,7 @@
36732938 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
36742939 u8 pipe_mask)
36752940 {
2941
+ struct intel_uncore *uncore = &dev_priv->uncore;
36762942 enum pipe pipe;
36772943
36782944 spin_lock_irq(&dev_priv->irq_lock);
....@@ -3683,24 +2949,24 @@
36832949 }
36842950
36852951 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3686
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2952
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
36872953
36882954 spin_unlock_irq(&dev_priv->irq_lock);
36892955
36902956 /* make sure we're done processing display irqs */
3691
- synchronize_irq(dev_priv->drm.irq);
2957
+ intel_synchronize_irq(dev_priv);
36922958 }
36932959
3694
-static void cherryview_irq_reset(struct drm_device *dev)
2960
+static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
36952961 {
3696
- struct drm_i915_private *dev_priv = to_i915(dev);
2962
+ struct intel_uncore *uncore = &dev_priv->uncore;
36972963
36982964 I915_WRITE(GEN8_MASTER_IRQ, 0);
36992965 POSTING_READ(GEN8_MASTER_IRQ);
37002966
3701
- gen8_gt_irq_reset(dev_priv);
2967
+ gen8_gt_irq_reset(&dev_priv->gt);
37022968
3703
- GEN3_IRQ_RESET(GEN8_PCU_);
2969
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
37042970
37052971 spin_lock_irq(&dev_priv->irq_lock);
37062972 if (dev_priv->display_irqs_enabled)
....@@ -3719,6 +2985,18 @@
37192985 enabled_irqs |= hpd[encoder->hpd_pin];
37202986
37212987 return enabled_irqs;
2988
+}
2989
+
2990
+static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
2991
+ const u32 hpd[HPD_NUM_PINS])
2992
+{
2993
+ struct intel_encoder *encoder;
2994
+ u32 hotplug_irqs = 0;
2995
+
2996
+ for_each_intel_encoder(&dev_priv->drm, encoder)
2997
+ hotplug_irqs |= hpd[encoder->hpd_pin];
2998
+
2999
+ return hotplug_irqs;
37223000 }
37233001
37243002 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
....@@ -3750,46 +3028,71 @@
37503028 {
37513029 u32 hotplug_irqs, enabled_irqs;
37523030
3753
- if (HAS_PCH_IBX(dev_priv)) {
3754
- hotplug_irqs = SDE_HOTPLUG_MASK;
3755
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3756
- } else {
3757
- hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3758
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3759
- }
3031
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3032
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
37603033
37613034 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
37623035
37633036 ibx_hpd_detection_setup(dev_priv);
37643037 }
37653038
3766
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3039
+static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
3040
+ u32 enable_mask)
37673041 {
37683042 u32 hotplug;
37693043
37703044 hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3771
- hotplug |= ICP_DDIA_HPD_ENABLE |
3772
- ICP_DDIB_HPD_ENABLE;
3045
+ hotplug |= enable_mask;
37733046 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3047
+}
3048
+
3049
+static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
3050
+ u32 enable_mask)
3051
+{
3052
+ u32 hotplug;
37743053
37753054 hotplug = I915_READ(SHOTPLUG_CTL_TC);
3776
- hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3777
- ICP_TC_HPD_ENABLE(PORT_TC2) |
3778
- ICP_TC_HPD_ENABLE(PORT_TC3) |
3779
- ICP_TC_HPD_ENABLE(PORT_TC4);
3055
+ hotplug |= enable_mask;
37803056 I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
37813057 }
37823058
3783
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3059
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
3060
+ u32 ddi_enable_mask, u32 tc_enable_mask)
37843061 {
37853062 u32 hotplug_irqs, enabled_irqs;
37863063
3787
- hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3788
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3064
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3065
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3066
+
3067
+ if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3068
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
37893069
37903070 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
37913071
3792
- icp_hpd_detection_setup(dev_priv);
3072
+ icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
3073
+ if (tc_enable_mask)
3074
+ icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
3075
+}
3076
+
3077
+/*
3078
+ * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
3079
+ * equivalent of SDE.
3080
+ */
3081
+static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3082
+{
3083
+ icp_hpd_irq_setup(dev_priv,
3084
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
3085
+}
3086
+
3087
+/*
3088
+ * JSP behaves exactly the same as MCC above except that port C is mapped to
3089
+ * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's
3090
+ * masks & tables rather than ICP's masks & tables.
3091
+ */
3092
+static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3093
+{
3094
+ icp_hpd_irq_setup(dev_priv,
3095
+ TGP_DDI_HPD_ENABLE_MASK, 0);
37933096 }
37943097
37953098 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
....@@ -3800,14 +3103,18 @@
38003103 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
38013104 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
38023105 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3803
- GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3106
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
3107
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
3108
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
38043109 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
38053110
38063111 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
38073112 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
38083113 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
38093114 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3810
- GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3115
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
3116
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
3117
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
38113118 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
38123119 }
38133120
....@@ -3816,8 +3123,8 @@
38163123 u32 hotplug_irqs, enabled_irqs;
38173124 u32 val;
38183125
3819
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3820
- hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3126
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3127
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
38213128
38223129 val = I915_READ(GEN11_DE_HPD_IMR);
38233130 val &= ~hotplug_irqs;
....@@ -3827,8 +3134,12 @@
38273134
38283135 gen11_hpd_detection_setup(dev_priv);
38293136
3830
- if (HAS_PCH_ICP(dev_priv))
3831
- icp_hpd_irq_setup(dev_priv);
3137
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
3138
+ icp_hpd_irq_setup(dev_priv,
3139
+ TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
3140
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3141
+ icp_hpd_irq_setup(dev_priv,
3142
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
38323143 }
38333144
38343145 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
....@@ -3860,8 +3171,11 @@
38603171 {
38613172 u32 hotplug_irqs, enabled_irqs;
38623173
3863
- hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3864
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3174
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3175
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3176
+
3177
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3178
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
38653179
38663180 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
38673181
....@@ -3888,22 +3202,13 @@
38883202 {
38893203 u32 hotplug_irqs, enabled_irqs;
38903204
3891
- if (INTEL_GEN(dev_priv) >= 8) {
3892
- hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3893
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3205
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3206
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
38943207
3208
+ if (INTEL_GEN(dev_priv) >= 8)
38953209 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3896
- } else if (INTEL_GEN(dev_priv) >= 7) {
3897
- hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3898
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3899
-
3210
+ else
39003211 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3901
- } else {
3902
- hotplug_irqs = DE_DP_A_HOTPLUG;
3903
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3904
-
3905
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3906
- }
39073212
39083213 ilk_hpd_detection_setup(dev_priv);
39093214
....@@ -3920,8 +3225,9 @@
39203225 PORTB_HOTPLUG_ENABLE |
39213226 PORTC_HOTPLUG_ENABLE;
39223227
3923
- DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3924
- hotplug, enabled_irqs);
3228
+ drm_dbg_kms(&dev_priv->drm,
3229
+ "Invert bit setting: hp_ctl:%x hp_port:%x\n",
3230
+ hotplug, enabled_irqs);
39253231 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
39263232
39273233 /*
....@@ -3950,17 +3256,16 @@
39503256 {
39513257 u32 hotplug_irqs, enabled_irqs;
39523258
3953
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3954
- hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3259
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3260
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
39553261
39563262 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
39573263
39583264 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
39593265 }
39603266
3961
-static void ibx_irq_postinstall(struct drm_device *dev)
3267
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
39623268 {
3963
- struct drm_i915_private *dev_priv = to_i915(dev);
39643269 u32 mask;
39653270
39663271 if (HAS_PCH_NOP(dev_priv))
....@@ -3973,7 +3278,7 @@
39733278 else
39743279 mask = SDE_GMBUS_CPT;
39753280
3976
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3281
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
39773282 I915_WRITE(SDEIMR, ~mask);
39783283
39793284 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
....@@ -3983,47 +3288,9 @@
39833288 spt_hpd_detection_setup(dev_priv);
39843289 }
39853290
3986
-static void gen5_gt_irq_postinstall(struct drm_device *dev)
3291
+static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
39873292 {
3988
- struct drm_i915_private *dev_priv = to_i915(dev);
3989
- u32 pm_irqs, gt_irqs;
3990
-
3991
- pm_irqs = gt_irqs = 0;
3992
-
3993
- dev_priv->gt_irq_mask = ~0;
3994
- if (HAS_L3_DPF(dev_priv)) {
3995
- /* L3 parity interrupt is always unmasked. */
3996
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3997
- gt_irqs |= GT_PARITY_ERROR(dev_priv);
3998
- }
3999
-
4000
- gt_irqs |= GT_RENDER_USER_INTERRUPT;
4001
- if (IS_GEN5(dev_priv)) {
4002
- gt_irqs |= ILK_BSD_USER_INTERRUPT;
4003
- } else {
4004
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
4005
- }
4006
-
4007
- GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
4008
-
4009
- if (INTEL_GEN(dev_priv) >= 6) {
4010
- /*
4011
- * RPS interrupts will get enabled/disabled on demand when RPS
4012
- * itself is enabled/disabled.
4013
- */
4014
- if (HAS_VEBOX(dev_priv)) {
4015
- pm_irqs |= PM_VEBOX_USER_INTERRUPT;
4016
- dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
4017
- }
4018
-
4019
- dev_priv->pm_imr = 0xffffffff;
4020
- GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
4021
- }
4022
-}
4023
-
4024
-static int ironlake_irq_postinstall(struct drm_device *dev)
4025
-{
4026
- struct drm_i915_private *dev_priv = to_i915(dev);
3293
+ struct intel_uncore *uncore = &dev_priv->uncore;
40273294 u32 display_mask, extra_mask;
40283295
40293296 if (INTEL_GEN(dev_priv) >= 7) {
....@@ -4042,22 +3309,22 @@
40423309 }
40433310
40443311 if (IS_HASWELL(dev_priv)) {
4045
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4046
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3312
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
40473313 display_mask |= DE_EDP_PSR_INT_HSW;
40483314 }
40493315
40503316 dev_priv->irq_mask = ~display_mask;
40513317
4052
- ibx_irq_pre_postinstall(dev);
3318
+ ibx_irq_pre_postinstall(dev_priv);
40533319
4054
- GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3320
+ GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3321
+ display_mask | extra_mask);
40553322
4056
- gen5_gt_irq_postinstall(dev);
3323
+ gen5_gt_irq_postinstall(&dev_priv->gt);
40573324
40583325 ilk_hpd_detection_setup(dev_priv);
40593326
4060
- ibx_irq_postinstall(dev);
3327
+ ibx_irq_postinstall(dev_priv);
40613328
40623329 if (IS_IRONLAKE_M(dev_priv)) {
40633330 /* Enable PCU event interrupts
....@@ -4069,8 +3336,6 @@
40693336 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
40703337 spin_unlock_irq(&dev_priv->irq_lock);
40713338 }
4072
-
4073
- return 0;
40743339 }
40753340
40763341 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
....@@ -4102,11 +3367,9 @@
41023367 }
41033368
41043369
4105
-static int valleyview_irq_postinstall(struct drm_device *dev)
3370
+static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
41063371 {
4107
- struct drm_i915_private *dev_priv = to_i915(dev);
4108
-
4109
- gen5_gt_irq_postinstall(dev);
3372
+ gen5_gt_irq_postinstall(&dev_priv->gt);
41103373
41113374 spin_lock_irq(&dev_priv->irq_lock);
41123375 if (dev_priv->display_irqs_enabled)
....@@ -4115,69 +3378,27 @@
41153378
41163379 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
41173380 POSTING_READ(VLV_MASTER_IER);
4118
-
4119
- return 0;
4120
-}
4121
-
4122
-static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4123
-{
4124
- /* These are interrupts we'll toggle with the ring mask register */
4125
- uint32_t gt_interrupts[] = {
4126
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4127
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4128
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4129
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4130
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4131
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4132
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
4133
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4134
- 0,
4135
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4136
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4137
- };
4138
-
4139
- if (HAS_L3_DPF(dev_priv))
4140
- gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
4141
-
4142
- dev_priv->pm_ier = 0x0;
4143
- dev_priv->pm_imr = ~dev_priv->pm_ier;
4144
- GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4145
- GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4146
- /*
4147
- * RPS interrupts will get enabled/disabled on demand when RPS itself
4148
- * is enabled/disabled. Same wil be the case for GuC interrupts.
4149
- */
4150
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4151
- GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
41523381 }
41533382
41543383 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
41553384 {
4156
- uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4157
- uint32_t de_pipe_enables;
4158
- u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3385
+ struct intel_uncore *uncore = &dev_priv->uncore;
3386
+
3387
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3388
+ GEN8_PIPE_CDCLK_CRC_DONE;
3389
+ u32 de_pipe_enables;
3390
+ u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
41593391 u32 de_port_enables;
41603392 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3393
+ u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3394
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
41613395 enum pipe pipe;
41623396
41633397 if (INTEL_GEN(dev_priv) <= 10)
41643398 de_misc_masked |= GEN8_DE_MISC_GSE;
41653399
4166
- if (INTEL_GEN(dev_priv) >= 9) {
4167
- de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4168
- de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4169
- GEN9_AUX_CHANNEL_D;
4170
- if (IS_GEN9_LP(dev_priv))
4171
- de_port_masked |= BXT_DE_PORT_GMBUS;
4172
- } else {
4173
- de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4174
- }
4175
-
4176
- if (INTEL_GEN(dev_priv) >= 11)
4177
- de_port_masked |= ICL_AUX_CHANNEL_E;
4178
-
4179
- if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4180
- de_port_masked |= CNL_AUX_CHANNEL_F;
3400
+ if (IS_GEN9_LP(dev_priv))
3401
+ de_port_masked |= BXT_DE_PORT_GMBUS;
41813402
41823403 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
41833404 GEN8_PIPE_FIFO_UNDERRUN;
....@@ -4188,28 +3409,42 @@
41883409 else if (IS_BROADWELL(dev_priv))
41893410 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
41903411
4191
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4192
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3412
+ if (INTEL_GEN(dev_priv) >= 12) {
3413
+ enum transcoder trans;
3414
+
3415
+ for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3416
+ enum intel_display_power_domain domain;
3417
+
3418
+ domain = POWER_DOMAIN_TRANSCODER(trans);
3419
+ if (!intel_display_power_is_enabled(dev_priv, domain))
3420
+ continue;
3421
+
3422
+ gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3423
+ }
3424
+ } else {
3425
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3426
+ }
41933427
41943428 for_each_pipe(dev_priv, pipe) {
41953429 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
41963430
41973431 if (intel_display_power_is_enabled(dev_priv,
41983432 POWER_DOMAIN_PIPE(pipe)))
4199
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3433
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
42003434 dev_priv->de_irq_mask[pipe],
42013435 de_pipe_enables);
42023436 }
42033437
4204
- GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4205
- GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3438
+ GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3439
+ GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
42063440
42073441 if (INTEL_GEN(dev_priv) >= 11) {
42083442 u32 de_hpd_masked = 0;
42093443 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
42103444 GEN11_DE_TBT_HOTPLUG_MASK;
42113445
4212
- GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
3446
+ GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3447
+ de_hpd_enables);
42133448 gen11_hpd_detection_setup(dev_priv);
42143449 } else if (IS_GEN9_LP(dev_priv)) {
42153450 bxt_hpd_detection_setup(dev_priv);
....@@ -4218,93 +3453,72 @@
42183453 }
42193454 }
42203455
4221
-static int gen8_irq_postinstall(struct drm_device *dev)
3456
+static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
42223457 {
4223
- struct drm_i915_private *dev_priv = to_i915(dev);
4224
-
42253458 if (HAS_PCH_SPLIT(dev_priv))
4226
- ibx_irq_pre_postinstall(dev);
3459
+ ibx_irq_pre_postinstall(dev_priv);
42273460
4228
- gen8_gt_irq_postinstall(dev_priv);
3461
+ gen8_gt_irq_postinstall(&dev_priv->gt);
42293462 gen8_de_irq_postinstall(dev_priv);
42303463
42313464 if (HAS_PCH_SPLIT(dev_priv))
4232
- ibx_irq_postinstall(dev);
3465
+ ibx_irq_postinstall(dev_priv);
42333466
4234
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4235
- POSTING_READ(GEN8_MASTER_IRQ);
4236
-
4237
- return 0;
3467
+ gen8_master_intr_enable(dev_priv->uncore.regs);
42383468 }
42393469
4240
-static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3470
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
42413471 {
4242
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4243
-
4244
- BUILD_BUG_ON(irqs & 0xffff0000);
4245
-
4246
- /* Enable RCS, BCS, VCS and VECS class interrupts. */
4247
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4248
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
4249
-
4250
- /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4251
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
4252
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
4253
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
4254
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
4255
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4256
-
4257
- /*
4258
- * RPS interrupts will get enabled/disabled on demand when RPS itself
4259
- * is enabled/disabled.
4260
- */
4261
- dev_priv->pm_ier = 0x0;
4262
- dev_priv->pm_imr = ~dev_priv->pm_ier;
4263
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4264
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
4265
-}
4266
-
4267
-static void icp_irq_postinstall(struct drm_device *dev)
4268
-{
4269
- struct drm_i915_private *dev_priv = to_i915(dev);
42703472 u32 mask = SDE_GMBUS_ICP;
42713473
4272
- WARN_ON(I915_READ(SDEIER) != 0);
3474
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
42733475 I915_WRITE(SDEIER, 0xffffffff);
42743476 POSTING_READ(SDEIER);
42753477
4276
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3478
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
42773479 I915_WRITE(SDEIMR, ~mask);
42783480
4279
- icp_hpd_detection_setup(dev_priv);
3481
+ if (HAS_PCH_TGP(dev_priv)) {
3482
+ icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
3483
+ icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK);
3484
+ } else if (HAS_PCH_JSP(dev_priv)) {
3485
+ icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
3486
+ } else if (HAS_PCH_MCC(dev_priv)) {
3487
+ icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
3488
+ icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1));
3489
+ } else {
3490
+ icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
3491
+ icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK);
3492
+ }
42803493 }
42813494
4282
-static int gen11_irq_postinstall(struct drm_device *dev)
3495
+static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
42833496 {
4284
- struct drm_i915_private *dev_priv = dev->dev_private;
3497
+ struct intel_uncore *uncore = &dev_priv->uncore;
42853498 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
42863499
4287
- if (HAS_PCH_ICP(dev_priv))
4288
- icp_irq_postinstall(dev);
3500
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3501
+ icp_irq_postinstall(dev_priv);
42893502
4290
- gen11_gt_irq_postinstall(dev_priv);
3503
+ gen11_gt_irq_postinstall(&dev_priv->gt);
42913504 gen8_de_irq_postinstall(dev_priv);
42923505
4293
- GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3506
+ GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
42943507
42953508 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
42963509
4297
- I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
4298
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
4299
-
4300
- return 0;
3510
+ if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
3511
+ dg1_master_intr_enable(uncore->regs);
3512
+ POSTING_READ(DG1_MSTR_UNIT_INTR);
3513
+ } else {
3514
+ gen11_master_intr_enable(uncore->regs);
3515
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
3516
+ }
43013517 }
43023518
4303
-static int cherryview_irq_postinstall(struct drm_device *dev)
3519
+static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
43043520 {
4305
- struct drm_i915_private *dev_priv = to_i915(dev);
4306
-
4307
- gen8_gt_irq_postinstall(dev_priv);
3521
+ gen8_gt_irq_postinstall(&dev_priv->gt);
43083522
43093523 spin_lock_irq(&dev_priv->irq_lock);
43103524 if (dev_priv->display_irqs_enabled)
....@@ -4313,28 +3527,26 @@
43133527
43143528 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
43153529 POSTING_READ(GEN8_MASTER_IRQ);
4316
-
4317
- return 0;
43183530 }
43193531
4320
-static void i8xx_irq_reset(struct drm_device *dev)
3532
+static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
43213533 {
4322
- struct drm_i915_private *dev_priv = to_i915(dev);
3534
+ struct intel_uncore *uncore = &dev_priv->uncore;
43233535
43243536 i9xx_pipestat_irq_reset(dev_priv);
43253537
4326
- I915_WRITE16(HWSTAM, 0xffff);
4327
-
4328
- GEN2_IRQ_RESET();
3538
+ GEN2_IRQ_RESET(uncore);
43293539 }
43303540
4331
-static int i8xx_irq_postinstall(struct drm_device *dev)
3541
+static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
43323542 {
4333
- struct drm_i915_private *dev_priv = to_i915(dev);
3543
+ struct intel_uncore *uncore = &dev_priv->uncore;
43343544 u16 enable_mask;
43353545
4336
- I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4337
- I915_ERROR_MEMORY_REFRESH));
3546
+ intel_uncore_write16(uncore,
3547
+ EMR,
3548
+ ~(I915_ERROR_PAGE_TABLE |
3549
+ I915_ERROR_MEMORY_REFRESH));
43383550
43393551 /* Unmask the interrupts that we always want on. */
43403552 dev_priv->irq_mask =
....@@ -4348,7 +3560,7 @@
43483560 I915_MASTER_ERROR_INTERRUPT |
43493561 I915_USER_INTERRUPT;
43503562
4351
- GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3563
+ GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
43523564
43533565 /* Interrupt setup is already guaranteed to be single-threaded, this is
43543566 * just to make the assert_spin_locked check happy. */
....@@ -4356,21 +3568,20 @@
43563568 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
43573569 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
43583570 spin_unlock_irq(&dev_priv->irq_lock);
4359
-
4360
- return 0;
43613571 }
43623572
4363
-static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
3573
+static void i8xx_error_irq_ack(struct drm_i915_private *i915,
43643574 u16 *eir, u16 *eir_stuck)
43653575 {
3576
+ struct intel_uncore *uncore = &i915->uncore;
43663577 u16 emr;
43673578
4368
- *eir = I915_READ16(EIR);
3579
+ *eir = intel_uncore_read16(uncore, EIR);
43693580
43703581 if (*eir)
4371
- I915_WRITE16(EIR, *eir);
3582
+ intel_uncore_write16(uncore, EIR, *eir);
43723583
4373
- *eir_stuck = I915_READ16(EIR);
3584
+ *eir_stuck = intel_uncore_read16(uncore, EIR);
43743585 if (*eir_stuck == 0)
43753586 return;
43763587
....@@ -4384,9 +3595,9 @@
43843595 * (or by a GPU reset) so we mask any bit that
43853596 * remains set.
43863597 */
4387
- emr = I915_READ16(EMR);
4388
- I915_WRITE16(EMR, 0xffff);
4389
- I915_WRITE16(EMR, emr | *eir_stuck);
3598
+ emr = intel_uncore_read16(uncore, EMR);
3599
+ intel_uncore_write16(uncore, EMR, 0xffff);
3600
+ intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
43903601 }
43913602
43923603 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
....@@ -4395,7 +3606,8 @@
43953606 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
43963607
43973608 if (eir_stuck)
4398
- DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
3609
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3610
+ eir_stuck);
43993611 }
44003612
44013613 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
....@@ -4432,27 +3644,27 @@
44323644 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
44333645
44343646 if (eir_stuck)
4435
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
3647
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3648
+ eir_stuck);
44363649 }
44373650
44383651 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
44393652 {
4440
- struct drm_device *dev = arg;
4441
- struct drm_i915_private *dev_priv = to_i915(dev);
3653
+ struct drm_i915_private *dev_priv = arg;
44423654 irqreturn_t ret = IRQ_NONE;
44433655
44443656 if (!intel_irqs_enabled(dev_priv))
44453657 return IRQ_NONE;
44463658
44473659 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4448
- disable_rpm_wakeref_asserts(dev_priv);
3660
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44493661
44503662 do {
44513663 u32 pipe_stats[I915_MAX_PIPES] = {};
44523664 u16 eir = 0, eir_stuck = 0;
44533665 u16 iir;
44543666
4455
- iir = I915_READ16(IIR);
3667
+ iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
44563668 if (iir == 0)
44573669 break;
44583670
....@@ -4465,10 +3677,10 @@
44653677 if (iir & I915_MASTER_ERROR_INTERRUPT)
44663678 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
44673679
4468
- I915_WRITE16(IIR, iir);
3680
+ intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
44693681
44703682 if (iir & I915_USER_INTERRUPT)
4471
- notify_ring(dev_priv->engine[RCS]);
3683
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
44723684
44733685 if (iir & I915_MASTER_ERROR_INTERRUPT)
44743686 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
....@@ -4476,14 +3688,14 @@
44763688 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
44773689 } while (0);
44783690
4479
- enable_rpm_wakeref_asserts(dev_priv);
3691
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44803692
44813693 return ret;
44823694 }
44833695
4484
-static void i915_irq_reset(struct drm_device *dev)
3696
+static void i915_irq_reset(struct drm_i915_private *dev_priv)
44853697 {
4486
- struct drm_i915_private *dev_priv = to_i915(dev);
3698
+ struct intel_uncore *uncore = &dev_priv->uncore;
44873699
44883700 if (I915_HAS_HOTPLUG(dev_priv)) {
44893701 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
....@@ -4492,14 +3704,12 @@
44923704
44933705 i9xx_pipestat_irq_reset(dev_priv);
44943706
4495
- I915_WRITE(HWSTAM, 0xffffffff);
4496
-
4497
- GEN3_IRQ_RESET();
3707
+ GEN3_IRQ_RESET(uncore, GEN2_);
44983708 }
44993709
4500
-static int i915_irq_postinstall(struct drm_device *dev)
3710
+static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
45013711 {
4502
- struct drm_i915_private *dev_priv = to_i915(dev);
3712
+ struct intel_uncore *uncore = &dev_priv->uncore;
45033713 u32 enable_mask;
45043714
45053715 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
....@@ -4526,7 +3736,7 @@
45263736 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
45273737 }
45283738
4529
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3739
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
45303740
45313741 /* Interrupt setup is already guaranteed to be single-threaded, this is
45323742 * just to make the assert_spin_locked check happy. */
....@@ -4536,21 +3746,18 @@
45363746 spin_unlock_irq(&dev_priv->irq_lock);
45373747
45383748 i915_enable_asle_pipestat(dev_priv);
4539
-
4540
- return 0;
45413749 }
45423750
45433751 static irqreturn_t i915_irq_handler(int irq, void *arg)
45443752 {
4545
- struct drm_device *dev = arg;
4546
- struct drm_i915_private *dev_priv = to_i915(dev);
3753
+ struct drm_i915_private *dev_priv = arg;
45473754 irqreturn_t ret = IRQ_NONE;
45483755
45493756 if (!intel_irqs_enabled(dev_priv))
45503757 return IRQ_NONE;
45513758
45523759 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4553
- disable_rpm_wakeref_asserts(dev_priv);
3760
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45543761
45553762 do {
45563763 u32 pipe_stats[I915_MAX_PIPES] = {};
....@@ -4558,7 +3765,7 @@
45583765 u32 hotplug_status = 0;
45593766 u32 iir;
45603767
4561
- iir = I915_READ(IIR);
3768
+ iir = I915_READ(GEN2_IIR);
45623769 if (iir == 0)
45633770 break;
45643771
....@@ -4575,10 +3782,10 @@
45753782 if (iir & I915_MASTER_ERROR_INTERRUPT)
45763783 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
45773784
4578
- I915_WRITE(IIR, iir);
3785
+ I915_WRITE(GEN2_IIR, iir);
45793786
45803787 if (iir & I915_USER_INTERRUPT)
4581
- notify_ring(dev_priv->engine[RCS]);
3788
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
45823789
45833790 if (iir & I915_MASTER_ERROR_INTERRUPT)
45843791 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
....@@ -4589,28 +3796,26 @@
45893796 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
45903797 } while (0);
45913798
4592
- enable_rpm_wakeref_asserts(dev_priv);
3799
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45933800
45943801 return ret;
45953802 }
45963803
4597
-static void i965_irq_reset(struct drm_device *dev)
3804
+static void i965_irq_reset(struct drm_i915_private *dev_priv)
45983805 {
4599
- struct drm_i915_private *dev_priv = to_i915(dev);
3806
+ struct intel_uncore *uncore = &dev_priv->uncore;
46003807
46013808 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
46023809 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
46033810
46043811 i9xx_pipestat_irq_reset(dev_priv);
46053812
4606
- I915_WRITE(HWSTAM, 0xffffffff);
4607
-
4608
- GEN3_IRQ_RESET();
3813
+ GEN3_IRQ_RESET(uncore, GEN2_);
46093814 }
46103815
4611
-static int i965_irq_postinstall(struct drm_device *dev)
3816
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
46123817 {
4613
- struct drm_i915_private *dev_priv = to_i915(dev);
3818
+ struct intel_uncore *uncore = &dev_priv->uncore;
46143819 u32 enable_mask;
46153820 u32 error_mask;
46163821
....@@ -4648,7 +3853,7 @@
46483853 if (IS_G4X(dev_priv))
46493854 enable_mask |= I915_BSD_USER_INTERRUPT;
46503855
4651
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3856
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
46523857
46533858 /* Interrupt setup is already guaranteed to be single-threaded, this is
46543859 * just to make the assert_spin_locked check happy. */
....@@ -4659,8 +3864,6 @@
46593864 spin_unlock_irq(&dev_priv->irq_lock);
46603865
46613866 i915_enable_asle_pipestat(dev_priv);
4662
-
4663
- return 0;
46643867 }
46653868
46663869 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
....@@ -4690,15 +3893,14 @@
46903893
46913894 static irqreturn_t i965_irq_handler(int irq, void *arg)
46923895 {
4693
- struct drm_device *dev = arg;
4694
- struct drm_i915_private *dev_priv = to_i915(dev);
3896
+ struct drm_i915_private *dev_priv = arg;
46953897 irqreturn_t ret = IRQ_NONE;
46963898
46973899 if (!intel_irqs_enabled(dev_priv))
46983900 return IRQ_NONE;
46993901
47003902 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4701
- disable_rpm_wakeref_asserts(dev_priv);
3903
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
47023904
47033905 do {
47043906 u32 pipe_stats[I915_MAX_PIPES] = {};
....@@ -4706,7 +3908,7 @@
47063908 u32 hotplug_status = 0;
47073909 u32 iir;
47083910
4709
- iir = I915_READ(IIR);
3911
+ iir = I915_READ(GEN2_IIR);
47103912 if (iir == 0)
47113913 break;
47123914
....@@ -4722,13 +3924,13 @@
47223924 if (iir & I915_MASTER_ERROR_INTERRUPT)
47233925 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
47243926
4725
- I915_WRITE(IIR, iir);
3927
+ I915_WRITE(GEN2_IIR, iir);
47263928
47273929 if (iir & I915_USER_INTERRUPT)
4728
- notify_ring(dev_priv->engine[RCS]);
3930
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
47293931
47303932 if (iir & I915_BSD_USER_INTERRUPT)
4731
- notify_ring(dev_priv->engine[VCS]);
3933
+ intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);
47323934
47333935 if (iir & I915_MASTER_ERROR_INTERRUPT)
47343936 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
....@@ -4739,7 +3941,7 @@
47393941 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
47403942 } while (0);
47413943
4742
- enable_rpm_wakeref_asserts(dev_priv);
3944
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
47433945
47443946 return ret;
47453947 }
....@@ -4754,59 +3956,21 @@
47543956 void intel_irq_init(struct drm_i915_private *dev_priv)
47553957 {
47563958 struct drm_device *dev = &dev_priv->drm;
4757
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
47583959 int i;
3960
+
3961
+ intel_hpd_init_pins(dev_priv);
47593962
47603963 intel_hpd_init_work(dev_priv);
47613964
4762
- INIT_WORK(&rps->work, gen6_pm_rps_work);
4763
-
4764
- INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3965
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
47653966 for (i = 0; i < MAX_L3_SLICES; ++i)
47663967 dev_priv->l3_parity.remap_info[i] = NULL;
47673968
4768
- if (HAS_GUC_SCHED(dev_priv))
4769
- dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
3969
+ /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
3970
+ if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
3971
+ dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
47703972
4771
- /* Let's track the enabled rps events */
4772
- if (IS_VALLEYVIEW(dev_priv))
4773
- /* WaGsvRC0ResidencyMethod:vlv */
4774
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4775
- else
4776
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4777
-
4778
- rps->pm_intrmsk_mbz = 0;
4779
-
4780
- /*
4781
- * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
4782
- * if GEN6_PM_UP_EI_EXPIRED is masked.
4783
- *
4784
- * TODO: verify if this can be reproduced on VLV,CHV.
4785
- */
4786
- if (INTEL_GEN(dev_priv) <= 7)
4787
- rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4788
-
4789
- if (INTEL_GEN(dev_priv) >= 8)
4790
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4791
-
4792
- if (IS_GEN2(dev_priv)) {
4793
- /* Gen2 doesn't have a hardware frame counter */
4794
- dev->max_vblank_count = 0;
4795
- } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4796
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4797
- dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4798
- } else {
4799
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
4800
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4801
- }
4802
-
4803
- /*
4804
- * Opt out of the vblank disable timer on everything except gen2.
4805
- * Gen2 doesn't have a hardware frame counter and so depends on
4806
- * vblank interrupts to produce sane vblank seuquence numbers.
4807
- */
4808
- if (!IS_GEN2(dev_priv))
4809
- dev->vblank_disable_immediate = true;
3973
+ dev->vblank_disable_immediate = true;
48103974
48113975 /* Most platforms treat the display irq block as an always-on
48123976 * power domain. vlv/chv can disable it at runtime and need
....@@ -4819,81 +3983,30 @@
48193983 dev_priv->display_irqs_enabled = false;
48203984
48213985 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
3986
+ /* If we have MST support, we want to avoid doing short HPD IRQ storm
3987
+ * detection, as short HPD storms will occur as a natural part of
3988
+ * sideband messaging with MST.
3989
+ * On older platforms however, IRQ storms can occur with both long and
3990
+ * short pulses, as seen on some G4x systems.
3991
+ */
3992
+ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
48223993
4823
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4824
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4825
-
4826
- if (IS_CHERRYVIEW(dev_priv)) {
4827
- dev->driver->irq_handler = cherryview_irq_handler;
4828
- dev->driver->irq_preinstall = cherryview_irq_reset;
4829
- dev->driver->irq_postinstall = cherryview_irq_postinstall;
4830
- dev->driver->irq_uninstall = cherryview_irq_reset;
4831
- dev->driver->enable_vblank = i965_enable_vblank;
4832
- dev->driver->disable_vblank = i965_disable_vblank;
4833
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4834
- } else if (IS_VALLEYVIEW(dev_priv)) {
4835
- dev->driver->irq_handler = valleyview_irq_handler;
4836
- dev->driver->irq_preinstall = valleyview_irq_reset;
4837
- dev->driver->irq_postinstall = valleyview_irq_postinstall;
4838
- dev->driver->irq_uninstall = valleyview_irq_reset;
4839
- dev->driver->enable_vblank = i965_enable_vblank;
4840
- dev->driver->disable_vblank = i965_disable_vblank;
4841
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4842
- } else if (INTEL_GEN(dev_priv) >= 11) {
4843
- dev->driver->irq_handler = gen11_irq_handler;
4844
- dev->driver->irq_preinstall = gen11_irq_reset;
4845
- dev->driver->irq_postinstall = gen11_irq_postinstall;
4846
- dev->driver->irq_uninstall = gen11_irq_reset;
4847
- dev->driver->enable_vblank = gen8_enable_vblank;
4848
- dev->driver->disable_vblank = gen8_disable_vblank;
4849
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4850
- } else if (INTEL_GEN(dev_priv) >= 8) {
4851
- dev->driver->irq_handler = gen8_irq_handler;
4852
- dev->driver->irq_preinstall = gen8_irq_reset;
4853
- dev->driver->irq_postinstall = gen8_irq_postinstall;
4854
- dev->driver->irq_uninstall = gen8_irq_reset;
4855
- dev->driver->enable_vblank = gen8_enable_vblank;
4856
- dev->driver->disable_vblank = gen8_disable_vblank;
4857
- if (IS_GEN9_LP(dev_priv))
3994
+ if (HAS_GMCH(dev_priv)) {
3995
+ if (I915_HAS_HOTPLUG(dev_priv))
3996
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3997
+ } else {
3998
+ if (HAS_PCH_JSP(dev_priv))
3999
+ dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
4000
+ else if (HAS_PCH_MCC(dev_priv))
4001
+ dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
4002
+ else if (INTEL_GEN(dev_priv) >= 11)
4003
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4004
+ else if (IS_GEN9_LP(dev_priv))
48584005 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4859
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
4860
- HAS_PCH_CNP(dev_priv))
4006
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
48614007 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
48624008 else
48634009 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4864
- } else if (HAS_PCH_SPLIT(dev_priv)) {
4865
- dev->driver->irq_handler = ironlake_irq_handler;
4866
- dev->driver->irq_preinstall = ironlake_irq_reset;
4867
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
4868
- dev->driver->irq_uninstall = ironlake_irq_reset;
4869
- dev->driver->enable_vblank = ironlake_enable_vblank;
4870
- dev->driver->disable_vblank = ironlake_disable_vblank;
4871
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4872
- } else {
4873
- if (IS_GEN2(dev_priv)) {
4874
- dev->driver->irq_preinstall = i8xx_irq_reset;
4875
- dev->driver->irq_postinstall = i8xx_irq_postinstall;
4876
- dev->driver->irq_handler = i8xx_irq_handler;
4877
- dev->driver->irq_uninstall = i8xx_irq_reset;
4878
- dev->driver->enable_vblank = i8xx_enable_vblank;
4879
- dev->driver->disable_vblank = i8xx_disable_vblank;
4880
- } else if (IS_GEN3(dev_priv)) {
4881
- dev->driver->irq_preinstall = i915_irq_reset;
4882
- dev->driver->irq_postinstall = i915_irq_postinstall;
4883
- dev->driver->irq_uninstall = i915_irq_reset;
4884
- dev->driver->irq_handler = i915_irq_handler;
4885
- dev->driver->enable_vblank = i8xx_enable_vblank;
4886
- dev->driver->disable_vblank = i8xx_disable_vblank;
4887
- } else {
4888
- dev->driver->irq_preinstall = i965_irq_reset;
4889
- dev->driver->irq_postinstall = i965_irq_postinstall;
4890
- dev->driver->irq_uninstall = i965_irq_reset;
4891
- dev->driver->irq_handler = i965_irq_handler;
4892
- dev->driver->enable_vblank = i965_enable_vblank;
4893
- dev->driver->disable_vblank = i965_disable_vblank;
4894
- }
4895
- if (I915_HAS_HOTPLUG(dev_priv))
4896
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
48974010 }
48984011 }
48994012
....@@ -4911,6 +4024,77 @@
49114024 kfree(i915->l3_parity.remap_info[i]);
49124025 }
49134026
4027
+static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4028
+{
4029
+ if (HAS_GMCH(dev_priv)) {
4030
+ if (IS_CHERRYVIEW(dev_priv))
4031
+ return cherryview_irq_handler;
4032
+ else if (IS_VALLEYVIEW(dev_priv))
4033
+ return valleyview_irq_handler;
4034
+ else if (IS_GEN(dev_priv, 4))
4035
+ return i965_irq_handler;
4036
+ else if (IS_GEN(dev_priv, 3))
4037
+ return i915_irq_handler;
4038
+ else
4039
+ return i8xx_irq_handler;
4040
+ } else {
4041
+ if (HAS_MASTER_UNIT_IRQ(dev_priv))
4042
+ return dg1_irq_handler;
4043
+ if (INTEL_GEN(dev_priv) >= 11)
4044
+ return gen11_irq_handler;
4045
+ else if (INTEL_GEN(dev_priv) >= 8)
4046
+ return gen8_irq_handler;
4047
+ else
4048
+ return ilk_irq_handler;
4049
+ }
4050
+}
4051
+
4052
+static void intel_irq_reset(struct drm_i915_private *dev_priv)
4053
+{
4054
+ if (HAS_GMCH(dev_priv)) {
4055
+ if (IS_CHERRYVIEW(dev_priv))
4056
+ cherryview_irq_reset(dev_priv);
4057
+ else if (IS_VALLEYVIEW(dev_priv))
4058
+ valleyview_irq_reset(dev_priv);
4059
+ else if (IS_GEN(dev_priv, 4))
4060
+ i965_irq_reset(dev_priv);
4061
+ else if (IS_GEN(dev_priv, 3))
4062
+ i915_irq_reset(dev_priv);
4063
+ else
4064
+ i8xx_irq_reset(dev_priv);
4065
+ } else {
4066
+ if (INTEL_GEN(dev_priv) >= 11)
4067
+ gen11_irq_reset(dev_priv);
4068
+ else if (INTEL_GEN(dev_priv) >= 8)
4069
+ gen8_irq_reset(dev_priv);
4070
+ else
4071
+ ilk_irq_reset(dev_priv);
4072
+ }
4073
+}
4074
+
4075
/*
 * Enable the interrupt sources this platform needs, via the
 * platform-specific postinstall routine. Called after the handler
 * has been requested so no events are lost.
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
4097
+
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 *
 * Returns 0 on success or the negative errno from request_irq() on failure.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	/* Quiesce all sources before the handler can fire. */
	intel_irq_reset(dev_priv);

	/* IRQF_SHARED: legacy PCI interrupt lines may be shared with other devices. */
	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back the enabled flag so uninstall stays a no-op. */
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	/* Handler is live; now unmask the sources we care about. */
	intel_irq_postinstall(dev_priv);

	return ret;
}
49364136
49374137 /**
....@@ -4943,7 +4143,23 @@
49434143 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	/* Mask everything before releasing the line. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	/* Flush any hotplug work that may still reference the irq state. */
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
....@@ -4957,9 +4173,9 @@
49574173 */
49584174 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
49594175 {
4960
- dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4176
+ intel_irq_reset(dev_priv);
49614177 dev_priv->runtime_pm.irqs_enabled = false;
4962
- synchronize_irq(dev_priv->drm.irq);
4178
+ intel_synchronize_irq(dev_priv);
49634179 }
49644180
49654181 /**
....@@ -4972,6 +4188,20 @@
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Flag first: the postinstall hooks check irqs_enabled. */
	dev_priv->runtime_pm.irqs_enabled = true;
	/* Reset then re-enable sources, mirroring first-time install. */
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
4194
+
4195
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * Interrupts are only torn down at unload/suspend and via the
	 * runtime-PM disable path above, all of which clear this flag,
	 * so it is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}
4203
+
4204
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	/* Block until any in-flight execution of our irq handler finishes. */
	synchronize_irq(i915->drm.pdev->irq);
}