forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,17 +21,44 @@
  * IN THE SOFTWARE.
  */
 
-#include "i915_drv.h"
-#include "intel_drv.h"
-#include "i915_vgpu.h"
-
-#include <asm/iosf_mbi.h>
 #include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_pm.h"
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 50
 #define GT_FIFO_TIMEOUT_MS 10
 
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
+#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
+
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	spin_lock_init(&mmio_debug->lock);
+	mmio_debug->unclaimed_mmio_check = 1;
+}
+
+static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	lockdep_assert_held(&mmio_debug->lock);
+
+	/* Save and disable mmio debugging for the user bypass */
+	if (!mmio_debug->suspend_count++) {
+		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
+		mmio_debug->unclaimed_mmio_check = 0;
+	}
+}
+
+static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	lockdep_assert_held(&mmio_debug->lock);
+
+	if (!--mmio_debug->suspend_count)
+		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+}
 
 static const char * const forcewake_domain_names[] = {
 	"render",
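
The mmio_debug_suspend()/mmio_debug_resume() pair introduced above is a refcounted save/restore: only the outermost suspend stashes and clears unclaimed_mmio_check, and only the matching final resume restores it, so nested user bypasses compose correctly. A minimal standalone sketch of that pattern, with illustrative types rather than the i915 definitions:

struct debug_state {
	unsigned int suspend_count;	/* nesting depth of suspend requests */
	int saved_check;		/* stashed by the outermost suspend */
	int check_enabled;		/* the setting being gated */
};

static void debug_suspend(struct debug_state *d)
{
	if (!d->suspend_count++) {	/* first suspend saves and disables */
		d->saved_check = d->check_enabled;
		d->check_enabled = 0;
	}
}

static void debug_resume(struct debug_state *d)
{
	if (!--d->suspend_count)	/* last resume restores */
		d->check_enabled = d->saved_check;
}
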
@@ -58,21 +85,27 @@
 	return "unknown";
 }
 
+#define fw_ack(d) readl((d)->reg_ack)
+#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+
 static inline void
-fw_domain_reset(struct drm_i915_private *i915,
-		const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
 {
 	/*
 	 * We don't really know if the powerwell for the forcewake domain we are
 	 * trying to reset here does exist at this point (engines could be fused
 	 * off in ICL+), so no waiting for acks
 	 */
-	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
+	/* WaRsClearFWBitsAtReset:bdw,skl */
+	fw_clear(d, 0xffff);
 }
 
 static inline void
 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
 {
+	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
+	d->uncore->fw_domains_timer |= d->mask;
 	d->wake_count++;
 	hrtimer_start_range_ns(&d->timer,
 			       NSEC_PER_MSEC,
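
The new fw_ack()/fw_set()/fw_clear() helpers rely on the masked-bit convention of the forcewake registers: the upper 16 bits of a write select which of the lower 16 bits take effect, so no read-modify-write is needed. Ignoring i915's type-checking wrappers, the masking macros reduce to this sketch:

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* write-enable and set bit(s) a */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* write-enable and clear bit(s) a */

/* e.g. fw_clear(d, 0xffff) in fw_domain_reset() writes 0xffff0000:
 * all 16 low bits are write-enabled and forced to zero in one write. */
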
@@ -81,38 +114,36 @@
 }
 
 static inline int
-__wait_for_ack(const struct drm_i915_private *i915,
-	       const struct intel_uncore_forcewake_domain *d,
+__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
 	       const u32 ack,
 	       const u32 value)
 {
-	return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+	return wait_for_atomic((fw_ack(d) & ack) == value,
 			       FORCEWAKE_ACK_TIMEOUT_MS);
 }
 
 static inline int
-wait_ack_clear(const struct drm_i915_private *i915,
-	       const struct intel_uncore_forcewake_domain *d,
+wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
 	       const u32 ack)
 {
-	return __wait_for_ack(i915, d, ack, 0);
+	return __wait_for_ack(d, ack, 0);
 }
 
 static inline int
-wait_ack_set(const struct drm_i915_private *i915,
-	     const struct intel_uncore_forcewake_domain *d,
+wait_ack_set(const struct intel_uncore_forcewake_domain *d,
 	     const u32 ack)
 {
-	return __wait_for_ack(i915, d, ack, ack);
+	return __wait_for_ack(d, ack, ack);
 }
 
 static inline void
-fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
-			 const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
 {
-	if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
+	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
 		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
 			  intel_uncore_forcewake_domain_to_str(d->id));
+		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
+	}
 }
 
 enum ack_type {
@@ -121,8 +152,7 @@
 };
 
 static int
-fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
-				 const struct intel_uncore_forcewake_domain *d,
+fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
 				 const enum ack_type type)
 {
 	const u32 ack_bit = FORCEWAKE_KERNEL;
@@ -146,129 +176,124 @@
 
 	pass = 1;
 	do {
-		wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
 
-		__raw_i915_write32(i915, d->reg_set,
-				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
 		/* Give gt some time to relax before the polling frenzy */
 		udelay(10 * pass);
-		wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
 
-		ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+		ack_detected = (fw_ack(d) & ack_bit) == value;
 
-		__raw_i915_write32(i915, d->reg_set,
-				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
 	} while (!ack_detected && pass++ < 10);
 
 	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
 			 intel_uncore_forcewake_domain_to_str(d->id),
 			 type == ACK_SET ? "set" : "clear",
-			 __raw_i915_read32(i915, d->reg_ack),
+			 fw_ack(d),
 			 pass);
 
 	return ack_detected ? 0 : -ETIMEDOUT;
 }
 
 static inline void
-fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
-				  const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
 {
-	if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
 		return;
 
-	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
-		fw_domain_wait_ack_clear(i915, d);
+	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
+		fw_domain_wait_ack_clear(d);
 }
 
 static inline void
-fw_domain_get(struct drm_i915_private *i915,
-	      const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
 {
-	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
+	fw_set(d, FORCEWAKE_KERNEL);
 }
 
 static inline void
-fw_domain_wait_ack_set(const struct drm_i915_private *i915,
-		       const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
 {
-	if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
+	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
 		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
 			  intel_uncore_forcewake_domain_to_str(d->id));
+		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
+	}
 }
 
 static inline void
-fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
-				const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
 {
-	if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
 		return;
 
-	if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
-		fw_domain_wait_ack_set(i915, d);
+	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
+		fw_domain_wait_ack_set(d);
 }
 
 static inline void
-fw_domain_put(const struct drm_i915_private *i915,
-	      const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
 {
-	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
+	fw_clear(d, FORCEWAKE_KERNEL);
 }
 
 static void
-fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
-		fw_domain_wait_ack_clear(i915, d);
-		fw_domain_get(i915, d);
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+		fw_domain_wait_ack_clear(d);
+		fw_domain_get(d);
 	}
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_wait_ack_set(i915, d);
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+		fw_domain_wait_ack_set(d);
 
-	i915->uncore.fw_domains_active |= fw_domains;
+	uncore->fw_domains_active |= fw_domains;
 }
 
 static void
-fw_domains_get_with_fallback(struct drm_i915_private *i915,
+fw_domains_get_with_fallback(struct intel_uncore *uncore,
 			     enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
-		fw_domain_wait_ack_clear_fallback(i915, d);
-		fw_domain_get(i915, d);
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+		fw_domain_wait_ack_clear_fallback(d);
+		fw_domain_get(d);
 	}
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_wait_ack_set_fallback(i915, d);
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+		fw_domain_wait_ack_set_fallback(d);
 
-	i915->uncore.fw_domains_active |= fw_domains;
+	uncore->fw_domains_active |= fw_domains;
 }
 
 static void
-fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
 	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_put(i915, d);
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+		fw_domain_put(d);
 
-	i915->uncore.fw_domains_active &= ~fw_domains;
+	uncore->fw_domains_active &= ~fw_domains;
 }
 
 static void
-fw_domains_reset(struct drm_i915_private *i915,
+fw_domains_reset(struct intel_uncore *uncore,
 		 enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *d;
@@ -277,59 +302,71 @@
 	if (!fw_domains)
 		return;
 
-	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
-		fw_domain_reset(i915, d);
+	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+		fw_domain_reset(d);
 }
 
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static inline u32 gt_thread_status(struct intel_uncore *uncore)
 {
-	/* w/a for a sporadic read returning 0 by waiting for the GT
+	u32 val;
+
+	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
+	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+	return val;
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
+{
+	/*
+	 * w/a for a sporadic read returning 0 by waiting for the GT
 	 * thread to wake up.
 	 */
-	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
-				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
-		DRM_ERROR("GT thread status wait timed out\n");
+	drm_WARN_ONCE(&uncore->i915->drm,
+		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
+		      "GT thread status wait timed out\n");
 }
 
-static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
 					      enum forcewake_domains fw_domains)
 {
-	fw_domains_get(dev_priv, fw_domains);
+	fw_domains_get(uncore, fw_domains);
 
 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
-	__gen6_gt_wait_for_thread_c0(dev_priv);
+	__gen6_gt_wait_for_thread_c0(uncore);
 }
 
-static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+static inline u32 fifo_free_entries(struct intel_uncore *uncore)
 {
-	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
 
 	return count & GT_FIFO_FREE_ENTRIES_MASK;
 }
 
-static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
 {
 	u32 n;
 
 	/* On VLV, FIFO will be shared by both SW and HW.
 	 * So, we need to read the FREE_ENTRIES everytime */
-	if (IS_VALLEYVIEW(dev_priv))
-		n = fifo_free_entries(dev_priv);
+	if (IS_VALLEYVIEW(uncore->i915))
+		n = fifo_free_entries(uncore);
 	else
-		n = dev_priv->uncore.fifo_count;
+		n = uncore->fifo_count;
 
 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
-		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
+		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
 				    GT_FIFO_NUM_RESERVED_ENTRIES,
 				    GT_FIFO_TIMEOUT_MS)) {
-			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
+			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
 			return;
 		}
 	}
 
-	dev_priv->uncore.fifo_count = n - 1;
+	uncore->fifo_count = n - 1;
 }
 
 static enum hrtimer_restart
@@ -337,30 +374,30 @@
 {
 	struct intel_uncore_forcewake_domain *domain =
 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
-	struct drm_i915_private *dev_priv =
-		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
+	struct intel_uncore *uncore = domain->uncore;
 	unsigned long irqflags;
 
-	assert_rpm_device_not_suspended(dev_priv);
+	assert_rpm_device_not_suspended(uncore->rpm);
 
 	if (xchg(&domain->active, false))
 		return HRTIMER_RESTART;
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (WARN_ON(domain->wake_count == 0))
-		domain->wake_count++;
+	spin_lock_irqsave(&uncore->lock, irqflags);
 
+	uncore->fw_domains_timer &= ~domain->mask;
+
+	GEM_BUG_ON(!domain->wake_count);
 	if (--domain->wake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+		uncore->funcs.force_wake_put(uncore, domain->mask);
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 
 	return HRTIMER_NORESTART;
 }
 
 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
 static unsigned int
-intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
+intel_uncore_forcewake_reset(struct intel_uncore *uncore)
 {
 	unsigned long irqflags;
 	struct intel_uncore_forcewake_domain *domain;
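
For context on intel_uncore_fw_release_timer() above: the last explicit put does not drop the power well synchronously. fw_domain_arm_timer() leaves one reference owed to the timer, and any register access in the ~1ms window flips domain->active, so the callback re-arms instead of releasing. A simplified, self-contained sketch of that decision, with illustrative types and a stand-in hw_forcewake_put() rather than driver functions:

enum restart_mode { TIMER_NORESTART, TIMER_RESTART };

struct fw_domain {
	_Bool active;			/* touched since the timer was armed? */
	unsigned int wake_count;	/* references, incl. the timer's own */
};

static void hw_forcewake_put(struct fw_domain *d) { (void)d; /* stand-in */ }

static enum restart_mode release_cb(struct fw_domain *d)
{
	/* domain was used since the timer was armed: keep it awake */
	if (__atomic_exchange_n(&d->active, 0, __ATOMIC_SEQ_CST))
		return TIMER_RESTART;

	/* otherwise drop the reference held on behalf of the timer */
	if (--d->wake_count == 0)
		hw_forcewake_put(d);

	return TIMER_NORESTART;
}
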
@@ -378,7 +415,7 @@
 
 	active_domains = 0;
 
-	for_each_fw_domain(domain, dev_priv, tmp) {
+	for_each_fw_domain(domain, uncore, tmp) {
 		smp_store_mb(domain->active, false);
 		if (hrtimer_cancel(&domain->timer) == 0)
 			continue;
@@ -386,9 +423,9 @@
 		intel_uncore_fw_release_timer(&domain->timer);
 	}
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&uncore->lock, irqflags);
 
-	for_each_fw_domain(domain, dev_priv, tmp) {
+	for_each_fw_domain(domain, uncore, tmp) {
 		if (hrtimer_active(&domain->timer))
 			active_domains |= domain->mask;
 	}
@@ -397,206 +434,164 @@
 			break;
 
 		if (--retry_count == 0) {
-			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
 			break;
 		}
 
-		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+		spin_unlock_irqrestore(&uncore->lock, irqflags);
 		cond_resched();
 	}
 
-	WARN_ON(active_domains);
+	drm_WARN_ON(&uncore->i915->drm, active_domains);
 
-	fw = dev_priv->uncore.fw_domains_active;
+	fw = uncore->fw_domains_active;
 	if (fw)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+		uncore->funcs.force_wake_put(uncore, fw);
 
-	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-	assert_forcewakes_inactive(dev_priv);
+	fw_domains_reset(uncore, uncore->fw_domains);
+	assert_forcewakes_inactive(uncore);
 
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 
 	return fw; /* track the lost user forcewake domains */
 }
 
-static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
-{
-	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
-	const unsigned int sets[4] = { 1, 1, 2, 2 };
-	const u32 cap = dev_priv->edram_cap;
-
-	return EDRAM_NUM_BANKS(cap) *
-		ways[EDRAM_WAYS_IDX(cap)] *
-		sets[EDRAM_SETS_IDX(cap)] *
-		1024 * 1024;
-}
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
-{
-	if (!HAS_EDRAM(dev_priv))
-		return 0;
-
-	/* The needed capability bits for size calculation
-	 * are not there with pre gen9 so return 128MB always.
-	 */
-	if (INTEL_GEN(dev_priv) < 9)
-		return 128 * 1024 * 1024;
-
-	return gen9_edram_size(dev_priv);
-}
-
-static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
-{
-	if (IS_HASWELL(dev_priv) ||
-	    IS_BROADWELL(dev_priv) ||
-	    INTEL_GEN(dev_priv) >= 9) {
-		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
-							HSW_EDRAM_CAP);
-
-		/* NB: We can't write IDICR yet because we do not have gt funcs
-		 * set up */
-	} else {
-		dev_priv->edram_cap = 0;
-	}
-
-	if (HAS_EDRAM(dev_priv))
-		DRM_INFO("Found %lluMB of eDRAM\n",
-			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
-}
-
 static bool
-fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	u32 dbg;
 
-	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
 		return false;
 
-	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
 	return true;
 }
 
 static bool
-vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	u32 cer;
 
-	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+	cer = __raw_uncore_read32(uncore, CLAIM_ER);
 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
 		return false;
 
-	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
 
 	return true;
 }
 
 static bool
-gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
+gen6_check_for_fifo_debug(struct intel_uncore *uncore)
 {
 	u32 fifodbg;
 
-	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
 
 	if (unlikely(fifodbg)) {
-		DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
-		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
+		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
+		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
 	}
 
 	return fifodbg;
 }
 
 static bool
-check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	bool ret = false;
 
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
-		ret |= fpga_check_for_unclaimed_mmio(dev_priv);
+	lockdep_assert_held(&uncore->debug->lock);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret |= vlv_check_for_unclaimed_mmio(dev_priv);
+	if (uncore->debug->suspend_count)
+		return false;
 
-	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
-		ret |= gen6_check_for_fifo_debug(dev_priv);
+	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
+		ret |= fpga_check_for_unclaimed_mmio(uncore);
+
+	if (intel_uncore_has_dbg_unclaimed(uncore))
+		ret |= vlv_check_for_unclaimed_mmio(uncore);
+
+	if (intel_uncore_has_fifo(uncore))
+		ret |= gen6_check_for_fifo_debug(uncore);
 
 	return ret;
 }
 
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
-					  unsigned int restore_forcewake)
+static void forcewake_early_sanitize(struct intel_uncore *uncore,
+				     unsigned int restore_forcewake)
 {
-	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(dev_priv))
-		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(dev_priv)) {
-		__raw_i915_write32(dev_priv, GTFIFOCTL,
-				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
-				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
-				   GT_FIFO_CTL_RC6_POLICY_STALL);
+	if (IS_CHERRYVIEW(uncore->i915)) {
+		__raw_uncore_write32(uncore, GTFIFOCTL,
+				     __raw_uncore_read32(uncore, GTFIFOCTL) |
+				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+				     GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(dev_priv);
+	intel_uncore_forcewake_reset(uncore);
 	if (restore_forcewake) {
-		spin_lock_irq(&dev_priv->uncore.lock);
-		dev_priv->uncore.funcs.force_wake_get(dev_priv,
-						      restore_forcewake);
+		spin_lock_irq(&uncore->lock);
+		uncore->funcs.force_wake_get(uncore, restore_forcewake);
 
-		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
-			dev_priv->uncore.fifo_count =
-				fifo_free_entries(dev_priv);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		if (intel_uncore_has_fifo(uncore))
+			uncore->fifo_count = fifo_free_entries(uncore);
+		spin_unlock_irq(&uncore->lock);
 	}
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
 {
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	dev_priv->uncore.fw_domains_saved =
-		intel_uncore_forcewake_reset(dev_priv);
+		&uncore->pmic_bus_access_nb);
+	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
 {
 	unsigned int restore_forcewake;
 
-	restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
-	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+	if (intel_uncore_unclaimed_mmio(uncore))
+		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	i915_check_and_clear_faults(dev_priv);
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
+	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+	forcewake_early_sanitize(uncore, restore_forcewake);
+
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
 {
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
-{
-	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_sanitize_gt_powersave(dev_priv);
-}
-
-static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
 {
 	struct intel_uncore_forcewake_domain *domain;
 	unsigned int tmp;
 
-	fw_domains &= dev_priv->uncore.fw_domains;
+	fw_domains &= uncore->fw_domains;
 
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
 		if (domain->wake_count++) {
 			fw_domains &= ~domain->mask;
 			domain->active = true;
@@ -604,12 +599,12 @@
 	}
 
 	if (fw_domains)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+		uncore->funcs.force_wake_get(uncore, fw_domains);
 }
 
 /**
  * intel_uncore_forcewake_get - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to get reference on
  *
  * This function can be used get GT's forcewake domain references.
@@ -620,183 +615,239 @@
  * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
  * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
  */
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
 {
 	unsigned long irqflags;
 
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	if (!uncore->funcs.force_wake_get)
 		return;
 
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(uncore->rpm);
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	__intel_uncore_forcewake_get(dev_priv, fw_domains);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&uncore->lock, irqflags);
+	__intel_uncore_forcewake_get(uncore, fw_domains);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
 }
 
 /**
  * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  *
  * This function is a wrapper around intel_uncore_forcewake_get() to acquire
  * the GT powerwell and in the process disable our debugging for the
  * duration of userspace's bypass.
 */
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
 {
-	spin_lock_irq(&dev_priv->uncore.lock);
-	if (!dev_priv->uncore.user_forcewake.count++) {
-		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-
-		/* Save and disable mmio debugging for the user bypass */
-		dev_priv->uncore.user_forcewake.saved_mmio_check =
-			dev_priv->uncore.unclaimed_mmio_check;
-		dev_priv->uncore.user_forcewake.saved_mmio_debug =
-			i915_modparams.mmio_debug;
-
-		dev_priv->uncore.unclaimed_mmio_check = 0;
-		i915_modparams.mmio_debug = 0;
+	spin_lock_irq(&uncore->lock);
+	if (!uncore->user_forcewake_count++) {
+		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
+		spin_lock(&uncore->debug->lock);
+		mmio_debug_suspend(uncore->debug);
+		spin_unlock(&uncore->debug->lock);
 	}
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	spin_unlock_irq(&uncore->lock);
 }
 
 /**
  * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  *
  * This function complements intel_uncore_forcewake_user_get() and releases
  * the GT powerwell taken on behalf of the userspace bypass.
 */
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 {
-	spin_lock_irq(&dev_priv->uncore.lock);
-	if (!--dev_priv->uncore.user_forcewake.count) {
-		if (intel_uncore_unclaimed_mmio(dev_priv))
-			dev_info(dev_priv->drm.dev,
+	spin_lock_irq(&uncore->lock);
+	if (!--uncore->user_forcewake_count) {
+		spin_lock(&uncore->debug->lock);
+		mmio_debug_resume(uncore->debug);
+
+		if (check_for_unclaimed_mmio(uncore))
+			drm_info(&uncore->i915->drm,
				 "Invalid mmio detected during user access\n");
+		spin_unlock(&uncore->debug->lock);
 
-		dev_priv->uncore.unclaimed_mmio_check =
-			dev_priv->uncore.user_forcewake.saved_mmio_check;
-		i915_modparams.mmio_debug =
-			dev_priv->uncore.user_forcewake.saved_mmio_debug;
-
-		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
 	}
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	spin_unlock_irq(&uncore->lock);
 }
 
 /**
  * intel_uncore_forcewake_get__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to get reference on
  *
  * See intel_uncore_forcewake_get(). This variant places the onus
  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
 {
-	lockdep_assert_held(&dev_priv->uncore.lock);
+	lockdep_assert_held(&uncore->lock);
 
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	if (!uncore->funcs.force_wake_get)
 		return;
 
-	__intel_uncore_forcewake_get(dev_priv, fw_domains);
+	__intel_uncore_forcewake_get(uncore, fw_domains);
 }
 
-static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
-					 enum forcewake_domains fw_domains)
+static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
+					 enum forcewake_domains fw_domains,
+					 bool delayed)
 {
 	struct intel_uncore_forcewake_domain *domain;
 	unsigned int tmp;
 
-	fw_domains &= dev_priv->uncore.fw_domains;
+	fw_domains &= uncore->fw_domains;
 
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
-		if (WARN_ON(domain->wake_count == 0))
-			continue;
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+		GEM_BUG_ON(!domain->wake_count);
 
 		if (--domain->wake_count) {
 			domain->active = true;
 			continue;
 		}
 
-		fw_domain_arm_timer(domain);
+		if (delayed &&
+		    !(domain->uncore->fw_domains_timer & domain->mask))
+			fw_domain_arm_timer(domain);
+		else
+			uncore->funcs.force_wake_put(uncore, domain->mask);
 	}
 }
 
 /**
  * intel_uncore_forcewake_put - release a forcewake domain reference
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to put references
  *
  * This function drops the device-level forcewakes for specified
  * domains obtained by intel_uncore_forcewake_get().
 */
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
 {
 	unsigned long irqflags;
 
-	if (!dev_priv->uncore.funcs.force_wake_put)
+	if (!uncore->funcs.force_wake_put)
 		return;
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	__intel_uncore_forcewake_put(dev_priv, fw_domains);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	spin_lock_irqsave(&uncore->lock, irqflags);
+	__intel_uncore_forcewake_put(uncore, fw_domains, false);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+					enum forcewake_domains fw_domains)
+{
+	unsigned long irqflags;
+
+	if (!uncore->funcs.force_wake_put)
+		return;
+
+	spin_lock_irqsave(&uncore->lock, irqflags);
+	__intel_uncore_forcewake_put(uncore, fw_domains, true);
+	spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+/**
+ * intel_uncore_forcewake_flush - flush the delayed release
+ * @uncore: the intel_uncore structure
+ * @fw_domains: forcewake domains to flush
+ */
+void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
+				  enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *domain;
+	unsigned int tmp;
+
+	if (!uncore->funcs.force_wake_put)
+		return;
+
+	fw_domains &= uncore->fw_domains;
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+		WRITE_ONCE(domain->active, false);
+		if (hrtimer_cancel(&domain->timer))
+			intel_uncore_fw_release_timer(&domain->timer);
+	}
 }
 
 /**
  * intel_uncore_forcewake_put__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
  * @fw_domains: forcewake domains to get reference on
  *
  * See intel_uncore_forcewake_put(). This variant places the onus
  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
 {
-	lockdep_assert_held(&dev_priv->uncore.lock);
+	lockdep_assert_held(&uncore->lock);
 
-	if (!dev_priv->uncore.funcs.force_wake_put)
+	if (!uncore->funcs.force_wake_put)
 		return;
 
-	__intel_uncore_forcewake_put(dev_priv, fw_domains);
+	__intel_uncore_forcewake_put(uncore, fw_domains, false);
 }
 
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct intel_uncore *uncore)
 {
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	if (!uncore->funcs.force_wake_get)
 		return;
 
-	WARN(dev_priv->uncore.fw_domains_active,
-	     "Expected all fw_domains to be inactive, but %08x are still on\n",
-	     dev_priv->uncore.fw_domains_active);
+	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
 }
 
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
 {
-	if (!dev_priv->uncore.funcs.force_wake_get)
+	struct intel_uncore_forcewake_domain *domain;
+	unsigned int tmp;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
 		return;
 
-	assert_rpm_wakelock_held(dev_priv);
+	if (!uncore->funcs.force_wake_get)
+		return;
 
-	fw_domains &= dev_priv->uncore.fw_domains;
-	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
-	     "Expected %08x fw_domains to be active, but %08x are off\n",
-	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
+	spin_lock_irq(&uncore->lock);
+
+	assert_rpm_wakelock_held(uncore->rpm);
+
+	fw_domains &= uncore->fw_domains;
+	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);
+
+	/*
+	 * Check that the caller has an explicit wakeref and we don't mistake
+	 * it for the auto wakeref.
+	 */
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+		unsigned int actual = READ_ONCE(domain->wake_count);
+		unsigned int expect = 1;
+
+		if (uncore->fw_domains_timer & domain->mask)
+			expect++; /* pending automatic release */
+
+		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
+			break;
+	}
+
+	spin_unlock_irq(&uncore->lock);
 }
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
 
-#define GEN11_NEEDS_FORCE_WAKE(reg) \
-	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-
-#define __gen6_reg_read_fw_domains(offset) \
+#define __gen6_reg_read_fw_domains(uncore, offset) \
 ({ \
 	enum forcewake_domains __fwd; \
 	if (NEEDS_FORCE_WAKE(offset)) \
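
Note the behavioural split in __intel_uncore_forcewake_put() in the hunk above: a plain put now releases the domain immediately once its count hits zero, and only the new intel_uncore_forcewake_put_delayed() routes the final release through the hrtimer, with intel_uncore_forcewake_flush() cancelling pending timers and forcing the release. The caller-visible difference, sketched with the functions added in this hunk:

/* immediate: the power well may be dropped before this returns */
intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);

/* deferred: the domain stays awake ~1ms in case it is reused soon;
 * flush cancels the timers and settles the release right away */
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_RENDER);
intel_uncore_forcewake_flush(uncore, FORCEWAKE_RENDER);
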
@@ -836,13 +887,13 @@
 })
 
 static enum forcewake_domains
-find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+find_fw_domain(struct intel_uncore *uncore, u32 offset)
 {
 	const struct intel_forcewake_range *entry;
 
 	entry = BSEARCH(offset,
-			dev_priv->uncore.fw_domains_table,
-			dev_priv->uncore.fw_domains_table_entries,
+			uncore->fw_domains_table,
+			uncore->fw_domains_table_entries,
			fw_range_cmp);
 
 	if (!entry)
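
find_fw_domain() above maps an MMIO offset to its forcewake domain by binary-searching the per-platform GEN_FW_RANGE table, which is why every table below must stay sorted by offset. A freestanding sketch of the lookup, with a simplified comparator and kernel-style u32 assumed:

struct fw_range { u32 start, end, domains; };

static u32 lookup_domains(const struct fw_range *table, size_t n, u32 offset)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {			/* classic binary search */
		size_t mid = lo + (hi - lo) / 2;

		if (offset < table[mid].start)
			hi = mid;
		else if (offset > table[mid].end)
			lo = mid + 1;
		else
			return table[mid].domains;
	}

	return 0;				/* no forcewake needed */
}
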
@@ -854,22 +905,17 @@
	 * translate it here to the list of available domains.
	 */
 	if (entry->domains == FORCEWAKE_ALL)
-		return dev_priv->uncore.fw_domains;
+		return uncore->fw_domains;
 
-	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
-	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
-	     entry->domains & ~dev_priv->uncore.fw_domains, offset);
+	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);
 
 	return entry->domains;
 }
 
 #define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }
-
-#define HAS_FWTABLE(dev_priv) \
-	(INTEL_GEN(dev_priv) >= 9 || \
-	 IS_CHERRYVIEW(dev_priv) || \
-	 IS_VALLEYVIEW(dev_priv))
 
 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
@@ -882,21 +928,19 @@
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
 };
 
-#define __fwtable_reg_read_fw_domains(offset) \
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
 ({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
 })
 
-#define __gen11_fwtable_reg_read_fw_domains(offset) \
-({ \
-	enum forcewake_domains __fwd = 0; \
-	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
-		__fwd = find_fw_domain(dev_priv, offset); \
-	__fwd; \
-})
+#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
+	find_fw_domain(uncore, offset)
+
+#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
+	find_fw_domain(uncore, offset)
 
 /* *Must* be sorted by offset! See intel_shadow_table_check(). */
 static const i915_reg_t gen8_shadowed_regs[] = {
@@ -910,6 +954,20 @@
 };
 
 static const i915_reg_t gen11_shadowed_regs[] = {
+	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
+	GEN6_RPNSWREQ,				/* 0xA008 */
+	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
+	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
+	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
+	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
+	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
+	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
+	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
+	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
+	/* TODO: Other registers are not yet used */
+};
+
+static const i915_reg_t gen12_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
@@ -945,8 +1003,15 @@
 
 __is_genX_shadowed(8)
 __is_genX_shadowed(11)
+__is_genX_shadowed(12)
 
-#define __gen8_reg_write_fw_domains(offset) \
+static enum forcewake_domains
+gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+	return FORCEWAKE_RENDER;
+}
+
+#define __gen8_reg_write_fw_domains(uncore, offset) \
 ({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
@@ -976,19 +1041,29 @@
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
 };
 
-#define __fwtable_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
 ({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
 })
 
-#define __gen11_fwtable_reg_write_fw_domains(offset) \
+#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
 ({ \
	enum forcewake_domains __fwd = 0; \
-	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
-		__fwd = find_fw_domain(dev_priv, offset); \
+	const u32 __offset = (offset); \
+	if (!is_gen11_shadowed(__offset)) \
+		__fwd = find_fw_domain(uncore, __offset); \
+	__fwd; \
+})
+
+#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+	enum forcewake_domains __fwd = 0; \
+	const u32 __offset = (offset); \
+	if (!is_gen12_shadowed(__offset)) \
+		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
 })
 
@@ -1030,6 +1105,45 @@
 
 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
+	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
+	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8800, 0x8bff, 0),
+	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x9560, 0x95ff, 0),
+	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24000, 0x2407f, 0),
+	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
+	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
+	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
+};
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
@@ -1049,7 +1163,13 @@
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
-	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
@@ -1063,43 +1183,65 @@
 };
 
 static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
+ilk_dummy_write(struct intel_uncore *uncore)
 {
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
-	__raw_i915_write32(dev_priv, MI_MODE, 0);
+	__raw_uncore_write32(uncore, MI_MODE, 0);
 }
 
 static void
-__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
 {
-	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
-		 "Unclaimed %s register 0x%x\n",
-		 read ? "read from" : "write to",
-		 i915_mmio_reg_offset(reg)))
+	if (drm_WARN(&uncore->i915->drm,
+		     check_for_unclaimed_mmio(uncore) && !before,
+		     "Unclaimed %s register 0x%x\n",
+		     read ? "read from" : "write to",
+		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
-		i915_modparams.mmio_debug--;
+		uncore->i915->params.mmio_debug--;
 }
 
 static inline void
-unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
 {
-	if (likely(!i915_modparams.mmio_debug))
+	if (likely(!uncore->i915->params.mmio_debug))
		return;
 
-	__unclaimed_reg_debug(dev_priv, reg, read, before);
+	/* interrupts are disabled and re-enabled around uncore->lock usage */
+	lockdep_assert_held(&uncore->lock);
+
+	if (before)
+		spin_lock(&uncore->debug->lock);
+
+	__unclaimed_reg_debug(uncore, reg, read, before);
+
+	if (!before)
+		spin_unlock(&uncore->debug->lock);
 }
+
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
+	u##x val = __raw_uncore_read##x(uncore, reg); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val; \
+}
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
 
 #define GEN2_READ_HEADER(x) \
	u##x val = 0; \
-	assert_rpm_wakelock_held(dev_priv);
+	assert_rpm_wakelock_held(uncore->rpm);
 
 #define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1107,18 +1249,18 @@
	return val
 
 #define __gen2_read(x) \
 static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
-	val = __raw_i915_read##x(dev_priv, reg); \
+	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
 }
 
 #define __gen5_read(x) \
 static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
-	ilk_dummy_write(dev_priv); \
-	val = __raw_i915_read##x(dev_priv, reg); \
+	ilk_dummy_write(uncore); \
+	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
 }
 
@@ -1141,98 +1283,95 @@
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
-	assert_rpm_wakelock_held(dev_priv); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-	unclaimed_reg_debug(dev_priv, reg, true, true)
+	assert_rpm_wakelock_held(uncore->rpm); \
+	spin_lock_irqsave(&uncore->lock, irqflags); \
+	unclaimed_reg_debug(uncore, reg, true, true)
 
 #define GEN6_READ_FOOTER \
-	unclaimed_reg_debug(dev_priv, reg, true, false); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	unclaimed_reg_debug(uncore, reg, true, false); \
+	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
 
-static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
 {
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;
 
-	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
 
-	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
+	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);
 
-	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+	uncore->funcs.force_wake_get(uncore, fw_domains);
 }
 
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
 {
-	if (WARN_ON(!fw_domains))
-		return;
+	GEM_BUG_ON(!fw_domains);
 
	/* Turn on all requested but inactive supported forcewake domains. */
-	fw_domains &= dev_priv->uncore.fw_domains;
-	fw_domains &= ~dev_priv->uncore.fw_domains_active;
+	fw_domains &= uncore->fw_domains;
+	fw_domains &= ~uncore->fw_domains_active;
 
	if (fw_domains)
-		___force_wake_auto(dev_priv, fw_domains);
+		___force_wake_auto(uncore, fw_domains);
 }
 
 #define __gen_read(func, x) \
 static u##x \
-func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
-	fw_engine = __##func##_reg_read_fw_domains(offset); \
+	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
-	val = __raw_i915_read##x(dev_priv, reg); \
+		__force_wake_auto(uncore, fw_engine); \
+	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
 }
-#define __gen6_read(x) __gen_read(gen6, x)
-#define __fwtable_read(x) __gen_read(fwtable, x)
-#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
 
-__gen11_fwtable_read(8)
-__gen11_fwtable_read(16)
-__gen11_fwtable_read(32)
-__gen11_fwtable_read(64)
-__fwtable_read(8)
-__fwtable_read(16)
-__fwtable_read(32)
-__fwtable_read(64)
-__gen6_read(8)
-__gen6_read(16)
-__gen6_read(32)
-__gen6_read(64)
+#define __gen_reg_read_funcs(func) \
+static enum forcewake_domains \
+func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_read(func, 8) \
+__gen_read(func, 16) \
+__gen_read(func, 32) \
+__gen_read(func, 64)
 
-#undef __gen11_fwtable_read
-#undef __fwtable_read
-#undef __gen6_read
+__gen_reg_read_funcs(gen12_fwtable);
+__gen_reg_read_funcs(gen11_fwtable);
+__gen_reg_read_funcs(fwtable);
+__gen_reg_read_funcs(gen6);
+
+#undef __gen_reg_read_funcs
 #undef GEN6_READ_FOOTER
 #undef GEN6_READ_HEADER
 
 #define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	assert_rpm_wakelock_held(dev_priv); \
+	assert_rpm_wakelock_held(uncore->rpm); \
 
 #define GEN2_WRITE_FOOTER
 
 #define __gen2_write(x) \
 static void \
-gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
-	__raw_i915_write##x(dev_priv, reg, val); \
+	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
 }
 
 #define __gen5_write(x) \
 static void \
-gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
-	ilk_dummy_write(dev_priv); \
-	__raw_i915_write##x(dev_priv, reg, val); \
+	ilk_dummy_write(uncore); \
+	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
 }
 
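
The __gen_reg_read_funcs() macro above (and __gen_reg_write_funcs() in the next hunk) replaces the old per-flavour define/undef lists with a token-pasting idiom: one invocation stamps out the 8/16/32(/64)-bit accessors plus a domain-lookup helper for a given table flavour. A reduced sketch of the idiom, outside the driver and with an assumed do_read() raw accessor:

static unsigned int do_read(unsigned int off);	/* assumed raw accessor */

#define DEFINE_READERS(func) \
static unsigned int func##_read8(unsigned int off)  { return do_read(off) & 0xff; } \
static unsigned int func##_read32(unsigned int off) { return do_read(off); }

DEFINE_READERS(fwtable)	/* defines fwtable_read8() and fwtable_read32() */
DEFINE_READERS(gen6)	/* defines gen6_read8() and gen6_read32() */
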
@@ -1253,95 +1392,119 @@
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	assert_rpm_wakelock_held(dev_priv); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-	unclaimed_reg_debug(dev_priv, reg, false, true)
+	assert_rpm_wakelock_held(uncore->rpm); \
+	spin_lock_irqsave(&uncore->lock, irqflags); \
+	unclaimed_reg_debug(uncore, reg, false, true)
 
 #define GEN6_WRITE_FOOTER \
-	unclaimed_reg_debug(dev_priv, reg, false, false); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+	unclaimed_reg_debug(uncore, reg, false, false); \
+	spin_unlock_irqrestore(&uncore->lock, irqflags)
 
 #define __gen6_write(x) \
 static void \
-gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
-		__gen6_gt_wait_for_fifo(dev_priv); \
-	__raw_i915_write##x(dev_priv, reg, val); \
+		__gen6_gt_wait_for_fifo(uncore); \
+	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
 }
-
-#define __gen_write(func, x) \
-static void \
-func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
-	enum forcewake_domains fw_engine; \
-	GEN6_WRITE_HEADER; \
-	fw_engine = __##func##_reg_write_fw_domains(offset); \
-	if (fw_engine) \
-		__force_wake_auto(dev_priv, fw_engine); \
-	__raw_i915_write##x(dev_priv, reg, val); \
-	GEN6_WRITE_FOOTER; \
-}
-#define __gen8_write(x) __gen_write(gen8, x)
-#define __fwtable_write(x) __gen_write(fwtable, x)
-#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
-
-__gen11_fwtable_write(8)
-__gen11_fwtable_write(16)
-__gen11_fwtable_write(32)
-__fwtable_write(8)
-__fwtable_write(16)
-__fwtable_write(32)
-__gen8_write(8)
-__gen8_write(16)
-__gen8_write(32)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
 
-#undef __gen11_fwtable_write
-#undef __fwtable_write
-#undef __gen8_write
-#undef __gen6_write
+#define __gen_write(func, x) \
+static void \
+func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
+	enum forcewake_domains fw_engine; \
+	GEN6_WRITE_HEADER; \
+	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
+	if (fw_engine) \
+		__force_wake_auto(uncore, fw_engine); \
+	__raw_uncore_write##x(uncore, reg, val); \
+	GEN6_WRITE_FOOTER; \
+}
+
+#define __gen_reg_write_funcs(func) \
+static enum forcewake_domains \
+func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_write(func, 8) \
+__gen_write(func, 16) \
+__gen_write(func, 32)
+
+__gen_reg_write_funcs(gen12_fwtable);
+__gen_reg_write_funcs(gen11_fwtable);
+__gen_reg_write_funcs(fwtable);
+__gen_reg_write_funcs(gen8);
+
+#undef __gen_reg_write_funcs
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
 
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define __vgpu_write(x) \
+static void \
+vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	__raw_uncore_write##x(uncore, reg, val); \
+}
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+
+#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
-	(i915)->uncore.funcs.mmio_writew = x##_write16; \
-	(i915)->uncore.funcs.mmio_writel = x##_write32; \
+	(uncore)->funcs.mmio_writeb = x##_write8; \
+	(uncore)->funcs.mmio_writew = x##_write16; \
+	(uncore)->funcs.mmio_writel = x##_write32; \
 } while (0)
 
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
1464
+#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
13171465 do { \
1318
- (i915)->uncore.funcs.mmio_readb = x##_read8; \
1319
- (i915)->uncore.funcs.mmio_readw = x##_read16; \
1320
- (i915)->uncore.funcs.mmio_readl = x##_read32; \
1321
- (i915)->uncore.funcs.mmio_readq = x##_read64; \
1466
+ (uncore)->funcs.mmio_readb = x##_read8; \
1467
+ (uncore)->funcs.mmio_readw = x##_read16; \
1468
+ (uncore)->funcs.mmio_readl = x##_read32; \
1469
+ (uncore)->funcs.mmio_readq = x##_read64; \
13221470 } while (0)
13231471
1472
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
1473
+do { \
1474
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
1475
+ (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
1476
+} while (0)
13241477
1325
-static void fw_domain_init(struct drm_i915_private *dev_priv,
1326
- enum forcewake_domain_id domain_id,
1327
- i915_reg_t reg_set,
1328
- i915_reg_t reg_ack)
1478
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
1479
+do { \
1480
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
1481
+ (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
1482
+} while (0)
1483
+
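[Editor's note: sketch, not part of the patch.] The ASSIGN_*_MMIO_VFUNCS macros above combine the usual do { } while (0) multi-statement idiom with token pasting, so one macro argument names a whole family of functions that fill the vtable. A hypothetical standalone version; struct demo_ops and the gen2_read* helpers are invented:

#include <assert.h>

struct demo_ops {
	int (*readb)(unsigned int off);
	int (*readw)(unsigned int off);
};

static int gen2_read8(unsigned int off)  { (void)off; return 8; }
static int gen2_read16(unsigned int off) { (void)off; return 16; }

/* do { } while (0) keeps the macro safe inside an unbraced if/else */
#define ASSIGN_DEMO_READ_VFUNCS(ops, x) \
do { \
	(ops)->readb = x##_read8; \
	(ops)->readw = x##_read16; \
} while (0)

int main(void)
{
	struct demo_ops ops;

	ASSIGN_DEMO_READ_VFUNCS(&ops, gen2);	/* pastes gen2_read8/16 */
	assert(ops.readb(0) == 8 && ops.readw(0) == 16);
	return 0;
}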
1484
+static int __fw_domain_init(struct intel_uncore *uncore,
1485
+ enum forcewake_domain_id domain_id,
1486
+ i915_reg_t reg_set,
1487
+ i915_reg_t reg_ack)
13291488 {
13301489 struct intel_uncore_forcewake_domain *d;
13311490
1332
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1333
- return;
1491
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1492
+ GEM_BUG_ON(uncore->fw_domain[domain_id]);
13341493
1335
- d = &dev_priv->uncore.fw_domain[domain_id];
1494
+ if (i915_inject_probe_failure(uncore->i915))
1495
+ return -ENOMEM;
13361496
1337
- WARN_ON(d->wake_count);
1497
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
1498
+ if (!d)
1499
+ return -ENOMEM;
13381500
1339
- WARN_ON(!i915_mmio_reg_valid(reg_set));
1340
- WARN_ON(!i915_mmio_reg_valid(reg_ack));
1501
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
1502
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
13411503
1504
+ d->uncore = uncore;
13421505 d->wake_count = 0;
1343
- d->reg_set = reg_set;
1344
- d->reg_ack = reg_ack;
1506
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1507
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
13451508
13461509 d->id = domain_id;
13471510
....@@ -1355,104 +1518,111 @@
13551518 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
13561519 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
13571520
1358
-
13591521 d->mask = BIT(domain_id);
13601522
13611523 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
13621524 d->timer.function = intel_uncore_fw_release_timer;
13631525
1364
- dev_priv->uncore.fw_domains |= BIT(domain_id);
1526
+ uncore->fw_domains |= BIT(domain_id);
13651527
1366
- fw_domain_reset(dev_priv, d);
1528
+ fw_domain_reset(d);
1529
+
1530
+ uncore->fw_domain[domain_id] = d;
1531
+
1532
+ return 0;
13671533 }
13681534
1369
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
1535
+static void fw_domain_fini(struct intel_uncore *uncore,
13701536 enum forcewake_domain_id domain_id)
13711537 {
13721538 struct intel_uncore_forcewake_domain *d;
13731539
1374
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1540
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
1541
+
1542
+ d = fetch_and_zero(&uncore->fw_domain[domain_id]);
1543
+ if (!d)
13751544 return;
13761545
1377
- d = &dev_priv->uncore.fw_domain[domain_id];
1378
-
1379
- WARN_ON(d->wake_count);
1380
- WARN_ON(hrtimer_cancel(&d->timer));
1381
- memset(d, 0, sizeof(*d));
1382
-
1383
- dev_priv->uncore.fw_domains &= ~BIT(domain_id);
1546
+ uncore->fw_domains &= ~BIT(domain_id);
1547
+ drm_WARN_ON(&uncore->i915->drm, d->wake_count);
1548
+ drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
1549
+ kfree(d);
13841550 }
13851551
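[Editor's note: sketch, not part of the patch.] fetch_and_zero() in fw_domain_fini() above claims the table slot and clears it in one expression, which is what makes the !d early return safe against a double fini. As far as the i915 tree defines it, this is a plain non-atomic GNU statement-expression helper; a userspace sketch of the same shape:

#include <stdio.h>
#include <stdlib.h>

/* swap a slot with 0/NULL and hand back the old value */
#define fetch_and_zero(ptr) ({ \
	__typeof__(*(ptr)) __v = *(ptr); \
	*(ptr) = (__typeof__(*(ptr)))0; \
	__v; \
})

int main(void)
{
	int *table[1] = { malloc(sizeof(int)) };
	int *d = fetch_and_zero(&table[0]);

	printf("slot now %p\n", (void *)table[0]);	/* (nil) */
	if (d)		/* we claimed it, so we own the free */
		free(d);
	return 0;
}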
1386
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1552
+static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
13871553 {
1388
- if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1389
- return;
1554
+ struct intel_uncore_forcewake_domain *d;
1555
+ int tmp;
13901556
1391
- if (IS_GEN6(dev_priv)) {
1392
- dev_priv->uncore.fw_reset = 0;
1393
- dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1394
- dev_priv->uncore.fw_clear = 0;
1395
- } else {
1396
- /* WaRsClearFWBitsAtReset:bdw,skl */
1397
- dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1398
- dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1399
- dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1400
- }
1557
+ for_each_fw_domain(d, uncore, tmp)
1558
+ fw_domain_fini(uncore, d->id);
1559
+}
14011560
1402
- if (INTEL_GEN(dev_priv) >= 11) {
1561
+static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
1562
+{
1563
+ struct drm_i915_private *i915 = uncore->i915;
1564
+ int ret = 0;
1565
+
1566
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
1567
+
1568
+#define fw_domain_init(uncore__, id__, set__, ack__) \
1569
+ (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
1570
+
1571
+ if (INTEL_GEN(i915) >= 11) {
1572
+ /* we'll prune the domains of missing engines later */
1573
+ intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
14031574 int i;
14041575
1405
- dev_priv->uncore.funcs.force_wake_get =
1406
- fw_domains_get_with_fallback;
1407
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1408
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1576
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1577
+ uncore->funcs.force_wake_put = fw_domains_put;
1578
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
14091579 FORCEWAKE_RENDER_GEN9,
14101580 FORCEWAKE_ACK_RENDER_GEN9);
1411
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1581
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
14121582 FORCEWAKE_BLITTER_GEN9,
14131583 FORCEWAKE_ACK_BLITTER_GEN9);
1584
+
14141585 for (i = 0; i < I915_MAX_VCS; i++) {
1415
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
1586
+ if (!__HAS_ENGINE(emask, _VCS(i)))
14161587 continue;
14171588
1418
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
1589
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
14191590 FORCEWAKE_MEDIA_VDBOX_GEN11(i),
14201591 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
14211592 }
14221593 for (i = 0; i < I915_MAX_VECS; i++) {
1423
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
1594
+ if (!__HAS_ENGINE(emask, _VECS(i)))
14241595 continue;
14251596
1426
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
1597
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
14271598 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
14281599 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
14291600 }
1430
- } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
1431
- dev_priv->uncore.funcs.force_wake_get =
1432
- fw_domains_get_with_fallback;
1433
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1434
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1601
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
1602
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1603
+ uncore->funcs.force_wake_put = fw_domains_put;
1604
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
14351605 FORCEWAKE_RENDER_GEN9,
14361606 FORCEWAKE_ACK_RENDER_GEN9);
1437
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1607
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
14381608 FORCEWAKE_BLITTER_GEN9,
14391609 FORCEWAKE_ACK_BLITTER_GEN9);
1440
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1610
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
14411611 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1442
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1443
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1444
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1445
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1612
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
1613
+ uncore->funcs.force_wake_get = fw_domains_get;
1614
+ uncore->funcs.force_wake_put = fw_domains_put;
1615
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
14461616 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1447
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1617
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
14481618 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1449
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1450
- dev_priv->uncore.funcs.force_wake_get =
1619
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1620
+ uncore->funcs.force_wake_get =
14511621 fw_domains_get_with_thread_status;
1452
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1453
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1622
+ uncore->funcs.force_wake_put = fw_domains_put;
1623
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
14541624 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1455
- } else if (IS_IVYBRIDGE(dev_priv)) {
1625
+ } else if (IS_IVYBRIDGE(i915)) {
14561626 u32 ecobus;
14571627
14581628 /* IVB configs may use multi-threaded forcewake */
....@@ -1464,9 +1634,9 @@
14641634 * (correctly) interpreted by the test below as MT
14651635 * forcewake being disabled.
14661636 */
1467
- dev_priv->uncore.funcs.force_wake_get =
1637
+ uncore->funcs.force_wake_get =
14681638 fw_domains_get_with_thread_status;
1469
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1639
+ uncore->funcs.force_wake_put = fw_domains_put;
14701640
14711641 /* We need to init first for ECOBUS access and then
14721642 * determine later if we want to reinit, in case of MT access is
....@@ -1475,48 +1645,59 @@
14751645 * before the ecobus check.
14761646 */
14771647
1478
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1479
- __raw_posting_read(dev_priv, ECOBUS);
1648
+ __raw_uncore_write32(uncore, FORCEWAKE, 0);
1649
+ __raw_posting_read(uncore, ECOBUS);
14801650
1481
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1482
- FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1651
+ ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1652
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1653
+ if (ret)
1654
+ goto out;
14831655
1484
- spin_lock_irq(&dev_priv->uncore.lock);
1485
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
1486
- ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1487
- fw_domains_put(dev_priv, FORCEWAKE_RENDER);
1488
- spin_unlock_irq(&dev_priv->uncore.lock);
1656
+ spin_lock_irq(&uncore->lock);
1657
+ fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
1658
+ ecobus = __raw_uncore_read32(uncore, ECOBUS);
1659
+ fw_domains_put(uncore, FORCEWAKE_RENDER);
1660
+ spin_unlock_irq(&uncore->lock);
14891661
14901662 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1491
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1492
- DRM_INFO("when using vblank-synced partial screen updates.\n");
1493
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1663
+ drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
1664
+ drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
1665
+ fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
1666
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
14941667 FORCEWAKE, FORCEWAKE_ACK);
14951668 }
1496
- } else if (IS_GEN6(dev_priv)) {
1497
- dev_priv->uncore.funcs.force_wake_get =
1669
+ } else if (IS_GEN(i915, 6)) {
1670
+ uncore->funcs.force_wake_get =
14981671 fw_domains_get_with_thread_status;
1499
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1500
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1672
+ uncore->funcs.force_wake_put = fw_domains_put;
1673
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
15011674 FORCEWAKE, FORCEWAKE_ACK);
15021675 }
15031676
1677
+#undef fw_domain_init
1678
+
15041679 /* All future platforms are expected to require complex power gating */
1505
- WARN_ON(dev_priv->uncore.fw_domains == 0);
1680
+ drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
1681
+
1682
+out:
1683
+ if (ret)
1684
+ intel_uncore_fw_domains_fini(uncore);
1685
+
1686
+ return ret;
15061687 }
15071688
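[Editor's note: sketch, not part of the patch.] The local fw_domain_init() wrapper defined inside intel_uncore_fw_domains_init() uses GNU C's binary ?: so the long per-platform ladder needs no per-call error checks: once ret is nonzero, __fw_domain_init() is never evaluated again and the first error survives to the single out: handler. A standalone demonstration; step() and the error value are invented:

#include <stdio.h>

static int step(int id, int fail_at)
{
	if (id == fail_at) {
		printf("step %d failed\n", id);
		return -12;	/* -ENOMEM */
	}
	printf("step %d ok\n", id);
	return 0;
}

int main(void)
{
	int ret = 0;

#define do_step(id) (ret ?: (ret = step((id), 2)))

	do_step(1);	/* runs */
	do_step(2);	/* runs, fails, ret = -12 */
	do_step(3);	/* skipped: ret is already nonzero */
#undef do_step

	printf("ret = %d\n", ret);
	return ret ? 1 : 0;
}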
1508
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
1689
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
15091690 { \
1510
- dev_priv->uncore.fw_domains_table = \
1691
+ (uncore)->fw_domains_table = \
15111692 (struct intel_forcewake_range *)(d); \
1512
- dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
1693
+ (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
15131694 }
15141695
15151696 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
15161697 unsigned long action, void *data)
15171698 {
1518
- struct drm_i915_private *dev_priv = container_of(nb,
1519
- struct drm_i915_private, uncore.pmic_bus_access_nb);
1699
+ struct intel_uncore *uncore = container_of(nb,
1700
+ struct intel_uncore, pmic_bus_access_nb);
15201701
15211702 switch (action) {
15221703 case MBI_PMIC_BUS_ACCESS_BEGIN:
....@@ -1533,67 +1714,174 @@
15331714 * wake reference -> disable wakeref asserts for the time of
15341715 * the access.
15351716 */
1536
- disable_rpm_wakeref_asserts(dev_priv);
1537
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1538
- enable_rpm_wakeref_asserts(dev_priv);
1717
+ disable_rpm_wakeref_asserts(uncore->rpm);
1718
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1719
+ enable_rpm_wakeref_asserts(uncore->rpm);
15391720 break;
15401721 case MBI_PMIC_BUS_ACCESS_END:
1541
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1722
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
15421723 break;
15431724 }
15441725
15451726 return NOTIFY_OK;
15461727 }
15471728
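[Editor's note: sketch, not part of the patch.] i915_pmic_bus_access_notifier() recovers its intel_uncore from the embedded notifier_block with container_of(), the standard pattern for kernel callbacks that are only handed a pointer to the embedded member. A sketch with a simplified container_of() and invented demo types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int (*call)(struct notifier_block *nb, int action); };

struct demo_uncore {
	int id;
	struct notifier_block pmic_bus_access_nb;	/* embedded member */
};

static int demo_notifier(struct notifier_block *nb, int action)
{
	struct demo_uncore *uncore =
		container_of(nb, struct demo_uncore, pmic_bus_access_nb);

	printf("action %d on uncore %d\n", action, uncore->id);
	return 0;
}

int main(void)
{
	struct demo_uncore u = { .id = 7,
				 .pmic_bus_access_nb = { .call = demo_notifier } };

	/* the notifier chain only ever sees &u.pmic_bus_access_nb */
	u.pmic_bus_access_nb.call(&u.pmic_bus_access_nb, 1);
	return 0;
}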
1548
-void intel_uncore_init(struct drm_i915_private *dev_priv)
1729
+static int uncore_mmio_setup(struct intel_uncore *uncore)
15491730 {
1550
- i915_check_vgpu(dev_priv);
1731
+ struct drm_i915_private *i915 = uncore->i915;
1732
+ struct pci_dev *pdev = i915->drm.pdev;
1733
+ int mmio_bar;
1734
+ int mmio_size;
15511735
1552
- intel_uncore_edram_detect(dev_priv);
1553
- intel_uncore_fw_domains_init(dev_priv);
1554
- __intel_uncore_early_sanitize(dev_priv, 0);
1555
-
1556
- dev_priv->uncore.unclaimed_mmio_check = 1;
1557
- dev_priv->uncore.pmic_bus_access_nb.notifier_call =
1558
- i915_pmic_bus_access_notifier;
1559
-
1560
- if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
1561
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
1562
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
1563
- } else if (IS_GEN5(dev_priv)) {
1564
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
1565
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
1566
- } else if (IS_GEN(dev_priv, 6, 7)) {
1567
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
1568
-
1569
- if (IS_VALLEYVIEW(dev_priv)) {
1570
- ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
1571
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1572
- } else {
1573
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1574
- }
1575
- } else if (IS_GEN8(dev_priv)) {
1576
- if (IS_CHERRYVIEW(dev_priv)) {
1577
- ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
1578
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1579
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1580
-
1581
- } else {
1582
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
1583
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
1584
- }
1585
- } else if (IS_GEN(dev_priv, 9, 10)) {
1586
- ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
1587
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
1588
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
1589
- } else {
1590
- ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
1591
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
1592
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
1736
+ mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
1737
+ /*
1738
+ * Before gen4, the registers and the GTT are behind different BARs.
1739
+ * However, from gen4 onwards, the registers and the GTT are shared
1740
+ * in the same BAR, so we want to restrict this ioremap from
1741
+ * clobbering the GTT, which we want to map with ioremap_wc instead.
1742
+ * Fortunately, the register BAR remains the same size for all the earlier
1743
+ * generations up to Ironlake.
1744
+ */
1745
+ if (INTEL_GEN(i915) < 5)
1746
+ mmio_size = 512 * 1024;
1747
+ else
1748
+ mmio_size = 2 * 1024 * 1024;
1749
+ uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
1750
+ if (uncore->regs == NULL) {
1751
+ drm_err(&i915->drm, "failed to map registers\n");
1752
+ return -EIO;
15931753 }
15941754
1595
- iosf_mbi_register_pmic_bus_access_notifier(
1596
- &dev_priv->uncore.pmic_bus_access_nb);
1755
+ return 0;
1756
+}
1757
+
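[Editor's note: sketch, not part of the patch.] The size choice in uncore_mmio_setup() isolated: only the 512KiB (pre-gen5) or 2MiB register window is mapped rather than the whole BAR, since from gen4 onwards the GTT shares that BAR and should get its own write-combining mapping. A trivial restatement:

#include <stdio.h>

static unsigned long mmio_map_size(int gen)
{
	return (gen < 5) ? 512ul * 1024 : 2ul * 1024 * 1024;
}

int main(void)
{
	for (int gen = 2; gen <= 12; gen++)
		printf("gen%-2d -> %lu KiB\n", gen, mmio_map_size(gen) / 1024);
	return 0;
}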
1758
+static void uncore_mmio_cleanup(struct intel_uncore *uncore)
1759
+{
1760
+ struct pci_dev *pdev = uncore->i915->drm.pdev;
1761
+
1762
+ pci_iounmap(pdev, uncore->regs);
1763
+}
1764
+
1765
+void intel_uncore_init_early(struct intel_uncore *uncore,
1766
+ struct drm_i915_private *i915)
1767
+{
1768
+ spin_lock_init(&uncore->lock);
1769
+ uncore->i915 = i915;
1770
+ uncore->rpm = &i915->runtime_pm;
1771
+ uncore->debug = &i915->mmio_debug;
1772
+}
1773
+
1774
+static void uncore_raw_init(struct intel_uncore *uncore)
1775
+{
1776
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
1777
+
1778
+ if (intel_vgpu_active(uncore->i915)) {
1779
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
1780
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
1781
+ } else if (IS_GEN(uncore->i915, 5)) {
1782
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
1783
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
1784
+ } else {
1785
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
1786
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
1787
+ }
1788
+}
1789
+
1790
+static int uncore_forcewake_init(struct intel_uncore *uncore)
1791
+{
1792
+ struct drm_i915_private *i915 = uncore->i915;
1793
+ int ret;
1794
+
1795
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
1796
+
1797
+ ret = intel_uncore_fw_domains_init(uncore);
1798
+ if (ret)
1799
+ return ret;
1800
+ forcewake_early_sanitize(uncore, 0);
1801
+
1802
+ if (IS_GEN_RANGE(i915, 6, 7)) {
1803
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
1804
+
1805
+ if (IS_VALLEYVIEW(i915)) {
1806
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
1807
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1808
+ } else {
1809
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
1810
+ }
1811
+ } else if (IS_GEN(i915, 8)) {
1812
+ if (IS_CHERRYVIEW(i915)) {
1813
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
1814
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
1815
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1816
+ } else {
1817
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
1818
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
1819
+ }
1820
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
1821
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
1822
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
1823
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1824
+ } else if (IS_GEN(i915, 11)) {
1825
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
1826
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
1827
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
1828
+ } else {
1829
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
1830
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
1831
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
1832
+ }
1833
+
1834
+ uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
1835
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
1836
+
1837
+ return 0;
1838
+}
1839
+
1840
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
1841
+{
1842
+ struct drm_i915_private *i915 = uncore->i915;
1843
+ int ret;
1844
+
1845
+ ret = uncore_mmio_setup(uncore);
1846
+ if (ret)
1847
+ return ret;
1848
+
1849
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
1850
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
1851
+
1852
+ if (!intel_uncore_has_forcewake(uncore)) {
1853
+ uncore_raw_init(uncore);
1854
+ } else {
1855
+ ret = uncore_forcewake_init(uncore);
1856
+ if (ret)
1857
+ goto out_mmio_cleanup;
1858
+ }
1859
+
1860
+ /* make sure fw funcs are set if and only if we have fw */
1861
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
1862
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
1863
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
1864
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
1865
+
1866
+ if (HAS_FPGA_DBG_UNCLAIMED(i915))
1867
+ uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
1868
+
1869
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1870
+ uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
1871
+
1872
+ if (IS_GEN_RANGE(i915, 6, 7))
1873
+ uncore->flags |= UNCORE_HAS_FIFO;
1874
+
1875
+ /* clear out unclaimed reg detection bit */
1876
+ if (intel_uncore_unclaimed_mmio(uncore))
1877
+ drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
1878
+
1879
+ return 0;
1880
+
1881
+out_mmio_cleanup:
1882
+ uncore_mmio_cleanup(uncore);
1883
+
1884
+ return ret;
15971885 }
15981886
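[Editor's note: sketch, not part of the patch.] intel_uncore_init_mmio() follows the kernel's standard goto-unwind shape: any failure after uncore_mmio_setup() jumps to out_mmio_cleanup so the register mapping never leaks, and every exit path leaves the object consistent. The pattern in miniature, all names invented:

#include <stdio.h>

static int setup_a(void) { puts("A up"); return 0; }
static void teardown_a(void) { puts("A down"); }
static int setup_b(int fail) { if (fail) return -5; puts("B up"); return 0; }

static int demo_init(int fail_b)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;

	ret = setup_b(fail_b);
	if (ret)
		goto out_teardown_a;	/* unwind only what succeeded */

	return 0;

out_teardown_a:
	teardown_a();
	return ret;
}

int main(void)
{
	printf("ok path:   %d\n", demo_init(0));
	printf("fail path: %d\n", demo_init(1));
	return 0;
}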
15991887 /*
....@@ -1601,45 +1889,49 @@
16011889 * the forcewake domains. Prune them, to make sure they only reference existing
16021890 * engines.
16031891 */
1604
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
1892
+void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
1893
+ struct intel_gt *gt)
16051894 {
1606
- if (INTEL_GEN(dev_priv) >= 11) {
1607
- enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
1608
- enum forcewake_domain_id domain_id;
1609
- int i;
1895
+ enum forcewake_domains fw_domains = uncore->fw_domains;
1896
+ enum forcewake_domain_id domain_id;
1897
+ int i;
16101898
1611
- for (i = 0; i < I915_MAX_VCS; i++) {
1612
- domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
1899
+ if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
1900
+ return;
16131901
1614
- if (HAS_ENGINE(dev_priv, _VCS(i)))
1615
- continue;
1902
+ for (i = 0; i < I915_MAX_VCS; i++) {
1903
+ domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
16161904
1617
- if (fw_domains & BIT(domain_id))
1618
- fw_domain_fini(dev_priv, domain_id);
1619
- }
1905
+ if (HAS_ENGINE(gt, _VCS(i)))
1906
+ continue;
16201907
1621
- for (i = 0; i < I915_MAX_VECS; i++) {
1622
- domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
1908
+ if (fw_domains & BIT(domain_id))
1909
+ fw_domain_fini(uncore, domain_id);
1910
+ }
16231911
1624
- if (HAS_ENGINE(dev_priv, _VECS(i)))
1625
- continue;
1912
+ for (i = 0; i < I915_MAX_VECS; i++) {
1913
+ domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
16261914
1627
- if (fw_domains & BIT(domain_id))
1628
- fw_domain_fini(dev_priv, domain_id);
1629
- }
1915
+ if (HAS_ENGINE(gt, _VECS(i)))
1916
+ continue;
1917
+
1918
+ if (fw_domains & BIT(domain_id))
1919
+ fw_domain_fini(uncore, domain_id);
16301920 }
16311921 }
16321922
1633
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
1923
+void intel_uncore_fini_mmio(struct intel_uncore *uncore)
16341924 {
1635
- /* Paranoia: make sure we have disabled everything before we exit. */
1636
- intel_uncore_sanitize(dev_priv);
1925
+ if (intel_uncore_has_forcewake(uncore)) {
1926
+ iosf_mbi_punit_acquire();
1927
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
1928
+ &uncore->pmic_bus_access_nb);
1929
+ intel_uncore_forcewake_reset(uncore);
1930
+ intel_uncore_fw_domains_fini(uncore);
1931
+ iosf_mbi_punit_release();
1932
+ }
16371933
1638
- iosf_mbi_punit_acquire();
1639
- iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
1640
- &dev_priv->uncore.pmic_bus_access_nb);
1641
- intel_uncore_forcewake_reset(dev_priv);
1642
- iosf_mbi_punit_release();
1934
+ uncore_mmio_cleanup(uncore);
16431935 }
16441936
16451937 static const struct reg_whitelist {
....@@ -1650,16 +1942,18 @@
16501942 } reg_read_whitelist[] = { {
16511943 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
16521944 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1653
- .gen_mask = INTEL_GEN_MASK(4, 11),
1945
+ .gen_mask = INTEL_GEN_MASK(4, 12),
16541946 .size = 8
16551947 } };
16561948
16571949 int i915_reg_read_ioctl(struct drm_device *dev,
16581950 void *data, struct drm_file *file)
16591951 {
1660
- struct drm_i915_private *dev_priv = to_i915(dev);
1952
+ struct drm_i915_private *i915 = to_i915(dev);
1953
+ struct intel_uncore *uncore = &i915->uncore;
16611954 struct drm_i915_reg_read *reg = data;
16621955 struct reg_whitelist const *entry;
1956
+ intel_wakeref_t wakeref;
16631957 unsigned int flags;
16641958 int remain;
16651959 int ret = 0;
....@@ -1673,7 +1967,7 @@
16731967 GEM_BUG_ON(entry->size > 8);
16741968 GEM_BUG_ON(entry_offset & (entry->size - 1));
16751969
1676
- if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
1970
+ if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
16771971 entry_offset == (reg->offset & -entry->size))
16781972 break;
16791973 entry++;
....@@ -1685,280 +1979,32 @@
16851979
16861980 flags = reg->offset & (entry->size - 1);
16871981
1688
- intel_runtime_pm_get(dev_priv);
1689
- if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
1690
- reg->val = I915_READ64_2x32(entry->offset_ldw,
1691
- entry->offset_udw);
1692
- else if (entry->size == 8 && flags == 0)
1693
- reg->val = I915_READ64(entry->offset_ldw);
1694
- else if (entry->size == 4 && flags == 0)
1695
- reg->val = I915_READ(entry->offset_ldw);
1696
- else if (entry->size == 2 && flags == 0)
1697
- reg->val = I915_READ16(entry->offset_ldw);
1698
- else if (entry->size == 1 && flags == 0)
1699
- reg->val = I915_READ8(entry->offset_ldw);
1700
- else
1701
- ret = -EINVAL;
1702
- intel_runtime_pm_put(dev_priv);
1982
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1983
+ if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
1984
+ reg->val = intel_uncore_read64_2x32(uncore,
1985
+ entry->offset_ldw,
1986
+ entry->offset_udw);
1987
+ else if (entry->size == 8 && flags == 0)
1988
+ reg->val = intel_uncore_read64(uncore,
1989
+ entry->offset_ldw);
1990
+ else if (entry->size == 4 && flags == 0)
1991
+ reg->val = intel_uncore_read(uncore, entry->offset_ldw);
1992
+ else if (entry->size == 2 && flags == 0)
1993
+ reg->val = intel_uncore_read16(uncore,
1994
+ entry->offset_ldw);
1995
+ else if (entry->size == 1 && flags == 0)
1996
+ reg->val = intel_uncore_read8(uncore,
1997
+ entry->offset_ldw);
1998
+ else
1999
+ ret = -EINVAL;
2000
+ }
17032001
17042002 return ret;
1705
-}
1706
-
1707
-static void gen3_stop_engine(struct intel_engine_cs *engine)
1708
-{
1709
- struct drm_i915_private *dev_priv = engine->i915;
1710
- const u32 base = engine->mmio_base;
1711
-
1712
- if (intel_engine_stop_cs(engine))
1713
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
1714
-
1715
- I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
1716
- POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
1717
-
1718
- I915_WRITE_FW(RING_HEAD(base), 0);
1719
- I915_WRITE_FW(RING_TAIL(base), 0);
1720
- POSTING_READ_FW(RING_TAIL(base));
1721
-
1722
- /* The ring must be empty before it is disabled */
1723
- I915_WRITE_FW(RING_CTL(base), 0);
1724
-
1725
- /* Check acts as a post */
1726
- if (I915_READ_FW(RING_HEAD(base)) != 0)
1727
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
1728
- engine->name);
1729
-}
1730
-
1731
-static void i915_stop_engines(struct drm_i915_private *dev_priv,
1732
- unsigned engine_mask)
1733
-{
1734
- struct intel_engine_cs *engine;
1735
- enum intel_engine_id id;
1736
-
1737
- if (INTEL_GEN(dev_priv) < 3)
1738
- return;
1739
-
1740
- for_each_engine_masked(engine, dev_priv, engine_mask, id)
1741
- gen3_stop_engine(engine);
1742
-}
1743
-
1744
-static bool i915_in_reset(struct pci_dev *pdev)
1745
-{
1746
- u8 gdrst;
1747
-
1748
- pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1749
- return gdrst & GRDOM_RESET_STATUS;
1750
-}
1751
-
1752
-static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1753
-{
1754
- struct pci_dev *pdev = dev_priv->drm.pdev;
1755
- int err;
1756
-
1757
- /* Assert reset for at least 20 usec, and wait for acknowledgement. */
1758
- pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1759
- usleep_range(50, 200);
1760
- err = wait_for(i915_in_reset(pdev), 500);
1761
-
1762
- /* Clear the reset request. */
1763
- pci_write_config_byte(pdev, I915_GDRST, 0);
1764
- usleep_range(50, 200);
1765
- if (!err)
1766
- err = wait_for(!i915_in_reset(pdev), 500);
1767
-
1768
- return err;
1769
-}
1770
-
1771
-static bool g4x_reset_complete(struct pci_dev *pdev)
1772
-{
1773
- u8 gdrst;
1774
-
1775
- pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1776
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
1777
-}
1778
-
1779
-static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1780
-{
1781
- struct pci_dev *pdev = dev_priv->drm.pdev;
1782
-
1783
- pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1784
- return wait_for(g4x_reset_complete(pdev), 500);
1785
-}
1786
-
1787
-static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1788
-{
1789
- struct pci_dev *pdev = dev_priv->drm.pdev;
1790
- int ret;
1791
-
1792
- /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1793
- I915_WRITE(VDECCLK_GATE_D,
1794
- I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1795
- POSTING_READ(VDECCLK_GATE_D);
1796
-
1797
- pci_write_config_byte(pdev, I915_GDRST,
1798
- GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1799
- ret = wait_for(g4x_reset_complete(pdev), 500);
1800
- if (ret) {
1801
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1802
- goto out;
1803
- }
1804
-
1805
- pci_write_config_byte(pdev, I915_GDRST,
1806
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
1807
- ret = wait_for(g4x_reset_complete(pdev), 500);
1808
- if (ret) {
1809
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1810
- goto out;
1811
- }
1812
-
1813
-out:
1814
- pci_write_config_byte(pdev, I915_GDRST, 0);
1815
-
1816
- I915_WRITE(VDECCLK_GATE_D,
1817
- I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1818
- POSTING_READ(VDECCLK_GATE_D);
1819
-
1820
- return ret;
1821
-}
1822
-
1823
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1824
- unsigned engine_mask)
1825
-{
1826
- int ret;
1827
-
1828
- I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1829
- ret = intel_wait_for_register(dev_priv,
1830
- ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1831
- 500);
1832
- if (ret) {
1833
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
1834
- goto out;
1835
- }
1836
-
1837
- I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1838
- ret = intel_wait_for_register(dev_priv,
1839
- ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1840
- 500);
1841
- if (ret) {
1842
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
1843
- goto out;
1844
- }
1845
-
1846
-out:
1847
- I915_WRITE(ILK_GDSR, 0);
1848
- POSTING_READ(ILK_GDSR);
1849
- return ret;
1850
-}
1851
-
1852
-/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
1853
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1854
- u32 hw_domain_mask)
1855
-{
1856
- int err;
1857
-
1858
- /* GEN6_GDRST is not in the gt power well, no need to check
1859
- * for fifo space for the write or forcewake the chip for
1860
- * the read
1861
- */
1862
- __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1863
-
1864
- /* Wait for the device to ack the reset requests */
1865
- err = __intel_wait_for_register_fw(dev_priv,
1866
- GEN6_GDRST, hw_domain_mask, 0,
1867
- 500, 0,
1868
- NULL);
1869
- if (err)
1870
- DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
1871
- hw_domain_mask);
1872
-
1873
- return err;
1874
-}
1875
-
1876
-/**
1877
- * gen6_reset_engines - reset individual engines
1878
- * @dev_priv: i915 device
1879
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1880
- *
1881
- * This function will reset the individual engines that are set in engine_mask.
1882
- * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
1883
- *
1884
- * Note: It is the responsibility of the caller to handle the difference between
1885
- * asking full domain reset versus reset for all available individual engines.
1886
- *
1887
- * Returns 0 on success, nonzero on error.
1888
- */
1889
-static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1890
- unsigned engine_mask)
1891
-{
1892
- struct intel_engine_cs *engine;
1893
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1894
- [RCS] = GEN6_GRDOM_RENDER,
1895
- [BCS] = GEN6_GRDOM_BLT,
1896
- [VCS] = GEN6_GRDOM_MEDIA,
1897
- [VCS2] = GEN8_GRDOM_MEDIA2,
1898
- [VECS] = GEN6_GRDOM_VECS,
1899
- };
1900
- u32 hw_mask;
1901
-
1902
- if (engine_mask == ALL_ENGINES) {
1903
- hw_mask = GEN6_GRDOM_FULL;
1904
- } else {
1905
- unsigned int tmp;
1906
-
1907
- hw_mask = 0;
1908
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1909
- hw_mask |= hw_engine_mask[engine->id];
1910
- }
1911
-
1912
- return gen6_hw_domain_reset(dev_priv, hw_mask);
1913
-}
1914
-
1915
-/**
1916
- * gen11_reset_engines - reset individual engines
1917
- * @dev_priv: i915 device
1918
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1919
- *
1920
- * This function will reset the individual engines that are set in engine_mask.
1921
- * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
1922
- *
1923
- * Note: It is the responsibility of the caller to handle the difference between
1924
- * asking full domain reset versus reset for all available individual engines.
1925
- *
1926
- * Returns 0 on success, nonzero on error.
1927
- */
1928
-static int gen11_reset_engines(struct drm_i915_private *dev_priv,
1929
- unsigned engine_mask)
1930
-{
1931
- struct intel_engine_cs *engine;
1932
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1933
- [RCS] = GEN11_GRDOM_RENDER,
1934
- [BCS] = GEN11_GRDOM_BLT,
1935
- [VCS] = GEN11_GRDOM_MEDIA,
1936
- [VCS2] = GEN11_GRDOM_MEDIA2,
1937
- [VCS3] = GEN11_GRDOM_MEDIA3,
1938
- [VCS4] = GEN11_GRDOM_MEDIA4,
1939
- [VECS] = GEN11_GRDOM_VECS,
1940
- [VECS2] = GEN11_GRDOM_VECS2,
1941
- };
1942
- u32 hw_mask;
1943
-
1944
- BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
1945
-
1946
- if (engine_mask == ALL_ENGINES) {
1947
- hw_mask = GEN11_GRDOM_FULL;
1948
- } else {
1949
- unsigned int tmp;
1950
-
1951
- hw_mask = 0;
1952
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
1953
- hw_mask |= hw_engine_mask[engine->id];
1954
- }
1955
-
1956
- return gen6_hw_domain_reset(dev_priv, hw_mask);
19572003 }
19582004
19592005 /**
19602006 * __intel_wait_for_register_fw - wait until register matches expected state
1961
- * @dev_priv: the i915 device
2007
+ * @uncore: the struct intel_uncore
19622008 * @reg: the register to read
19632009 * @mask: mask to apply to register value
19642010 * @value: expected value
....@@ -1980,9 +2026,9 @@
19802026 * wish to wait without holding forcewake for the duration (i.e. you expect
19812027 * the wait to be slow).
19822028 *
1983
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
2029
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
19842030 */
1985
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
2031
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
19862032 i915_reg_t reg,
19872033 u32 mask,
19882034 u32 value,
....@@ -1990,13 +2036,14 @@
19902036 unsigned int slow_timeout_ms,
19912037 u32 *out_value)
19922038 {
1993
- u32 uninitialized_var(reg_value);
1994
-#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
2039
+ u32 reg_value = 0;
2040
+#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
19952041 int ret;
19962042
19972043 /* Catch any overuse of this function */
19982044 might_sleep_if(slow_timeout_ms);
19992045 GEM_BUG_ON(fast_timeout_us > 20000);
2046
+ GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
20002047
20012048 ret = -ETIMEDOUT;
20022049 if (fast_timeout_us && fast_timeout_us <= 20000)
....@@ -2013,7 +2060,7 @@
20132060
20142061 /**
20152062 * __intel_wait_for_register - wait until register matches expected state
2016
- * @dev_priv: the i915 device
2063
+ * @uncore: the struct intel_uncore
20172064 * @reg: the register to read
20182065 * @mask: mask to apply to register value
20192066 * @value: expected value
....@@ -2028,37 +2075,41 @@
20282075 *
20292076 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
20302077 *
2031
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
2078
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
20322079 */
2033
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
2034
- i915_reg_t reg,
2035
- u32 mask,
2036
- u32 value,
2037
- unsigned int fast_timeout_us,
2038
- unsigned int slow_timeout_ms,
2039
- u32 *out_value)
2080
+int __intel_wait_for_register(struct intel_uncore *uncore,
2081
+ i915_reg_t reg,
2082
+ u32 mask,
2083
+ u32 value,
2084
+ unsigned int fast_timeout_us,
2085
+ unsigned int slow_timeout_ms,
2086
+ u32 *out_value)
20402087 {
20412088 unsigned fw =
2042
- intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
2089
+ intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
20432090 u32 reg_value;
20442091 int ret;
20452092
20462093 might_sleep_if(slow_timeout_ms);
20472094
2048
- spin_lock_irq(&dev_priv->uncore.lock);
2049
- intel_uncore_forcewake_get__locked(dev_priv, fw);
2095
+ spin_lock_irq(&uncore->lock);
2096
+ intel_uncore_forcewake_get__locked(uncore, fw);
20502097
2051
- ret = __intel_wait_for_register_fw(dev_priv,
2098
+ ret = __intel_wait_for_register_fw(uncore,
20522099 reg, mask, value,
20532100 fast_timeout_us, 0, &reg_value);
20542101
2055
- intel_uncore_forcewake_put__locked(dev_priv, fw);
2056
- spin_unlock_irq(&dev_priv->uncore.lock);
2102
+ intel_uncore_forcewake_put__locked(uncore, fw);
2103
+ spin_unlock_irq(&uncore->lock);
20572104
20582105 if (ret && slow_timeout_ms)
2059
- ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
2106
+ ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2107
+ reg),
20602108 (reg_value & mask) == value,
20612109 slow_timeout_ms * 1000, 10, 1000);
2110
+
2111
+ /* just trace the final value */
2112
+ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
20622113
20632114 if (out_value)
20642115 *out_value = reg_value;
....@@ -2066,245 +2117,49 @@
20662117 return ret;
20672118 }
20682119
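[Editor's note: sketch, not part of the patch.] __intel_wait_for_register() composes a short atomic spin (taken under forcewake and the uncore lock) with an optional sleeping poll for slow registers. A userspace approximation of that two-phase wait, with clock_gettime()/nanosleep() standing in for the kernel's _wait_for_atomic()/__wait_for() machinery and fake_reg standing in for the MMIO read:

#include <stdio.h>
#include <time.h>

static volatile unsigned int fake_reg;

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000;
}

static int wait_for_reg(unsigned int mask, unsigned int value,
			long fast_us, long slow_ms)
{
	long deadline = now_us() + fast_us;

	do {	/* fast phase: pure busy-poll, never sleeps */
		if ((fake_reg & mask) == value)
			return 0;
	} while (now_us() < deadline);

	deadline = now_us() + slow_ms * 1000;
	while (now_us() < deadline) {	/* slow phase: poll, then sleep */
		if ((fake_reg & mask) == value)
			return 0;
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
	return -110;	/* ETIMEDOUT on Linux */
}

int main(void)
{
	fake_reg = 0x1;
	printf("%d\n", wait_for_reg(0x1, 0x1, 500, 10));	/* 0 */
	printf("%d\n", wait_for_reg(0x2, 0x2, 500, 10));	/* -110 */
	return 0;
}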
2069
-static int gen8_reset_engine_start(struct intel_engine_cs *engine)
2120
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
20702121 {
2071
- struct drm_i915_private *dev_priv = engine->i915;
2072
- int ret;
2122
+ bool ret;
20732123
2074
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
2075
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
2076
-
2077
- ret = __intel_wait_for_register_fw(dev_priv,
2078
- RING_RESET_CTL(engine->mmio_base),
2079
- RESET_CTL_READY_TO_RESET,
2080
- RESET_CTL_READY_TO_RESET,
2081
- 700, 0,
2082
- NULL);
2083
- if (ret)
2084
- DRM_ERROR("%s: reset request timeout\n", engine->name);
2124
+ spin_lock_irq(&uncore->debug->lock);
2125
+ ret = check_for_unclaimed_mmio(uncore);
2126
+ spin_unlock_irq(&uncore->debug->lock);
20852127
20862128 return ret;
2087
-}
2088
-
2089
-static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
2090
-{
2091
- struct drm_i915_private *dev_priv = engine->i915;
2092
-
2093
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
2094
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
2095
-}
2096
-
2097
-static int gen8_reset_engines(struct drm_i915_private *dev_priv,
2098
- unsigned engine_mask)
2099
-{
2100
- struct intel_engine_cs *engine;
2101
- unsigned int tmp;
2102
- int ret;
2103
-
2104
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
2105
- if (gen8_reset_engine_start(engine)) {
2106
- ret = -EIO;
2107
- goto not_ready;
2108
- }
2109
- }
2110
-
2111
- if (INTEL_GEN(dev_priv) >= 11)
2112
- ret = gen11_reset_engines(dev_priv, engine_mask);
2113
- else
2114
- ret = gen6_reset_engines(dev_priv, engine_mask);
2115
-
2116
-not_ready:
2117
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
2118
- gen8_reset_engine_cancel(engine);
2119
-
2120
- return ret;
2121
-}
2122
-
2123
-typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
2124
-
2125
-static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
2126
-{
2127
- if (!i915_modparams.reset)
2128
- return NULL;
2129
-
2130
- if (INTEL_GEN(dev_priv) >= 8)
2131
- return gen8_reset_engines;
2132
- else if (INTEL_GEN(dev_priv) >= 6)
2133
- return gen6_reset_engines;
2134
- else if (IS_GEN5(dev_priv))
2135
- return ironlake_do_reset;
2136
- else if (IS_G4X(dev_priv))
2137
- return g4x_do_reset;
2138
- else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
2139
- return g33_do_reset;
2140
- else if (INTEL_GEN(dev_priv) >= 3)
2141
- return i915_do_reset;
2142
- else
2143
- return NULL;
2144
-}
2145
-
2146
-int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
2147
-{
2148
- reset_func reset = intel_get_gpu_reset(dev_priv);
2149
- int retry;
2150
- int ret;
2151
-
2152
- /*
2153
- * We want to perform per-engine reset from atomic context (e.g.
2154
- * softirq), which imposes the constraint that we cannot sleep.
2155
- * However, experience suggests that spending a bit of time waiting
2156
- * for a reset helps in various cases, so for a full-device reset
2157
- * we apply the opposite rule and wait if we want to. As we should
2158
- * always follow up a failed per-engine reset with a full device reset,
2159
- * being a little faster, stricter and more error prone for the
2160
- * atomic case seems an acceptable compromise.
2161
- *
2162
- * Unfortunately this leads to a bimodal routine, when the goal was
2163
- * to have a single reset function that worked for resetting any
2164
- * number of engines simultaneously.
2165
- */
2166
- might_sleep_if(engine_mask == ALL_ENGINES);
2167
-
2168
- /*
2169
- * If the power well sleeps during the reset, the reset
2170
- * request may be dropped and never completes (causing -EIO).
2171
- */
2172
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2173
- for (retry = 0; retry < 3; retry++) {
2174
-
2175
- /*
2176
- * We stop engines, otherwise we might get failed reset and a
2177
- * dead gpu (on elk). Also as modern gpu as kbl can suffer
2178
- * from system hang if batchbuffer is progressing when
2179
- * the reset is issued, regardless of READY_TO_RESET ack.
2180
- * Thus assume it is best to stop engines on all gens
2181
- * where we have a gpu reset.
2182
- *
2183
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
2184
- *
2185
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
2186
- *
2187
- * FIXME: Wa for more modern gens needs to be validated
2188
- */
2189
- i915_stop_engines(dev_priv, engine_mask);
2190
-
2191
- ret = -ENODEV;
2192
- if (reset) {
2193
- GEM_TRACE("engine_mask=%x\n", engine_mask);
2194
- ret = reset(dev_priv, engine_mask);
2195
- }
2196
- if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
2197
- break;
2198
-
2199
- cond_resched();
2200
- }
2201
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2202
-
2203
- return ret;
2204
-}
2205
-
2206
-bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
2207
-{
2208
- return intel_get_gpu_reset(dev_priv) != NULL;
2209
-}
2210
-
2211
-bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
2212
-{
2213
- return (dev_priv->info.has_reset_engine &&
2214
- i915_modparams.reset >= 2);
2215
-}
2216
-
2217
-int intel_reset_guc(struct drm_i915_private *dev_priv)
2218
-{
2219
- u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
2220
- GEN9_GRDOM_GUC;
2221
- int ret;
2222
-
2223
- GEM_BUG_ON(!HAS_GUC(dev_priv));
2224
-
2225
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2226
- ret = gen6_hw_domain_reset(dev_priv, guc_domain);
2227
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2228
-
2229
- return ret;
2230
-}
2231
-
2232
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
2233
-{
2234
- return check_for_unclaimed_mmio(dev_priv);
22352129 }
22362130
22372131 bool
2238
-intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
2132
+intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
22392133 {
2240
- if (unlikely(i915_modparams.mmio_debug ||
2241
- dev_priv->uncore.unclaimed_mmio_check <= 0))
2242
- return false;
2134
+ bool ret = false;
22432135
2244
- if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
2245
- DRM_DEBUG("Unclaimed register detected, "
2246
- "enabling oneshot unclaimed register reporting. "
2247
- "Please use i915.mmio_debug=N for more information.\n");
2248
- i915_modparams.mmio_debug++;
2249
- dev_priv->uncore.unclaimed_mmio_check--;
2250
- return true;
2136
+ spin_lock_irq(&uncore->debug->lock);
2137
+
2138
+ if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2139
+ goto out;
2140
+
2141
+ if (unlikely(check_for_unclaimed_mmio(uncore))) {
2142
+ if (!uncore->i915->params.mmio_debug) {
2143
+ drm_dbg(&uncore->i915->drm,
2144
+ "Unclaimed register detected, "
2145
+ "enabling oneshot unclaimed register reporting. "
2146
+ "Please use i915.mmio_debug=N for more information.\n");
2147
+ uncore->i915->params.mmio_debug++;
2148
+ }
2149
+ uncore->debug->unclaimed_mmio_check--;
2150
+ ret = true;
22512151 }
22522152
2253
- return false;
2254
-}
2153
+out:
2154
+ spin_unlock_irq(&uncore->debug->lock);
22552155
2256
-static enum forcewake_domains
2257
-intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
2258
- i915_reg_t reg)
2259
-{
2260
- u32 offset = i915_mmio_reg_offset(reg);
2261
- enum forcewake_domains fw_domains;
2262
-
2263
- if (INTEL_GEN(dev_priv) >= 11) {
2264
- fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
2265
- } else if (HAS_FWTABLE(dev_priv)) {
2266
- fw_domains = __fwtable_reg_read_fw_domains(offset);
2267
- } else if (INTEL_GEN(dev_priv) >= 6) {
2268
- fw_domains = __gen6_reg_read_fw_domains(offset);
2269
- } else {
2270
- WARN_ON(!IS_GEN(dev_priv, 2, 5));
2271
- fw_domains = 0;
2272
- }
2273
-
2274
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
2275
-
2276
- return fw_domains;
2277
-}
2278
-
2279
-static enum forcewake_domains
2280
-intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
2281
- i915_reg_t reg)
2282
-{
2283
- u32 offset = i915_mmio_reg_offset(reg);
2284
- enum forcewake_domains fw_domains;
2285
-
2286
- if (INTEL_GEN(dev_priv) >= 11) {
2287
- fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
2288
- } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
2289
- fw_domains = __fwtable_reg_write_fw_domains(offset);
2290
- } else if (IS_GEN8(dev_priv)) {
2291
- fw_domains = __gen8_reg_write_fw_domains(offset);
2292
- } else if (IS_GEN(dev_priv, 6, 7)) {
2293
- fw_domains = FORCEWAKE_RENDER;
2294
- } else {
2295
- WARN_ON(!IS_GEN(dev_priv, 2, 5));
2296
- fw_domains = 0;
2297
- }
2298
-
2299
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
2300
-
2301
- return fw_domains;
2156
+ return ret;
23022157 }
23032158
23042159 /**
23052160 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
23062161 * a register
2307
- * @dev_priv: pointer to struct drm_i915_private
2162
+ * @uncore: pointer to struct intel_uncore
23082163 * @reg: register in question
23092164 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
23102165 *
....@@ -2316,21 +2171,23 @@
23162171 * callers to do FIFO management on their own or risk losing writes.
23172172 */
23182173 enum forcewake_domains
2319
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
2174
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
23202175 i915_reg_t reg, unsigned int op)
23212176 {
23222177 enum forcewake_domains fw_domains = 0;
23232178
2324
- WARN_ON(!op);
2179
+ drm_WARN_ON(&uncore->i915->drm, !op);
23252180
2326
- if (intel_vgpu_active(dev_priv))
2181
+ if (!intel_uncore_has_forcewake(uncore))
23272182 return 0;
23282183
23292184 if (op & FW_REG_READ)
2330
- fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
2185
+ fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
23312186
23322187 if (op & FW_REG_WRITE)
2333
- fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
2188
+ fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2189
+
2190
+ drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
23342191
23352192 return fw_domains;
23362193 }
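[Editor's note: sketch, not part of the patch.] The FW_REG_READ/FW_REG_WRITE op mask lets one query cover both directions: the helper ORs together the per-direction domain lookups and the caller then holds the union while doing its raw _fw accesses. A reduced sketch with invented domain values and lookup stubs:

#include <stdio.h>

#define FW_REG_READ	(1u << 0)
#define FW_REG_WRITE	(1u << 1)

#define FW_RENDER	(1u << 0)
#define FW_MEDIA	(1u << 1)

static unsigned int read_domains_for(unsigned int reg)
{
	(void)reg; return FW_RENDER;
}

static unsigned int write_domains_for(unsigned int reg)
{
	(void)reg; return FW_RENDER | FW_MEDIA;
}

static unsigned int forcewake_for_reg(unsigned int reg, unsigned int op)
{
	unsigned int fw = 0;

	if (op & FW_REG_READ)
		fw |= read_domains_for(reg);
	if (op & FW_REG_WRITE)
		fw |= write_domains_for(reg);
	return fw;
}

int main(void)
{
	printf("read:  %#x\n", forcewake_for_reg(0x2030, FW_REG_READ));
	printf("r+w:   %#x\n",
	       forcewake_for_reg(0x2030, FW_REG_READ | FW_REG_WRITE));
	return 0;
}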