.. | ..
21 | 21 | * IN THE SOFTWARE.
22 | 22 | */
23 | 23 |
24 | | -#include "i915_drv.h"
25 | | -#include "intel_drv.h"
26 | | -#include "i915_vgpu.h"
27 | | -
28 | | -#include <asm/iosf_mbi.h>
29 | 24 | #include <linux/pm_runtime.h>
| 25 | +#include <asm/iosf_mbi.h>
| 26 | +
| 27 | +#include "i915_drv.h"
| 28 | +#include "i915_trace.h"
| 29 | +#include "i915_vgpu.h"
| 30 | +#include "intel_pm.h"
30 | 31 |
31 | 32 | #define FORCEWAKE_ACK_TIMEOUT_MS 50
32 | 33 | #define GT_FIFO_TIMEOUT_MS 10
33 | 34 |
34 | | -#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
| 35 | +#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
| 36 | +
| 37 | +void
| 38 | +intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
| 39 | +{
| 40 | + spin_lock_init(&mmio_debug->lock);
| 41 | + mmio_debug->unclaimed_mmio_check = 1;
| 42 | +}
| 43 | +
| 44 | +static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
| 45 | +{
| 46 | + lockdep_assert_held(&mmio_debug->lock);
| 47 | +
| 48 | + /* Save and disable mmio debugging for the user bypass */
| 49 | + if (!mmio_debug->suspend_count++) {
| 50 | + mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
| 51 | + mmio_debug->unclaimed_mmio_check = 0;
| 52 | + }
| 53 | +}
| 54 | +
| 55 | +static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
| 56 | +{
| 57 | + lockdep_assert_held(&mmio_debug->lock);
| 58 | +
| 59 | + if (!--mmio_debug->suspend_count)
| 60 | + mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
| 61 | +}
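
Aside: `mmio_debug_suspend()`/`mmio_debug_resume()` above form a nested save/restore counter: only the outermost suspend saves and clears the setting, and only the matching final resume restores it. A minimal user-space sketch of the same pattern (the struct and names here are illustrative, not the driver's):

```c
/* Standalone sketch of the nested suspend/resume counting used above. */
#include <assert.h>

struct dbg_state {
	int suspend_count;	/* nesting depth of suspends */
	int check;		/* the live setting */
	int saved_check;	/* saved by the first suspend */
};

static void dbg_suspend(struct dbg_state *s)
{
	if (!s->suspend_count++) {	/* only the outermost suspend saves */
		s->saved_check = s->check;
		s->check = 0;
	}
}

static void dbg_resume(struct dbg_state *s)
{
	if (!--s->suspend_count)	/* only the last resume restores */
		s->check = s->saved_check;
}

int main(void)
{
	struct dbg_state s = { .check = 1 };

	dbg_suspend(&s);	/* outer user: saves 1, disables */
	dbg_suspend(&s);	/* nested user: leaves the saved value alone */
	dbg_resume(&s);		/* still suspended */
	assert(s.check == 0);
	dbg_resume(&s);		/* last user: restores */
	assert(s.check == 1);
	return 0;
}
```
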
35 | 62 |
36 | 63 | static const char * const forcewake_domain_names[] = {
37 | 64 | "render",
.. | ..
58 | 85 | return "unknown";
59 | 86 | }
60 | 87 |
| 88 | +#define fw_ack(d) readl((d)->reg_ack)
| 89 | +#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
| 90 | +#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
| 91 | +
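
Aside: `fw_set()`/`fw_clear()` rely on "masked" registers, where a single write carries both a mask (upper 16 bits selecting which bits may change) and the new values (lower 16 bits), so no read-modify-write is needed. A standalone sketch of that semantic, assuming the usual `mask << 16 | value` encoding behind `_MASKED_BIT_ENABLE()`/`_MASKED_BIT_DISABLE()`:

```c
/* Model of the masked-register write semantics assumed by fw_set()/fw_clear(). */
#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(x)  (((uint32_t)(x) << 16) | (x))
#define MASKED_BIT_DISABLE(x) ((uint32_t)(x) << 16)

/* How the hardware applies a masked write to a 16-bit register. */
static uint16_t apply_masked_write(uint16_t reg, uint32_t val)
{
	uint16_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x0003;

	reg = apply_masked_write(reg, MASKED_BIT_ENABLE(0x0010));
	printf("after enable:  0x%04x\n", reg);	/* 0x0013 */
	reg = apply_masked_write(reg, MASKED_BIT_DISABLE(0x0001));
	printf("after disable: 0x%04x\n", reg);	/* 0x0012 */
	return 0;
}
```
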
61 | 92 | static inline void
62 | | -fw_domain_reset(struct drm_i915_private *i915,
63 | | - const struct intel_uncore_forcewake_domain *d)
| 93 | +fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
64 | 94 | {
65 | 95 | /*
66 | 96 | * We don't really know if the powerwell for the forcewake domain we are
67 | 97 | * trying to reset here does exist at this point (engines could be fused
68 | 98 | * off in ICL+), so no waiting for acks
69 | 99 | */
70 | | - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
| 100 | + /* WaRsClearFWBitsAtReset:bdw,skl */
| 101 | + fw_clear(d, 0xffff);
71 | 102 | }
72 | 103 |
73 | 104 | static inline void
74 | 105 | fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
75 | 106 | {
| 107 | + GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
| 108 | + d->uncore->fw_domains_timer |= d->mask;
76 | 109 | d->wake_count++;
77 | 110 | hrtimer_start_range_ns(&d->timer,
78 | 111 | NSEC_PER_MSEC,
.. | ..
81 | 114 | }
82 | 115 |
83 | 116 | static inline int
84 | | -__wait_for_ack(const struct drm_i915_private *i915,
85 | | - const struct intel_uncore_forcewake_domain *d,
| 117 | +__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
86 | 118 | const u32 ack,
87 | 119 | const u32 value)
88 | 120 | {
89 | | - return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
| 121 | + return wait_for_atomic((fw_ack(d) & ack) == value,
90 | 122 | FORCEWAKE_ACK_TIMEOUT_MS);
91 | 123 | }
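
Aside: `__wait_for_ack()` is a bounded busy-wait on an ack register. A rough user-space model of the `wait_for_atomic()` polling it builds on (`read_ack()` is a hypothetical stand-in for the MMIO read):

```c
/* Poll until (reg & ack) == value or a timeout elapses. */
#include <errno.h>
#include <stdint.h>
#include <time.h>

extern uint32_t read_ack(void);	/* hypothetical MMIO accessor */

static int wait_for_ack_value(uint32_t ack, uint32_t value, int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_ack() & ack) == value)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -ETIMEDOUT;
	}
}
```
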
92 | 124 |
93 | 125 | static inline int
94 | | -wait_ack_clear(const struct drm_i915_private *i915,
95 | | - const struct intel_uncore_forcewake_domain *d,
| 126 | +wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
96 | 127 | const u32 ack)
97 | 128 | {
98 | | - return __wait_for_ack(i915, d, ack, 0);
| 129 | + return __wait_for_ack(d, ack, 0);
99 | 130 | }
100 | 131 |
101 | 132 | static inline int
102 | | -wait_ack_set(const struct drm_i915_private *i915,
103 | | - const struct intel_uncore_forcewake_domain *d,
| 133 | +wait_ack_set(const struct intel_uncore_forcewake_domain *d,
104 | 134 | const u32 ack)
105 | 135 | {
106 | | - return __wait_for_ack(i915, d, ack, ack);
| 136 | + return __wait_for_ack(d, ack, ack);
107 | 137 | }
108 | 138 |
109 | 139 | static inline void
110 | | -fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
111 | | - const struct intel_uncore_forcewake_domain *d)
| 140 | +fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
112 | 141 | {
113 | | - if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
| 142 | + if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
114 | 143 | DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
115 | 144 | intel_uncore_forcewake_domain_to_str(d->id));
| 145 | + add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
| 146 | + }
116 | 147 | }
117 | 148 |
118 | 149 | enum ack_type {
.. | ..
121 | 152 | };
122 | 153 |
123 | 154 | static int
124 | | -fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
125 | | - const struct intel_uncore_forcewake_domain *d,
| 155 | +fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
126 | 156 | const enum ack_type type)
127 | 157 | {
128 | 158 | const u32 ack_bit = FORCEWAKE_KERNEL;
.. | ..
146 | 176 |
147 | 177 | pass = 1;
148 | 178 | do {
149 | | - wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
| 179 | + wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
150 | 180 |
151 | | - __raw_i915_write32(i915, d->reg_set,
152 | | - _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
| 181 | + fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
153 | 182 | /* Give gt some time to relax before the polling frenzy */
154 | 183 | udelay(10 * pass);
155 | | - wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
| 184 | + wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
156 | 185 |
157 | | - ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
| 186 | + ack_detected = (fw_ack(d) & ack_bit) == value;
158 | 187 |
159 | | - __raw_i915_write32(i915, d->reg_set,
160 | | - _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
| 188 | + fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
161 | 189 | } while (!ack_detected && pass++ < 10);
162 | 190 |
163 | 191 | DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
164 | 192 | intel_uncore_forcewake_domain_to_str(d->id),
165 | 193 | type == ACK_SET ? "set" : "clear",
166 | | - __raw_i915_read32(i915, d->reg_ack),
| 194 | + fw_ack(d),
167 | 195 | pass);
168 | 196 |
169 | 197 | return ack_detected ? 0 : -ETIMEDOUT;
170 | 198 | }
171 | 199 |
172 | 200 | static inline void
173 | | -fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
174 | | - const struct intel_uncore_forcewake_domain *d)
| 201 | +fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
175 | 202 | {
176 | | - if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
| 203 | + if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
177 | 204 | return;
178 | 205 |
179 | | - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
180 | | - fw_domain_wait_ack_clear(i915, d);
| 206 | + if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
| 207 | + fw_domain_wait_ack_clear(d);
181 | 208 | }
182 | 209 |
183 | 210 | static inline void
184 | | -fw_domain_get(struct drm_i915_private *i915,
185 | | - const struct intel_uncore_forcewake_domain *d)
| 211 | +fw_domain_get(const struct intel_uncore_forcewake_domain *d)
186 | 212 | {
187 | | - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
| 213 | + fw_set(d, FORCEWAKE_KERNEL);
188 | 214 | }
189 | 215 |
190 | 216 | static inline void
191 | | -fw_domain_wait_ack_set(const struct drm_i915_private *i915,
192 | | - const struct intel_uncore_forcewake_domain *d)
| 217 | +fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
193 | 218 | {
194 | | - if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
| 219 | + if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
195 | 220 | DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
196 | 221 | intel_uncore_forcewake_domain_to_str(d->id));
| 222 | + add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
| 223 | + }
197 | 224 | }
198 | 225 |
199 | 226 | static inline void
200 | | -fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
201 | | - const struct intel_uncore_forcewake_domain *d)
| 227 | +fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
202 | 228 | {
203 | | - if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
| 229 | + if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
204 | 230 | return;
205 | 231 |
206 | | - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
207 | | - fw_domain_wait_ack_set(i915, d);
| 232 | + if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
| 233 | + fw_domain_wait_ack_set(d);
208 | 234 | }
209 | 235 |
210 | 236 | static inline void
211 | | -fw_domain_put(const struct drm_i915_private *i915,
212 | | - const struct intel_uncore_forcewake_domain *d)
| 237 | +fw_domain_put(const struct intel_uncore_forcewake_domain *d)
213 | 238 | {
214 | | - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
| 239 | + fw_clear(d, FORCEWAKE_KERNEL);
215 | 240 | }
216 | 241 |
217 | 242 | static void
218 | | -fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
| 243 | +fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
219 | 244 | {
220 | 245 | struct intel_uncore_forcewake_domain *d;
221 | 246 | unsigned int tmp;
222 | 247 |
223 | | - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
| 248 | + GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
224 | 249 |
225 | | - for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
226 | | - fw_domain_wait_ack_clear(i915, d);
227 | | - fw_domain_get(i915, d);
| 250 | + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
| 251 | + fw_domain_wait_ack_clear(d);
| 252 | + fw_domain_get(d);
228 | 253 | }
229 | 254 |
230 | | - for_each_fw_domain_masked(d, fw_domains, i915, tmp)
231 | | - fw_domain_wait_ack_set(i915, d);
| 255 | + for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
| 256 | + fw_domain_wait_ack_set(d);
232 | 257 |
233 | | - i915->uncore.fw_domains_active |= fw_domains;
| 258 | + uncore->fw_domains_active |= fw_domains;
234 | 259 | }
235 | 260 |
236 | 261 | static void
237 | | -fw_domains_get_with_fallback(struct drm_i915_private *i915,
| 262 | +fw_domains_get_with_fallback(struct intel_uncore *uncore,
238 | 263 | enum forcewake_domains fw_domains)
239 | 264 | {
240 | 265 | struct intel_uncore_forcewake_domain *d;
241 | 266 | unsigned int tmp;
242 | 267 |
243 | | - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
| 268 | + GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
244 | 269 |
245 | | - for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
246 | | - fw_domain_wait_ack_clear_fallback(i915, d);
247 | | - fw_domain_get(i915, d);
| 270 | + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
| 271 | + fw_domain_wait_ack_clear_fallback(d);
| 272 | + fw_domain_get(d);
248 | 273 | }
249 | 274 |
250 | | - for_each_fw_domain_masked(d, fw_domains, i915, tmp)
251 | | - fw_domain_wait_ack_set_fallback(i915, d);
| 275 | + for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
| 276 | + fw_domain_wait_ack_set_fallback(d);
252 | 277 |
253 | | - i915->uncore.fw_domains_active |= fw_domains;
| 278 | + uncore->fw_domains_active |= fw_domains;
254 | 279 | }
255 | 280 |
256 | 281 | static void
257 | | -fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
| 282 | +fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
258 | 283 | {
259 | 284 | struct intel_uncore_forcewake_domain *d;
260 | 285 | unsigned int tmp;
261 | 286 |
262 | | - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
| 287 | + GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
263 | 288 |
264 | | - for_each_fw_domain_masked(d, fw_domains, i915, tmp)
265 | | - fw_domain_put(i915, d);
| 289 | + for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
| 290 | + fw_domain_put(d);
266 | 291 |
267 | | - i915->uncore.fw_domains_active &= ~fw_domains;
| 292 | + uncore->fw_domains_active &= ~fw_domains;
268 | 293 | }
269 | 294 |
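
Aside: the `fw_domains_get()`/`fw_domains_put()` pair walks a bitmask in which each forcewake domain owns one bit; `for_each_fw_domain_masked()` is essentially a set-bit iterator. A self-contained sketch of that iteration (names illustrative):

```c
/* Walk the set bits of a domain mask, one domain per bit. */
#include <stdio.h>

#define MAX_DOMAINS 32

struct fake_domain {
	unsigned int mask;	/* one bit per domain */
	unsigned int wake_count;
};

static struct fake_domain domains[MAX_DOMAINS];

static void domains_get(unsigned int fw_domains)
{
	unsigned int tmp = fw_domains;

	while (tmp) {
		unsigned int bit = tmp & -tmp;	/* lowest set bit */
		int idx = __builtin_ctz(bit);

		domains[idx].wake_count++;	/* wake + wait-for-ack go here */
		tmp &= ~bit;
	}
}

int main(void)
{
	domains_get(0x5);	/* domains 0 and 2 */
	printf("d0=%u d1=%u d2=%u\n", domains[0].wake_count,
	       domains[1].wake_count, domains[2].wake_count);
	return 0;
}
```
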
270 | 295 | static void
271 | | -fw_domains_reset(struct drm_i915_private *i915,
| 296 | +fw_domains_reset(struct intel_uncore *uncore,
272 | 297 | enum forcewake_domains fw_domains)
273 | 298 | {
274 | 299 | struct intel_uncore_forcewake_domain *d;
.. | ..
277 | 302 | if (!fw_domains)
278 | 303 | return;
279 | 304 |
280 | | - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
| 305 | + GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
281 | 306 |
282 | | - for_each_fw_domain_masked(d, fw_domains, i915, tmp)
283 | | - fw_domain_reset(i915, d);
| 307 | + for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
| 308 | + fw_domain_reset(d);
284 | 309 | }
285 | 310 |
286 | | -static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
| 311 | +static inline u32 gt_thread_status(struct intel_uncore *uncore)
287 | 312 | {
288 | | - /* w/a for a sporadic read returning 0 by waiting for the GT
| 313 | + u32 val;
| 314 | +
| 315 | + val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
| 316 | + val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
| 317 | +
| 318 | + return val;
| 319 | +}
| 320 | +
| 321 | +static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
| 322 | +{
| 323 | + /*
| 324 | + * w/a for a sporadic read returning 0 by waiting for the GT
289 | 325 | * thread to wake up.
290 | 326 | */
291 | | - if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
292 | | - GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
293 | | - DRM_ERROR("GT thread status wait timed out\n");
| 327 | + drm_WARN_ONCE(&uncore->i915->drm,
| 328 | + wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
| 329 | + "GT thread status wait timed out\n");
294 | 330 | }
295 | 331 |
296 | | -static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
| 332 | +static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
297 | 333 | enum forcewake_domains fw_domains)
298 | 334 | {
299 | | - fw_domains_get(dev_priv, fw_domains);
| 335 | + fw_domains_get(uncore, fw_domains);
300 | 336 |
301 | 337 | /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
302 | | - __gen6_gt_wait_for_thread_c0(dev_priv);
| 338 | + __gen6_gt_wait_for_thread_c0(uncore);
303 | 339 | }
304 | 340 |
305 | | -static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
| 341 | +static inline u32 fifo_free_entries(struct intel_uncore *uncore)
306 | 342 | {
307 | | - u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
| 343 | + u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
308 | 344 |
309 | 345 | return count & GT_FIFO_FREE_ENTRIES_MASK;
310 | 346 | }
311 | 347 |
312 | | -static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
| 348 | +static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
313 | 349 | {
314 | 350 | u32 n;
315 | 351 |
316 | 352 | /* On VLV, FIFO will be shared by both SW and HW.
317 | 353 | * So, we need to read the FREE_ENTRIES everytime */
318 | | - if (IS_VALLEYVIEW(dev_priv))
319 | | - n = fifo_free_entries(dev_priv);
| 354 | + if (IS_VALLEYVIEW(uncore->i915))
| 355 | + n = fifo_free_entries(uncore);
320 | 356 | else
321 | | - n = dev_priv->uncore.fifo_count;
| 357 | + n = uncore->fifo_count;
322 | 358 |
323 | 359 | if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
324 | | - if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
| 360 | + if (wait_for_atomic((n = fifo_free_entries(uncore)) >
325 | 361 | GT_FIFO_NUM_RESERVED_ENTRIES,
326 | 362 | GT_FIFO_TIMEOUT_MS)) {
327 | | - DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
| 363 | + drm_dbg(&uncore->i915->drm,
| 364 | + "GT_FIFO timeout, entries: %u\n", n);
328 | 365 | return;
329 | 366 | }
330 | 367 | }
331 | 368 |
332 | | - dev_priv->uncore.fifo_count = n - 1;
| 369 | + uncore->fifo_count = n - 1;
333 | 370 | }
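
Aside: `__gen6_gt_wait_for_fifo()` throttles posted writes: software may only claim a FIFO slot while more than a reserved number of entries are free, re-reading the free count until hardware drains. A loose user-space model of that accounting (the constant and `hw_free_entries()` are hypothetical):

```c
/* Claim one FIFO slot, spinning while only reserved entries remain. */
#include <stdbool.h>

#define FIFO_RESERVED	2

extern unsigned int hw_free_entries(void);	/* hypothetical register read */

static unsigned int cached_free;

static bool fifo_claim_slot(bool shared_with_hw)
{
	unsigned int n = shared_with_hw ? hw_free_entries() : cached_free;
	long spins = 1000000;

	while (n <= FIFO_RESERVED && spins--)
		n = hw_free_entries();	/* re-read until a slot frees up */

	if (n <= FIFO_RESERVED)
		return false;		/* timed out, like the drm_dbg path */

	cached_free = n - 1;		/* account for the write we will post */
	return true;
}
```
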
334 | 371 |
335 | 372 | static enum hrtimer_restart
.. | ..
337 | 374 | {
338 | 375 | struct intel_uncore_forcewake_domain *domain =
339 | 376 | container_of(timer, struct intel_uncore_forcewake_domain, timer);
340 | | - struct drm_i915_private *dev_priv =
341 | | - container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
| 377 | + struct intel_uncore *uncore = domain->uncore;
342 | 378 | unsigned long irqflags;
343 | 379 |
344 | | - assert_rpm_device_not_suspended(dev_priv);
| 380 | + assert_rpm_device_not_suspended(uncore->rpm);
345 | 381 |
346 | 382 | if (xchg(&domain->active, false))
347 | 383 | return HRTIMER_RESTART;
348 | 384 |
349 | | - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
350 | | - if (WARN_ON(domain->wake_count == 0))
351 | | - domain->wake_count++;
| 385 | + spin_lock_irqsave(&uncore->lock, irqflags);
352 | 386 |
| 387 | + uncore->fw_domains_timer &= ~domain->mask;
| 388 | +
| 389 | + GEM_BUG_ON(!domain->wake_count);
353 | 390 | if (--domain->wake_count == 0)
354 | | - dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
| 391 | + uncore->funcs.force_wake_put(uncore, domain->mask);
355 | 392 |
356 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
| 393 | + spin_unlock_irqrestore(&uncore->lock, irqflags);
357 | 394 |
358 | 395 | return HRTIMER_NORESTART;
359 | 396 | }
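
Aside: `intel_uncore_fw_release_timer()` implements lazy release: the final put arms an hrtimer rather than dropping the wake immediately, and the callback keeps the domain alive if it was touched in the meantime. A kernel-style sketch of the same pattern (an illustration, not the driver's code):

```c
/* Lazy-release reference: the put arms a short timer; the callback
 * re-checks ->active so a racing user keeps the resource awake. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct lazy_ref {
	struct hrtimer timer;
	bool active;		/* touched since the timer was armed? */
};

static enum hrtimer_restart lazy_release(struct hrtimer *t)
{
	struct lazy_ref *ref = container_of(t, struct lazy_ref, timer);

	if (xchg(&ref->active, false))
		return HRTIMER_RESTART;	/* used recently: keep it awake */

	/* the real release (force_wake_put) would happen here */
	return HRTIMER_NORESTART;
}

static void lazy_ref_init(struct lazy_ref *ref)
{
	hrtimer_init(&ref->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ref->timer.function = lazy_release;
}

static void lazy_ref_put(struct lazy_ref *ref)
{
	/* mirror fw_domain_arm_timer(): ~1ms grace with 1ms slack */
	hrtimer_start_range_ns(&ref->timer, ms_to_ktime(1),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}
```
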
360 | 397 |
361 | 398 | /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
362 | 399 | static unsigned int
363 | | -intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
| 400 | +intel_uncore_forcewake_reset(struct intel_uncore *uncore)
364 | 401 | {
365 | 402 | unsigned long irqflags;
366 | 403 | struct intel_uncore_forcewake_domain *domain;
.. | ..
378 | 415 |
379 | 416 | active_domains = 0;
380 | 417 |
381 | | - for_each_fw_domain(domain, dev_priv, tmp) {
| 418 | + for_each_fw_domain(domain, uncore, tmp) {
382 | 419 | smp_store_mb(domain->active, false);
383 | 420 | if (hrtimer_cancel(&domain->timer) == 0)
384 | 421 | continue;
.. | ..
386 | 423 | intel_uncore_fw_release_timer(&domain->timer);
387 | 424 | }
388 | 425 |
389 | | - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
| 426 | + spin_lock_irqsave(&uncore->lock, irqflags);
390 | 427 |
391 | | - for_each_fw_domain(domain, dev_priv, tmp) {
| 428 | + for_each_fw_domain(domain, uncore, tmp) {
392 | 429 | if (hrtimer_active(&domain->timer))
393 | 430 | active_domains |= domain->mask;
394 | 431 | }
.. | ..
397 | 434 | break;
398 | 435 |
399 | 436 | if (--retry_count == 0) {
400 | | - DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
| 437 | + drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
401 | 438 | break;
402 | 439 | }
403 | 440 |
404 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
| 441 | + spin_unlock_irqrestore(&uncore->lock, irqflags);
405 | 442 | cond_resched();
406 | 443 | }
407 | 444 |
408 | | - WARN_ON(active_domains);
| 445 | + drm_WARN_ON(&uncore->i915->drm, active_domains);
409 | 446 |
410 | | - fw = dev_priv->uncore.fw_domains_active;
| 447 | + fw = uncore->fw_domains_active;
411 | 448 | if (fw)
412 | | - dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
| 449 | + uncore->funcs.force_wake_put(uncore, fw);
413 | 450 |
414 | | - fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
415 | | - assert_forcewakes_inactive(dev_priv);
| 451 | + fw_domains_reset(uncore, uncore->fw_domains);
| 452 | + assert_forcewakes_inactive(uncore);
416 | 453 |
417 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
| 454 | + spin_unlock_irqrestore(&uncore->lock, irqflags);
418 | 455 |
419 | 456 | return fw; /* track the lost user forcewake domains */
420 | 457 | }
421 | 458 |
422 | | -static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
423 | | -{
424 | | - const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
425 | | - const unsigned int sets[4] = { 1, 1, 2, 2 };
426 | | - const u32 cap = dev_priv->edram_cap;
427 | | -
428 | | - return EDRAM_NUM_BANKS(cap) *
429 | | - ways[EDRAM_WAYS_IDX(cap)] *
430 | | - sets[EDRAM_SETS_IDX(cap)] *
431 | | - 1024 * 1024;
432 | | -}
433 | | -
434 | | -u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
435 | | -{
436 | | - if (!HAS_EDRAM(dev_priv))
437 | | - return 0;
438 | | -
439 | | - /* The needed capability bits for size calculation
440 | | - * are not there with pre gen9 so return 128MB always.
441 | | - */
442 | | - if (INTEL_GEN(dev_priv) < 9)
443 | | - return 128 * 1024 * 1024;
444 | | -
445 | | - return gen9_edram_size(dev_priv);
446 | | -}
447 | | -
448 | | -static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
449 | | -{
450 | | - if (IS_HASWELL(dev_priv) ||
451 | | - IS_BROADWELL(dev_priv) ||
452 | | - INTEL_GEN(dev_priv) >= 9) {
453 | | - dev_priv->edram_cap = __raw_i915_read32(dev_priv,
454 | | - HSW_EDRAM_CAP);
455 | | -
456 | | - /* NB: We can't write IDICR yet because we do not have gt funcs
457 | | - * set up */
458 | | - } else {
459 | | - dev_priv->edram_cap = 0;
460 | | - }
461 | | -
462 | | - if (HAS_EDRAM(dev_priv))
463 | | - DRM_INFO("Found %lluMB of eDRAM\n",
464 | | - intel_uncore_edram_size(dev_priv) / (1024 * 1024));
465 | | -}
466 | | -
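
Aside: the removed `gen9_edram_size()` decoded the eDRAM capability register as banks × ways × sets × 1 MiB. A worked stand-alone model of that calculation (the bit positions of the fields here are assumptions for illustration, not the hardware's layout):

```c
/* Model of the banks * ways * sets * 1MiB eDRAM size decode. */
#include <stdint.h>
#include <stdio.h>

static uint64_t edram_size(uint32_t cap)
{
	static const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned int sets[4] = { 1, 1, 2, 2 };
	unsigned int num_banks = (cap >> 1) & 0xf;	/* assumed field layout */
	unsigned int ways_idx = (cap >> 5) & 0x7;
	unsigned int sets_idx = (cap >> 8) & 0x3;

	return (uint64_t)num_banks * ways[ways_idx] * sets[sets_idx] *
	       1024 * 1024;
}

int main(void)
{
	/* e.g. 8 banks, ways index 1 (8 ways), sets index 2 (2 sets)
	 * -> 8 * 8 * 2 MiB = 128 MiB */
	uint32_t cap = (8 << 1) | (1 << 5) | (2 << 8);

	printf("eDRAM: %lluMB\n",
	       (unsigned long long)(edram_size(cap) >> 20));
	return 0;
}
```
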
467 | 459 | static bool
468 | | -fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
| 460 | +fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
469 | 461 | {
470 | 462 | u32 dbg;
471 | 463 |
472 | | - dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
| 464 | + dbg = __raw_uncore_read32(uncore, FPGA_DBG);
473 | 465 | if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
474 | 466 | return false;
475 | 467 |
476 | | - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
| 468 | + __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
477 | 469 |
478 | 470 | return true;
479 | 471 | }
480 | 472 |
481 | 473 | static bool
482 | | -vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
| 474 | +vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
483 | 475 | {
484 | 476 | u32 cer;
485 | 477 |
486 | | - cer = __raw_i915_read32(dev_priv, CLAIM_ER);
| 478 | + cer = __raw_uncore_read32(uncore, CLAIM_ER);
487 | 479 | if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
488 | 480 | return false;
489 | 481 |
490 | | - __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
| 482 | + __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
491 | 483 |
492 | 484 | return true;
493 | 485 | }
494 | 486 |
495 | 487 | static bool
496 | | -gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
| 488 | +gen6_check_for_fifo_debug(struct intel_uncore *uncore)
497 | 489 | {
498 | 490 | u32 fifodbg;
499 | 491 |
500 | | - fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
| 492 | + fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
501 | 493 |
502 | 494 | if (unlikely(fifodbg)) {
503 | | - DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
504 | | - __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
| 495 | + drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
| 496 | + __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
505 | 497 | }
506 | 498 |
507 | 499 | return fifodbg;
508 | 500 | }
509 | 501 |
510 | 502 | static bool
511 | | -check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
| 503 | +check_for_unclaimed_mmio(struct intel_uncore *uncore)
512 | 504 | {
513 | 505 | bool ret = false;
514 | 506 |
515 | | - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
516 | | - ret |= fpga_check_for_unclaimed_mmio(dev_priv);
| 507 | + lockdep_assert_held(&uncore->debug->lock);
517 | 508 |
518 | | - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
519 | | - ret |= vlv_check_for_unclaimed_mmio(dev_priv);
| 509 | + if (uncore->debug->suspend_count)
| 510 | + return false;
520 | 511 |
521 | | - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
522 | | - ret |= gen6_check_for_fifo_debug(dev_priv);
| 512 | + if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
| 513 | + ret |= fpga_check_for_unclaimed_mmio(uncore);
| 514 | +
| 515 | + if (intel_uncore_has_dbg_unclaimed(uncore))
| 516 | + ret |= vlv_check_for_unclaimed_mmio(uncore);
| 517 | +
| 518 | + if (intel_uncore_has_fifo(uncore))
| 519 | + ret |= gen6_check_for_fifo_debug(uncore);
523 | 520 |
524 | 521 | return ret;
525 | 522 | }
526 | 523 |
527 | | -static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
528 | | - unsigned int restore_forcewake)
| 524 | +static void forcewake_early_sanitize(struct intel_uncore *uncore,
| 525 | + unsigned int restore_forcewake)
529 | 526 | {
530 | | - /* clear out unclaimed reg detection bit */
531 | | - if (check_for_unclaimed_mmio(dev_priv))
532 | | - DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
| 527 | + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
533 | 528 |
534 | 529 | /* WaDisableShadowRegForCpd:chv */
535 | | - if (IS_CHERRYVIEW(dev_priv)) {
536 | | - __raw_i915_write32(dev_priv, GTFIFOCTL,
537 | | - __raw_i915_read32(dev_priv, GTFIFOCTL) |
538 | | - GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
539 | | - GT_FIFO_CTL_RC6_POLICY_STALL);
| 530 | + if (IS_CHERRYVIEW(uncore->i915)) {
| 531 | + __raw_uncore_write32(uncore, GTFIFOCTL,
| 532 | + __raw_uncore_read32(uncore, GTFIFOCTL) |
| 533 | + GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
| 534 | + GT_FIFO_CTL_RC6_POLICY_STALL);
540 | 535 | }
541 | 536 |
542 | 537 | iosf_mbi_punit_acquire();
543 | | - intel_uncore_forcewake_reset(dev_priv);
| 538 | + intel_uncore_forcewake_reset(uncore);
544 | 539 | if (restore_forcewake) {
545 | | - spin_lock_irq(&dev_priv->uncore.lock);
546 | | - dev_priv->uncore.funcs.force_wake_get(dev_priv,
547 | | - restore_forcewake);
| 540 | + spin_lock_irq(&uncore->lock);
| 541 | + uncore->funcs.force_wake_get(uncore, restore_forcewake);
548 | 542 |
549 | | - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
550 | | - dev_priv->uncore.fifo_count =
551 | | - fifo_free_entries(dev_priv);
552 | | - spin_unlock_irq(&dev_priv->uncore.lock);
| 543 | + if (intel_uncore_has_fifo(uncore))
| 544 | + uncore->fifo_count = fifo_free_entries(uncore);
| 545 | + spin_unlock_irq(&uncore->lock);
553 | 546 | }
554 | 547 | iosf_mbi_punit_release();
555 | 548 | }
556 | 549 |
557 | | -void intel_uncore_suspend(struct drm_i915_private *dev_priv)
| 550 | +void intel_uncore_suspend(struct intel_uncore *uncore)
558 | 551 | {
| 552 | + if (!intel_uncore_has_forcewake(uncore))
| 553 | + return;
| 554 | +
559 | 555 | iosf_mbi_punit_acquire();
560 | 556 | iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
561 | | - &dev_priv->uncore.pmic_bus_access_nb);
562 | | - dev_priv->uncore.fw_domains_saved =
563 | | - intel_uncore_forcewake_reset(dev_priv);
| 557 | + &uncore->pmic_bus_access_nb);
| 558 | + uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
564 | 559 | iosf_mbi_punit_release();
565 | 560 | }
566 | 561 |
567 | | -void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
| 562 | +void intel_uncore_resume_early(struct intel_uncore *uncore)
568 | 563 | {
569 | 564 | unsigned int restore_forcewake;
570 | 565 |
571 | | - restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
572 | | - __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
| 566 | + if (intel_uncore_unclaimed_mmio(uncore))
| 567 | + drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
573 | 568 |
574 | | - iosf_mbi_register_pmic_bus_access_notifier(
575 | | - &dev_priv->uncore.pmic_bus_access_nb);
576 | | - i915_check_and_clear_faults(dev_priv);
| 569 | + if (!intel_uncore_has_forcewake(uncore))
| 570 | + return;
| 571 | +
| 572 | + restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
| 573 | + forcewake_early_sanitize(uncore, restore_forcewake);
| 574 | +
| 575 | + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
577 | 576 | }
578 | 577 |
579 | | -void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
| 578 | +void intel_uncore_runtime_resume(struct intel_uncore *uncore)
580 | 579 | {
581 | | - iosf_mbi_register_pmic_bus_access_notifier(
582 | | - &dev_priv->uncore.pmic_bus_access_nb);
| 580 | + if (!intel_uncore_has_forcewake(uncore))
| 581 | + return;
| 582 | +
| 583 | + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
583 | 584 | }
584 | 585 |
585 | | -void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
586 | | -{
587 | | - /* BIOS often leaves RC6 enabled, but disable it for hw init */
588 | | - intel_sanitize_gt_powersave(dev_priv);
589 | | -}
590 | | -
591 | | -static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
| 586 | +static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
592 | 587 | enum forcewake_domains fw_domains)
593 | 588 | {
594 | 589 | struct intel_uncore_forcewake_domain *domain;
595 | 590 | unsigned int tmp;
596 | 591 |
597 | | - fw_domains &= dev_priv->uncore.fw_domains;
| 592 | + fw_domains &= uncore->fw_domains;
598 | 593 |
599 | | - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
| 594 | + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
600 | 595 | if (domain->wake_count++) {
601 | 596 | fw_domains &= ~domain->mask;
602 | 597 | domain->active = true;
.. | ..
604 | 599 | }
605 | 600 |
606 | 601 | if (fw_domains)
607 | | - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
| 602 | + uncore->funcs.force_wake_get(uncore, fw_domains);
608 | 603 | }
609 | 604 |
610 | 605 | /**
611 | 606 | * intel_uncore_forcewake_get - grab forcewake domain references
612 | | - * @dev_priv: i915 device instance
| 607 | + * @uncore: the intel_uncore structure
613 | 608 | * @fw_domains: forcewake domains to get reference on
614 | 609 | *
615 | 610 | * This function can be used get GT's forcewake domain references.
.. | ..
620 | 615 | * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
621 | 616 | * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
622 | 617 | */
623 | | -void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
| 618 | +void intel_uncore_forcewake_get(struct intel_uncore *uncore,
624 | 619 | enum forcewake_domains fw_domains)
625 | 620 | {
626 | 621 | unsigned long irqflags;
627 | 622 |
628 | | - if (!dev_priv->uncore.funcs.force_wake_get)
| 623 | + if (!uncore->funcs.force_wake_get)
629 | 624 | return;
630 | 625 |
631 | | - assert_rpm_wakelock_held(dev_priv);
| 626 | + assert_rpm_wakelock_held(uncore->rpm);
632 | 627 |
633 | | - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
634 | | - __intel_uncore_forcewake_get(dev_priv, fw_domains);
635 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
| 628 | + spin_lock_irqsave(&uncore->lock, irqflags);
| 629 | + __intel_uncore_forcewake_get(uncore, fw_domains);
| 630 | + spin_unlock_irqrestore(&uncore->lock, irqflags);
636 | 631 | }
637 | 632 |
638 | 633 | /**
639 | 634 | * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
640 | | - * @dev_priv: i915 device instance
| 635 | + * @uncore: the intel_uncore structure
641 | 636 | *
642 | 637 | * This function is a wrapper around intel_uncore_forcewake_get() to acquire
643 | 638 | * the GT powerwell and in the process disable our debugging for the
644 | 639 | * duration of userspace's bypass.
645 | 640 | */
646 | | -void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
| 641 | +void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
647 | 642 | {
648 | | - spin_lock_irq(&dev_priv->uncore.lock);
649 | | - if (!dev_priv->uncore.user_forcewake.count++) {
650 | | - intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
651 | | -
652 | | - /* Save and disable mmio debugging for the user bypass */
653 | | - dev_priv->uncore.user_forcewake.saved_mmio_check =
654 | | - dev_priv->uncore.unclaimed_mmio_check;
655 | | - dev_priv->uncore.user_forcewake.saved_mmio_debug =
656 | | - i915_modparams.mmio_debug;
657 | | -
658 | | - dev_priv->uncore.unclaimed_mmio_check = 0;
659 | | - i915_modparams.mmio_debug = 0;
| 643 | + spin_lock_irq(&uncore->lock);
| 644 | + if (!uncore->user_forcewake_count++) {
| 645 | + intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
| 646 | + spin_lock(&uncore->debug->lock);
| 647 | + mmio_debug_suspend(uncore->debug);
| 648 | + spin_unlock(&uncore->debug->lock);
660 | 649 | }
661 | | - spin_unlock_irq(&dev_priv->uncore.lock);
| 650 | + spin_unlock_irq(&uncore->lock);
662 | 651 | }
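
Aside: `intel_uncore_forcewake_user_get()` nests two spinlocks: `uncore->lock` is taken first, then `uncore->debug->lock` inside it, and the put path below mirrors the same order so lockdep sees a single consistent ordering. A kernel-style sketch of that first-user/nested-lock shape (illustrative only):

```c
/* First user does the heavy setup under a nested lock; last user undoes it. */
#include <linux/spinlock.h>

struct outer { spinlock_t lock; unsigned int users; };
struct inner { spinlock_t lock; unsigned int suspend_count; };

static void first_user_enter(struct outer *o, struct inner *i)
{
	spin_lock_irq(&o->lock);
	if (!o->users++) {		/* first user suspends debugging */
		spin_lock(&i->lock);	/* nested lock, always this order */
		i->suspend_count++;
		spin_unlock(&i->lock);
	}
	spin_unlock_irq(&o->lock);
}

static void last_user_exit(struct outer *o, struct inner *i)
{
	spin_lock_irq(&o->lock);
	if (!--o->users) {		/* last user resumes it */
		spin_lock(&i->lock);
		i->suspend_count--;
		spin_unlock(&i->lock);
	}
	spin_unlock_irq(&o->lock);
}
```
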
663 | 652 |
664 | 653 | /**
665 | 654 | * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
666 | | - * @dev_priv: i915 device instance
| 655 | + * @uncore: the intel_uncore structure
667 | 656 | *
668 | 657 | * This function complements intel_uncore_forcewake_user_get() and releases
669 | 658 | * the GT powerwell taken on behalf of the userspace bypass.
670 | 659 | */
671 | | -void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
| 660 | +void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
672 | 661 | {
673 | | - spin_lock_irq(&dev_priv->uncore.lock);
674 | | - if (!--dev_priv->uncore.user_forcewake.count) {
675 | | - if (intel_uncore_unclaimed_mmio(dev_priv))
676 | | - dev_info(dev_priv->drm.dev,
| 662 | + spin_lock_irq(&uncore->lock);
| 663 | + if (!--uncore->user_forcewake_count) {
| 664 | + spin_lock(&uncore->debug->lock);
| 665 | + mmio_debug_resume(uncore->debug);
| 666 | +
| 667 | + if (check_for_unclaimed_mmio(uncore))
| 668 | + drm_info(&uncore->i915->drm,
677 | 669 | "Invalid mmio detected during user access\n");
| 670 | + spin_unlock(&uncore->debug->lock);
678 | 671 |
679 | | - dev_priv->uncore.unclaimed_mmio_check =
680 | | - dev_priv->uncore.user_forcewake.saved_mmio_check;
681 | | - i915_modparams.mmio_debug =
682 | | - dev_priv->uncore.user_forcewake.saved_mmio_debug;
683 | | -
684 | | - intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
| 672 | + intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
685 | 673 | }
686 | | - spin_unlock_irq(&dev_priv->uncore.lock);
| 674 | + spin_unlock_irq(&uncore->lock);
687 | 675 | }
688 | 676 |
689 | 677 | /**
690 | 678 | * intel_uncore_forcewake_get__locked - grab forcewake domain references
691 | | - * @dev_priv: i915 device instance
| 679 | + * @uncore: the intel_uncore structure
692 | 680 | * @fw_domains: forcewake domains to get reference on
693 | 681 | *
694 | 682 | * See intel_uncore_forcewake_get(). This variant places the onus
695 | 683 | * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
696 | 684 | */
697 | | -void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
| 685 | +void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
698 | 686 | enum forcewake_domains fw_domains)
699 | 687 | {
700 | | - lockdep_assert_held(&dev_priv->uncore.lock);
| 688 | + lockdep_assert_held(&uncore->lock);
701 | 689 |
702 | | - if (!dev_priv->uncore.funcs.force_wake_get)
| 690 | + if (!uncore->funcs.force_wake_get)
703 | 691 | return;
704 | 692 |
705 | | - __intel_uncore_forcewake_get(dev_priv, fw_domains);
| 693 | + __intel_uncore_forcewake_get(uncore, fw_domains);
706 | 694 | }
707 | 695 |
708 | | -static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
709 | | - enum forcewake_domains fw_domains)
| 696 | +static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
| 697 | + enum forcewake_domains fw_domains,
| 698 | + bool delayed)
710 | 699 | {
711 | 700 | struct intel_uncore_forcewake_domain *domain;
712 | 701 | unsigned int tmp;
713 | 702 |
714 | | - fw_domains &= dev_priv->uncore.fw_domains;
| 703 | + fw_domains &= uncore->fw_domains;
715 | 704 |
716 | | - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
717 | | - if (WARN_ON(domain->wake_count == 0))
718 | | - continue;
| 705 | + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
| 706 | + GEM_BUG_ON(!domain->wake_count);
719 | 707 |
720 | 708 | if (--domain->wake_count) {
721 | 709 | domain->active = true;
722 | 710 | continue;
723 | 711 | }
724 | 712 |
725 | | - fw_domain_arm_timer(domain);
| 713 | + if (delayed &&
| 714 | + !(domain->uncore->fw_domains_timer & domain->mask))
| 715 | + fw_domain_arm_timer(domain);
| 716 | + else
| 717 | + uncore->funcs.force_wake_put(uncore, domain->mask);
726 | 718 | }
727 | 719 | }
728 | 720 |
729 | 721 | /**
730 | 722 | * intel_uncore_forcewake_put - release a forcewake domain reference
731 | | - * @dev_priv: i915 device instance
| 723 | + * @uncore: the intel_uncore structure
732 | 724 | * @fw_domains: forcewake domains to put references
733 | 725 | *
734 | 726 | * This function drops the device-level forcewakes for specified
735 | 727 | * domains obtained by intel_uncore_forcewake_get().
736 | 728 | */
737 | | -void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
| 729 | +void intel_uncore_forcewake_put(struct intel_uncore *uncore,
738 | 730 | enum forcewake_domains fw_domains)
739 | 731 | {
740 | 732 | unsigned long irqflags;
741 | 733 |
742 | | - if (!dev_priv->uncore.funcs.force_wake_put)
| 734 | + if (!uncore->funcs.force_wake_put)
743 | 735 | return;
744 | 736 |
745 | | - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
746 | | - __intel_uncore_forcewake_put(dev_priv, fw_domains);
747 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
| 737 | + spin_lock_irqsave(&uncore->lock, irqflags);
| 738 | + __intel_uncore_forcewake_put(uncore, fw_domains, false);
| 739 | + spin_unlock_irqrestore(&uncore->lock, irqflags);
| 740 | +}
| 741 | +
| 742 | +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
| 743 | + enum forcewake_domains fw_domains)
| 744 | +{
| 745 | + unsigned long irqflags;
| 746 | +
| 747 | + if (!uncore->funcs.force_wake_put)
| 748 | + return;
| 749 | +
| 750 | + spin_lock_irqsave(&uncore->lock, irqflags);
| 751 | + __intel_uncore_forcewake_put(uncore, fw_domains, true);
| 752 | + spin_unlock_irqrestore(&uncore->lock, irqflags);
| 753 | +}
| 754 | +
| 755 | +/**
| 756 | + * intel_uncore_forcewake_flush - flush the delayed release
| 757 | + * @uncore: the intel_uncore structure
| 758 | + * @fw_domains: forcewake domains to flush
| 759 | + */
| 760 | +void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
| 761 | + enum forcewake_domains fw_domains)
| 762 | +{
| 763 | + struct intel_uncore_forcewake_domain *domain;
| 764 | + unsigned int tmp;
| 765 | +
| 766 | + if (!uncore->funcs.force_wake_put)
| 767 | + return;
| 768 | +
| 769 | + fw_domains &= uncore->fw_domains;
| 770 | + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
| 771 | + WRITE_ONCE(domain->active, false);
| 772 | + if (hrtimer_cancel(&domain->timer))
| 773 | + intel_uncore_fw_release_timer(&domain->timer);
| 774 | + }
748 | 775 | }
749 | 776 |
750 | 777 | /**
751 | 778 | * intel_uncore_forcewake_put__locked - grab forcewake domain references
752 | | - * @dev_priv: i915 device instance
| 779 | + * @uncore: the intel_uncore structure
753 | 780 | * @fw_domains: forcewake domains to get reference on
754 | 781 | *
755 | 782 | * See intel_uncore_forcewake_put(). This variant places the onus
756 | 783 | * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
757 | 784 | */
758 | | -void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
| 785 | +void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
759 | 786 | enum forcewake_domains fw_domains)
760 | 787 | {
761 | | - lockdep_assert_held(&dev_priv->uncore.lock);
| 788 | + lockdep_assert_held(&uncore->lock);
762 | 789 |
763 | | - if (!dev_priv->uncore.funcs.force_wake_put)
| 790 | + if (!uncore->funcs.force_wake_put)
764 | 791 | return;
765 | 792 |
766 | | - __intel_uncore_forcewake_put(dev_priv, fw_domains);
| 793 | + __intel_uncore_forcewake_put(uncore, fw_domains, false);
767 | 794 | }
768 | 795 |
769 | | -void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
| 796 | +void assert_forcewakes_inactive(struct intel_uncore *uncore)
770 | 797 | {
771 | | - if (!dev_priv->uncore.funcs.force_wake_get)
| 798 | + if (!uncore->funcs.force_wake_get)
772 | 799 | return;
773 | 800 |
774 | | - WARN(dev_priv->uncore.fw_domains_active,
775 | | - "Expected all fw_domains to be inactive, but %08x are still on\n",
776 | | - dev_priv->uncore.fw_domains_active);
| 801 | + drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
| 802 | + "Expected all fw_domains to be inactive, but %08x are still on\n",
| 803 | + uncore->fw_domains_active);
777 | 804 | }
778 | 805 |
779 | | -void assert_forcewakes_active(struct drm_i915_private *dev_priv,
| 806 | +void assert_forcewakes_active(struct intel_uncore *uncore,
780 | 807 | enum forcewake_domains fw_domains)
781 | 808 | {
782 | | - if (!dev_priv->uncore.funcs.force_wake_get)
| 809 | + struct intel_uncore_forcewake_domain *domain;
| 810 | + unsigned int tmp;
| 811 | +
| 812 | + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
783 | 813 | return;
784 | 814 |
785 | | - assert_rpm_wakelock_held(dev_priv);
| 815 | + if (!uncore->funcs.force_wake_get)
| 816 | + return;
786 | 817 |
787 | | - fw_domains &= dev_priv->uncore.fw_domains;
788 | | - WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
789 | | - "Expected %08x fw_domains to be active, but %08x are off\n",
790 | | - fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
| 818 | + spin_lock_irq(&uncore->lock);
| 819 | +
| 820 | + assert_rpm_wakelock_held(uncore->rpm);
| 821 | +
| 822 | + fw_domains &= uncore->fw_domains;
| 823 | + drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
| 824 | + "Expected %08x fw_domains to be active, but %08x are off\n",
| 825 | + fw_domains, fw_domains & ~uncore->fw_domains_active);
| 826 | +
| 827 | + /*
| 828 | + * Check that the caller has an explicit wakeref and we don't mistake
| 829 | + * it for the auto wakeref.
| 830 | + */
| 831 | + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
| 832 | + unsigned int actual = READ_ONCE(domain->wake_count);
| 833 | + unsigned int expect = 1;
| 834 | +
| 835 | + if (uncore->fw_domains_timer & domain->mask)
| 836 | + expect++; /* pending automatic release */
| 837 | +
| 838 | + if (drm_WARN(&uncore->i915->drm, actual < expect,
| 839 | + "Expected domain %d to be held awake by caller, count=%d\n",
| 840 | + domain->id, actual))
| 841 | + break;
| 842 | + }
| 843 | +
| 844 | + spin_unlock_irq(&uncore->lock);
791 | 845 | }
792 | 846 |
793 | 847 | /* We give fast paths for the really cool registers */
794 | 848 | #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
795 | 849 |
796 | | -#define GEN11_NEEDS_FORCE_WAKE(reg) \
797 | | - ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
798 | | -
799 | | -#define __gen6_reg_read_fw_domains(offset) \
| 850 | +#define __gen6_reg_read_fw_domains(uncore, offset) \
800 | 851 | ({ \
801 | 852 | enum forcewake_domains __fwd; \
802 | 853 | if (NEEDS_FORCE_WAKE(offset)) \
.. | ..
836 | 887 | })
837 | 888 |
838 | 889 | static enum forcewake_domains
839 | | -find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
| 890 | +find_fw_domain(struct intel_uncore *uncore, u32 offset)
840 | 891 | {
841 | 892 | const struct intel_forcewake_range *entry;
842 | 893 |
843 | 894 | entry = BSEARCH(offset,
844 | | - dev_priv->uncore.fw_domains_table,
845 | | - dev_priv->uncore.fw_domains_table_entries,
| 895 | + uncore->fw_domains_table,
| 896 | + uncore->fw_domains_table_entries,
846 | 897 | fw_range_cmp);
847 | 898 |
848 | 899 | if (!entry)
.. | ..
854 | 905 | * translate it here to the list of available domains.
855 | 906 | */
856 | 907 | if (entry->domains == FORCEWAKE_ALL)
857 | | - return dev_priv->uncore.fw_domains;
| 908 | + return uncore->fw_domains;
858 | 909 |
859 | | - WARN(entry->domains & ~dev_priv->uncore.fw_domains,
860 | | - "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
861 | | - entry->domains & ~dev_priv->uncore.fw_domains, offset);
| 910 | + drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
| 911 | + "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
| 912 | + entry->domains & ~uncore->fw_domains, offset);
862 | 913 |
863 | 914 | return entry->domains;
864 | 915 | }
865 | 916 |
866 | 917 | #define GEN_FW_RANGE(s, e, d) \
867 | 918 | { .start = (s), .end = (e), .domains = (d) }
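
Aside: `find_fw_domain()` binary-searches a table of [start, end] ranges sorted by offset. A user-space model of the `BSEARCH()`/`fw_range_cmp()` pairing using the C library's `bsearch()` (domain values illustrative):

```c
/* Look up which range, if any, contains a register offset. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fw_range { uint32_t start, end, domains; };

static const struct fw_range table[] = {
	{ 0x0000, 0x1fff, 0x0 },
	{ 0x2000, 0x26ff, 0x1 },	/* e.g. render */
	{ 0x2700, 0x2fff, 0x2 },	/* e.g. blitter */
};

static int range_cmp(const void *key, const void *elt)
{
	uint32_t offset = *(const uint32_t *)key;
	const struct fw_range *r = elt;

	if (offset < r->start)
		return -1;
	if (offset > r->end)
		return 1;
	return 0;	/* offset falls inside this range */
}

int main(void)
{
	uint32_t offset = 0x2740;
	const struct fw_range *entry =
		bsearch(&offset, table, sizeof(table) / sizeof(table[0]),
			sizeof(table[0]), range_cmp);

	printf("0x%x -> domains 0x%x\n", offset, entry ? entry->domains : 0);
	return 0;
}
```
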
868 | | -
869 | | -#define HAS_FWTABLE(dev_priv) \
870 | | - (INTEL_GEN(dev_priv) >= 9 || \
871 | | - IS_CHERRYVIEW(dev_priv) || \
872 | | - IS_VALLEYVIEW(dev_priv))
873 | 919 |
874 | 920 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
875 | 921 | static const struct intel_forcewake_range __vlv_fw_ranges[] = {
.. | ..
882 | 928 | GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
883 | 929 | };
884 | 930 |
885 | | -#define __fwtable_reg_read_fw_domains(offset) \
| 931 | +#define __fwtable_reg_read_fw_domains(uncore, offset) \
886 | 932 | ({ \
887 | 933 | enum forcewake_domains __fwd = 0; \
888 | 934 | if (NEEDS_FORCE_WAKE((offset))) \
889 | | - __fwd = find_fw_domain(dev_priv, offset); \
| 935 | + __fwd = find_fw_domain(uncore, offset); \
890 | 936 | __fwd; \
891 | 937 | })
892 | 938 |
893 | | -#define __gen11_fwtable_reg_read_fw_domains(offset) \
894 | | -({ \
895 | | - enum forcewake_domains __fwd = 0; \
896 | | - if (GEN11_NEEDS_FORCE_WAKE((offset))) \
897 | | - __fwd = find_fw_domain(dev_priv, offset); \
898 | | - __fwd; \
899 | | -})
| 939 | +#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
| 940 | + find_fw_domain(uncore, offset)
| 941 | +
| 942 | +#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
| 943 | + find_fw_domain(uncore, offset)
900 | 944 |
901 | 945 | /* *Must* be sorted by offset! See intel_shadow_table_check(). */
902 | 946 | static const i915_reg_t gen8_shadowed_regs[] = {
.. | ..
910 | 954 | };
911 | 955 |
912 | 956 | static const i915_reg_t gen11_shadowed_regs[] = {
| 957 | + RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
| 958 | + GEN6_RPNSWREQ, /* 0xA008 */
| 959 | + GEN6_RC_VIDEO_FREQ, /* 0xA00C */
| 960 | + RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
| 961 | + RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
| 962 | + RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
| 963 | + RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
| 964 | + RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
| 965 | + RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
| 966 | + RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
| 967 | + /* TODO: Other registers are not yet used */
| 968 | +};
| 969 | +
| 970 | +static const i915_reg_t gen12_shadowed_regs[] = {
913 | 971 | RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
914 | 972 | GEN6_RPNSWREQ, /* 0xA008 */
915 | 973 | GEN6_RC_VIDEO_FREQ, /* 0xA00C */
.. | ..
945 | 1003 |
946 | 1004 | __is_genX_shadowed(8)
947 | 1005 | __is_genX_shadowed(11)
| 1006 | +__is_genX_shadowed(12)
948 | 1007 |
949 | | -#define __gen8_reg_write_fw_domains(offset) \
| 1008 | +static enum forcewake_domains
| 1009 | +gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
| 1010 | +{
| 1011 | + return FORCEWAKE_RENDER;
| 1012 | +}
| 1013 | +
| 1014 | +#define __gen8_reg_write_fw_domains(uncore, offset) \
950 | 1015 | ({ \
951 | 1016 | enum forcewake_domains __fwd; \
952 | 1017 | if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
.. | ..
976 | 1041 | GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
977 | 1042 | };
978 | 1043 |
979 | | -#define __fwtable_reg_write_fw_domains(offset) \
| 1044 | +#define __fwtable_reg_write_fw_domains(uncore, offset) \
980 | 1045 | ({ \
981 | 1046 | enum forcewake_domains __fwd = 0; \
982 | 1047 | if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
983 | | - __fwd = find_fw_domain(dev_priv, offset); \
| 1048 | + __fwd = find_fw_domain(uncore, offset); \
984 | 1049 | __fwd; \
985 | 1050 | })
986 | 1051 |
987 | | -#define __gen11_fwtable_reg_write_fw_domains(offset) \
| 1052 | +#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
988 | 1053 | ({ \
989 | 1054 | enum forcewake_domains __fwd = 0; \
990 | | - if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
991 | | - __fwd = find_fw_domain(dev_priv, offset); \
| 1055 | + const u32 __offset = (offset); \
| 1056 | + if (!is_gen11_shadowed(__offset)) \
| 1057 | + __fwd = find_fw_domain(uncore, __offset); \
| 1058 | + __fwd; \
| 1059 | +})
| 1060 | +
| 1061 | +#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
| 1062 | +({ \
| 1063 | + enum forcewake_domains __fwd = 0; \
| 1064 | + const u32 __offset = (offset); \
| 1065 | + if (!is_gen12_shadowed(__offset)) \
| 1066 | + __fwd = find_fw_domain(uncore, __offset); \
992 | 1067 | __fwd; \
993 | 1068 | })
994 | 1069 |
.. | ..
1030 | 1105 |
1031 | 1106 | /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1032 | 1107 | static const struct intel_forcewake_range __gen11_fw_ranges[] = {
| 1108 | + GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
| 1109 | + GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
| 1110 | + GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
| 1111 | + GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
| 1112 | + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
| 1113 | + GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
| 1114 | + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
| 1115 | + GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
| 1116 | + GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
| 1117 | + GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
| 1118 | + GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
| 1119 | + GEN_FW_RANGE(0x8800, 0x8bff, 0),
| 1120 | + GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
| 1121 | + GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
| 1122 | + GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
| 1123 | + GEN_FW_RANGE(0x9560, 0x95ff, 0),
| 1124 | + GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
| 1125 | + GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
| 1126 | + GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
| 1127 | + GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
| 1128 | + GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
| 1129 | + GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
| 1130 | + GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
| 1131 | + GEN_FW_RANGE(0x24000, 0x2407f, 0),
| 1132 | + GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
| 1133 | + GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
| 1134 | + GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
| 1135 | + GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
| 1136 | + GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
| 1137 | + GEN_FW_RANGE(0x40000, 0x1bffff, 0),
| 1138 | + GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
| 1139 | + GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
| 1140 | + GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
| 1141 | + GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
| 1142 | + GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
| 1143 | +};
| 1144 | +
| 1145 | +/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
| 1146 | +static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1033 | 1147 | GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
1034 | 1148 | GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1035 | 1149 | GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
.. | ..
1049 | 1163 | GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1050 | 1164 | GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
1051 | 1165 | GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1052 | | - GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
| 1166 | + GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER
---|
| 1167 | + GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER), |
---|
| 1168 | + GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER), |
---|
| 1169 | + GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER), |
---|
| 1170 | + GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER), |
---|
| 1171 | + GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER), |
---|
| 1172 | + GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER), |
---|
1053 | 1173 | GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), |
---|
1054 | 1174 | GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), |
---|
1055 | 1175 | GEN_FW_RANGE(0x40000, 0x1bffff, 0), |
---|
.. | .. |
---|
1063 | 1183 | }; |
---|
1064 | 1184 | |
---|
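
Both range tables are consumed by find_fw_domain(), which relies on the sort order called out in their comments to binary-search the matching entry. A self-contained sketch of that lookup, with a simplified range type standing in for struct intel_forcewake_range:

    #include <stdint.h>

    struct fw_range {
            uint32_t start;
            uint32_t end;           /* inclusive, mirroring GEN_FW_RANGE() */
            uint32_t domains;       /* FORCEWAKE_* mask; 0 = no forcewake  */
    };

    static uint32_t lookup_fw_domains(const struct fw_range *ranges,
                                      unsigned int count, uint32_t offset)
    {
            unsigned int lo = 0, hi = count;

            while (lo < hi) {
                    unsigned int mid = lo + (hi - lo) / 2;

                    if (offset > ranges[mid].end)
                            lo = mid + 1;
                    else if (offset < ranges[mid].start)
                            hi = mid;
                    else
                            return ranges[mid].domains;
            }
            return 0;       /* not covered: the access needs no forcewake */
    }
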
1065 | 1185 | static void |
---|
1066 | | -ilk_dummy_write(struct drm_i915_private *dev_priv) |
---|
| 1186 | +ilk_dummy_write(struct intel_uncore *uncore) |
---|
1067 | 1187 | { |
---|
1068 | 1188 | /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up |
---|
1069 | 1189 | * the chip from rc6 before touching it for real. MI_MODE is masked, |
---|
1070 | 1190 | * hence harmless to write 0 into. */ |
---|
1071 | | - __raw_i915_write32(dev_priv, MI_MODE, 0); |
---|
| 1191 | + __raw_uncore_write32(uncore, MI_MODE, 0); |
---|
1072 | 1192 | } |
---|
1073 | 1193 | |
---|
1074 | 1194 | static void |
---|
1075 | | -__unclaimed_reg_debug(struct drm_i915_private *dev_priv, |
---|
| 1195 | +__unclaimed_reg_debug(struct intel_uncore *uncore, |
---|
1076 | 1196 | const i915_reg_t reg, |
---|
1077 | 1197 | const bool read, |
---|
1078 | 1198 | const bool before) |
---|
1079 | 1199 | { |
---|
1080 | | - if (WARN(check_for_unclaimed_mmio(dev_priv) && !before, |
---|
1081 | | - "Unclaimed %s register 0x%x\n", |
---|
1082 | | - read ? "read from" : "write to", |
---|
1083 | | - i915_mmio_reg_offset(reg))) |
---|
| 1200 | + if (drm_WARN(&uncore->i915->drm, |
---|
| 1201 | + check_for_unclaimed_mmio(uncore) && !before, |
---|
| 1202 | + "Unclaimed %s register 0x%x\n", |
---|
| 1203 | + read ? "read from" : "write to", |
---|
| 1204 | + i915_mmio_reg_offset(reg))) |
---|
1084 | 1205 | /* Only report the first N failures */ |
---|
1085 | | - i915_modparams.mmio_debug--; |
---|
| 1206 | + uncore->i915->params.mmio_debug--; |
---|
1086 | 1207 | } |
---|
1087 | 1208 | |
---|
1088 | 1209 | static inline void |
---|
1089 | | -unclaimed_reg_debug(struct drm_i915_private *dev_priv, |
---|
| 1210 | +unclaimed_reg_debug(struct intel_uncore *uncore, |
---|
1090 | 1211 | const i915_reg_t reg, |
---|
1091 | 1212 | const bool read, |
---|
1092 | 1213 | const bool before) |
---|
1093 | 1214 | { |
---|
1094 | | - if (likely(!i915_modparams.mmio_debug)) |
---|
| 1215 | + if (likely(!uncore->i915->params.mmio_debug)) |
---|
1095 | 1216 | return; |
---|
1096 | 1217 | |
---|
1097 | | - __unclaimed_reg_debug(dev_priv, reg, read, before); |
---|
| 1218 | + /* interrupts are disabled and re-enabled around uncore->lock usage */ |
---|
| 1219 | + lockdep_assert_held(&uncore->lock); |
---|
| 1220 | + |
---|
| 1221 | + if (before) |
---|
| 1222 | + spin_lock(&uncore->debug->lock); |
---|
| 1223 | + |
---|
| 1224 | + __unclaimed_reg_debug(uncore, reg, read, before); |
---|
| 1225 | + |
---|
| 1226 | + if (!before) |
---|
| 1227 | + spin_unlock(&uncore->debug->lock); |
---|
1098 | 1228 | } |
---|
| 1229 | + |
---|
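
Each forcewake-aware access below calls unclaimed_reg_debug() twice, once with before=true and once with before=false, so the pair brackets exactly one MMIO operation and uncore->debug->lock is held across it. A userspace sketch of that bracketing shape (names are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t debug_lock = PTHREAD_MUTEX_INITIALIZER;

    static void reg_debug(bool before)
    {
            if (before)
                    pthread_mutex_lock(&debug_lock);

            /* The unclaimed-access check runs on both calls, but only the
             * !before side is allowed to warn, so stale state is flushed
             * before the access instead of being blamed on it. */

            if (!before)
                    pthread_mutex_unlock(&debug_lock);
    }

    static unsigned int checked_read(volatile unsigned int *reg)
    {
            unsigned int val;

            reg_debug(true);        /* take the lock, arm the check */
            val = *reg;             /* the one access being audited */
            reg_debug(false);       /* report and drop the lock     */
            return val;
    }
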
| 1230 | +#define __vgpu_read(x) \ |
---|
| 1231 | +static u##x \ |
---|
| 1232 | +vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
---|
| 1233 | + u##x val = __raw_uncore_read##x(uncore, reg); \ |
---|
| 1234 | + trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
---|
| 1235 | + return val; \ |
---|
| 1236 | +} |
---|
| 1237 | +__vgpu_read(8) |
---|
| 1238 | +__vgpu_read(16) |
---|
| 1239 | +__vgpu_read(32) |
---|
| 1240 | +__vgpu_read(64) |
---|
1099 | 1241 | |
---|
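
__vgpu_read() is the simplest instance of the stamping pattern this file leans on: one macro, applied once per access width, token-pastes a whole family of sized accessors. Reduced to a standalone sketch with a hypothetical register window:

    #include <stdint.h>

    static volatile uint8_t mmio[4096];     /* stand-in register window */

    #define DEFINE_MMIO_READ(x)                                     \
    static uint##x##_t mmio_read##x(uint32_t offset)                \
    {                                                               \
            return *(volatile uint##x##_t *)(mmio + offset);        \
    }

    DEFINE_MMIO_READ(8)
    DEFINE_MMIO_READ(16)
    DEFINE_MMIO_READ(32)
    DEFINE_MMIO_READ(64)
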
1100 | 1242 | #define GEN2_READ_HEADER(x) \ |
---|
1101 | 1243 | u##x val = 0; \ |
---|
1102 | | - assert_rpm_wakelock_held(dev_priv); |
---|
| 1244 | + assert_rpm_wakelock_held(uncore->rpm); |
---|
1103 | 1245 | |
---|
1104 | 1246 | #define GEN2_READ_FOOTER \ |
---|
1105 | 1247 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
---|
.. | .. |
---|
1107 | 1249 | |
---|
1108 | 1250 | #define __gen2_read(x) \ |
---|
1109 | 1251 | static u##x \ |
---|
1110 | | -gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
---|
| 1252 | +gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
---|
1111 | 1253 | GEN2_READ_HEADER(x); \ |
---|
1112 | | - val = __raw_i915_read##x(dev_priv, reg); \ |
---|
| 1254 | + val = __raw_uncore_read##x(uncore, reg); \ |
---|
1113 | 1255 | GEN2_READ_FOOTER; \ |
---|
1114 | 1256 | } |
---|
1115 | 1257 | |
---|
1116 | 1258 | #define __gen5_read(x) \ |
---|
1117 | 1259 | static u##x \ |
---|
1118 | | -gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
---|
| 1260 | +gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
---|
1119 | 1261 | GEN2_READ_HEADER(x); \ |
---|
1120 | | - ilk_dummy_write(dev_priv); \ |
---|
1121 | | - val = __raw_i915_read##x(dev_priv, reg); \ |
---|
| 1262 | + ilk_dummy_write(uncore); \ |
---|
| 1263 | + val = __raw_uncore_read##x(uncore, reg); \ |
---|
1122 | 1264 | GEN2_READ_FOOTER; \ |
---|
1123 | 1265 | } |
---|
1124 | 1266 | |
---|
.. | .. |
---|
1141 | 1283 | u32 offset = i915_mmio_reg_offset(reg); \ |
---|
1142 | 1284 | unsigned long irqflags; \ |
---|
1143 | 1285 | u##x val = 0; \ |
---|
1144 | | - assert_rpm_wakelock_held(dev_priv); \ |
---|
1145 | | - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ |
---|
1146 | | - unclaimed_reg_debug(dev_priv, reg, true, true) |
---|
| 1286 | + assert_rpm_wakelock_held(uncore->rpm); \ |
---|
| 1287 | + spin_lock_irqsave(&uncore->lock, irqflags); \ |
---|
| 1288 | + unclaimed_reg_debug(uncore, reg, true, true) |
---|
1147 | 1289 | |
---|
1148 | 1290 | #define GEN6_READ_FOOTER \ |
---|
1149 | | - unclaimed_reg_debug(dev_priv, reg, true, false); \ |
---|
1150 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ |
---|
| 1291 | + unclaimed_reg_debug(uncore, reg, true, false); \ |
---|
| 1292 | + spin_unlock_irqrestore(&uncore->lock, irqflags); \ |
---|
1151 | 1293 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ |
---|
1152 | 1294 | return val |
---|
1153 | 1295 | |
---|
1154 | | -static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, |
---|
| 1296 | +static noinline void ___force_wake_auto(struct intel_uncore *uncore, |
---|
1155 | 1297 | enum forcewake_domains fw_domains) |
---|
1156 | 1298 | { |
---|
1157 | 1299 | struct intel_uncore_forcewake_domain *domain; |
---|
1158 | 1300 | unsigned int tmp; |
---|
1159 | 1301 | |
---|
1160 | | - GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains); |
---|
| 1302 | + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); |
---|
1161 | 1303 | |
---|
1162 | | - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) |
---|
| 1304 | + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) |
---|
1163 | 1305 | fw_domain_arm_timer(domain); |
---|
1164 | 1306 | |
---|
1165 | | - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); |
---|
| 1307 | + uncore->funcs.force_wake_get(uncore, fw_domains); |
---|
1166 | 1308 | } |
---|
1167 | 1309 | |
---|
1168 | | -static inline void __force_wake_auto(struct drm_i915_private *dev_priv, |
---|
| 1310 | +static inline void __force_wake_auto(struct intel_uncore *uncore, |
---|
1169 | 1311 | enum forcewake_domains fw_domains) |
---|
1170 | 1312 | { |
---|
1171 | | - if (WARN_ON(!fw_domains)) |
---|
1172 | | - return; |
---|
| 1313 | + GEM_BUG_ON(!fw_domains); |
---|
1173 | 1314 | |
---|
1174 | 1315 | /* Turn on all requested but inactive supported forcewake domains. */ |
---|
1175 | | - fw_domains &= dev_priv->uncore.fw_domains; |
---|
1176 | | - fw_domains &= ~dev_priv->uncore.fw_domains_active; |
---|
| 1316 | + fw_domains &= uncore->fw_domains; |
---|
| 1317 | + fw_domains &= ~uncore->fw_domains_active; |
---|
1177 | 1318 | |
---|
1178 | 1319 | if (fw_domains) |
---|
1179 | | - ___force_wake_auto(dev_priv, fw_domains); |
---|
| 1320 | + ___force_wake_auto(uncore, fw_domains); |
---|
1180 | 1321 | } |
---|
1181 | 1322 | |
---|
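
Before the readers are stamped out, note the domain arithmetic in __force_wake_auto() above: the requested mask is clipped to the domains this platform actually has, then reduced to the ones not already awake, so the slow wake path runs only when something genuinely needs waking. As pure bit math:

    #include <stdint.h>

    static uint32_t domains_to_wake(uint32_t requested, uint32_t supported,
                                    uint32_t active)
    {
            return requested & supported & ~active;
    }
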
1182 | 1323 | #define __gen_read(func, x) \ |
---|
1183 | 1324 | static u##x \ |
---|
1184 | | -func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ |
---|
| 1325 | +func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ |
---|
1185 | 1326 | enum forcewake_domains fw_engine; \ |
---|
1186 | 1327 | GEN6_READ_HEADER(x); \ |
---|
1187 | | - fw_engine = __##func##_reg_read_fw_domains(offset); \ |
---|
| 1328 | + fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \ |
---|
1188 | 1329 | if (fw_engine) \ |
---|
1189 | | - __force_wake_auto(dev_priv, fw_engine); \ |
---|
1190 | | - val = __raw_i915_read##x(dev_priv, reg); \ |
---|
| 1330 | + __force_wake_auto(uncore, fw_engine); \ |
---|
| 1331 | + val = __raw_uncore_read##x(uncore, reg); \ |
---|
1191 | 1332 | GEN6_READ_FOOTER; \ |
---|
1192 | 1333 | } |
---|
1193 | | -#define __gen6_read(x) __gen_read(gen6, x) |
---|
1194 | | -#define __fwtable_read(x) __gen_read(fwtable, x) |
---|
1195 | | -#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x) |
---|
1196 | 1334 | |
---|
1197 | | -__gen11_fwtable_read(8) |
---|
1198 | | -__gen11_fwtable_read(16) |
---|
1199 | | -__gen11_fwtable_read(32) |
---|
1200 | | -__gen11_fwtable_read(64) |
---|
1201 | | -__fwtable_read(8) |
---|
1202 | | -__fwtable_read(16) |
---|
1203 | | -__fwtable_read(32) |
---|
1204 | | -__fwtable_read(64) |
---|
1205 | | -__gen6_read(8) |
---|
1206 | | -__gen6_read(16) |
---|
1207 | | -__gen6_read(32) |
---|
1208 | | -__gen6_read(64) |
---|
| 1335 | +#define __gen_reg_read_funcs(func) \ |
---|
| 1336 | +static enum forcewake_domains \ |
---|
| 1337 | +func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ |
---|
| 1338 | + return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ |
---|
| 1339 | +} \ |
---|
| 1340 | +\ |
---|
| 1341 | +__gen_read(func, 8) \ |
---|
| 1342 | +__gen_read(func, 16) \ |
---|
| 1343 | +__gen_read(func, 32) \ |
---|
| 1344 | +__gen_read(func, 64) |
---|
1209 | 1345 | |
---|
1210 | | -#undef __gen11_fwtable_read |
---|
1211 | | -#undef __fwtable_read |
---|
1212 | | -#undef __gen6_read |
---|
| 1346 | +__gen_reg_read_funcs(gen12_fwtable); |
---|
| 1347 | +__gen_reg_read_funcs(gen11_fwtable); |
---|
| 1348 | +__gen_reg_read_funcs(fwtable); |
---|
| 1349 | +__gen_reg_read_funcs(gen6); |
---|
| 1350 | + |
---|
| 1351 | +#undef __gen_reg_read_funcs |
---|
1213 | 1352 | #undef GEN6_READ_FOOTER |
---|
1214 | 1353 | #undef GEN6_READ_HEADER |
---|
1215 | 1354 | |
---|
1216 | 1355 | #define GEN2_WRITE_HEADER \ |
---|
1217 | 1356 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
---|
1218 | | - assert_rpm_wakelock_held(dev_priv); \ |
---|
| 1357 | + assert_rpm_wakelock_held(uncore->rpm); \ |
---|
1219 | 1358 | |
---|
1220 | 1359 | #define GEN2_WRITE_FOOTER |
---|
1221 | 1360 | |
---|
1222 | 1361 | #define __gen2_write(x) \ |
---|
1223 | 1362 | static void \ |
---|
1224 | | -gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ |
---|
| 1363 | +gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
---|
1225 | 1364 | GEN2_WRITE_HEADER; \ |
---|
1226 | | - __raw_i915_write##x(dev_priv, reg, val); \ |
---|
| 1365 | + __raw_uncore_write##x(uncore, reg, val); \ |
---|
1227 | 1366 | GEN2_WRITE_FOOTER; \ |
---|
1228 | 1367 | } |
---|
1229 | 1368 | |
---|
1230 | 1369 | #define __gen5_write(x) \ |
---|
1231 | 1370 | static void \ |
---|
1232 | | -gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ |
---|
| 1371 | +gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
---|
1233 | 1372 | GEN2_WRITE_HEADER; \ |
---|
1234 | | - ilk_dummy_write(dev_priv); \ |
---|
1235 | | - __raw_i915_write##x(dev_priv, reg, val); \ |
---|
| 1373 | + ilk_dummy_write(uncore); \ |
---|
| 1374 | + __raw_uncore_write##x(uncore, reg, val); \ |
---|
1236 | 1375 | GEN2_WRITE_FOOTER; \ |
---|
1237 | 1376 | } |
---|
1238 | 1377 | |
---|
.. | .. |
---|
1253 | 1392 | u32 offset = i915_mmio_reg_offset(reg); \ |
---|
1254 | 1393 | unsigned long irqflags; \ |
---|
1255 | 1394 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
---|
1256 | | - assert_rpm_wakelock_held(dev_priv); \ |
---|
1257 | | - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ |
---|
1258 | | - unclaimed_reg_debug(dev_priv, reg, false, true) |
---|
| 1395 | + assert_rpm_wakelock_held(uncore->rpm); \ |
---|
| 1396 | + spin_lock_irqsave(&uncore->lock, irqflags); \ |
---|
| 1397 | + unclaimed_reg_debug(uncore, reg, false, true) |
---|
1259 | 1398 | |
---|
1260 | 1399 | #define GEN6_WRITE_FOOTER \ |
---|
1261 | | - unclaimed_reg_debug(dev_priv, reg, false, false); \ |
---|
1262 | | - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) |
---|
| 1400 | + unclaimed_reg_debug(uncore, reg, false, false); \ |
---|
| 1401 | + spin_unlock_irqrestore(&uncore->lock, irqflags) |
---|
1263 | 1402 | |
---|
1264 | 1403 | #define __gen6_write(x) \ |
---|
1265 | 1404 | static void \ |
---|
1266 | | -gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ |
---|
| 1405 | +gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
---|
1267 | 1406 | GEN6_WRITE_HEADER; \ |
---|
1268 | 1407 | if (NEEDS_FORCE_WAKE(offset)) \ |
---|
1269 | | - __gen6_gt_wait_for_fifo(dev_priv); \ |
---|
1270 | | - __raw_i915_write##x(dev_priv, reg, val); \ |
---|
| 1408 | + __gen6_gt_wait_for_fifo(uncore); \ |
---|
| 1409 | + __raw_uncore_write##x(uncore, reg, val); \ |
---|
1271 | 1410 | GEN6_WRITE_FOOTER; \ |
---|
1272 | 1411 | } |
---|
1273 | | - |
---|
1274 | | -#define __gen_write(func, x) \ |
---|
1275 | | -static void \ |
---|
1276 | | -func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ |
---|
1277 | | - enum forcewake_domains fw_engine; \ |
---|
1278 | | - GEN6_WRITE_HEADER; \ |
---|
1279 | | - fw_engine = __##func##_reg_write_fw_domains(offset); \ |
---|
1280 | | - if (fw_engine) \ |
---|
1281 | | - __force_wake_auto(dev_priv, fw_engine); \ |
---|
1282 | | - __raw_i915_write##x(dev_priv, reg, val); \ |
---|
1283 | | - GEN6_WRITE_FOOTER; \ |
---|
1284 | | -} |
---|
1285 | | -#define __gen8_write(x) __gen_write(gen8, x) |
---|
1286 | | -#define __fwtable_write(x) __gen_write(fwtable, x) |
---|
1287 | | -#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x) |
---|
1288 | | - |
---|
1289 | | -__gen11_fwtable_write(8) |
---|
1290 | | -__gen11_fwtable_write(16) |
---|
1291 | | -__gen11_fwtable_write(32) |
---|
1292 | | -__fwtable_write(8) |
---|
1293 | | -__fwtable_write(16) |
---|
1294 | | -__fwtable_write(32) |
---|
1295 | | -__gen8_write(8) |
---|
1296 | | -__gen8_write(16) |
---|
1297 | | -__gen8_write(32) |
---|
1298 | 1412 | __gen6_write(8) |
---|
1299 | 1413 | __gen6_write(16) |
---|
1300 | 1414 | __gen6_write(32) |
---|
1301 | 1415 | |
---|
1302 | | -#undef __gen11_fwtable_write |
---|
1303 | | -#undef __fwtable_write |
---|
1304 | | -#undef __gen8_write |
---|
1305 | | -#undef __gen6_write |
---|
| 1416 | +#define __gen_write(func, x) \ |
---|
| 1417 | +static void \ |
---|
| 1418 | +func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
---|
| 1419 | + enum forcewake_domains fw_engine; \ |
---|
| 1420 | + GEN6_WRITE_HEADER; \ |
---|
| 1421 | + fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \ |
---|
| 1422 | + if (fw_engine) \ |
---|
| 1423 | + __force_wake_auto(uncore, fw_engine); \ |
---|
| 1424 | + __raw_uncore_write##x(uncore, reg, val); \ |
---|
| 1425 | + GEN6_WRITE_FOOTER; \ |
---|
| 1426 | +} |
---|
| 1427 | + |
---|
| 1428 | +#define __gen_reg_write_funcs(func) \ |
---|
| 1429 | +static enum forcewake_domains \ |
---|
| 1430 | +func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ |
---|
| 1431 | + return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ |
---|
| 1432 | +} \ |
---|
| 1433 | +\ |
---|
| 1434 | +__gen_write(func, 8) \ |
---|
| 1435 | +__gen_write(func, 16) \ |
---|
| 1436 | +__gen_write(func, 32) |
---|
| 1437 | + |
---|
| 1438 | +__gen_reg_write_funcs(gen12_fwtable); |
---|
| 1439 | +__gen_reg_write_funcs(gen11_fwtable); |
---|
| 1440 | +__gen_reg_write_funcs(fwtable); |
---|
| 1441 | +__gen_reg_write_funcs(gen8); |
---|
| 1442 | + |
---|
| 1443 | +#undef __gen_reg_write_funcs |
---|
1306 | 1444 | #undef GEN6_WRITE_FOOTER |
---|
1307 | 1445 | #undef GEN6_WRITE_HEADER |
---|
1308 | 1446 | |
---|
1309 | | -#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \ |
---|
| 1447 | +#define __vgpu_write(x) \ |
---|
| 1448 | +static void \ |
---|
| 1449 | +vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ |
---|
| 1450 | + trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ |
---|
| 1451 | + __raw_uncore_write##x(uncore, reg, val); \ |
---|
| 1452 | +} |
---|
| 1453 | +__vgpu_write(8) |
---|
| 1454 | +__vgpu_write(16) |
---|
| 1455 | +__vgpu_write(32) |
---|
| 1456 | + |
---|
| 1457 | +#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ |
---|
1310 | 1458 | do { \ |
---|
1311 | | - (i915)->uncore.funcs.mmio_writeb = x##_write8; \ |
---|
1312 | | - (i915)->uncore.funcs.mmio_writew = x##_write16; \ |
---|
1313 | | - (i915)->uncore.funcs.mmio_writel = x##_write32; \ |
---|
| 1459 | + (uncore)->funcs.mmio_writeb = x##_write8; \ |
---|
| 1460 | + (uncore)->funcs.mmio_writew = x##_write16; \ |
---|
| 1461 | + (uncore)->funcs.mmio_writel = x##_write32; \ |
---|
1314 | 1462 | } while (0) |
---|
1315 | 1463 | |
---|
1316 | | -#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \ |
---|
| 1464 | +#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \ |
---|
1317 | 1465 | do { \ |
---|
1318 | | - (i915)->uncore.funcs.mmio_readb = x##_read8; \ |
---|
1319 | | - (i915)->uncore.funcs.mmio_readw = x##_read16; \ |
---|
1320 | | - (i915)->uncore.funcs.mmio_readl = x##_read32; \ |
---|
1321 | | - (i915)->uncore.funcs.mmio_readq = x##_read64; \ |
---|
| 1466 | + (uncore)->funcs.mmio_readb = x##_read8; \ |
---|
| 1467 | + (uncore)->funcs.mmio_readw = x##_read16; \ |
---|
| 1468 | + (uncore)->funcs.mmio_readl = x##_read32; \ |
---|
| 1469 | + (uncore)->funcs.mmio_readq = x##_read64; \ |
---|
1322 | 1470 | } while (0) |
---|
1323 | 1471 | |
---|
| 1472 | +#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ |
---|
| 1473 | +do { \ |
---|
| 1474 | + ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \ |
---|
| 1475 | + (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \ |
---|
| 1476 | +} while (0) |
---|
1324 | 1477 | |
---|
1325 | | -static void fw_domain_init(struct drm_i915_private *dev_priv, |
---|
1326 | | - enum forcewake_domain_id domain_id, |
---|
1327 | | - i915_reg_t reg_set, |
---|
1328 | | - i915_reg_t reg_ack) |
---|
| 1478 | +#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ |
---|
| 1479 | +do { \ |
---|
| 1480 | + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \ |
---|
| 1481 | + (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \ |
---|
| 1482 | +} while (0) |
---|
| 1483 | + |
---|
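
All of the ASSIGN_* helpers wrap several assignments in do { ... } while (0), the standard trick for making a multi-statement macro behave as a single statement. A minimal demonstration of why that matters under an unbraced if/else:

    #include <stdio.h>

    #define SET_BOTH(a, b, v)       \
    do {                            \
            (a) = (v);              \
            (b) = (v);              \
    } while (0)

    int main(void)
    {
            int x = 0, y = 0, cond = 1;

            if (cond)
                    SET_BOTH(x, y, 5);      /* expands to one statement */
            else                            /* ...so this else still    */
                    puts("not taken");      /* binds to the if above    */

            printf("%d %d\n", x, y);        /* prints: 5 5 */
            return 0;
    }
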
| 1484 | +static int __fw_domain_init(struct intel_uncore *uncore, |
---|
| 1485 | + enum forcewake_domain_id domain_id, |
---|
| 1486 | + i915_reg_t reg_set, |
---|
| 1487 | + i915_reg_t reg_ack) |
---|
1329 | 1488 | { |
---|
1330 | 1489 | struct intel_uncore_forcewake_domain *d; |
---|
1331 | 1490 | |
---|
1332 | | - if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) |
---|
1333 | | - return; |
---|
| 1491 | + GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); |
---|
| 1492 | + GEM_BUG_ON(uncore->fw_domain[domain_id]); |
---|
1334 | 1493 | |
---|
1335 | | - d = &dev_priv->uncore.fw_domain[domain_id]; |
---|
| 1494 | + if (i915_inject_probe_failure(uncore->i915)) |
---|
| 1495 | + return -ENOMEM; |
---|
1336 | 1496 | |
---|
1337 | | - WARN_ON(d->wake_count); |
---|
| 1497 | + d = kzalloc(sizeof(*d), GFP_KERNEL); |
---|
| 1498 | + if (!d) |
---|
| 1499 | + return -ENOMEM; |
---|
1338 | 1500 | |
---|
1339 | | - WARN_ON(!i915_mmio_reg_valid(reg_set)); |
---|
1340 | | - WARN_ON(!i915_mmio_reg_valid(reg_ack)); |
---|
| 1501 | + drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set)); |
---|
| 1502 | + drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack)); |
---|
1341 | 1503 | |
---|
| 1504 | + d->uncore = uncore; |
---|
1342 | 1505 | d->wake_count = 0; |
---|
1343 | | - d->reg_set = reg_set; |
---|
1344 | | - d->reg_ack = reg_ack; |
---|
| 1506 | + d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); |
---|
| 1507 | + d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); |
---|
1345 | 1508 | |
---|
1346 | 1509 | d->id = domain_id; |
---|
1347 | 1510 | |
---|
.. | .. |
---|
1355 | 1518 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); |
---|
1356 | 1519 | BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); |
---|
1357 | 1520 | |
---|
1358 | | - |
---|
1359 | 1521 | d->mask = BIT(domain_id); |
---|
1360 | 1522 | |
---|
1361 | 1523 | hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
---|
1362 | 1524 | d->timer.function = intel_uncore_fw_release_timer; |
---|
1363 | 1525 | |
---|
1364 | | - dev_priv->uncore.fw_domains |= BIT(domain_id); |
---|
| 1526 | + uncore->fw_domains |= BIT(domain_id); |
---|
1365 | 1527 | |
---|
1366 | | - fw_domain_reset(dev_priv, d); |
---|
| 1528 | + fw_domain_reset(d); |
---|
| 1529 | + |
---|
| 1530 | + uncore->fw_domain[domain_id] = d; |
---|
| 1531 | + |
---|
| 1532 | + return 0; |
---|
1367 | 1533 | } |
---|
1368 | 1534 | |
---|
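
__fw_domain_init() now stores reg_set/reg_ack as resolved iomem pointers (base plus offset) rather than i915_reg_t values, so the per-wake hot path can issue a bare readl()/writel() with no address arithmetic. The idea, as a standalone sketch with hypothetical names:

    #include <stdint.h>

    struct fw_domain_sketch {
            volatile uint32_t *reg_set;     /* resolved once at init */
            volatile uint32_t *reg_ack;
    };

    static void fw_domain_setup(struct fw_domain_sketch *d,
                                volatile uint8_t *base,
                                uint32_t set_off, uint32_t ack_off)
    {
            d->reg_set = (volatile uint32_t *)(base + set_off);
            d->reg_ack = (volatile uint32_t *)(base + ack_off);
    }
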
1369 | | -static void fw_domain_fini(struct drm_i915_private *dev_priv, |
---|
| 1535 | +static void fw_domain_fini(struct intel_uncore *uncore, |
---|
1370 | 1536 | enum forcewake_domain_id domain_id) |
---|
1371 | 1537 | { |
---|
1372 | 1538 | struct intel_uncore_forcewake_domain *d; |
---|
1373 | 1539 | |
---|
1374 | | - if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) |
---|
| 1540 | + GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); |
---|
| 1541 | + |
---|
| 1542 | + d = fetch_and_zero(&uncore->fw_domain[domain_id]); |
---|
| 1543 | + if (!d) |
---|
1375 | 1544 | return; |
---|
1376 | 1545 | |
---|
1377 | | - d = &dev_priv->uncore.fw_domain[domain_id]; |
---|
1378 | | - |
---|
1379 | | - WARN_ON(d->wake_count); |
---|
1380 | | - WARN_ON(hrtimer_cancel(&d->timer)); |
---|
1381 | | - memset(d, 0, sizeof(*d)); |
---|
1382 | | - |
---|
1383 | | - dev_priv->uncore.fw_domains &= ~BIT(domain_id); |
---|
| 1546 | + uncore->fw_domains &= ~BIT(domain_id); |
---|
| 1547 | + drm_WARN_ON(&uncore->i915->drm, d->wake_count); |
---|
| 1548 | + drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer)); |
---|
| 1549 | + kfree(d); |
---|
1384 | 1550 | } |
---|
1385 | 1551 | |
---|
1386 | | -static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) |
---|
| 1552 | +static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore) |
---|
1387 | 1553 | { |
---|
1388 | | - if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) |
---|
1389 | | - return; |
---|
| 1554 | + struct intel_uncore_forcewake_domain *d; |
---|
| 1555 | + int tmp; |
---|
1390 | 1556 | |
---|
1391 | | - if (IS_GEN6(dev_priv)) { |
---|
1392 | | - dev_priv->uncore.fw_reset = 0; |
---|
1393 | | - dev_priv->uncore.fw_set = FORCEWAKE_KERNEL; |
---|
1394 | | - dev_priv->uncore.fw_clear = 0; |
---|
1395 | | - } else { |
---|
1396 | | - /* WaRsClearFWBitsAtReset:bdw,skl */ |
---|
1397 | | - dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff); |
---|
1398 | | - dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); |
---|
1399 | | - dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); |
---|
1400 | | - } |
---|
| 1557 | + for_each_fw_domain(d, uncore, tmp) |
---|
| 1558 | + fw_domain_fini(uncore, d->id); |
---|
| 1559 | +} |
---|
1401 | 1560 | |
---|
1402 | | - if (INTEL_GEN(dev_priv) >= 11) { |
---|
| 1561 | +static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) |
---|
| 1562 | +{ |
---|
| 1563 | + struct drm_i915_private *i915 = uncore->i915; |
---|
| 1564 | + int ret = 0; |
---|
| 1565 | + |
---|
| 1566 | + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); |
---|
| 1567 | + |
---|
| 1568 | +#define fw_domain_init(uncore__, id__, set__, ack__) \ |
---|
| 1569 | + (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) |
---|
| 1570 | + |
---|
| 1571 | + if (INTEL_GEN(i915) >= 11) { |
---|
| 1572 | + /* we'll prune the domains of missing engines later */ |
---|
| 1573 | + intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; |
---|
1403 | 1574 | int i; |
---|
1404 | 1575 | |
---|
1405 | | - dev_priv->uncore.funcs.force_wake_get = |
---|
1406 | | - fw_domains_get_with_fallback; |
---|
1407 | | - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
---|
1408 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
| 1576 | + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; |
---|
| 1577 | + uncore->funcs.force_wake_put = fw_domains_put; |
---|
| 1578 | + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
1409 | 1579 | FORCEWAKE_RENDER_GEN9, |
---|
1410 | 1580 | FORCEWAKE_ACK_RENDER_GEN9); |
---|
1411 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, |
---|
| 1581 | + fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, |
---|
1412 | 1582 | FORCEWAKE_BLITTER_GEN9, |
---|
1413 | 1583 | FORCEWAKE_ACK_BLITTER_GEN9); |
---|
| 1584 | + |
---|
1414 | 1585 | for (i = 0; i < I915_MAX_VCS; i++) { |
---|
1415 | | - if (!HAS_ENGINE(dev_priv, _VCS(i))) |
---|
| 1586 | + if (!__HAS_ENGINE(emask, _VCS(i))) |
---|
1416 | 1587 | continue; |
---|
1417 | 1588 | |
---|
1418 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, |
---|
| 1589 | + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, |
---|
1419 | 1590 | FORCEWAKE_MEDIA_VDBOX_GEN11(i), |
---|
1420 | 1591 | FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); |
---|
1421 | 1592 | } |
---|
1422 | 1593 | for (i = 0; i < I915_MAX_VECS; i++) { |
---|
1423 | | - if (!HAS_ENGINE(dev_priv, _VECS(i))) |
---|
| 1594 | + if (!__HAS_ENGINE(emask, _VECS(i))) |
---|
1424 | 1595 | continue; |
---|
1425 | 1596 | |
---|
1426 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, |
---|
| 1597 | + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, |
---|
1427 | 1598 | FORCEWAKE_MEDIA_VEBOX_GEN11(i), |
---|
1428 | 1599 | FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); |
---|
1429 | 1600 | } |
---|
1430 | | - } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { |
---|
1431 | | - dev_priv->uncore.funcs.force_wake_get = |
---|
1432 | | - fw_domains_get_with_fallback; |
---|
1433 | | - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
---|
1434 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
| 1601 | + } else if (IS_GEN_RANGE(i915, 9, 10)) { |
---|
| 1602 | + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; |
---|
| 1603 | + uncore->funcs.force_wake_put = fw_domains_put; |
---|
| 1604 | + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
1435 | 1605 | FORCEWAKE_RENDER_GEN9, |
---|
1436 | 1606 | FORCEWAKE_ACK_RENDER_GEN9); |
---|
1437 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, |
---|
| 1607 | + fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, |
---|
1438 | 1608 | FORCEWAKE_BLITTER_GEN9, |
---|
1439 | 1609 | FORCEWAKE_ACK_BLITTER_GEN9); |
---|
1440 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, |
---|
| 1610 | + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, |
---|
1441 | 1611 | FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); |
---|
1442 | | - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
---|
1443 | | - dev_priv->uncore.funcs.force_wake_get = fw_domains_get; |
---|
1444 | | - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
---|
1445 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
| 1612 | + } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { |
---|
| 1613 | + uncore->funcs.force_wake_get = fw_domains_get; |
---|
| 1614 | + uncore->funcs.force_wake_put = fw_domains_put; |
---|
| 1615 | + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
1446 | 1616 | FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); |
---|
1447 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, |
---|
| 1617 | + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, |
---|
1448 | 1618 | FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); |
---|
1449 | | - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
---|
1450 | | - dev_priv->uncore.funcs.force_wake_get = |
---|
| 1619 | + } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { |
---|
| 1620 | + uncore->funcs.force_wake_get = |
---|
1451 | 1621 | fw_domains_get_with_thread_status; |
---|
1452 | | - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
---|
1453 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
| 1622 | + uncore->funcs.force_wake_put = fw_domains_put; |
---|
| 1623 | + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
1454 | 1624 | FORCEWAKE_MT, FORCEWAKE_ACK_HSW); |
---|
1455 | | - } else if (IS_IVYBRIDGE(dev_priv)) { |
---|
| 1625 | + } else if (IS_IVYBRIDGE(i915)) { |
---|
1456 | 1626 | u32 ecobus; |
---|
1457 | 1627 | |
---|
1458 | 1628 | /* IVB configs may use multi-threaded forcewake */ |
---|
.. | .. |
---|
1464 | 1634 | * (correctly) interpreted by the test below as MT |
---|
1465 | 1635 | * forcewake being disabled. |
---|
1466 | 1636 | */ |
---|
1467 | | - dev_priv->uncore.funcs.force_wake_get = |
---|
| 1637 | + uncore->funcs.force_wake_get = |
---|
1468 | 1638 | fw_domains_get_with_thread_status; |
---|
1469 | | - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
---|
| 1639 | + uncore->funcs.force_wake_put = fw_domains_put; |
---|
1470 | 1640 | |
---|
1471 | 1641 | /* We need to init first for ECOBUS access and then |
---|
1472 | 1642 | * determine later if we want to reinit, in case of MT access is |
---|
.. | .. |
---|
1475 | 1645 | * before the ecobus check. |
---|
1476 | 1646 | */ |
---|
1477 | 1647 | |
---|
1478 | | - __raw_i915_write32(dev_priv, FORCEWAKE, 0); |
---|
1479 | | - __raw_posting_read(dev_priv, ECOBUS); |
---|
| 1648 | + __raw_uncore_write32(uncore, FORCEWAKE, 0); |
---|
| 1649 | + __raw_posting_read(uncore, ECOBUS); |
---|
1480 | 1650 | |
---|
1481 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
1482 | | - FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
---|
| 1651 | + ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
| 1652 | + FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
---|
| 1653 | + if (ret) |
---|
| 1654 | + goto out; |
---|
1483 | 1655 | |
---|
1484 | | - spin_lock_irq(&dev_priv->uncore.lock); |
---|
1485 | | - fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER); |
---|
1486 | | - ecobus = __raw_i915_read32(dev_priv, ECOBUS); |
---|
1487 | | - fw_domains_put(dev_priv, FORCEWAKE_RENDER); |
---|
1488 | | - spin_unlock_irq(&dev_priv->uncore.lock); |
---|
| 1656 | + spin_lock_irq(&uncore->lock); |
---|
| 1657 | + fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); |
---|
| 1658 | + ecobus = __raw_uncore_read32(uncore, ECOBUS); |
---|
| 1659 | + fw_domains_put(uncore, FORCEWAKE_RENDER); |
---|
| 1660 | + spin_unlock_irq(&uncore->lock); |
---|
1489 | 1661 | |
---|
1490 | 1662 | if (!(ecobus & FORCEWAKE_MT_ENABLE)) { |
---|
1491 | | - DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); |
---|
1492 | | - DRM_INFO("when using vblank-synced partial screen updates.\n"); |
---|
1493 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
| 1663 | + drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n"); |
---|
| 1664 | + drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n"); |
---|
| 1665 | + fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); |
---|
| 1666 | + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
1494 | 1667 | FORCEWAKE, FORCEWAKE_ACK); |
---|
1495 | 1668 | } |
---|
1496 | | - } else if (IS_GEN6(dev_priv)) { |
---|
1497 | | - dev_priv->uncore.funcs.force_wake_get = |
---|
| 1669 | + } else if (IS_GEN(i915, 6)) { |
---|
| 1670 | + uncore->funcs.force_wake_get = |
---|
1498 | 1671 | fw_domains_get_with_thread_status; |
---|
1499 | | - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
---|
1500 | | - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
---|
| 1672 | + uncore->funcs.force_wake_put = fw_domains_put; |
---|
| 1673 | + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, |
---|
1501 | 1674 | FORCEWAKE, FORCEWAKE_ACK); |
---|
1502 | 1675 | } |
---|
1503 | 1676 | |
---|
| 1677 | +#undef fw_domain_init |
---|
| 1678 | + |
---|
1504 | 1679 | /* All future platforms are expected to require complex power gating */ |
---|
1505 | | - WARN_ON(dev_priv->uncore.fw_domains == 0); |
---|
| 1680 | + drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0); |
---|
| 1681 | + |
---|
| 1682 | +out: |
---|
| 1683 | + if (ret) |
---|
| 1684 | + intel_uncore_fw_domains_fini(uncore); |
---|
| 1685 | + |
---|
| 1686 | + return ret; |
---|
1506 | 1687 | } |
---|
1507 | 1688 | |
---|
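
The local fw_domain_init() wrapper defined inside intel_uncore_fw_domains_init() uses the GNU ?: extension to turn every initialisation step into a no-op once an earlier one has failed, keeping the first error for the caller. The pattern in isolation (assumes GCC/Clang for ?:; step() is a stand-in):

    #include <stdio.h>

    static int step(int fail)
    {
            return fail ? -12 /* -ENOMEM */ : 0;
    }

    int main(void)
    {
            int ret = 0;

            ret = ret ?: step(0);   /* runs; ret stays 0        */
            ret = ret ?: step(1);   /* runs; ret becomes -12    */
            ret = ret ?: step(0);   /* skipped; first error won */

            printf("ret = %d\n", ret);      /* prints: ret = -12 */
            return 0;
    }
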
1508 | | -#define ASSIGN_FW_DOMAINS_TABLE(d) \ |
---|
| 1689 | +#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ |
---|
1509 | 1690 | { \ |
---|
1510 | | - dev_priv->uncore.fw_domains_table = \ |
---|
| 1691 | + (uncore)->fw_domains_table = \ |
---|
1511 | 1692 | (struct intel_forcewake_range *)(d); \ |
---|
1512 | | - dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \ |
---|
| 1693 | + (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \ |
---|
1513 | 1694 | } |
---|
1514 | 1695 | |
---|
1515 | 1696 | static int i915_pmic_bus_access_notifier(struct notifier_block *nb, |
---|
1516 | 1697 | unsigned long action, void *data) |
---|
1517 | 1698 | { |
---|
1518 | | - struct drm_i915_private *dev_priv = container_of(nb, |
---|
1519 | | - struct drm_i915_private, uncore.pmic_bus_access_nb); |
---|
| 1699 | + struct intel_uncore *uncore = container_of(nb, |
---|
| 1700 | + struct intel_uncore, pmic_bus_access_nb); |
---|
1520 | 1701 | |
---|
1521 | 1702 | switch (action) { |
---|
1522 | 1703 | case MBI_PMIC_BUS_ACCESS_BEGIN: |
---|
.. | .. |
---|
1533 | 1714 | * wake reference -> disable wakeref asserts for the time of |
---|
1534 | 1715 | * the access. |
---|
1535 | 1716 | */ |
---|
1536 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
1537 | | - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
---|
1538 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
| 1717 | + disable_rpm_wakeref_asserts(uncore->rpm); |
---|
| 1718 | + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); |
---|
| 1719 | + enable_rpm_wakeref_asserts(uncore->rpm); |
---|
1539 | 1720 | break; |
---|
1540 | 1721 | case MBI_PMIC_BUS_ACCESS_END: |
---|
1541 | | - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
---|
| 1722 | + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); |
---|
1542 | 1723 | break; |
---|
1543 | 1724 | } |
---|
1544 | 1725 | |
---|
1545 | 1726 | return NOTIFY_OK; |
---|
1546 | 1727 | } |
---|
1547 | 1728 | |
---|
1548 | | -void intel_uncore_init(struct drm_i915_private *dev_priv) |
---|
| 1729 | +static int uncore_mmio_setup(struct intel_uncore *uncore) |
---|
1549 | 1730 | { |
---|
1550 | | - i915_check_vgpu(dev_priv); |
---|
| 1731 | + struct drm_i915_private *i915 = uncore->i915; |
---|
| 1732 | + struct pci_dev *pdev = i915->drm.pdev; |
---|
| 1733 | + int mmio_bar; |
---|
| 1734 | + int mmio_size; |
---|
1551 | 1735 | |
---|
1552 | | - intel_uncore_edram_detect(dev_priv); |
---|
1553 | | - intel_uncore_fw_domains_init(dev_priv); |
---|
1554 | | - __intel_uncore_early_sanitize(dev_priv, 0); |
---|
1555 | | - |
---|
1556 | | - dev_priv->uncore.unclaimed_mmio_check = 1; |
---|
1557 | | - dev_priv->uncore.pmic_bus_access_nb.notifier_call = |
---|
1558 | | - i915_pmic_bus_access_notifier; |
---|
1559 | | - |
---|
1560 | | - if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) { |
---|
1561 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2); |
---|
1562 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2); |
---|
1563 | | - } else if (IS_GEN5(dev_priv)) { |
---|
1564 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5); |
---|
1565 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5); |
---|
1566 | | - } else if (IS_GEN(dev_priv, 6, 7)) { |
---|
1567 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6); |
---|
1568 | | - |
---|
1569 | | - if (IS_VALLEYVIEW(dev_priv)) { |
---|
1570 | | - ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges); |
---|
1571 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); |
---|
1572 | | - } else { |
---|
1573 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); |
---|
1574 | | - } |
---|
1575 | | - } else if (IS_GEN8(dev_priv)) { |
---|
1576 | | - if (IS_CHERRYVIEW(dev_priv)) { |
---|
1577 | | - ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges); |
---|
1578 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); |
---|
1579 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); |
---|
1580 | | - |
---|
1581 | | - } else { |
---|
1582 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8); |
---|
1583 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); |
---|
1584 | | - } |
---|
1585 | | - } else if (IS_GEN(dev_priv, 9, 10)) { |
---|
1586 | | - ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); |
---|
1587 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); |
---|
1588 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); |
---|
1589 | | - } else { |
---|
1590 | | - ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges); |
---|
1591 | | - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable); |
---|
1592 | | - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable); |
---|
| 1736 | + mmio_bar = IS_GEN(i915, 2) ? 1 : 0; |
---|
| 1737 | + /* |
---|
| 1738 | + * Before gen4, the registers and the GTT are behind different BARs. |
---|
| 1739 | + * However, from gen4 onwards, the registers and the GTT are shared |
---|
| 1740 | + * in the same BAR, so we want to limit this ioremap so that it |
---|
| 1741 | + * does not clobber the GTT, which we map with ioremap_wc instead. Fortunately, |
---|
| 1742 | + * the register BAR remains the same size for all the earlier |
---|
| 1743 | + * generations up to Ironlake. |
---|
| 1744 | + */ |
---|
| 1745 | + if (INTEL_GEN(i915) < 5) |
---|
| 1746 | + mmio_size = 512 * 1024; |
---|
| 1747 | + else |
---|
| 1748 | + mmio_size = 2 * 1024 * 1024; |
---|
| 1749 | + uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); |
---|
| 1750 | + if (!uncore->regs) { |
---|
| 1751 | + drm_err(&i915->drm, "failed to map registers\n"); |
---|
| 1752 | + return -EIO; |
---|
1593 | 1753 | } |
---|
1594 | 1754 | |
---|
1595 | | - iosf_mbi_register_pmic_bus_access_notifier( |
---|
1596 | | - &dev_priv->uncore.pmic_bus_access_nb); |
---|
| 1755 | + return 0; |
---|
| 1756 | +} |
---|
| 1757 | + |
---|
| 1758 | +static void uncore_mmio_cleanup(struct intel_uncore *uncore) |
---|
| 1759 | +{ |
---|
| 1760 | + struct pci_dev *pdev = uncore->i915->drm.pdev; |
---|
| 1761 | + |
---|
| 1762 | + pci_iounmap(pdev, uncore->regs); |
---|
| 1763 | +} |
---|
| 1764 | + |
---|
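
uncore_mmio_setup() encodes two hardware facts: gen2 parts expose their registers in BAR 1 rather than BAR 0, and anything before Ironlake gets a 512KiB mapping so the ioremap stays clear of the GTT sharing the BAR from gen4 onwards. The selection logic, reduced to plain integers for illustration:

    #include <stdint.h>

    static void mmio_window(int gen, int *bar, uint32_t *size)
    {
            *bar = (gen == 2) ? 1 : 0;              /* gen2: registers in BAR 1 */
            *size = (gen < 5) ? 512 * 1024          /* keep clear of the GTT    */
                              : 2 * 1024 * 1024;
    }
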
| 1765 | +void intel_uncore_init_early(struct intel_uncore *uncore, |
---|
| 1766 | + struct drm_i915_private *i915) |
---|
| 1767 | +{ |
---|
| 1768 | + spin_lock_init(&uncore->lock); |
---|
| 1769 | + uncore->i915 = i915; |
---|
| 1770 | + uncore->rpm = &i915->runtime_pm; |
---|
| 1771 | + uncore->debug = &i915->mmio_debug; |
---|
| 1772 | +} |
---|
| 1773 | + |
---|
| 1774 | +static void uncore_raw_init(struct intel_uncore *uncore) |
---|
| 1775 | +{ |
---|
| 1776 | + GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); |
---|
| 1777 | + |
---|
| 1778 | + if (intel_vgpu_active(uncore->i915)) { |
---|
| 1779 | + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu); |
---|
| 1780 | + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu); |
---|
| 1781 | + } else if (IS_GEN(uncore->i915, 5)) { |
---|
| 1782 | + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); |
---|
| 1783 | + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); |
---|
| 1784 | + } else { |
---|
| 1785 | + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); |
---|
| 1786 | + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); |
---|
| 1787 | + } |
---|
| 1788 | +} |
---|
| 1789 | + |
---|
| 1790 | +static int uncore_forcewake_init(struct intel_uncore *uncore) |
---|
| 1791 | +{ |
---|
| 1792 | + struct drm_i915_private *i915 = uncore->i915; |
---|
| 1793 | + int ret; |
---|
| 1794 | + |
---|
| 1795 | + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); |
---|
| 1796 | + |
---|
| 1797 | + ret = intel_uncore_fw_domains_init(uncore); |
---|
| 1798 | + if (ret) |
---|
| 1799 | + return ret; |
---|
| 1800 | + forcewake_early_sanitize(uncore, 0); |
---|
| 1801 | + |
---|
| 1802 | + if (IS_GEN_RANGE(i915, 6, 7)) { |
---|
| 1803 | + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); |
---|
| 1804 | + |
---|
| 1805 | + if (IS_VALLEYVIEW(i915)) { |
---|
| 1806 | + ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges); |
---|
| 1807 | + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); |
---|
| 1808 | + } else { |
---|
| 1809 | + ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); |
---|
| 1810 | + } |
---|
| 1811 | + } else if (IS_GEN(i915, 8)) { |
---|
| 1812 | + if (IS_CHERRYVIEW(i915)) { |
---|
| 1813 | + ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); |
---|
| 1814 | + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); |
---|
| 1815 | + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); |
---|
| 1816 | + } else { |
---|
| 1817 | + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); |
---|
| 1818 | + ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); |
---|
| 1819 | + } |
---|
| 1820 | + } else if (IS_GEN_RANGE(i915, 9, 10)) { |
---|
| 1821 | + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); |
---|
| 1822 | + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); |
---|
| 1823 | + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); |
---|
| 1824 | + } else if (IS_GEN(i915, 11)) { |
---|
| 1825 | + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); |
---|
| 1826 | + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); |
---|
| 1827 | + ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); |
---|
| 1828 | + } else { |
---|
| 1829 | + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); |
---|
| 1830 | + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable); |
---|
| 1831 | + ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable); |
---|
| 1832 | + } |
---|
| 1833 | + |
---|
| 1834 | + uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; |
---|
| 1835 | + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); |
---|
| 1836 | + |
---|
| 1837 | + return 0; |
---|
| 1838 | +} |
---|
| 1839 | + |
---|
| 1840 | +int intel_uncore_init_mmio(struct intel_uncore *uncore) |
---|
| 1841 | +{ |
---|
| 1842 | + struct drm_i915_private *i915 = uncore->i915; |
---|
| 1843 | + int ret; |
---|
| 1844 | + |
---|
| 1845 | + ret = uncore_mmio_setup(uncore); |
---|
| 1846 | + if (ret) |
---|
| 1847 | + return ret; |
---|
| 1848 | + |
---|
| 1849 | + if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) |
---|
| 1850 | + uncore->flags |= UNCORE_HAS_FORCEWAKE; |
---|
| 1851 | + |
---|
| 1852 | + if (!intel_uncore_has_forcewake(uncore)) { |
---|
| 1853 | + uncore_raw_init(uncore); |
---|
| 1854 | + } else { |
---|
| 1855 | + ret = uncore_forcewake_init(uncore); |
---|
| 1856 | + if (ret) |
---|
| 1857 | + goto out_mmio_cleanup; |
---|
| 1858 | + } |
---|
| 1859 | + |
---|
| 1860 | + /* make sure fw funcs are set if and only if we have fw */ |
---|
| 1861 | + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); |
---|
| 1862 | + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); |
---|
| 1863 | + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); |
---|
| 1864 | + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); |
---|
| 1865 | + |
---|
| 1866 | + if (HAS_FPGA_DBG_UNCLAIMED(i915)) |
---|
| 1867 | + uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; |
---|
| 1868 | + |
---|
| 1869 | + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) |
---|
| 1870 | + uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED; |
---|
| 1871 | + |
---|
| 1872 | + if (IS_GEN_RANGE(i915, 6, 7)) |
---|
| 1873 | + uncore->flags |= UNCORE_HAS_FIFO; |
---|
| 1874 | + |
---|
| 1875 | + /* clear out unclaimed reg detection bit */ |
---|
| 1876 | + if (intel_uncore_unclaimed_mmio(uncore)) |
---|
| 1877 | + drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); |
---|
| 1878 | + |
---|
| 1879 | + return 0; |
---|
| 1880 | + |
---|
| 1881 | +out_mmio_cleanup: |
---|
| 1882 | + uncore_mmio_cleanup(uncore); |
---|
| 1883 | + |
---|
| 1884 | + return ret; |
---|
1597 | 1885 | } |
---|
1598 | 1886 | |
---|
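
intel_uncore_init_mmio() also derives the uncore capability flags that later code keys off. A sketch of that derivation with the platform predicates flattened to booleans (the flag bit values here are illustrative, not the kernel's):

    #include <stdint.h>

    #define HAS_FORCEWAKE           (1u << 0)       /* illustrative bits */
    #define HAS_FPGA_DBG_UNCLAIMED  (1u << 1)
    #define HAS_DBG_UNCLAIMED       (1u << 2)
    #define HAS_FIFO                (1u << 3)

    static uint32_t uncore_flags(int gen, int vgpu, int fpga_dbg, int vlv_or_chv)
    {
            uint32_t flags = 0;

            if (gen > 5 && !vgpu)
                    flags |= HAS_FORCEWAKE;
            if (fpga_dbg)
                    flags |= HAS_FPGA_DBG_UNCLAIMED;
            if (vlv_or_chv)
                    flags |= HAS_DBG_UNCLAIMED;
            if (gen >= 6 && gen <= 7)
                    flags |= HAS_FIFO;

            return flags;
    }
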
1599 | 1887 | /* |
---|
.. | .. |
---|
1601 | 1889 | * the forcewake domains. Prune them to make sure they only reference existing |
---|
1602 | 1890 | * engines. |
---|
1603 | 1891 | */ |
---|
1604 | | -void intel_uncore_prune(struct drm_i915_private *dev_priv) |
---|
| 1892 | +void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, |
---|
| 1893 | + struct intel_gt *gt) |
---|
1605 | 1894 | { |
---|
1606 | | - if (INTEL_GEN(dev_priv) >= 11) { |
---|
1607 | | - enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains; |
---|
1608 | | - enum forcewake_domain_id domain_id; |
---|
1609 | | - int i; |
---|
| 1895 | + enum forcewake_domains fw_domains = uncore->fw_domains; |
---|
| 1896 | + enum forcewake_domain_id domain_id; |
---|
| 1897 | + int i; |
---|
1610 | 1898 | |
---|
1611 | | - for (i = 0; i < I915_MAX_VCS; i++) { |
---|
1612 | | - domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; |
---|
| 1899 | + if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11) |
---|
| 1900 | + return; |
---|
1613 | 1901 | |
---|
1614 | | - if (HAS_ENGINE(dev_priv, _VCS(i))) |
---|
1615 | | - continue; |
---|
| 1902 | + for (i = 0; i < I915_MAX_VCS; i++) { |
---|
| 1903 | + domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; |
---|
1616 | 1904 | |
---|
1617 | | - if (fw_domains & BIT(domain_id)) |
---|
1618 | | - fw_domain_fini(dev_priv, domain_id); |
---|
1619 | | - } |
---|
| 1905 | + if (HAS_ENGINE(gt, _VCS(i))) |
---|
| 1906 | + continue; |
---|
1620 | 1907 | |
---|
1621 | | - for (i = 0; i < I915_MAX_VECS; i++) { |
---|
1622 | | - domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; |
---|
| 1908 | + if (fw_domains & BIT(domain_id)) |
---|
| 1909 | + fw_domain_fini(uncore, domain_id); |
---|
| 1910 | + } |
---|
1623 | 1911 | |
---|
1624 | | - if (HAS_ENGINE(dev_priv, _VECS(i))) |
---|
1625 | | - continue; |
---|
| 1912 | + for (i = 0; i < I915_MAX_VECS; i++) { |
---|
| 1913 | + domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; |
---|
1626 | 1914 | |
---|
1627 | | - if (fw_domains & BIT(domain_id)) |
---|
1628 | | - fw_domain_fini(dev_priv, domain_id); |
---|
1629 | | - } |
---|
| 1915 | + if (HAS_ENGINE(gt, _VECS(i))) |
---|
| 1916 | + continue; |
---|
| 1917 | + |
---|
| 1918 | + if (fw_domains & BIT(domain_id)) |
---|
| 1919 | + fw_domain_fini(uncore, domain_id); |
---|
1630 | 1920 | } |
---|
1631 | 1921 | } |
---|
1632 | 1922 | |
---|
1633 | | -void intel_uncore_fini(struct drm_i915_private *dev_priv) |
---|
| 1923 | +void intel_uncore_fini_mmio(struct intel_uncore *uncore) |
---|
1634 | 1924 | { |
---|
1635 | | - /* Paranoia: make sure we have disabled everything before we exit. */ |
---|
1636 | | - intel_uncore_sanitize(dev_priv); |
---|
| 1925 | + if (intel_uncore_has_forcewake(uncore)) { |
---|
| 1926 | + iosf_mbi_punit_acquire(); |
---|
| 1927 | + iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( |
---|
| 1928 | + &uncore->pmic_bus_access_nb); |
---|
| 1929 | + intel_uncore_forcewake_reset(uncore); |
---|
| 1930 | + intel_uncore_fw_domains_fini(uncore); |
---|
| 1931 | + iosf_mbi_punit_release(); |
---|
| 1932 | + } |
---|
1637 | 1933 | |
---|
1638 | | - iosf_mbi_punit_acquire(); |
---|
1639 | | - iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( |
---|
1640 | | - &dev_priv->uncore.pmic_bus_access_nb); |
---|
1641 | | - intel_uncore_forcewake_reset(dev_priv); |
---|
1642 | | - iosf_mbi_punit_release(); |
---|
| 1934 | + uncore_mmio_cleanup(uncore); |
---|
1643 | 1935 | } |
---|
1644 | 1936 | |
---|
1645 | 1937 | static const struct reg_whitelist { |
---|
.. | .. |
---|
1650 | 1942 | } reg_read_whitelist[] = { { |
---|
1651 | 1943 | .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), |
---|
1652 | 1944 | .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), |
---|
1653 | | - .gen_mask = INTEL_GEN_MASK(4, 11), |
---|
| 1945 | + .gen_mask = INTEL_GEN_MASK(4, 12), |
---|
1654 | 1946 | .size = 8 |
---|
1655 | 1947 | } }; |
---|
1656 | 1948 | |
---|
1657 | 1949 | int i915_reg_read_ioctl(struct drm_device *dev, |
---|
1658 | 1950 | void *data, struct drm_file *file) |
---|
1659 | 1951 | { |
---|
1660 | | - struct drm_i915_private *dev_priv = to_i915(dev); |
---|
| 1952 | + struct drm_i915_private *i915 = to_i915(dev); |
---|
| 1953 | + struct intel_uncore *uncore = &i915->uncore; |
---|
1661 | 1954 | struct drm_i915_reg_read *reg = data; |
---|
1662 | 1955 | struct reg_whitelist const *entry; |
---|
| 1956 | + intel_wakeref_t wakeref; |
---|
1663 | 1957 | unsigned int flags; |
---|
1664 | 1958 | int remain; |
---|
1665 | 1959 | int ret = 0; |
---|
.. | .. |
---|
1673 | 1967 | GEM_BUG_ON(entry->size > 8); |
---|
1674 | 1968 | GEM_BUG_ON(entry_offset & (entry->size - 1)); |
---|
1675 | 1969 | |
---|
1676 | | - if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && |
---|
| 1970 | + if (INTEL_INFO(i915)->gen_mask & entry->gen_mask && |
---|
1677 | 1971 | entry_offset == (reg->offset & -entry->size)) |
---|
1678 | 1972 | break; |
---|
1679 | 1973 | entry++; |
---|
.. | .. |
---|
1685 | 1979 | |
---|
1686 | 1980 | flags = reg->offset & (entry->size - 1); |
---|
1687 | 1981 | |
---|
1688 | | - intel_runtime_pm_get(dev_priv); |
---|
1689 | | - if (entry->size == 8 && flags == I915_REG_READ_8B_WA) |
---|
1690 | | - reg->val = I915_READ64_2x32(entry->offset_ldw, |
---|
1691 | | - entry->offset_udw); |
---|
1692 | | - else if (entry->size == 8 && flags == 0) |
---|
1693 | | - reg->val = I915_READ64(entry->offset_ldw); |
---|
1694 | | - else if (entry->size == 4 && flags == 0) |
---|
1695 | | - reg->val = I915_READ(entry->offset_ldw); |
---|
1696 | | - else if (entry->size == 2 && flags == 0) |
---|
1697 | | - reg->val = I915_READ16(entry->offset_ldw); |
---|
1698 | | - else if (entry->size == 1 && flags == 0) |
---|
1699 | | - reg->val = I915_READ8(entry->offset_ldw); |
---|
1700 | | - else |
---|
1701 | | - ret = -EINVAL; |
---|
1702 | | - intel_runtime_pm_put(dev_priv); |
---|
| 1982 | + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { |
---|
| 1983 | + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) |
---|
| 1984 | + reg->val = intel_uncore_read64_2x32(uncore, |
---|
| 1985 | + entry->offset_ldw, |
---|
| 1986 | + entry->offset_udw); |
---|
| 1987 | + else if (entry->size == 8 && flags == 0) |
---|
| 1988 | + reg->val = intel_uncore_read64(uncore, |
---|
| 1989 | + entry->offset_ldw); |
---|
| 1990 | + else if (entry->size == 4 && flags == 0) |
---|
| 1991 | + reg->val = intel_uncore_read(uncore, entry->offset_ldw); |
---|
| 1992 | + else if (entry->size == 2 && flags == 0) |
---|
| 1993 | + reg->val = intel_uncore_read16(uncore, |
---|
| 1994 | + entry->offset_ldw); |
---|
| 1995 | + else if (entry->size == 1 && flags == 0) |
---|
| 1996 | + reg->val = intel_uncore_read8(uncore, |
---|
| 1997 | + entry->offset_ldw); |
---|
| 1998 | + else |
---|
| 1999 | + ret = -EINVAL; |
---|
| 2000 | + } |
---|
1703 | 2001 | |
---|
1704 | 2002 | return ret; |
---|
1705 | | -} |
---|
1706 | | - |
---|
1707 | | -static void gen3_stop_engine(struct intel_engine_cs *engine) |
---|
1708 | | -{ |
---|
1709 | | - struct drm_i915_private *dev_priv = engine->i915; |
---|
1710 | | - const u32 base = engine->mmio_base; |
---|
1711 | | - |
---|
1712 | | - if (intel_engine_stop_cs(engine)) |
---|
1713 | | - DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); |
---|
1714 | | - |
---|
1715 | | - I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); |
---|
1716 | | - POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ |
---|
1717 | | - |
---|
1718 | | - I915_WRITE_FW(RING_HEAD(base), 0); |
---|
1719 | | - I915_WRITE_FW(RING_TAIL(base), 0); |
---|
1720 | | - POSTING_READ_FW(RING_TAIL(base)); |
---|
1721 | | - |
---|
1722 | | - /* The ring must be empty before it is disabled */ |
---|
1723 | | - I915_WRITE_FW(RING_CTL(base), 0); |
---|
1724 | | - |
---|
1725 | | - /* Check acts as a post */ |
---|
1726 | | - if (I915_READ_FW(RING_HEAD(base)) != 0) |
---|
1727 | | - DRM_DEBUG_DRIVER("%s: ring head not parked\n", |
---|
1728 | | - engine->name); |
---|
1729 | | -} |
---|
1730 | | - |
---|
1731 | | -static void i915_stop_engines(struct drm_i915_private *dev_priv, |
---|
1732 | | - unsigned engine_mask) |
---|
1733 | | -{ |
---|
1734 | | - struct intel_engine_cs *engine; |
---|
1735 | | - enum intel_engine_id id; |
---|
1736 | | - |
---|
1737 | | - if (INTEL_GEN(dev_priv) < 3) |
---|
1738 | | - return; |
---|
1739 | | - |
---|
1740 | | - for_each_engine_masked(engine, dev_priv, engine_mask, id) |
---|
1741 | | - gen3_stop_engine(engine); |
---|
1742 | | -} |
---|
1743 | | - |
---|
1744 | | -static bool i915_in_reset(struct pci_dev *pdev) |
---|
1745 | | -{ |
---|
1746 | | - u8 gdrst; |
---|
1747 | | - |
---|
1748 | | - pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
---|
1749 | | - return gdrst & GRDOM_RESET_STATUS; |
---|
1750 | | -} |
---|
1751 | | - |
---|
1752 | | -static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
---|
1753 | | -{ |
---|
1754 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
1755 | | - int err; |
---|
1756 | | - |
---|
1757 | | - /* Assert reset for at least 20 usec, and wait for acknowledgement. */ |
---|
1758 | | - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
---|
1759 | | - usleep_range(50, 200); |
---|
1760 | | - err = wait_for(i915_in_reset(pdev), 500); |
---|
1761 | | - |
---|
1762 | | - /* Clear the reset request. */ |
---|
1763 | | - pci_write_config_byte(pdev, I915_GDRST, 0); |
---|
1764 | | - usleep_range(50, 200); |
---|
1765 | | - if (!err) |
---|
1766 | | - err = wait_for(!i915_in_reset(pdev), 500); |
---|
1767 | | - |
---|
1768 | | - return err; |
---|
1769 | | -} |
---|
1770 | | - |
---|
1771 | | -static bool g4x_reset_complete(struct pci_dev *pdev) |
---|
1772 | | -{ |
---|
1773 | | - u8 gdrst; |
---|
1774 | | - |
---|
1775 | | - pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
---|
1776 | | - return (gdrst & GRDOM_RESET_ENABLE) == 0; |
---|
1777 | | -} |
---|
1778 | | - |
---|
1779 | | -static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
---|
1780 | | -{ |
---|
1781 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
1782 | | - |
---|
1783 | | - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
---|
1784 | | - return wait_for(g4x_reset_complete(pdev), 500); |
---|
1785 | | -} |
---|
1786 | | - |
---|
1787 | | -static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
---|
1788 | | -{ |
---|
1789 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
1790 | | - int ret; |
---|
1791 | | - |
---|
1792 | | - /* WaVcpClkGateDisableForMediaReset:ctg,elk */ |
---|
1793 | | - I915_WRITE(VDECCLK_GATE_D, |
---|
1794 | | - I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); |
---|
1795 | | - POSTING_READ(VDECCLK_GATE_D); |
---|
1796 | | - |
---|
1797 | | - pci_write_config_byte(pdev, I915_GDRST, |
---|
1798 | | - GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
---|
1799 | | - ret = wait_for(g4x_reset_complete(pdev), 500); |
---|
1800 | | - if (ret) { |
---|
1801 | | - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); |
---|
1802 | | - goto out; |
---|
1803 | | - } |
---|
1804 | | - |
---|
1805 | | - pci_write_config_byte(pdev, I915_GDRST, |
---|
1806 | | - GRDOM_RENDER | GRDOM_RESET_ENABLE); |
---|
1807 | | - ret = wait_for(g4x_reset_complete(pdev), 500); |
---|
1808 | | - if (ret) { |
---|
1809 | | - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); |
---|
1810 | | - goto out; |
---|
1811 | | - } |
---|
1812 | | - |
---|
1813 | | -out: |
---|
1814 | | - pci_write_config_byte(pdev, I915_GDRST, 0); |
---|
1815 | | - |
---|
1816 | | - I915_WRITE(VDECCLK_GATE_D, |
---|
1817 | | - I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); |
---|
1818 | | - POSTING_READ(VDECCLK_GATE_D); |
---|
1819 | | - |
---|
1820 | | - return ret; |
---|
1821 | | -} |
---|
1822 | | - |
---|
1823 | | -static int ironlake_do_reset(struct drm_i915_private *dev_priv, |
---|
1824 | | - unsigned engine_mask) |
---|
1825 | | -{ |
---|
1826 | | - int ret; |
---|
1827 | | - |
---|
1828 | | - I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); |
---|
1829 | | - ret = intel_wait_for_register(dev_priv, |
---|
1830 | | - ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, |
---|
1831 | | - 500); |
---|
1832 | | - if (ret) { |
---|
1833 | | - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); |
---|
1834 | | - goto out; |
---|
1835 | | - } |
---|
1836 | | - |
---|
1837 | | - I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); |
---|
1838 | | - ret = intel_wait_for_register(dev_priv, |
---|
1839 | | - ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, |
---|
1840 | | - 500); |
---|
1841 | | - if (ret) { |
---|
1842 | | - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); |
---|
1843 | | - goto out; |
---|
1844 | | - } |
---|
1845 | | - |
---|
1846 | | -out: |
---|
1847 | | - I915_WRITE(ILK_GDSR, 0); |
---|
1848 | | - POSTING_READ(ILK_GDSR); |
---|
1849 | | - return ret; |
---|
1850 | | -} |
---|
1851 | | - |
---|
1852 | | -/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ |
---|
1853 | | -static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, |
---|
1854 | | - u32 hw_domain_mask) |
---|
1855 | | -{ |
---|
1856 | | - int err; |
---|
1857 | | - |
---|
1858 | | - /* GEN6_GDRST is not in the gt power well, no need to check |
---|
1859 | | - * for fifo space for the write or forcewake the chip for |
---|
1860 | | - * the read |
---|
1861 | | - */ |
---|
1862 | | - __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); |
---|
1863 | | - |
---|
1864 | | - /* Wait for the device to ack the reset requests */ |
---|
1865 | | - err = __intel_wait_for_register_fw(dev_priv, |
---|
1866 | | - GEN6_GDRST, hw_domain_mask, 0, |
---|
1867 | | - 500, 0, |
---|
1868 | | - NULL); |
---|
1869 | | - if (err) |
---|
1870 | | - DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", |
---|
1871 | | - hw_domain_mask); |
---|
1872 | | - |
---|
1873 | | - return err; |
---|
1874 | | -} |
---|
1875 | | - |
---|
1876 | | -/** |
---|
1877 | | - * gen6_reset_engines - reset individual engines |
---|
1878 | | - * @dev_priv: i915 device |
---|
1879 | | - * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset |
---|
1880 | | - * |
---|
1881 | | - * This function will reset the individual engines that are set in engine_mask. |
---|
1882 | | - * If you provide ALL_ENGINES as the mask, a full global domain reset will be issued. |
---|
1883 | | - * |
---|
1884 | | - * Note: It is the responsibility of the caller to handle the difference between |
---|
1885 | | - * requesting a full domain reset and resetting all available individual engines. |
---|
1886 | | - * |
---|
1887 | | - * Returns 0 on success, nonzero on error. |
---|
1888 | | - */ |
---|
1889 | | -static int gen6_reset_engines(struct drm_i915_private *dev_priv, |
---|
1890 | | - unsigned engine_mask) |
---|
1891 | | -{ |
---|
1892 | | - struct intel_engine_cs *engine; |
---|
1893 | | - const u32 hw_engine_mask[I915_NUM_ENGINES] = { |
---|
1894 | | - [RCS] = GEN6_GRDOM_RENDER, |
---|
1895 | | - [BCS] = GEN6_GRDOM_BLT, |
---|
1896 | | - [VCS] = GEN6_GRDOM_MEDIA, |
---|
1897 | | - [VCS2] = GEN8_GRDOM_MEDIA2, |
---|
1898 | | - [VECS] = GEN6_GRDOM_VECS, |
---|
1899 | | - }; |
---|
1900 | | - u32 hw_mask; |
---|
1901 | | - |
---|
1902 | | - if (engine_mask == ALL_ENGINES) { |
---|
1903 | | - hw_mask = GEN6_GRDOM_FULL; |
---|
1904 | | - } else { |
---|
1905 | | - unsigned int tmp; |
---|
1906 | | - |
---|
1907 | | - hw_mask = 0; |
---|
1908 | | - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
---|
1909 | | - hw_mask |= hw_engine_mask[engine->id]; |
---|
1910 | | - } |
---|
1911 | | - |
---|
1912 | | - return gen6_hw_domain_reset(dev_priv, hw_mask); |
---|
1913 | | -} |
---|
1914 | | - |
---|
1915 | | -/** |
---|
1916 | | - * gen11_reset_engines - reset individual engines |
---|
1917 | | - * @dev_priv: i915 device |
---|
1918 | | - * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset |
---|
1919 | | - * |
---|
1920 | | - * This function will reset the individual engines that are set in engine_mask. |
---|
1921 | | - * If you provide ALL_ENGINES as the mask, a full global domain reset will be issued. |
---|
1922 | | - * |
---|
1923 | | - * Note: It is the responsibility of the caller to handle the difference between |
---|
1924 | | - * requesting a full domain reset and resetting all available individual engines. |
---|
1925 | | - * |
---|
1926 | | - * Returns 0 on success, nonzero on error. |
---|
1927 | | - */ |
---|
1928 | | -static int gen11_reset_engines(struct drm_i915_private *dev_priv, |
---|
1929 | | - unsigned engine_mask) |
---|
1930 | | -{ |
---|
1931 | | - struct intel_engine_cs *engine; |
---|
1932 | | - const u32 hw_engine_mask[I915_NUM_ENGINES] = { |
---|
1933 | | - [RCS] = GEN11_GRDOM_RENDER, |
---|
1934 | | - [BCS] = GEN11_GRDOM_BLT, |
---|
1935 | | - [VCS] = GEN11_GRDOM_MEDIA, |
---|
1936 | | - [VCS2] = GEN11_GRDOM_MEDIA2, |
---|
1937 | | - [VCS3] = GEN11_GRDOM_MEDIA3, |
---|
1938 | | - [VCS4] = GEN11_GRDOM_MEDIA4, |
---|
1939 | | - [VECS] = GEN11_GRDOM_VECS, |
---|
1940 | | - [VECS2] = GEN11_GRDOM_VECS2, |
---|
1941 | | - }; |
---|
1942 | | - u32 hw_mask; |
---|
1943 | | - |
---|
1944 | | - BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); |
---|
1945 | | - |
---|
1946 | | - if (engine_mask == ALL_ENGINES) { |
---|
1947 | | - hw_mask = GEN11_GRDOM_FULL; |
---|
1948 | | - } else { |
---|
1949 | | - unsigned int tmp; |
---|
1950 | | - |
---|
1951 | | - hw_mask = 0; |
---|
1952 | | - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
---|
1953 | | - hw_mask |= hw_engine_mask[engine->id]; |
---|
1954 | | - } |
---|
1955 | | - |
---|
1956 | | - return gen6_hw_domain_reset(dev_priv, hw_mask); |
---|
1957 | 2003 | } |
---|
1958 | 2004 | |
---|
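The 8-byte path guarded by I915_REG_READ_8B_WA exists because a 64-bit counter cannot be sampled atomically as two 32-bit reads: the upper dword may roll over between the two halves. A minimal sketch of the re-read pattern such a helper typically uses (an assumption about intel_uncore_read64_2x32(), whose exact definition lives in the uncore headers):

    /* Hypothetical open-coded equivalent of intel_uncore_read64_2x32() */
    static u64 read64_2x32(struct intel_uncore *uncore,
                           i915_reg_t lower_reg, i915_reg_t upper_reg)
    {
            u32 upper, lower, old_upper, loop = 0;

            upper = intel_uncore_read(uncore, upper_reg);
            do {
                    /* retry until the upper half is stable across the pair */
                    old_upper = upper;
                    lower = intel_uncore_read(uncore, lower_reg);
                    upper = intel_uncore_read(uncore, upper_reg);
            } while (upper != old_upper && loop++ < 2);

            return (u64)upper << 32 | lower;
    }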
1959 | 2005 | /** |
---|
1960 | 2006 | * __intel_wait_for_register_fw - wait until register matches expected state |
---|
1961 | | - * @dev_priv: the i915 device |
---|
| 2007 | + * @uncore: the struct intel_uncore |
---|
1962 | 2008 | * @reg: the register to read |
---|
1963 | 2009 | * @mask: mask to apply to register value |
---|
1964 | 2010 | * @value: expected value |
---|
.. | .. |
---|
1980 | 2026 | * wish to wait without holding forcewake for the duration (i.e. you expect |
---|
1981 | 2027 | * the wait to be slow). |
---|
1982 | 2028 | * |
---|
1983 | | - * Returns 0 if the register matches the desired condition, or -ETIMEOUT. |
---|
| 2029 | + * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. |
---|
1984 | 2030 | */ |
---|
1985 | | -int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, |
---|
| 2031 | +int __intel_wait_for_register_fw(struct intel_uncore *uncore, |
---|
1986 | 2032 | i915_reg_t reg, |
---|
1987 | 2033 | u32 mask, |
---|
1988 | 2034 | u32 value, |
---|
.. | .. |
---|
1990 | 2036 | unsigned int slow_timeout_ms, |
---|
1991 | 2037 | u32 *out_value) |
---|
1992 | 2038 | { |
---|
1993 | | - u32 uninitialized_var(reg_value); |
---|
1994 | | -#define done (((reg_value = I915_READ_FW(reg)) & mask) == value) |
---|
| 2039 | + u32 reg_value = 0; |
---|
| 2040 | +#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) |
---|
1995 | 2041 | int ret; |
---|
1996 | 2042 | |
---|
1997 | 2043 | /* Catch any overuse of this function */ |
---|
1998 | 2044 | might_sleep_if(slow_timeout_ms); |
---|
1999 | 2045 | GEM_BUG_ON(fast_timeout_us > 20000); |
---|
| 2046 | + GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms); |
---|
2000 | 2047 | |
---|
2001 | 2048 | ret = -ETIMEDOUT; |
---|
2002 | 2049 | if (fast_timeout_us && fast_timeout_us <= 20000) |
---|
.. | .. |
---|
2013 | 2060 | |
---|
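The _fw variant above never takes forcewake or the uncore lock itself, so with slow_timeout_ms == 0 it is usable from atomic context, provided the caller already holds whatever domains the register needs. A hedged caller sketch (REG and ACK_BIT are placeholders, not real i915 definitions):

    int err;

    /* Spin at most 500us for a hypothetical ack bit; never sleeps. */
    err = __intel_wait_for_register_fw(uncore, REG,
                                       ACK_BIT, ACK_BIT,
                                       500, 0, NULL);
    if (err == -ETIMEDOUT)
            drm_dbg(&uncore->i915->drm, "ack timed out\n");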
2014 | 2061 | /** |
---|
2015 | 2062 | * __intel_wait_for_register - wait until register matches expected state |
---|
2016 | | - * @dev_priv: the i915 device |
---|
| 2063 | + * @uncore: the struct intel_uncore |
---|
2017 | 2064 | * @reg: the register to read |
---|
2018 | 2065 | * @mask: mask to apply to register value |
---|
2019 | 2066 | * @value: expected value |
---|
.. | .. |
---|
2028 | 2075 | * |
---|
2029 | 2076 | * Otherwise, the wait will timeout after @timeout_ms milliseconds. |
---|
2030 | 2077 | * |
---|
2031 | | - * Returns 0 if the register matches the desired condition, or -ETIMEOUT. |
---|
| 2078 | + * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. |
---|
2032 | 2079 | */ |
---|
2033 | | -int __intel_wait_for_register(struct drm_i915_private *dev_priv, |
---|
2034 | | - i915_reg_t reg, |
---|
2035 | | - u32 mask, |
---|
2036 | | - u32 value, |
---|
2037 | | - unsigned int fast_timeout_us, |
---|
2038 | | - unsigned int slow_timeout_ms, |
---|
2039 | | - u32 *out_value) |
---|
| 2080 | +int __intel_wait_for_register(struct intel_uncore *uncore, |
---|
| 2081 | + i915_reg_t reg, |
---|
| 2082 | + u32 mask, |
---|
| 2083 | + u32 value, |
---|
| 2084 | + unsigned int fast_timeout_us, |
---|
| 2085 | + unsigned int slow_timeout_ms, |
---|
| 2086 | + u32 *out_value) |
---|
2040 | 2087 | { |
---|
2041 | 2088 | unsigned fw = |
---|
2042 | | - intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); |
---|
| 2089 | + intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); |
---|
2043 | 2090 | u32 reg_value; |
---|
2044 | 2091 | int ret; |
---|
2045 | 2092 | |
---|
2046 | 2093 | might_sleep_if(slow_timeout_ms); |
---|
2047 | 2094 | |
---|
2048 | | - spin_lock_irq(&dev_priv->uncore.lock); |
---|
2049 | | - intel_uncore_forcewake_get__locked(dev_priv, fw); |
---|
| 2095 | + spin_lock_irq(&uncore->lock); |
---|
| 2096 | + intel_uncore_forcewake_get__locked(uncore, fw); |
---|
2050 | 2097 | |
---|
2051 | | - ret = __intel_wait_for_register_fw(dev_priv, |
---|
| 2098 | + ret = __intel_wait_for_register_fw(uncore, |
---|
2052 | 2099 | reg, mask, value, |
---|
2053 | 2100 | fast_timeout_us, 0, ®_value); |
---|
2054 | 2101 | |
---|
2055 | | - intel_uncore_forcewake_put__locked(dev_priv, fw); |
---|
2056 | | - spin_unlock_irq(&dev_priv->uncore.lock); |
---|
| 2102 | + intel_uncore_forcewake_put__locked(uncore, fw); |
---|
| 2103 | + spin_unlock_irq(&uncore->lock); |
---|
2057 | 2104 | |
---|
2058 | 2105 | if (ret && slow_timeout_ms) |
---|
2059 | | - ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), |
---|
| 2106 | + ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore, |
---|
| 2107 | + reg), |
---|
2060 | 2108 | (reg_value & mask) == value, |
---|
2061 | 2109 | slow_timeout_ms * 1000, 10, 1000); |
---|
| 2110 | + |
---|
| 2111 | + /* just trace the final value */ |
---|
| 2112 | + trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); |
---|
2062 | 2113 | |
---|
2063 | 2114 | if (out_value) |
---|
2064 | 2115 | *out_value = reg_value; |
---|
.. | .. |
---|
2066 | 2117 | return ret; |
---|
2067 | 2118 | } |
---|
2068 | 2119 | |
---|
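The wait above is deliberately two-phase: a short spin with forcewake held under the uncore lock covers the common fast case, and the optional sleeping poll without forcewake handles genuinely slow registers without pinning the powerwell. A hedged usage sketch (REG, MASK and DONE are placeholder values):

    u32 val;
    int err;

    /* Spin 2us with forcewake held, then sleep-poll for up to 100ms. */
    err = __intel_wait_for_register(uncore, REG, MASK, DONE,
                                    2, 100, &val);
    if (err == -ETIMEDOUT)
            drm_dbg(&uncore->i915->drm, "wait timed out, reg=%08x\n", val);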
2069 | | -static int gen8_reset_engine_start(struct intel_engine_cs *engine) |
---|
| 2120 | +bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) |
---|
2070 | 2121 | { |
---|
2071 | | - struct drm_i915_private *dev_priv = engine->i915; |
---|
2072 | | - int ret; |
---|
| 2122 | + bool ret; |
---|
2073 | 2123 | |
---|
2074 | | - I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), |
---|
2075 | | - _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); |
---|
2076 | | - |
---|
2077 | | - ret = __intel_wait_for_register_fw(dev_priv, |
---|
2078 | | - RING_RESET_CTL(engine->mmio_base), |
---|
2079 | | - RESET_CTL_READY_TO_RESET, |
---|
2080 | | - RESET_CTL_READY_TO_RESET, |
---|
2081 | | - 700, 0, |
---|
2082 | | - NULL); |
---|
2083 | | - if (ret) |
---|
2084 | | - DRM_ERROR("%s: reset request timeout\n", engine->name); |
---|
| 2124 | + spin_lock_irq(&uncore->debug->lock); |
---|
| 2125 | + ret = check_for_unclaimed_mmio(uncore); |
---|
| 2126 | + spin_unlock_irq(&uncore->debug->lock); |
---|
2085 | 2127 | |
---|
2086 | 2128 | return ret; |
---|
2087 | | -} |
---|
2088 | | - |
---|
2089 | | -static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) |
---|
2090 | | -{ |
---|
2091 | | - struct drm_i915_private *dev_priv = engine->i915; |
---|
2092 | | - |
---|
2093 | | - I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), |
---|
2094 | | - _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); |
---|
2095 | | -} |
---|
2096 | | - |
---|
2097 | | -static int gen8_reset_engines(struct drm_i915_private *dev_priv, |
---|
2098 | | - unsigned engine_mask) |
---|
2099 | | -{ |
---|
2100 | | - struct intel_engine_cs *engine; |
---|
2101 | | - unsigned int tmp; |
---|
2102 | | - int ret; |
---|
2103 | | - |
---|
2104 | | - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { |
---|
2105 | | - if (gen8_reset_engine_start(engine)) { |
---|
2106 | | - ret = -EIO; |
---|
2107 | | - goto not_ready; |
---|
2108 | | - } |
---|
2109 | | - } |
---|
2110 | | - |
---|
2111 | | - if (INTEL_GEN(dev_priv) >= 11) |
---|
2112 | | - ret = gen11_reset_engines(dev_priv, engine_mask); |
---|
2113 | | - else |
---|
2114 | | - ret = gen6_reset_engines(dev_priv, engine_mask); |
---|
2115 | | - |
---|
2116 | | -not_ready: |
---|
2117 | | - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
---|
2118 | | - gen8_reset_engine_cancel(engine); |
---|
2119 | | - |
---|
2120 | | - return ret; |
---|
2121 | | -} |
---|
2122 | | - |
---|
2123 | | -typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); |
---|
2124 | | - |
---|
2125 | | -static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) |
---|
2126 | | -{ |
---|
2127 | | - if (!i915_modparams.reset) |
---|
2128 | | - return NULL; |
---|
2129 | | - |
---|
2130 | | - if (INTEL_GEN(dev_priv) >= 8) |
---|
2131 | | - return gen8_reset_engines; |
---|
2132 | | - else if (INTEL_GEN(dev_priv) >= 6) |
---|
2133 | | - return gen6_reset_engines; |
---|
2134 | | - else if (IS_GEN5(dev_priv)) |
---|
2135 | | - return ironlake_do_reset; |
---|
2136 | | - else if (IS_G4X(dev_priv)) |
---|
2137 | | - return g4x_do_reset; |
---|
2138 | | - else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) |
---|
2139 | | - return g33_do_reset; |
---|
2140 | | - else if (INTEL_GEN(dev_priv) >= 3) |
---|
2141 | | - return i915_do_reset; |
---|
2142 | | - else |
---|
2143 | | - return NULL; |
---|
2144 | | -} |
---|
2145 | | - |
---|
2146 | | -int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
---|
2147 | | -{ |
---|
2148 | | - reset_func reset = intel_get_gpu_reset(dev_priv); |
---|
2149 | | - int retry; |
---|
2150 | | - int ret; |
---|
2151 | | - |
---|
2152 | | - /* |
---|
2153 | | - * We want to perform per-engine reset from atomic context (e.g. |
---|
2154 | | - * softirq), which imposes the constraint that we cannot sleep. |
---|
2155 | | - * However, experience suggests that spending a bit of time waiting |
---|
2156 | | - * for a reset helps in various cases, so for a full-device reset |
---|
2157 | | - * we apply the opposite rule and wait if we want to. As we should |
---|
2158 | | - * always follow up a failed per-engine reset with a full device reset, |
---|
2159 | | - * being a little faster, stricter and more error prone for the |
---|
2160 | | - * atomic case seems an acceptable compromise. |
---|
2161 | | - * |
---|
2162 | | - * Unfortunately this leads to a bimodal routine, when the goal was |
---|
2163 | | - * to have a single reset function that worked for resetting any |
---|
2164 | | - * number of engines simultaneously. |
---|
2165 | | - */ |
---|
2166 | | - might_sleep_if(engine_mask == ALL_ENGINES); |
---|
2167 | | - |
---|
2168 | | - /* |
---|
2169 | | - * If the power well sleeps during the reset, the reset |
---|
2170 | | - * request may be dropped and never completes (causing -EIO). |
---|
2171 | | - */ |
---|
2172 | | - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
---|
2173 | | - for (retry = 0; retry < 3; retry++) { |
---|
2174 | | - |
---|
2175 | | - /* |
---|
2176 | | - * We stop engines, otherwise we might get failed reset and a |
---|
2177 | | - * dead gpu (on elk). Also as modern gpu as kbl can suffer |
---|
2178 | | - * from system hang if batchbuffer is progressing when |
---|
2179 | | - * the reset is issued, regardless of READY_TO_RESET ack. |
---|
2180 | | - * Thus assume it is best to stop engines on all gens |
---|
2181 | | - * where we have a gpu reset. |
---|
2182 | | - * |
---|
2183 | | - * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) |
---|
2184 | | - * |
---|
2185 | | - * WaMediaResetMainRingCleanup:ctg,elk (presumably) |
---|
2186 | | - * |
---|
2187 | | - * FIXME: Wa for more modern gens needs to be validated |
---|
2188 | | - */ |
---|
2189 | | - i915_stop_engines(dev_priv, engine_mask); |
---|
2190 | | - |
---|
2191 | | - ret = -ENODEV; |
---|
2192 | | - if (reset) { |
---|
2193 | | - GEM_TRACE("engine_mask=%x\n", engine_mask); |
---|
2194 | | - ret = reset(dev_priv, engine_mask); |
---|
2195 | | - } |
---|
2196 | | - if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) |
---|
2197 | | - break; |
---|
2198 | | - |
---|
2199 | | - cond_resched(); |
---|
2200 | | - } |
---|
2201 | | - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
---|
2202 | | - |
---|
2203 | | - return ret; |
---|
2204 | | -} |
---|
2205 | | - |
---|
2206 | | -bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) |
---|
2207 | | -{ |
---|
2208 | | - return intel_get_gpu_reset(dev_priv) != NULL; |
---|
2209 | | -} |
---|
2210 | | - |
---|
2211 | | -bool intel_has_reset_engine(struct drm_i915_private *dev_priv) |
---|
2212 | | -{ |
---|
2213 | | - return (dev_priv->info.has_reset_engine && |
---|
2214 | | - i915_modparams.reset >= 2); |
---|
2215 | | -} |
---|
2216 | | - |
---|
2217 | | -int intel_reset_guc(struct drm_i915_private *dev_priv) |
---|
2218 | | -{ |
---|
2219 | | - u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC : |
---|
2220 | | - GEN9_GRDOM_GUC; |
---|
2221 | | - int ret; |
---|
2222 | | - |
---|
2223 | | - GEM_BUG_ON(!HAS_GUC(dev_priv)); |
---|
2224 | | - |
---|
2225 | | - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
---|
2226 | | - ret = gen6_hw_domain_reset(dev_priv, guc_domain); |
---|
2227 | | - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
---|
2228 | | - |
---|
2229 | | - return ret; |
---|
2230 | | -} |
---|
2231 | | - |
---|
2232 | | -bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) |
---|
2233 | | -{ |
---|
2234 | | - return check_for_unclaimed_mmio(dev_priv); |
---|
2235 | 2129 | } |
---|
2236 | 2130 | |
---|
2237 | 2131 | bool |
---|
2238 | | -intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) |
---|
| 2132 | +intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) |
---|
2239 | 2133 | { |
---|
2240 | | - if (unlikely(i915_modparams.mmio_debug || |
---|
2241 | | - dev_priv->uncore.unclaimed_mmio_check <= 0)) |
---|
2242 | | - return false; |
---|
| 2134 | + bool ret = false; |
---|
2243 | 2135 | |
---|
2244 | | - if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { |
---|
2245 | | - DRM_DEBUG("Unclaimed register detected, " |
---|
2246 | | - "enabling oneshot unclaimed register reporting. " |
---|
2247 | | - "Please use i915.mmio_debug=N for more information.\n"); |
---|
2248 | | - i915_modparams.mmio_debug++; |
---|
2249 | | - dev_priv->uncore.unclaimed_mmio_check--; |
---|
2250 | | - return true; |
---|
| 2136 | + spin_lock_irq(&uncore->debug->lock); |
---|
| 2137 | + |
---|
| 2138 | + if (unlikely(uncore->debug->unclaimed_mmio_check <= 0)) |
---|
| 2139 | + goto out; |
---|
| 2140 | + |
---|
| 2141 | + if (unlikely(check_for_unclaimed_mmio(uncore))) { |
---|
| 2142 | + if (!uncore->i915->params.mmio_debug) { |
---|
| 2143 | + drm_dbg(&uncore->i915->drm, |
---|
| 2144 | + "Unclaimed register detected, " |
---|
| 2145 | + "enabling oneshot unclaimed register reporting. " |
---|
| 2146 | + "Please use i915.mmio_debug=N for more information.\n"); |
---|
| 2147 | + uncore->i915->params.mmio_debug++; |
---|
| 2148 | + } |
---|
| 2149 | + uncore->debug->unclaimed_mmio_check--; |
---|
| 2150 | + ret = true; |
---|
2251 | 2151 | } |
---|
2252 | 2152 | |
---|
2253 | | - return false; |
---|
2254 | | -} |
---|
| 2153 | +out: |
---|
| 2154 | + spin_unlock_irq(&uncore->debug->lock); |
---|
2255 | 2155 | |
---|
2256 | | -static enum forcewake_domains |
---|
2257 | | -intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, |
---|
2258 | | - i915_reg_t reg) |
---|
2259 | | -{ |
---|
2260 | | - u32 offset = i915_mmio_reg_offset(reg); |
---|
2261 | | - enum forcewake_domains fw_domains; |
---|
2262 | | - |
---|
2263 | | - if (INTEL_GEN(dev_priv) >= 11) { |
---|
2264 | | - fw_domains = __gen11_fwtable_reg_read_fw_domains(offset); |
---|
2265 | | - } else if (HAS_FWTABLE(dev_priv)) { |
---|
2266 | | - fw_domains = __fwtable_reg_read_fw_domains(offset); |
---|
2267 | | - } else if (INTEL_GEN(dev_priv) >= 6) { |
---|
2268 | | - fw_domains = __gen6_reg_read_fw_domains(offset); |
---|
2269 | | - } else { |
---|
2270 | | - WARN_ON(!IS_GEN(dev_priv, 2, 5)); |
---|
2271 | | - fw_domains = 0; |
---|
2272 | | - } |
---|
2273 | | - |
---|
2274 | | - WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); |
---|
2275 | | - |
---|
2276 | | - return fw_domains; |
---|
2277 | | -} |
---|
2278 | | - |
---|
2279 | | -static enum forcewake_domains |
---|
2280 | | -intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, |
---|
2281 | | - i915_reg_t reg) |
---|
2282 | | -{ |
---|
2283 | | - u32 offset = i915_mmio_reg_offset(reg); |
---|
2284 | | - enum forcewake_domains fw_domains; |
---|
2285 | | - |
---|
2286 | | - if (INTEL_GEN(dev_priv) >= 11) { |
---|
2287 | | - fw_domains = __gen11_fwtable_reg_write_fw_domains(offset); |
---|
2288 | | - } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { |
---|
2289 | | - fw_domains = __fwtable_reg_write_fw_domains(offset); |
---|
2290 | | - } else if (IS_GEN8(dev_priv)) { |
---|
2291 | | - fw_domains = __gen8_reg_write_fw_domains(offset); |
---|
2292 | | - } else if (IS_GEN(dev_priv, 6, 7)) { |
---|
2293 | | - fw_domains = FORCEWAKE_RENDER; |
---|
2294 | | - } else { |
---|
2295 | | - WARN_ON(!IS_GEN(dev_priv, 2, 5)); |
---|
2296 | | - fw_domains = 0; |
---|
2297 | | - } |
---|
2298 | | - |
---|
2299 | | - WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); |
---|
2300 | | - |
---|
2301 | | - return fw_domains; |
---|
| 2156 | + return ret; |
---|
2302 | 2157 | } |
---|
2303 | 2158 | |
---|
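Arming is a oneshot: the first trip bumps i915.mmio_debug so subsequent accesses are checked individually, and decrements unclaimed_mmio_check so the warning is not repeated. A hedged sketch of a periodic call site (the placement is illustrative, not the driver's actual one):

    /* e.g. from a watchdog/heartbeat tick */
    if (intel_uncore_arm_unclaimed_mmio_detection(uncore))
            drm_err(&uncore->i915->drm,
                    "Unclaimed MMIO detected; boot with i915.mmio_debug=1 to trace it\n");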
2304 | 2159 | /** |
---|
2305 | 2160 | * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access |
---|
2306 | 2161 | * a register |
---|
2307 | | - * @dev_priv: pointer to struct drm_i915_private |
---|
| 2162 | + * @uncore: pointer to struct intel_uncore |
---|
2308 | 2163 | * @reg: register in question |
---|
2309 | 2164 | * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE |
---|
2310 | 2165 | * |
---|
.. | .. |
---|
2316 | 2171 | * callers to do FIFO management on their own or risk losing writes. |
---|
2317 | 2172 | */ |
---|
2318 | 2173 | enum forcewake_domains |
---|
2319 | | -intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, |
---|
| 2174 | +intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, |
---|
2320 | 2175 | i915_reg_t reg, unsigned int op) |
---|
2321 | 2176 | { |
---|
2322 | 2177 | enum forcewake_domains fw_domains = 0; |
---|
2323 | 2178 | |
---|
2324 | | - WARN_ON(!op); |
---|
| 2179 | + drm_WARN_ON(&uncore->i915->drm, !op); |
---|
2325 | 2180 | |
---|
2326 | | - if (intel_vgpu_active(dev_priv)) |
---|
| 2181 | + if (!intel_uncore_has_forcewake(uncore)) |
---|
2327 | 2182 | return 0; |
---|
2328 | 2183 | |
---|
2329 | 2184 | if (op & FW_REG_READ) |
---|
2330 | | - fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); |
---|
| 2185 | + fw_domains = uncore->funcs.read_fw_domains(uncore, reg); |
---|
2331 | 2186 | |
---|
2332 | 2187 | if (op & FW_REG_WRITE) |
---|
2333 | | - fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); |
---|
| 2188 | + fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); |
---|
| 2189 | + |
---|
| 2190 | + drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains); |
---|
2334 | 2191 | |
---|
2335 | 2192 | return fw_domains; |
---|
2336 | 2193 | } |
---|
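The lookup above lets a caller pay the forcewake cost once for a whole burst of raw accesses instead of once per access. A hedged sketch of that sequence (reg and val are placeholders; the _fw accessors skip the implicit forcewake handling, which is the point):

    enum forcewake_domains fw;
    u32 val = 0;

    fw = intel_uncore_forcewake_for_reg(uncore, reg,
                                        FW_REG_READ | FW_REG_WRITE);
    intel_uncore_forcewake_get(uncore, fw);

    intel_uncore_write_fw(uncore, reg, val);  /* raw write, no fw checks */
    val = intel_uncore_read_fw(uncore, reg);  /* raw read */

    intel_uncore_forcewake_put(uncore, fw);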