.. | .. |
---|
36 | 36 | #include <linux/pm_runtime.h> |
---|
37 | 37 | #include <linux/pnp.h> |
---|
38 | 38 | #include <linux/slab.h> |
---|
39 | | -#include <linux/vgaarb.h> |
---|
40 | 39 | #include <linux/vga_switcheroo.h> |
---|
41 | 40 | #include <linux/vt.h> |
---|
42 | 41 | #include <acpi/video.h> |
---|
43 | 42 | |
---|
44 | | -#include <drm/drmP.h> |
---|
45 | | -#include <drm/drm_crtc_helper.h> |
---|
46 | 43 | #include <drm/drm_atomic_helper.h> |
---|
47 | | -#include <drm/i915_drm.h> |
---|
| 44 | +#include <drm/drm_ioctl.h> |
---|
| 45 | +#include <drm/drm_irq.h> |
---|
| 46 | +#include <drm/drm_managed.h> |
---|
| 47 | +#include <drm/drm_probe_helper.h> |
---|
48 | 48 | |
---|
| 49 | +#include "display/intel_acpi.h" |
---|
| 50 | +#include "display/intel_audio.h" |
---|
| 51 | +#include "display/intel_bw.h" |
---|
| 52 | +#include "display/intel_cdclk.h" |
---|
| 53 | +#include "display/intel_csr.h" |
---|
| 54 | +#include "display/intel_display_debugfs.h" |
---|
| 55 | +#include "display/intel_display_types.h" |
---|
| 56 | +#include "display/intel_dp.h" |
---|
| 57 | +#include "display/intel_fbdev.h" |
---|
| 58 | +#include "display/intel_hotplug.h" |
---|
| 59 | +#include "display/intel_overlay.h" |
---|
| 60 | +#include "display/intel_pipe_crc.h" |
---|
| 61 | +#include "display/intel_sprite.h" |
---|
| 62 | +#include "display/intel_vga.h" |
---|
| 63 | + |
---|
| 64 | +#include "gem/i915_gem_context.h" |
---|
| 65 | +#include "gem/i915_gem_ioctls.h" |
---|
| 66 | +#include "gem/i915_gem_mman.h" |
---|
| 67 | +#include "gt/intel_gt.h" |
---|
| 68 | +#include "gt/intel_gt_pm.h" |
---|
| 69 | +#include "gt/intel_rc6.h" |
---|
| 70 | + |
---|
| 71 | +#include "i915_debugfs.h" |
---|
49 | 72 | #include "i915_drv.h" |
---|
50 | | -#include "i915_trace.h" |
---|
51 | | -#include "i915_pmu.h" |
---|
| 73 | +#include "i915_ioc32.h" |
---|
| 74 | +#include "i915_irq.h" |
---|
| 75 | +#include "i915_memcpy.h" |
---|
| 76 | +#include "i915_perf.h" |
---|
52 | 77 | #include "i915_query.h" |
---|
| 78 | +#include "i915_suspend.h" |
---|
| 79 | +#include "i915_switcheroo.h" |
---|
| 80 | +#include "i915_sysfs.h" |
---|
| 81 | +#include "i915_trace.h" |
---|
53 | 82 | #include "i915_vgpu.h" |
---|
54 | | -#include "intel_drv.h" |
---|
55 | | -#include "intel_uc.h" |
---|
| 83 | +#include "intel_dram.h" |
---|
| 84 | +#include "intel_gvt.h" |
---|
| 85 | +#include "intel_memory_region.h" |
---|
| 86 | +#include "intel_pm.h" |
---|
| 87 | +#include "intel_sideband.h" |
---|
| 88 | +#include "vlv_suspend.h" |
---|
56 | 89 | |
---|
57 | 90 | static struct drm_driver driver; |
---|
58 | | - |
---|
59 | | -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) |
---|
60 | | -static unsigned int i915_load_fail_count; |
---|
61 | | - |
---|
62 | | -bool __i915_inject_load_failure(const char *func, int line) |
---|
63 | | -{ |
---|
64 | | - if (i915_load_fail_count >= i915_modparams.inject_load_failure) |
---|
65 | | - return false; |
---|
66 | | - |
---|
67 | | - if (++i915_load_fail_count == i915_modparams.inject_load_failure) { |
---|
68 | | - DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", |
---|
69 | | - i915_modparams.inject_load_failure, func, line); |
---|
70 | | - i915_modparams.inject_load_failure = 0; |
---|
71 | | - return true; |
---|
72 | | - } |
---|
73 | | - |
---|
74 | | - return false; |
---|
75 | | -} |
---|
76 | | - |
---|
77 | | -bool i915_error_injected(void) |
---|
78 | | -{ |
---|
79 | | - return i915_load_fail_count && !i915_modparams.inject_load_failure; |
---|
80 | | -} |
---|
81 | | - |
---|
82 | | -#endif |
---|
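
The block removed above implemented i915's probe-time fault injection: a module parameter names the Nth initialization checkpoint at which to fail, and a counter trips once that checkpoint is reached (the helper reappears later in this patch under the name i915_inject_probe_failure()). A minimal sketch of the same counter pattern follows; the identifiers are illustrative, not the driver's.

```c
/* Illustrative sketch of the checkpoint-counting fault-injection pattern
 * used by the removed helpers; names here are hypothetical, not i915's.
 */
static unsigned int fail_count;
static unsigned int inject_at;	/* typically set from a module parameter */

static bool should_fail_here(const char *func, int line)
{
	if (fail_count >= inject_at)
		return false;	/* injection disabled or already fired */

	if (++fail_count == inject_at) {
		pr_info("injecting failure at checkpoint %u [%s:%d]\n",
			inject_at, func, line);
		inject_at = 0;	/* fire only once */
		return true;
	}

	return false;
}
```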
83 | | - |
---|
84 | | -#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" |
---|
85 | | -#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ |
---|
86 | | - "providing the dmesg log by booting with drm.debug=0xf" |
---|
87 | | - |
---|
88 | | -void |
---|
89 | | -__i915_printk(struct drm_i915_private *dev_priv, const char *level, |
---|
90 | | - const char *fmt, ...) |
---|
91 | | -{ |
---|
92 | | - static bool shown_bug_once; |
---|
93 | | - struct device *kdev = dev_priv->drm.dev; |
---|
94 | | - bool is_error = level[1] <= KERN_ERR[1]; |
---|
95 | | - bool is_debug = level[1] == KERN_DEBUG[1]; |
---|
96 | | - struct va_format vaf; |
---|
97 | | - va_list args; |
---|
98 | | - |
---|
99 | | - if (is_debug && !(drm_debug & DRM_UT_DRIVER)) |
---|
100 | | - return; |
---|
101 | | - |
---|
102 | | - va_start(args, fmt); |
---|
103 | | - |
---|
104 | | - vaf.fmt = fmt; |
---|
105 | | - vaf.va = &args; |
---|
106 | | - |
---|
107 | | - if (is_error) |
---|
108 | | - dev_printk(level, kdev, "%pV", &vaf); |
---|
109 | | - else |
---|
110 | | - dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", |
---|
111 | | - __builtin_return_address(0), &vaf); |
---|
112 | | - |
---|
113 | | - va_end(args); |
---|
114 | | - |
---|
115 | | - if (is_error && !shown_bug_once) { |
---|
116 | | - /* |
---|
117 | | - * Ask the user to file a bug report for the error, except |
---|
118 | | - * if they may have caused the bug by fiddling with unsafe |
---|
119 | | - * module parameters. |
---|
120 | | - */ |
---|
121 | | - if (!test_taint(TAINT_USER)) |
---|
122 | | - dev_notice(kdev, "%s", FDO_BUG_MSG); |
---|
123 | | - shown_bug_once = true; |
---|
124 | | - } |
---|
125 | | -} |
---|
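
The removed __i915_printk() relies on the kernel's %pV extension together with struct va_format to forward a caller's varargs to dev_printk() without re-formatting them. A condensed sketch of that idiom, with an illustrative wrapper name:

```c
/* Condensed sketch of the %pV forwarding idiom used by the removed
 * __i915_printk(); the wrapper name is illustrative only.
 */
static void my_dev_printk(struct device *dev, const char *level,
			  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	dev_printk(level, dev, "%pV", &vaf);	/* printk expands the nested format */
	va_end(args);
}
```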
126 | | - |
---|
127 | | -/* Map PCH device id to PCH type, or PCH_NONE if unknown. */ |
---|
128 | | -static enum intel_pch |
---|
129 | | -intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) |
---|
130 | | -{ |
---|
131 | | - switch (id) { |
---|
132 | | - case INTEL_PCH_IBX_DEVICE_ID_TYPE: |
---|
133 | | - DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); |
---|
134 | | - WARN_ON(!IS_GEN5(dev_priv)); |
---|
135 | | - return PCH_IBX; |
---|
136 | | - case INTEL_PCH_CPT_DEVICE_ID_TYPE: |
---|
137 | | - DRM_DEBUG_KMS("Found CougarPoint PCH\n"); |
---|
138 | | - WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); |
---|
139 | | - return PCH_CPT; |
---|
140 | | - case INTEL_PCH_PPT_DEVICE_ID_TYPE: |
---|
141 | | - DRM_DEBUG_KMS("Found PantherPoint PCH\n"); |
---|
142 | | - WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); |
---|
143 | | - /* PantherPoint is CPT compatible */ |
---|
144 | | - return PCH_CPT; |
---|
145 | | - case INTEL_PCH_LPT_DEVICE_ID_TYPE: |
---|
146 | | - DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
---|
147 | | - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); |
---|
148 | | - WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); |
---|
149 | | - return PCH_LPT; |
---|
150 | | - case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE: |
---|
151 | | - DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
---|
152 | | - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); |
---|
153 | | - WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); |
---|
154 | | - return PCH_LPT; |
---|
155 | | - case INTEL_PCH_WPT_DEVICE_ID_TYPE: |
---|
156 | | - DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); |
---|
157 | | - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); |
---|
158 | | - WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); |
---|
159 | | - /* WildcatPoint is LPT compatible */ |
---|
160 | | - return PCH_LPT; |
---|
161 | | - case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE: |
---|
162 | | - DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); |
---|
163 | | - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); |
---|
164 | | - WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); |
---|
165 | | - /* WildcatPoint is LPT compatible */ |
---|
166 | | - return PCH_LPT; |
---|
167 | | - case INTEL_PCH_SPT_DEVICE_ID_TYPE: |
---|
168 | | - DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); |
---|
169 | | - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); |
---|
170 | | - return PCH_SPT; |
---|
171 | | - case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: |
---|
172 | | - DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); |
---|
173 | | - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); |
---|
174 | | - return PCH_SPT; |
---|
175 | | - case INTEL_PCH_KBP_DEVICE_ID_TYPE: |
---|
176 | | - DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); |
---|
177 | | - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && |
---|
178 | | - !IS_COFFEELAKE(dev_priv)); |
---|
179 | | - return PCH_KBP; |
---|
180 | | - case INTEL_PCH_CNP_DEVICE_ID_TYPE: |
---|
181 | | - DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); |
---|
182 | | - WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); |
---|
183 | | - return PCH_CNP; |
---|
184 | | - case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: |
---|
185 | | - DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); |
---|
186 | | - WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); |
---|
187 | | - return PCH_CNP; |
---|
188 | | - case INTEL_PCH_ICP_DEVICE_ID_TYPE: |
---|
189 | | - DRM_DEBUG_KMS("Found Ice Lake PCH\n"); |
---|
190 | | - WARN_ON(!IS_ICELAKE(dev_priv)); |
---|
191 | | - return PCH_ICP; |
---|
192 | | - default: |
---|
193 | | - return PCH_NONE; |
---|
194 | | - } |
---|
195 | | -} |
---|
196 | | - |
---|
197 | | -static bool intel_is_virt_pch(unsigned short id, |
---|
198 | | - unsigned short svendor, unsigned short sdevice) |
---|
199 | | -{ |
---|
200 | | - return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || |
---|
201 | | - id == INTEL_PCH_P3X_DEVICE_ID_TYPE || |
---|
202 | | - (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && |
---|
203 | | - svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET && |
---|
204 | | - sdevice == PCI_SUBDEVICE_ID_QEMU)); |
---|
205 | | -} |
---|
206 | | - |
---|
207 | | -static unsigned short |
---|
208 | | -intel_virt_detect_pch(const struct drm_i915_private *dev_priv) |
---|
209 | | -{ |
---|
210 | | - unsigned short id = 0; |
---|
211 | | - |
---|
212 | | - /* |
---|
213 | | - * In a virtualized passthrough environment we can be in a |
---|
214 | | - * setup where the ISA bridge is not able to be passed through. |
---|
215 | | - * In this case, a south bridge can be emulated and we have to |
---|
216 | | - * make an educated guess as to which PCH is really there. |
---|
217 | | - */ |
---|
218 | | - |
---|
219 | | - if (IS_GEN5(dev_priv)) |
---|
220 | | - id = INTEL_PCH_IBX_DEVICE_ID_TYPE; |
---|
221 | | - else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) |
---|
222 | | - id = INTEL_PCH_CPT_DEVICE_ID_TYPE; |
---|
223 | | - else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) |
---|
224 | | - id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; |
---|
225 | | - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
---|
226 | | - id = INTEL_PCH_LPT_DEVICE_ID_TYPE; |
---|
227 | | - else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
---|
228 | | - id = INTEL_PCH_SPT_DEVICE_ID_TYPE; |
---|
229 | | - else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) |
---|
230 | | - id = INTEL_PCH_CNP_DEVICE_ID_TYPE; |
---|
231 | | - else if (IS_ICELAKE(dev_priv)) |
---|
232 | | - id = INTEL_PCH_ICP_DEVICE_ID_TYPE; |
---|
233 | | - |
---|
234 | | - if (id) |
---|
235 | | - DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); |
---|
236 | | - else |
---|
237 | | - DRM_DEBUG_KMS("Assuming no PCH\n"); |
---|
238 | | - |
---|
239 | | - return id; |
---|
240 | | -} |
---|
241 | | - |
---|
242 | | -static void intel_detect_pch(struct drm_i915_private *dev_priv) |
---|
243 | | -{ |
---|
244 | | - struct pci_dev *pch = NULL; |
---|
245 | | - |
---|
246 | | - /* |
---|
247 | | - * The reason to probe ISA bridge instead of Dev31:Fun0 is to |
---|
248 | | - * make graphics device passthrough work easy for VMM, that only |
---|
249 | | - * need to expose ISA bridge to let driver know the real hardware |
---|
250 | | - * underneath. This is a requirement from virtualization team. |
---|
251 | | - * |
---|
252 | | - * In some virtualized environments (e.g. XEN), there is irrelevant |
---|
253 | | - * ISA bridge in the system. To work reliably, we should scan through |
---|
254 | | - * all the ISA bridge devices and check for the first match, instead |
---|
255 | | - * of only checking the first one. |
---|
256 | | - */ |
---|
257 | | - while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { |
---|
258 | | - unsigned short id; |
---|
259 | | - enum intel_pch pch_type; |
---|
260 | | - |
---|
261 | | - if (pch->vendor != PCI_VENDOR_ID_INTEL) |
---|
262 | | - continue; |
---|
263 | | - |
---|
264 | | - id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
---|
265 | | - |
---|
266 | | - pch_type = intel_pch_type(dev_priv, id); |
---|
267 | | - if (pch_type != PCH_NONE) { |
---|
268 | | - dev_priv->pch_type = pch_type; |
---|
269 | | - dev_priv->pch_id = id; |
---|
270 | | - break; |
---|
271 | | - } else if (intel_is_virt_pch(id, pch->subsystem_vendor, |
---|
272 | | - pch->subsystem_device)) { |
---|
273 | | - id = intel_virt_detect_pch(dev_priv); |
---|
274 | | - pch_type = intel_pch_type(dev_priv, id); |
---|
275 | | - |
---|
276 | | - /* Sanity check virtual PCH id */ |
---|
277 | | - if (WARN_ON(id && pch_type == PCH_NONE)) |
---|
278 | | - id = 0; |
---|
279 | | - |
---|
280 | | - dev_priv->pch_type = pch_type; |
---|
281 | | - dev_priv->pch_id = id; |
---|
282 | | - break; |
---|
283 | | - } |
---|
284 | | - } |
---|
285 | | - |
---|
286 | | - /* |
---|
287 | | - * Use PCH_NOP (PCH but no South Display) for PCH platforms without |
---|
288 | | - * display. |
---|
289 | | - */ |
---|
290 | | - if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) { |
---|
291 | | - DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n"); |
---|
292 | | - dev_priv->pch_type = PCH_NOP; |
---|
293 | | - dev_priv->pch_id = 0; |
---|
294 | | - } |
---|
295 | | - |
---|
296 | | - if (!pch) |
---|
297 | | - DRM_DEBUG_KMS("No PCH found.\n"); |
---|
298 | | - |
---|
299 | | - pci_dev_put(pch); |
---|
300 | | -} |
---|
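
intel_detect_pch() leaves this file in this patch (it is still called from the early-probe path below), but the scan itself is worth noting: the PCH is identified by walking every ISA-bridge PCI device rather than assuming device 31 function 0, so passthrough setups that only expose an emulated bridge still work. A reduced sketch of that iteration, with the id-to-type lookup left abstract:

```c
/* Reduced sketch of the ISA-bridge walk performed by intel_detect_pch();
 * lookup_pch_type() stands in for the real id -> PCH type mapping.
 */
static void detect_pch_sketch(struct drm_i915_private *i915)
{
	struct pci_dev *pch = NULL;

	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		unsigned short id;

		if (pch->vendor != PCI_VENDOR_ID_INTEL)
			continue;

		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
		if (lookup_pch_type(i915, id) != PCH_NONE)
			break;	/* first recognised bridge wins */
	}

	pci_dev_put(pch);	/* drop the reference; NULL is a no-op */
}
```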
301 | | - |
---|
302 | | -static int i915_getparam_ioctl(struct drm_device *dev, void *data, |
---|
303 | | - struct drm_file *file_priv) |
---|
304 | | -{ |
---|
305 | | - struct drm_i915_private *dev_priv = to_i915(dev); |
---|
306 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
307 | | - drm_i915_getparam_t *param = data; |
---|
308 | | - int value; |
---|
309 | | - |
---|
310 | | - switch (param->param) { |
---|
311 | | - case I915_PARAM_IRQ_ACTIVE: |
---|
312 | | - case I915_PARAM_ALLOW_BATCHBUFFER: |
---|
313 | | - case I915_PARAM_LAST_DISPATCH: |
---|
314 | | - case I915_PARAM_HAS_EXEC_CONSTANTS: |
---|
315 | | - /* Reject all old ums/dri params. */ |
---|
316 | | - return -ENODEV; |
---|
317 | | - case I915_PARAM_CHIPSET_ID: |
---|
318 | | - value = pdev->device; |
---|
319 | | - break; |
---|
320 | | - case I915_PARAM_REVISION: |
---|
321 | | - value = pdev->revision; |
---|
322 | | - break; |
---|
323 | | - case I915_PARAM_NUM_FENCES_AVAIL: |
---|
324 | | - value = dev_priv->num_fence_regs; |
---|
325 | | - break; |
---|
326 | | - case I915_PARAM_HAS_OVERLAY: |
---|
327 | | - value = dev_priv->overlay ? 1 : 0; |
---|
328 | | - break; |
---|
329 | | - case I915_PARAM_HAS_BSD: |
---|
330 | | - value = !!dev_priv->engine[VCS]; |
---|
331 | | - break; |
---|
332 | | - case I915_PARAM_HAS_BLT: |
---|
333 | | - value = !!dev_priv->engine[BCS]; |
---|
334 | | - break; |
---|
335 | | - case I915_PARAM_HAS_VEBOX: |
---|
336 | | - value = !!dev_priv->engine[VECS]; |
---|
337 | | - break; |
---|
338 | | - case I915_PARAM_HAS_BSD2: |
---|
339 | | - value = !!dev_priv->engine[VCS2]; |
---|
340 | | - break; |
---|
341 | | - case I915_PARAM_HAS_LLC: |
---|
342 | | - value = HAS_LLC(dev_priv); |
---|
343 | | - break; |
---|
344 | | - case I915_PARAM_HAS_WT: |
---|
345 | | - value = HAS_WT(dev_priv); |
---|
346 | | - break; |
---|
347 | | - case I915_PARAM_HAS_ALIASING_PPGTT: |
---|
348 | | - value = USES_PPGTT(dev_priv); |
---|
349 | | - break; |
---|
350 | | - case I915_PARAM_HAS_SEMAPHORES: |
---|
351 | | - value = HAS_LEGACY_SEMAPHORES(dev_priv); |
---|
352 | | - break; |
---|
353 | | - case I915_PARAM_HAS_SECURE_BATCHES: |
---|
354 | | - value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN); |
---|
355 | | - break; |
---|
356 | | - case I915_PARAM_CMD_PARSER_VERSION: |
---|
357 | | - value = i915_cmd_parser_get_version(dev_priv); |
---|
358 | | - break; |
---|
359 | | - case I915_PARAM_SUBSLICE_TOTAL: |
---|
360 | | - value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu); |
---|
361 | | - if (!value) |
---|
362 | | - return -ENODEV; |
---|
363 | | - break; |
---|
364 | | - case I915_PARAM_EU_TOTAL: |
---|
365 | | - value = INTEL_INFO(dev_priv)->sseu.eu_total; |
---|
366 | | - if (!value) |
---|
367 | | - return -ENODEV; |
---|
368 | | - break; |
---|
369 | | - case I915_PARAM_HAS_GPU_RESET: |
---|
370 | | - value = i915_modparams.enable_hangcheck && |
---|
371 | | - intel_has_gpu_reset(dev_priv); |
---|
372 | | - if (value && intel_has_reset_engine(dev_priv)) |
---|
373 | | - value = 2; |
---|
374 | | - break; |
---|
375 | | - case I915_PARAM_HAS_RESOURCE_STREAMER: |
---|
376 | | - value = HAS_RESOURCE_STREAMER(dev_priv); |
---|
377 | | - break; |
---|
378 | | - case I915_PARAM_HAS_POOLED_EU: |
---|
379 | | - value = HAS_POOLED_EU(dev_priv); |
---|
380 | | - break; |
---|
381 | | - case I915_PARAM_MIN_EU_IN_POOL: |
---|
382 | | - value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool; |
---|
383 | | - break; |
---|
384 | | - case I915_PARAM_HUC_STATUS: |
---|
385 | | - value = intel_huc_check_status(&dev_priv->huc); |
---|
386 | | - if (value < 0) |
---|
387 | | - return value; |
---|
388 | | - break; |
---|
389 | | - case I915_PARAM_MMAP_GTT_VERSION: |
---|
390 | | - /* Though we've started our numbering from 1, and so class all |
---|
391 | | - * earlier versions as 0, in effect their value is undefined as |
---|
392 | | - * the ioctl will report EINVAL for the unknown param! |
---|
393 | | - */ |
---|
394 | | - value = i915_gem_mmap_gtt_version(); |
---|
395 | | - break; |
---|
396 | | - case I915_PARAM_HAS_SCHEDULER: |
---|
397 | | - value = dev_priv->caps.scheduler; |
---|
398 | | - break; |
---|
399 | | - |
---|
400 | | - case I915_PARAM_MMAP_VERSION: |
---|
401 | | - /* Remember to bump this if the version changes! */ |
---|
402 | | - case I915_PARAM_HAS_GEM: |
---|
403 | | - case I915_PARAM_HAS_PAGEFLIPPING: |
---|
404 | | - case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */ |
---|
405 | | - case I915_PARAM_HAS_RELAXED_FENCING: |
---|
406 | | - case I915_PARAM_HAS_COHERENT_RINGS: |
---|
407 | | - case I915_PARAM_HAS_RELAXED_DELTA: |
---|
408 | | - case I915_PARAM_HAS_GEN7_SOL_RESET: |
---|
409 | | - case I915_PARAM_HAS_WAIT_TIMEOUT: |
---|
410 | | - case I915_PARAM_HAS_PRIME_VMAP_FLUSH: |
---|
411 | | - case I915_PARAM_HAS_PINNED_BATCHES: |
---|
412 | | - case I915_PARAM_HAS_EXEC_NO_RELOC: |
---|
413 | | - case I915_PARAM_HAS_EXEC_HANDLE_LUT: |
---|
414 | | - case I915_PARAM_HAS_COHERENT_PHYS_GTT: |
---|
415 | | - case I915_PARAM_HAS_EXEC_SOFTPIN: |
---|
416 | | - case I915_PARAM_HAS_EXEC_ASYNC: |
---|
417 | | - case I915_PARAM_HAS_EXEC_FENCE: |
---|
418 | | - case I915_PARAM_HAS_EXEC_CAPTURE: |
---|
419 | | - case I915_PARAM_HAS_EXEC_BATCH_FIRST: |
---|
420 | | - case I915_PARAM_HAS_EXEC_FENCE_ARRAY: |
---|
421 | | - /* For the time being all of these are always true; |
---|
422 | | - * if some supported hardware does not have one of these |
---|
423 | | - * features this value needs to be provided from |
---|
424 | | - * INTEL_INFO(), a feature macro, or similar. |
---|
425 | | - */ |
---|
426 | | - value = 1; |
---|
427 | | - break; |
---|
428 | | - case I915_PARAM_HAS_CONTEXT_ISOLATION: |
---|
429 | | - value = intel_engines_has_context_isolation(dev_priv); |
---|
430 | | - break; |
---|
431 | | - case I915_PARAM_SLICE_MASK: |
---|
432 | | - value = INTEL_INFO(dev_priv)->sseu.slice_mask; |
---|
433 | | - if (!value) |
---|
434 | | - return -ENODEV; |
---|
435 | | - break; |
---|
436 | | - case I915_PARAM_SUBSLICE_MASK: |
---|
437 | | - value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0]; |
---|
438 | | - if (!value) |
---|
439 | | - return -ENODEV; |
---|
440 | | - break; |
---|
441 | | - case I915_PARAM_CS_TIMESTAMP_FREQUENCY: |
---|
442 | | - value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz; |
---|
443 | | - break; |
---|
444 | | - default: |
---|
445 | | - DRM_DEBUG("Unknown parameter %d\n", param->param); |
---|
446 | | - return -EINVAL; |
---|
447 | | - } |
---|
448 | | - |
---|
449 | | - if (put_user(value, param->value)) |
---|
450 | | - return -EFAULT; |
---|
451 | | - |
---|
452 | | - return 0; |
---|
453 | | -} |
---|
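
The GETPARAM ioctl handler removed here answers simple integer capability queries from userspace. For context, a minimal libdrm-style caller looks roughly like the sketch below; it assumes a libdrm build environment and abbreviates error handling.

```c
/* Rough userspace sketch of querying an i915 parameter via GETPARAM.
 * Assumes a libdrm environment; error handling is abbreviated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int chipset = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &chipset,
	};

	if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("PCI device id: 0x%04x\n", chipset);

	return 0;
}
```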
454 | 91 | |
---|
455 | 92 | static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) |
---|
456 | 93 | { |
---|
.. | .. |
---|
459 | 96 | dev_priv->bridge_dev = |
---|
460 | 97 | pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0)); |
---|
461 | 98 | if (!dev_priv->bridge_dev) { |
---|
462 | | - DRM_ERROR("bridge device not found\n"); |
---|
| 99 | + drm_err(&dev_priv->drm, "bridge device not found\n"); |
---|
463 | 100 | return -1; |
---|
464 | 101 | } |
---|
465 | 102 | return 0; |
---|
.. | .. |
---|
496 | 133 | 0, pcibios_align_resource, |
---|
497 | 134 | dev_priv->bridge_dev); |
---|
498 | 135 | if (ret) { |
---|
499 | | - DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); |
---|
| 136 | + drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret); |
---|
500 | 137 | dev_priv->mch_res.start = 0; |
---|
501 | 138 | return ret; |
---|
502 | 139 | } |
---|
.. | .. |
---|
579 | 216 | release_resource(&dev_priv->mch_res); |
---|
580 | 217 | } |
---|
581 | 218 | |
---|
582 | | -/* true = enable decode, false = disable decoder */ |
---|
583 | | -static unsigned int i915_vga_set_decode(void *cookie, bool state) |
---|
584 | | -{ |
---|
585 | | - struct drm_i915_private *dev_priv = cookie; |
---|
586 | | - |
---|
587 | | - intel_modeset_vga_set_state(dev_priv, state); |
---|
588 | | - if (state) |
---|
589 | | - return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | |
---|
590 | | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
---|
591 | | - else |
---|
592 | | - return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
---|
593 | | -} |
---|
594 | | - |
---|
595 | | -static int i915_resume_switcheroo(struct drm_device *dev); |
---|
596 | | -static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); |
---|
597 | | - |
---|
598 | | -static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) |
---|
599 | | -{ |
---|
600 | | - struct drm_device *dev = pci_get_drvdata(pdev); |
---|
601 | | - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; |
---|
602 | | - |
---|
603 | | - if (state == VGA_SWITCHEROO_ON) { |
---|
604 | | - pr_info("switched on\n"); |
---|
605 | | - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
---|
606 | | - /* i915 resume handler doesn't set to D0 */ |
---|
607 | | - pci_set_power_state(pdev, PCI_D0); |
---|
608 | | - i915_resume_switcheroo(dev); |
---|
609 | | - dev->switch_power_state = DRM_SWITCH_POWER_ON; |
---|
610 | | - } else { |
---|
611 | | - pr_info("switched off\n"); |
---|
612 | | - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
---|
613 | | - i915_suspend_switcheroo(dev, pmm); |
---|
614 | | - dev->switch_power_state = DRM_SWITCH_POWER_OFF; |
---|
615 | | - } |
---|
616 | | -} |
---|
617 | | - |
---|
618 | | -static bool i915_switcheroo_can_switch(struct pci_dev *pdev) |
---|
619 | | -{ |
---|
620 | | - struct drm_device *dev = pci_get_drvdata(pdev); |
---|
621 | | - |
---|
622 | | - /* |
---|
623 | | - * FIXME: open_count is protected by drm_global_mutex but that would lead to |
---|
624 | | - * locking inversion with the driver load path. And the access here is |
---|
625 | | - * completely racy anyway. So don't bother with locking for now. |
---|
626 | | - */ |
---|
627 | | - return dev->open_count == 0; |
---|
628 | | -} |
---|
629 | | - |
---|
630 | | -static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { |
---|
631 | | - .set_gpu_state = i915_switcheroo_set_state, |
---|
632 | | - .reprobe = NULL, |
---|
633 | | - .can_switch = i915_switcheroo_can_switch, |
---|
634 | | -}; |
---|
635 | | - |
---|
636 | | -static int i915_load_modeset_init(struct drm_device *dev) |
---|
637 | | -{ |
---|
638 | | - struct drm_i915_private *dev_priv = to_i915(dev); |
---|
639 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
640 | | - int ret; |
---|
641 | | - |
---|
642 | | - if (i915_inject_load_failure()) |
---|
643 | | - return -ENODEV; |
---|
644 | | - |
---|
645 | | - intel_bios_init(dev_priv); |
---|
646 | | - |
---|
647 | | - /* If we have > 1 VGA cards, then we need to arbitrate access |
---|
648 | | - * to the common VGA resources. |
---|
649 | | - * |
---|
650 | | - * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), |
---|
651 | | - * then we do not take part in VGA arbitration and the |
---|
652 | | - * vga_client_register() fails with -ENODEV. |
---|
653 | | - */ |
---|
654 | | - ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); |
---|
655 | | - if (ret && ret != -ENODEV) |
---|
656 | | - goto out; |
---|
657 | | - |
---|
658 | | - intel_register_dsm_handler(); |
---|
659 | | - |
---|
660 | | - ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false); |
---|
661 | | - if (ret) |
---|
662 | | - goto cleanup_vga_client; |
---|
663 | | - |
---|
664 | | - /* must happen before intel_power_domains_init_hw() on VLV/CHV */ |
---|
665 | | - intel_update_rawclk(dev_priv); |
---|
666 | | - |
---|
667 | | - intel_power_domains_init_hw(dev_priv, false); |
---|
668 | | - |
---|
669 | | - intel_csr_ucode_init(dev_priv); |
---|
670 | | - |
---|
671 | | - ret = intel_irq_install(dev_priv); |
---|
672 | | - if (ret) |
---|
673 | | - goto cleanup_csr; |
---|
674 | | - |
---|
675 | | - intel_setup_gmbus(dev_priv); |
---|
676 | | - |
---|
677 | | - /* Important: The output setup functions called by modeset_init need |
---|
678 | | - * working irqs for e.g. gmbus and dp aux transfers. */ |
---|
679 | | - ret = intel_modeset_init(dev); |
---|
680 | | - if (ret) |
---|
681 | | - goto cleanup_irq; |
---|
682 | | - |
---|
683 | | - ret = i915_gem_init(dev_priv); |
---|
684 | | - if (ret) |
---|
685 | | - goto cleanup_modeset; |
---|
686 | | - |
---|
687 | | - intel_setup_overlay(dev_priv); |
---|
688 | | - |
---|
689 | | - if (INTEL_INFO(dev_priv)->num_pipes == 0) |
---|
690 | | - return 0; |
---|
691 | | - |
---|
692 | | - ret = intel_fbdev_init(dev); |
---|
693 | | - if (ret) |
---|
694 | | - goto cleanup_gem; |
---|
695 | | - |
---|
696 | | - /* Only enable hotplug handling once the fbdev is fully set up. */ |
---|
697 | | - intel_hpd_init(dev_priv); |
---|
698 | | - |
---|
699 | | - return 0; |
---|
700 | | - |
---|
701 | | -cleanup_gem: |
---|
702 | | - if (i915_gem_suspend(dev_priv)) |
---|
703 | | - DRM_ERROR("failed to idle hardware; continuing to unload!\n"); |
---|
704 | | - i915_gem_fini(dev_priv); |
---|
705 | | -cleanup_modeset: |
---|
706 | | - intel_modeset_cleanup(dev); |
---|
707 | | -cleanup_irq: |
---|
708 | | - drm_irq_uninstall(dev); |
---|
709 | | - intel_teardown_gmbus(dev_priv); |
---|
710 | | -cleanup_csr: |
---|
711 | | - intel_csr_ucode_fini(dev_priv); |
---|
712 | | - intel_power_domains_fini(dev_priv); |
---|
713 | | - vga_switcheroo_unregister_client(pdev); |
---|
714 | | -cleanup_vga_client: |
---|
715 | | - vga_client_register(pdev, NULL, NULL, NULL); |
---|
716 | | -out: |
---|
717 | | - return ret; |
---|
718 | | -} |
---|
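
The removed i915_load_modeset_init() is a good illustration of the unwind idiom the rest of this file keeps using: each setup step gets a goto label, and the labels tear things down in exactly the reverse order of construction. A stripped-down sketch of the shape, with hypothetical step names:

```c
/* Shape of the goto-unwind idiom used throughout this file; step_a/step_b/
 * step_c and their cleanup counterparts are hypothetical placeholders.
 */
static int probe_sketch(struct drm_i915_private *i915)
{
	int ret;

	ret = step_a_init(i915);
	if (ret)
		goto out;

	ret = step_b_init(i915);
	if (ret)
		goto cleanup_a;

	ret = step_c_init(i915);
	if (ret)
		goto cleanup_b;

	return 0;

cleanup_b:
	step_b_fini(i915);	/* undo in reverse order of setup */
cleanup_a:
	step_a_fini(i915);
out:
	return ret;
}
```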
719 | | - |
---|
720 | | -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) |
---|
721 | | -{ |
---|
722 | | - struct apertures_struct *ap; |
---|
723 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
724 | | - struct i915_ggtt *ggtt = &dev_priv->ggtt; |
---|
725 | | - bool primary; |
---|
726 | | - int ret; |
---|
727 | | - |
---|
728 | | - ap = alloc_apertures(1); |
---|
729 | | - if (!ap) |
---|
730 | | - return -ENOMEM; |
---|
731 | | - |
---|
732 | | - ap->ranges[0].base = ggtt->gmadr.start; |
---|
733 | | - ap->ranges[0].size = ggtt->mappable_end; |
---|
734 | | - |
---|
735 | | - primary = |
---|
736 | | - pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; |
---|
737 | | - |
---|
738 | | - ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary); |
---|
739 | | - |
---|
740 | | - kfree(ap); |
---|
741 | | - |
---|
742 | | - return ret; |
---|
743 | | -} |
---|
744 | | - |
---|
745 | | -#if !defined(CONFIG_VGA_CONSOLE) |
---|
746 | | -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) |
---|
747 | | -{ |
---|
748 | | - return 0; |
---|
749 | | -} |
---|
750 | | -#elif !defined(CONFIG_DUMMY_CONSOLE) |
---|
751 | | -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) |
---|
752 | | -{ |
---|
753 | | - return -ENODEV; |
---|
754 | | -} |
---|
755 | | -#else |
---|
756 | | -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) |
---|
757 | | -{ |
---|
758 | | - int ret = 0; |
---|
759 | | - |
---|
760 | | - DRM_INFO("Replacing VGA console driver\n"); |
---|
761 | | - |
---|
762 | | - console_lock(); |
---|
763 | | - if (con_is_bound(&vga_con)) |
---|
764 | | - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); |
---|
765 | | - if (ret == 0) { |
---|
766 | | - ret = do_unregister_con_driver(&vga_con); |
---|
767 | | - |
---|
768 | | - /* Ignore "already unregistered". */ |
---|
769 | | - if (ret == -ENODEV) |
---|
770 | | - ret = 0; |
---|
771 | | - } |
---|
772 | | - console_unlock(); |
---|
773 | | - |
---|
774 | | - return ret; |
---|
775 | | -} |
---|
776 | | -#endif |
---|
777 | | - |
---|
778 | | -static void intel_init_dpio(struct drm_i915_private *dev_priv) |
---|
779 | | -{ |
---|
780 | | - /* |
---|
781 | | - * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), |
---|
782 | | - * CHV x1 PHY (DP/HDMI D) |
---|
783 | | - * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) |
---|
784 | | - */ |
---|
785 | | - if (IS_CHERRYVIEW(dev_priv)) { |
---|
786 | | - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; |
---|
787 | | - DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; |
---|
788 | | - } else if (IS_VALLEYVIEW(dev_priv)) { |
---|
789 | | - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; |
---|
790 | | - } |
---|
791 | | -} |
---|
792 | | - |
---|
793 | 219 | static int i915_workqueues_init(struct drm_i915_private *dev_priv) |
---|
794 | 220 | { |
---|
795 | 221 | /* |
---|
.. | .. |
---|
819 | 245 | out_free_wq: |
---|
820 | 246 | destroy_workqueue(dev_priv->wq); |
---|
821 | 247 | out_err: |
---|
822 | | - DRM_ERROR("Failed to allocate workqueues.\n"); |
---|
| 248 | + drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n"); |
---|
823 | 249 | |
---|
824 | 250 | return -ENOMEM; |
---|
825 | | -} |
---|
826 | | - |
---|
827 | | -static void i915_engines_cleanup(struct drm_i915_private *i915) |
---|
828 | | -{ |
---|
829 | | - struct intel_engine_cs *engine; |
---|
830 | | - enum intel_engine_id id; |
---|
831 | | - |
---|
832 | | - for_each_engine(engine, i915, id) |
---|
833 | | - kfree(engine); |
---|
834 | 251 | } |
---|
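
i915_workqueues_init() (body mostly elided above) allocates the driver's private workqueues and unwinds them on failure; i915_workqueues_cleanup() below is its mirror. A minimal sketch of that pairing, assuming an ordered workqueue is wanted; the field and queue names are illustrative.

```c
/* Minimal sketch of paired workqueue setup/teardown, assuming an ordered
 * queue; the field and queue names are illustrative.
 */
static int workqueues_init_sketch(struct drm_i915_private *i915)
{
	i915->wq = alloc_ordered_workqueue("i915", 0);
	if (!i915->wq)
		return -ENOMEM;

	return 0;
}

static void workqueues_cleanup_sketch(struct drm_i915_private *i915)
{
	destroy_workqueue(i915->wq);	/* drains queued work before freeing */
}
```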
835 | 252 | |
---|
836 | 253 | static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) |
---|
.. | .. |
---|
856 | 273 | pre |= IS_HSW_EARLY_SDV(dev_priv); |
---|
857 | 274 | pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); |
---|
858 | 275 | pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); |
---|
| 276 | + pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0); |
---|
| 277 | + pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2); |
---|
859 | 278 | |
---|
860 | 279 | if (pre) { |
---|
861 | | - DRM_ERROR("This is a pre-production stepping. " |
---|
| 280 | + drm_err(&dev_priv->drm, "This is a pre-production stepping. " |
---|
862 | 281 | "It may not be fully functional.\n"); |
---|
863 | 282 | add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); |
---|
864 | 283 | } |
---|
865 | 284 | } |
---|
866 | 285 | |
---|
| 286 | +static void sanitize_gpu(struct drm_i915_private *i915) |
---|
| 287 | +{ |
---|
| 288 | + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) |
---|
| 289 | + __intel_gt_reset(&i915->gt, ALL_ENGINES); |
---|
| 290 | +} |
---|
| 291 | + |
---|
867 | 292 | /** |
---|
868 | | - * i915_driver_init_early - setup state not requiring device access |
---|
| 293 | + * i915_driver_early_probe - setup state not requiring device access |
---|
869 | 294 | * @dev_priv: device private |
---|
870 | | - * @ent: the matching pci_device_id |
---|
871 | 295 | * |
---|
872 | 296 | * Initialize everything that is a "SW-only" state, that is state not |
---|
873 | 297 | * requiring accessing the device or exposing the driver via kernel internal |
---|
.. | .. |
---|
875 | 299 | * system memory allocation, setting up device specific attributes and |
---|
876 | 300 | * function hooks not requiring accessing the device. |
---|
877 | 301 | */ |
---|
878 | | -static int i915_driver_init_early(struct drm_i915_private *dev_priv, |
---|
879 | | - const struct pci_device_id *ent) |
---|
| 302 | +static int i915_driver_early_probe(struct drm_i915_private *dev_priv) |
---|
880 | 303 | { |
---|
881 | | - const struct intel_device_info *match_info = |
---|
882 | | - (struct intel_device_info *)ent->driver_data; |
---|
883 | | - struct intel_device_info *device_info; |
---|
884 | 304 | int ret = 0; |
---|
885 | 305 | |
---|
886 | | - if (i915_inject_load_failure()) |
---|
| 306 | + if (i915_inject_probe_failure(dev_priv)) |
---|
887 | 307 | return -ENODEV; |
---|
888 | 308 | |
---|
889 | | - /* Setup the write-once "constant" device info */ |
---|
890 | | - device_info = mkwrite_device_info(dev_priv); |
---|
891 | | - memcpy(device_info, match_info, sizeof(*device_info)); |
---|
892 | | - device_info->device_id = dev_priv->drm.pdev->device; |
---|
| 309 | + intel_device_info_subplatform_init(dev_priv); |
---|
893 | 310 | |
---|
894 | | - BUILD_BUG_ON(INTEL_MAX_PLATFORMS > |
---|
895 | | - sizeof(device_info->platform_mask) * BITS_PER_BYTE); |
---|
896 | | - BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); |
---|
| 311 | + intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); |
---|
| 312 | + intel_uncore_init_early(&dev_priv->uncore, dev_priv); |
---|
| 313 | + |
---|
897 | 314 | spin_lock_init(&dev_priv->irq_lock); |
---|
898 | 315 | spin_lock_init(&dev_priv->gpu_error.lock); |
---|
899 | 316 | mutex_init(&dev_priv->backlight_lock); |
---|
900 | | - spin_lock_init(&dev_priv->uncore.lock); |
---|
901 | 317 | |
---|
902 | 318 | mutex_init(&dev_priv->sb_lock); |
---|
| 319 | + cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); |
---|
| 320 | + |
---|
903 | 321 | mutex_init(&dev_priv->av_mutex); |
---|
904 | 322 | mutex_init(&dev_priv->wm.wm_mutex); |
---|
905 | 323 | mutex_init(&dev_priv->pps_mutex); |
---|
| 324 | + mutex_init(&dev_priv->hdcp_comp_mutex); |
---|
906 | 325 | |
---|
907 | 326 | i915_memcpy_init_early(dev_priv); |
---|
| 327 | + intel_runtime_pm_init_early(&dev_priv->runtime_pm); |
---|
908 | 328 | |
---|
909 | 329 | ret = i915_workqueues_init(dev_priv); |
---|
910 | 330 | if (ret < 0) |
---|
911 | | - goto err_engines; |
---|
| 331 | + return ret; |
---|
912 | 332 | |
---|
913 | | - ret = i915_gem_init_early(dev_priv); |
---|
| 333 | + ret = vlv_suspend_init(dev_priv); |
---|
914 | 334 | if (ret < 0) |
---|
915 | 335 | goto err_workqueues; |
---|
| 336 | + |
---|
| 337 | + intel_wopcm_init_early(&dev_priv->wopcm); |
---|
| 338 | + |
---|
| 339 | + intel_gt_init_early(&dev_priv->gt, dev_priv); |
---|
| 340 | + |
---|
| 341 | + i915_gem_init_early(dev_priv); |
---|
916 | 342 | |
---|
917 | 343 | /* This must be called before any calls to HAS_PCH_* */ |
---|
918 | 344 | intel_detect_pch(dev_priv); |
---|
919 | 345 | |
---|
920 | | - intel_wopcm_init_early(&dev_priv->wopcm); |
---|
921 | | - intel_uc_init_early(dev_priv); |
---|
922 | 346 | intel_pm_setup(dev_priv); |
---|
923 | | - intel_init_dpio(dev_priv); |
---|
924 | | - intel_power_domains_init(dev_priv); |
---|
| 347 | + ret = intel_power_domains_init(dev_priv); |
---|
| 348 | + if (ret < 0) |
---|
| 349 | + goto err_gem; |
---|
925 | 350 | intel_irq_init(dev_priv); |
---|
926 | | - intel_hangcheck_init(dev_priv); |
---|
927 | 351 | intel_init_display_hooks(dev_priv); |
---|
928 | 352 | intel_init_clock_gating_hooks(dev_priv); |
---|
929 | 353 | intel_init_audio_hooks(dev_priv); |
---|
930 | | - intel_display_crc_init(dev_priv); |
---|
931 | 354 | |
---|
932 | 355 | intel_detect_preproduction_hw(dev_priv); |
---|
933 | 356 | |
---|
934 | 357 | return 0; |
---|
935 | 358 | |
---|
| 359 | +err_gem: |
---|
| 360 | + i915_gem_cleanup_early(dev_priv); |
---|
| 361 | + intel_gt_driver_late_release(&dev_priv->gt); |
---|
| 362 | + vlv_suspend_cleanup(dev_priv); |
---|
936 | 363 | err_workqueues: |
---|
937 | 364 | i915_workqueues_cleanup(dev_priv); |
---|
938 | | -err_engines: |
---|
939 | | - i915_engines_cleanup(dev_priv); |
---|
940 | 365 | return ret; |
---|
941 | 366 | } |
---|
942 | 367 | |
---|
943 | 368 | /** |
---|
944 | | - * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() |
---|
| 369 | + * i915_driver_late_release - cleanup the setup done in |
---|
| 370 | + * i915_driver_early_probe() |
---|
945 | 371 | * @dev_priv: device private |
---|
946 | 372 | */ |
---|
947 | | -static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) |
---|
| 373 | +static void i915_driver_late_release(struct drm_i915_private *dev_priv) |
---|
948 | 374 | { |
---|
949 | 375 | intel_irq_fini(dev_priv); |
---|
950 | | - intel_uc_cleanup_early(dev_priv); |
---|
| 376 | + intel_power_domains_cleanup(dev_priv); |
---|
951 | 377 | i915_gem_cleanup_early(dev_priv); |
---|
| 378 | + intel_gt_driver_late_release(&dev_priv->gt); |
---|
| 379 | + vlv_suspend_cleanup(dev_priv); |
---|
952 | 380 | i915_workqueues_cleanup(dev_priv); |
---|
953 | | - i915_engines_cleanup(dev_priv); |
---|
954 | | -} |
---|
955 | 381 | |
---|
956 | | -static int i915_mmio_setup(struct drm_i915_private *dev_priv) |
---|
957 | | -{ |
---|
958 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
959 | | - int mmio_bar; |
---|
960 | | - int mmio_size; |
---|
| 382 | + cpu_latency_qos_remove_request(&dev_priv->sb_qos); |
---|
| 383 | + mutex_destroy(&dev_priv->sb_lock); |
---|
961 | 384 | |
---|
962 | | - mmio_bar = IS_GEN2(dev_priv) ? 1 : 0; |
---|
963 | | - /* |
---|
964 | | - * Before gen4, the registers and the GTT are behind different BARs. |
---|
965 | | - * However, from gen4 onwards, the registers and the GTT are shared |
---|
966 | | - * in the same BAR, so we want to restrict this ioremap from |
---|
967 | | - * clobbering the GTT which we want ioremap_wc instead. Fortunately, |
---|
968 | | - * the register BAR remains the same size for all the earlier |
---|
969 | | - * generations up to Ironlake. |
---|
970 | | - */ |
---|
971 | | - if (INTEL_GEN(dev_priv) < 5) |
---|
972 | | - mmio_size = 512 * 1024; |
---|
973 | | - else |
---|
974 | | - mmio_size = 2 * 1024 * 1024; |
---|
975 | | - dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size); |
---|
976 | | - if (dev_priv->regs == NULL) { |
---|
977 | | - DRM_ERROR("failed to map registers\n"); |
---|
978 | | - |
---|
979 | | - return -EIO; |
---|
980 | | - } |
---|
981 | | - |
---|
982 | | - /* Try to make sure MCHBAR is enabled before poking at it */ |
---|
983 | | - intel_setup_mchbar(dev_priv); |
---|
984 | | - |
---|
985 | | - return 0; |
---|
986 | | -} |
---|
987 | | - |
---|
988 | | -static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) |
---|
989 | | -{ |
---|
990 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
991 | | - |
---|
992 | | - intel_teardown_mchbar(dev_priv); |
---|
993 | | - pci_iounmap(pdev, dev_priv->regs); |
---|
| 385 | + i915_params_free(&dev_priv->params); |
---|
994 | 386 | } |
---|
995 | 387 | |
---|
996 | 388 | /** |
---|
997 | | - * i915_driver_init_mmio - setup device MMIO |
---|
| 389 | + * i915_driver_mmio_probe - setup device MMIO |
---|
998 | 390 | * @dev_priv: device private |
---|
999 | 391 | * |
---|
1000 | 392 | * Setup minimal device state necessary for MMIO accesses later in the |
---|
.. | .. |
---|
1002 | 394 | * side effects or exposing the driver via kernel internal or user space |
---|
1003 | 395 | * interfaces. |
---|
1004 | 396 | */ |
---|
1005 | | -static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) |
---|
| 397 | +static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) |
---|
1006 | 398 | { |
---|
1007 | 399 | int ret; |
---|
1008 | 400 | |
---|
1009 | | - if (i915_inject_load_failure()) |
---|
| 401 | + if (i915_inject_probe_failure(dev_priv)) |
---|
1010 | 402 | return -ENODEV; |
---|
1011 | 403 | |
---|
1012 | 404 | if (i915_get_bridge_dev(dev_priv)) |
---|
1013 | 405 | return -EIO; |
---|
1014 | 406 | |
---|
1015 | | - ret = i915_mmio_setup(dev_priv); |
---|
| 407 | + ret = intel_uncore_init_mmio(&dev_priv->uncore); |
---|
1016 | 408 | if (ret < 0) |
---|
1017 | 409 | goto err_bridge; |
---|
1018 | 410 | |
---|
1019 | | - intel_uncore_init(dev_priv); |
---|
| 411 | + /* Try to make sure MCHBAR is enabled before poking at it */ |
---|
| 412 | + intel_setup_mchbar(dev_priv); |
---|
1020 | 413 | |
---|
1021 | | - intel_device_info_init_mmio(dev_priv); |
---|
1022 | | - |
---|
1023 | | - intel_uncore_prune(dev_priv); |
---|
1024 | | - |
---|
1025 | | - intel_uc_init_mmio(dev_priv); |
---|
1026 | | - |
---|
1027 | | - ret = intel_engines_init_mmio(dev_priv); |
---|
| 414 | + ret = intel_gt_init_mmio(&dev_priv->gt); |
---|
1028 | 415 | if (ret) |
---|
1029 | 416 | goto err_uncore; |
---|
1030 | 417 | |
---|
1031 | | - i915_gem_init_mmio(dev_priv); |
---|
| 418 | + /* As early as possible, scrub existing GPU state before clobbering */ |
---|
| 419 | + sanitize_gpu(dev_priv); |
---|
1032 | 420 | |
---|
1033 | 421 | return 0; |
---|
1034 | 422 | |
---|
1035 | 423 | err_uncore: |
---|
1036 | | - intel_uncore_fini(dev_priv); |
---|
| 424 | + intel_teardown_mchbar(dev_priv); |
---|
| 425 | + intel_uncore_fini_mmio(&dev_priv->uncore); |
---|
1037 | 426 | err_bridge: |
---|
1038 | 427 | pci_dev_put(dev_priv->bridge_dev); |
---|
1039 | 428 | |
---|
.. | .. |
---|
1041 | 430 | } |
---|
1042 | 431 | |
---|
1043 | 432 | /** |
---|
1044 | | - * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() |
---|
| 433 | + * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe() |
---|
1045 | 434 | * @dev_priv: device private |
---|
1046 | 435 | */ |
---|
1047 | | -static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) |
---|
| 436 | +static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) |
---|
1048 | 437 | { |
---|
1049 | | - intel_uncore_fini(dev_priv); |
---|
1050 | | - i915_mmio_cleanup(dev_priv); |
---|
| 438 | + intel_teardown_mchbar(dev_priv); |
---|
| 439 | + intel_uncore_fini_mmio(&dev_priv->uncore); |
---|
1051 | 440 | pci_dev_put(dev_priv->bridge_dev); |
---|
1052 | 441 | } |
---|
1053 | 442 | |
---|
1054 | 443 | static void intel_sanitize_options(struct drm_i915_private *dev_priv) |
---|
1055 | 444 | { |
---|
1056 | | - /* |
---|
1057 | | - * i915.enable_ppgtt is read-only, so do an early pass to validate the |
---|
1058 | | - * user's requested state against the hardware/driver capabilities. We |
---|
1059 | | - * do this now so that we can print out any log messages once rather |
---|
1060 | | - * than every time we check intel_enable_ppgtt(). |
---|
1061 | | - */ |
---|
1062 | | - i915_modparams.enable_ppgtt = |
---|
1063 | | - intel_sanitize_enable_ppgtt(dev_priv, |
---|
1064 | | - i915_modparams.enable_ppgtt); |
---|
1065 | | - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt); |
---|
1066 | | - |
---|
1067 | 445 | intel_gvt_sanitize_options(dev_priv); |
---|
1068 | 446 | } |
---|
1069 | 447 | |
---|
1070 | 448 | /** |
---|
1071 | | - * i915_driver_init_hw - setup state requiring device access |
---|
1072 | | - * @dev_priv: device private |
---|
| 449 | + * i915_set_dma_info - set all relevant PCI dma info as configured for the |
---|
| 450 | + * platform |
---|
| 451 | + * @i915: valid i915 instance |
---|
1073 | 452 | * |
---|
1074 | | - * Setup state that requires accessing the device, but doesn't require |
---|
1075 | | - * exposing the driver via kernel internal or userspace interfaces. |
---|
| 453 | + * Set the dma max segment size, device and coherent masks. The dma mask set |
---|
| 454 | + * needs to occur before i915_ggtt_probe_hw. |
---|
| 455 | + * |
---|
| 456 | + * A couple of platforms have special needs. Address them as well. |
---|
| 457 | + * |
---|
1076 | 458 | */ |
---|
1077 | | -static int i915_driver_init_hw(struct drm_i915_private *dev_priv) |
---|
| 459 | +static int i915_set_dma_info(struct drm_i915_private *i915) |
---|
1078 | 460 | { |
---|
1079 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
| 461 | + struct pci_dev *pdev = i915->drm.pdev; |
---|
| 462 | + unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size; |
---|
1080 | 463 | int ret; |
---|
1081 | 464 | |
---|
1082 | | - if (i915_inject_load_failure()) |
---|
1083 | | - return -ENODEV; |
---|
1084 | | - |
---|
1085 | | - intel_device_info_runtime_init(mkwrite_device_info(dev_priv)); |
---|
1086 | | - |
---|
1087 | | - intel_sanitize_options(dev_priv); |
---|
1088 | | - |
---|
1089 | | - i915_perf_init(dev_priv); |
---|
1090 | | - |
---|
1091 | | - ret = i915_ggtt_probe_hw(dev_priv); |
---|
1092 | | - if (ret) |
---|
1093 | | - goto err_perf; |
---|
1094 | | - |
---|
1095 | | - /* |
---|
1096 | | - * WARNING: Apparently we must kick fbdev drivers before vgacon, |
---|
1097 | | - * otherwise the vga fbdev driver falls over. |
---|
1098 | | - */ |
---|
1099 | | - ret = i915_kick_out_firmware_fb(dev_priv); |
---|
1100 | | - if (ret) { |
---|
1101 | | - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); |
---|
1102 | | - goto err_ggtt; |
---|
1103 | | - } |
---|
1104 | | - |
---|
1105 | | - ret = i915_kick_out_vgacon(dev_priv); |
---|
1106 | | - if (ret) { |
---|
1107 | | - DRM_ERROR("failed to remove conflicting VGA console\n"); |
---|
1108 | | - goto err_ggtt; |
---|
1109 | | - } |
---|
1110 | | - |
---|
1111 | | - ret = i915_ggtt_init_hw(dev_priv); |
---|
1112 | | - if (ret) |
---|
1113 | | - goto err_ggtt; |
---|
1114 | | - |
---|
1115 | | - ret = i915_ggtt_enable_hw(dev_priv); |
---|
1116 | | - if (ret) { |
---|
1117 | | - DRM_ERROR("failed to enable GGTT\n"); |
---|
1118 | | - goto err_ggtt; |
---|
1119 | | - } |
---|
1120 | | - |
---|
1121 | | - pci_set_master(pdev); |
---|
| 465 | + GEM_BUG_ON(!mask_size); |
---|
1122 | 466 | |
---|
1123 | 467 | /* |
---|
1124 | 468 | * We don't have a max segment size, so set it to the max so sg's |
---|
.. | .. |
---|
1126 | 470 | */ |
---|
1127 | 471 | dma_set_max_seg_size(&pdev->dev, UINT_MAX); |
---|
1128 | 472 | |
---|
| 473 | + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); |
---|
| 474 | + if (ret) |
---|
| 475 | + goto mask_err; |
---|
| 476 | + |
---|
1129 | 477 | /* overlay on gen2 is broken and can't address above 1G */ |
---|
1130 | | - if (IS_GEN2(dev_priv)) { |
---|
1131 | | - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); |
---|
1132 | | - if (ret) { |
---|
1133 | | - DRM_ERROR("failed to set DMA mask\n"); |
---|
| 478 | + if (IS_GEN(i915, 2)) |
---|
| 479 | + mask_size = 30; |
---|
1134 | 480 | |
---|
1135 | | - goto err_ggtt; |
---|
1136 | | - } |
---|
1137 | | - } |
---|
1138 | | - |
---|
1139 | | - /* 965GM sometimes incorrectly writes to hardware status page (HWS) |
---|
| 481 | + /* |
---|
| 482 | + * 965GM sometimes incorrectly writes to hardware status page (HWS) |
---|
1140 | 483 | * using 32bit addressing, overwriting memory if HWS is located |
---|
1141 | 484 | * above 4GB. |
---|
1142 | 485 | * |
---|
.. | .. |
---|
1144 | 487 | * behaviour if any general state is accessed within a page above 4GB, |
---|
1145 | 488 | * which also needs to be handled carefully. |
---|
1146 | 489 | */ |
---|
1147 | | - if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) { |
---|
1148 | | - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
---|
| 490 | + if (IS_I965G(i915) || IS_I965GM(i915)) |
---|
| 491 | + mask_size = 32; |
---|
1149 | 492 | |
---|
1150 | | - if (ret) { |
---|
1151 | | - DRM_ERROR("failed to set DMA mask\n"); |
---|
| 493 | + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); |
---|
| 494 | + if (ret) |
---|
| 495 | + goto mask_err; |
---|
1152 | 496 | |
---|
1153 | | - goto err_ggtt; |
---|
| 497 | + return 0; |
---|
| 498 | + |
---|
| 499 | +mask_err: |
---|
| 500 | + drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret); |
---|
| 501 | + return ret; |
---|
| 502 | +} |
---|
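
Note that i915_set_dma_info() deliberately sets the streaming and coherent masks with two separate calls: the streaming mask always uses the platform's full dma_mask_size, while the coherent mask is narrowed to 30 bits on gen2 (the overlay limitation) and 32 bits on 965G/GM (HWS corruption above 4 GiB). When both masks are the same, the usual shorthand is the combined helper, sketched below with an assumed 39-bit width.

```c
/* When streaming and coherent DMA masks match, the combined helper is the
 * usual shorthand; the 39-bit width here is only an example value.
 */
static int set_dma_masks_sketch(struct pci_dev *pdev)
{
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
}
```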
| 503 | + |
---|
| 504 | +/** |
---|
| 505 | + * i915_driver_hw_probe - setup state requiring device access |
---|
| 506 | + * @dev_priv: device private |
---|
| 507 | + * |
---|
| 508 | + * Setup state that requires accessing the device, but doesn't require |
---|
| 509 | + * exposing the driver via kernel internal or userspace interfaces. |
---|
| 510 | + */ |
---|
| 511 | +static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) |
---|
| 512 | +{ |
---|
| 513 | + struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
| 514 | + int ret; |
---|
| 515 | + |
---|
| 516 | + if (i915_inject_probe_failure(dev_priv)) |
---|
| 517 | + return -ENODEV; |
---|
| 518 | + |
---|
| 519 | + intel_device_info_runtime_init(dev_priv); |
---|
| 520 | + |
---|
| 521 | + if (HAS_PPGTT(dev_priv)) { |
---|
| 522 | + if (intel_vgpu_active(dev_priv) && |
---|
| 523 | + !intel_vgpu_has_full_ppgtt(dev_priv)) { |
---|
| 524 | + i915_report_error(dev_priv, |
---|
| 525 | + "incompatible vGPU found, support for isolated ppGTT required\n"); |
---|
| 526 | + return -ENXIO; |
---|
1154 | 527 | } |
---|
1155 | 528 | } |
---|
1156 | 529 | |
---|
1157 | | - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, |
---|
1158 | | - PM_QOS_DEFAULT_VALUE); |
---|
| 530 | + if (HAS_EXECLISTS(dev_priv)) { |
---|
| 531 | + /* |
---|
| 532 | + * Older GVT emulation depends upon intercepting CSB mmio, |
---|
| 533 | + * which we no longer use, preferring to use the HWSP cache |
---|
| 534 | + * instead. |
---|
| 535 | + */ |
---|
| 536 | + if (intel_vgpu_active(dev_priv) && |
---|
| 537 | + !intel_vgpu_has_hwsp_emulation(dev_priv)) { |
---|
| 538 | + i915_report_error(dev_priv, |
---|
| 539 | + "old vGPU host found, support for HWSP emulation required\n"); |
---|
| 540 | + return -ENXIO; |
---|
| 541 | + } |
---|
| 542 | + } |
---|
1159 | 543 | |
---|
1160 | | - intel_uncore_sanitize(dev_priv); |
---|
| 544 | + intel_sanitize_options(dev_priv); |
---|
1161 | 545 | |
---|
1162 | | - i915_gem_load_init_fences(dev_priv); |
---|
| 546 | + /* needs to be done before ggtt probe */ |
---|
| 547 | + intel_dram_edram_detect(dev_priv); |
---|
| 548 | + |
---|
| 549 | + ret = i915_set_dma_info(dev_priv); |
---|
| 550 | + if (ret) |
---|
| 551 | + return ret; |
---|
| 552 | + |
---|
| 553 | + i915_perf_init(dev_priv); |
---|
| 554 | + |
---|
| 555 | + ret = i915_ggtt_probe_hw(dev_priv); |
---|
| 556 | + if (ret) |
---|
| 557 | + goto err_perf; |
---|
| 558 | + |
---|
| 559 | + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb"); |
---|
| 560 | + if (ret) |
---|
| 561 | + goto err_ggtt; |
---|
| 562 | + |
---|
| 563 | + ret = i915_ggtt_init_hw(dev_priv); |
---|
| 564 | + if (ret) |
---|
| 565 | + goto err_ggtt; |
---|
| 566 | + |
---|
| 567 | + ret = intel_memory_regions_hw_probe(dev_priv); |
---|
| 568 | + if (ret) |
---|
| 569 | + goto err_ggtt; |
---|
| 570 | + |
---|
| 571 | + intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt); |
---|
| 572 | + |
---|
| 573 | + ret = i915_ggtt_enable_hw(dev_priv); |
---|
| 574 | + if (ret) { |
---|
| 575 | + drm_err(&dev_priv->drm, "failed to enable GGTT\n"); |
---|
| 576 | + goto err_mem_regions; |
---|
| 577 | + } |
---|
| 578 | + |
---|
| 579 | + pci_set_master(pdev); |
---|
| 580 | + |
---|
| 581 | + intel_gt_init_workarounds(dev_priv); |
---|
1163 | 582 | |
---|
1164 | 583 | /* On the 945G/GM, the chipset reports the MSI capability on the |
---|
1165 | 584 | * integrated graphics even though the support isn't actually there |
---|
.. | .. |
---|
1182 | 601 | */ |
---|
1183 | 602 | if (INTEL_GEN(dev_priv) >= 5) { |
---|
1184 | 603 | if (pci_enable_msi(pdev) < 0) |
---|
1185 | | - DRM_DEBUG_DRIVER("can't enable MSI"); |
---|
| 604 | + drm_dbg(&dev_priv->drm, "can't enable MSI"); |
---|
1186 | 605 | } |
---|
1187 | 606 | |
---|
1188 | 607 | ret = intel_gvt_init(dev_priv); |
---|
.. | .. |
---|
1191 | 610 | |
---|
1192 | 611 | intel_opregion_setup(dev_priv); |
---|
1193 | 612 | |
---|
| 613 | + intel_pcode_init(dev_priv); |
---|
| 614 | + |
---|
| 615 | + /* |
---|
| 616 | + * Fill the dram structure to get the system raw bandwidth and |
---|
| 617 | + * dram info. This will be used for memory latency calculation. |
---|
| 618 | + */ |
---|
| 619 | + intel_dram_detect(dev_priv); |
---|
| 620 | + |
---|
| 621 | + intel_bw_init_hw(dev_priv); |
---|
| 622 | + |
---|
1194 | 623 | return 0; |
---|
1195 | 624 | |
---|
1196 | 625 | err_msi: |
---|
1197 | 626 | if (pdev->msi_enabled) |
---|
1198 | 627 | pci_disable_msi(pdev); |
---|
1199 | | - pm_qos_remove_request(&dev_priv->pm_qos); |
---|
| 628 | +err_mem_regions: |
---|
| 629 | + intel_memory_regions_driver_release(dev_priv); |
---|
1200 | 630 | err_ggtt: |
---|
1201 | | - i915_ggtt_cleanup_hw(dev_priv); |
---|
| 631 | + i915_ggtt_driver_release(dev_priv); |
---|
1202 | 632 | err_perf: |
---|
1203 | 633 | i915_perf_fini(dev_priv); |
---|
1204 | 634 | return ret; |
---|
1205 | 635 | } |
---|
1206 | 636 | |
---|
1207 | 637 | /** |
---|
1208 | | - * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() |
---|
| 638 | + * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe() |
---|
1209 | 639 | * @dev_priv: device private |
---|
1210 | 640 | */ |
---|
1211 | | -static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) |
---|
| 641 | +static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) |
---|
1212 | 642 | { |
---|
1213 | 643 | struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
1214 | 644 | |
---|
.. | .. |
---|
1216 | 646 | |
---|
1217 | 647 | if (pdev->msi_enabled) |
---|
1218 | 648 | pci_disable_msi(pdev); |
---|
1219 | | - |
---|
1220 | | - pm_qos_remove_request(&dev_priv->pm_qos); |
---|
1221 | | - i915_ggtt_cleanup_hw(dev_priv); |
---|
1222 | 649 | } |
---|
1223 | 650 | |
---|
1224 | 651 | /** |
---|
.. | .. |
---|
1232 | 659 | { |
---|
1233 | 660 | struct drm_device *dev = &dev_priv->drm; |
---|
1234 | 661 | |
---|
1235 | | - i915_gem_shrinker_register(dev_priv); |
---|
| 662 | + i915_gem_driver_register(dev_priv); |
---|
1236 | 663 | i915_pmu_register(dev_priv); |
---|
1237 | 664 | |
---|
1238 | | - /* |
---|
1239 | | - * Notify a valid surface after modesetting, |
---|
1240 | | - * when running inside a VM. |
---|
1241 | | - */ |
---|
1242 | | - if (intel_vgpu_active(dev_priv)) |
---|
1243 | | - I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); |
---|
| 665 | + intel_vgpu_register(dev_priv); |
---|
1244 | 666 | |
---|
1245 | 667 | /* Reveal our presence to userspace */ |
---|
1246 | 668 | if (drm_dev_register(dev, 0) == 0) { |
---|
1247 | 669 | i915_debugfs_register(dev_priv); |
---|
| 670 | + intel_display_debugfs_register(dev_priv); |
---|
1248 | 671 | i915_setup_sysfs(dev_priv); |
---|
1249 | 672 | |
---|
1250 | 673 | /* Depends on sysfs having been initialized */ |
---|
1251 | 674 | i915_perf_register(dev_priv); |
---|
1252 | 675 | } else |
---|
1253 | | - DRM_ERROR("Failed to register driver for userspace access!\n"); |
---|
| 676 | + drm_err(&dev_priv->drm, |
---|
| 677 | + "Failed to register driver for userspace access!\n"); |
---|
1254 | 678 | |
---|
1255 | | - if (INTEL_INFO(dev_priv)->num_pipes) { |
---|
| 679 | + if (HAS_DISPLAY(dev_priv)) { |
---|
1256 | 680 | /* Must be done after probing outputs */ |
---|
1257 | 681 | intel_opregion_register(dev_priv); |
---|
1258 | 682 | acpi_video_register(); |
---|
1259 | 683 | } |
---|
1260 | 684 | |
---|
1261 | | - if (IS_GEN5(dev_priv)) |
---|
1262 | | - intel_gpu_ips_init(dev_priv); |
---|
| 685 | + intel_gt_driver_register(&dev_priv->gt); |
---|
1263 | 686 | |
---|
1264 | 687 | intel_audio_init(dev_priv); |
---|
1265 | 688 | |
---|
.. | .. |
---|
1276 | 699 | * We need to coordinate the hotplugs with the asynchronous fbdev |
---|
1277 | 700 | * configuration, for which we use the fbdev->async_cookie. |
---|
1278 | 701 | */ |
---|
1279 | | - if (INTEL_INFO(dev_priv)->num_pipes) |
---|
| 702 | + if (HAS_DISPLAY(dev_priv)) |
---|
1280 | 703 | drm_kms_helper_poll_init(dev); |
---|
| 704 | + |
---|
| 705 | + intel_power_domains_enable(dev_priv); |
---|
| 706 | + intel_runtime_pm_enable(&dev_priv->runtime_pm); |
---|
| 707 | + |
---|
| 708 | + intel_register_dsm_handler(); |
---|
| 709 | + |
---|
| 710 | + if (i915_switcheroo_register(dev_priv)) |
---|
| 711 | + drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n"); |
---|
1281 | 712 | } |
---|
1282 | 713 | |
---|
1283 | 714 | /** |
---|
.. | .. |
---|
1286 | 717 | */ |
---|
1287 | 718 | static void i915_driver_unregister(struct drm_i915_private *dev_priv) |
---|
1288 | 719 | { |
---|
| 720 | + i915_switcheroo_unregister(dev_priv); |
---|
| 721 | + |
---|
| 722 | + intel_unregister_dsm_handler(); |
---|
| 723 | + |
---|
| 724 | + intel_runtime_pm_disable(&dev_priv->runtime_pm); |
---|
| 725 | + intel_power_domains_disable(dev_priv); |
---|
| 726 | + |
---|
1289 | 727 | intel_fbdev_unregister(dev_priv); |
---|
1290 | 728 | intel_audio_deinit(dev_priv); |
---|
1291 | 729 | |
---|
.. | .. |
---|
1296 | 734 | */ |
---|
1297 | 735 | drm_kms_helper_poll_fini(&dev_priv->drm); |
---|
1298 | 736 | |
---|
1299 | | - intel_gpu_ips_teardown(); |
---|
| 737 | + intel_gt_driver_unregister(&dev_priv->gt); |
---|
1300 | 738 | acpi_video_unregister(); |
---|
1301 | 739 | intel_opregion_unregister(dev_priv); |
---|
1302 | 740 | |
---|
.. | .. |
---|
1304 | 742 | i915_pmu_unregister(dev_priv); |
---|
1305 | 743 | |
---|
1306 | 744 | i915_teardown_sysfs(dev_priv); |
---|
1307 | | - drm_dev_unregister(&dev_priv->drm); |
---|
| 745 | + drm_dev_unplug(&dev_priv->drm); |
---|
1308 | 746 | |
---|
1309 | | - i915_gem_shrinker_unregister(dev_priv); |
---|
| 747 | + i915_gem_driver_unregister(dev_priv); |
---|
1310 | 748 | } |
---|
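The switch from drm_dev_unregister() to drm_dev_unplug() in the hunk above does more than rename the teardown step: it marks the drm_device as gone, so ioctls and other entry points that race with removal can bail out instead of touching hardware that has already been released. Code that wants that protection brackets its hardware access with drm_dev_enter()/drm_dev_exit(). A minimal sketch of the pattern (illustrative only, my_hw_access is a made-up name, not code from this file):

    static int my_hw_access(struct drm_device *dev)
    {
            int idx;

            /* Fails once drm_dev_unplug() has marked the device as gone. */
            if (!drm_dev_enter(dev, &idx))
                    return -ENODEV;

            /* ... register access that must not race device removal ... */

            drm_dev_exit(idx);
            return 0;
    }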
1311 | 749 | |
---|
1312 | 750 | static void i915_welcome_messages(struct drm_i915_private *dev_priv) |
---|
1313 | 751 | { |
---|
1314 | | - if (drm_debug & DRM_UT_DRIVER) { |
---|
| 752 | + if (drm_debug_enabled(DRM_UT_DRIVER)) { |
---|
1315 | 753 | struct drm_printer p = drm_debug_printer("i915 device info:"); |
---|
1316 | 754 | |
---|
1317 | | - intel_device_info_dump(&dev_priv->info, &p); |
---|
1318 | | - intel_device_info_dump_runtime(&dev_priv->info, &p); |
---|
| 755 | + drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", |
---|
| 756 | + INTEL_DEVID(dev_priv), |
---|
| 757 | + INTEL_REVID(dev_priv), |
---|
| 758 | + intel_platform_name(INTEL_INFO(dev_priv)->platform), |
---|
| 759 | + intel_subplatform(RUNTIME_INFO(dev_priv), |
---|
| 760 | + INTEL_INFO(dev_priv)->platform), |
---|
| 761 | + INTEL_GEN(dev_priv)); |
---|
| 762 | + |
---|
| 763 | + intel_device_info_print_static(INTEL_INFO(dev_priv), &p); |
---|
| 764 | + intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); |
---|
| 765 | + intel_gt_info_print(&dev_priv->gt.info, &p); |
---|
1319 | 766 | } |
---|
1320 | 767 | |
---|
1321 | 768 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) |
---|
1322 | | - DRM_INFO("DRM_I915_DEBUG enabled\n"); |
---|
| 769 | + drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n"); |
---|
1323 | 770 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
---|
1324 | | - DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); |
---|
| 771 | + drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n"); |
---|
| 772 | + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) |
---|
| 773 | + drm_info(&dev_priv->drm, |
---|
| 774 | + "DRM_I915_DEBUG_RUNTIME_PM enabled\n"); |
---|
| 775 | +} |
---|
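i915_welcome_messages() now gates its dump behind drm_debug_enabled(DRM_UT_DRIVER) and routes all output through a struct drm_printer, so the same dump helpers can be pointed at syslog, debugfs, or an error-state buffer. A rough standalone example of that interface (the function name and prefix string here are made up for illustration):

    static void my_welcome_message(u16 device_id)
    {
            if (drm_debug_enabled(DRM_UT_DRIVER)) {
                    /* Emits at debug level, prefixing every line. */
                    struct drm_printer p = drm_debug_printer("my-driver:");

                    drm_printf(&p, "probed device 0x%04x\n", device_id);
            }
    }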
| 776 | + |
---|
| 777 | +static struct drm_i915_private * |
---|
| 778 | +i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) |
---|
| 779 | +{ |
---|
| 780 | + const struct intel_device_info *match_info = |
---|
| 781 | + (struct intel_device_info *)ent->driver_data; |
---|
| 782 | + struct intel_device_info *device_info; |
---|
| 783 | + struct drm_i915_private *i915; |
---|
| 784 | + |
---|
| 785 | + i915 = devm_drm_dev_alloc(&pdev->dev, &driver, |
---|
| 786 | + struct drm_i915_private, drm); |
---|
| 787 | + if (IS_ERR(i915)) |
---|
| 788 | + return i915; |
---|
| 789 | + |
---|
| 790 | + i915->drm.pdev = pdev; |
---|
| 791 | + pci_set_drvdata(pdev, i915); |
---|
| 792 | + |
---|
| 793 | + /* Device parameters start as a copy of module parameters. */ |
---|
| 794 | + i915_params_copy(&i915->params, &i915_modparams); |
---|
| 795 | + |
---|
| 796 | + /* Setup the write-once "constant" device info */ |
---|
| 797 | + device_info = mkwrite_device_info(i915); |
---|
| 798 | + memcpy(device_info, match_info, sizeof(*device_info)); |
---|
| 799 | + RUNTIME_INFO(i915)->device_id = pdev->device; |
---|
| 800 | + |
---|
| 801 | + BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); |
---|
| 802 | + |
---|
| 803 | + return i915; |
---|
1325 | 804 | } |
---|
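The new i915_driver_create() above allocates the driver-private structure with devm_drm_dev_alloc(), replacing the kzalloc()/drm_dev_init() pair used by the old load path (its removal is visible further down in i915_driver_probe()). The managed allocator embeds the drm_device inside the driver structure and frees the whole allocation automatically once the last reference is dropped. A minimal sketch of the pattern in a hypothetical driver (my_device, my_drm_driver and my_pci_probe are placeholder names, not i915 code):

    struct my_device {
            struct drm_device drm;          /* embedded, named in the macro below */
            void __iomem *regs;
    };

    static struct drm_driver my_drm_driver; /* assumed to be filled in elsewhere */

    static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            struct my_device *mydev;

            /* Allocates my_device with its lifetime tied to the PCI device. */
            mydev = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
                                       struct my_device, drm);
            if (IS_ERR(mydev))
                    return PTR_ERR(mydev);

            pci_set_drvdata(pdev, mydev);
            return drm_dev_register(&mydev->drm, 0);
    }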
1326 | 805 | |
---|
1327 | 806 | /** |
---|
1328 | | - * i915_driver_load - setup chip and create an initial config |
---|
| 807 | + * i915_driver_probe - setup chip and create an initial config |
---|
1329 | 808 | * @pdev: PCI device |
---|
1330 | 809 | * @ent: matching PCI ID entry |
---|
1331 | 810 | * |
---|
1332 | | - * The driver load routine has to do several things: |
---|
| 811 | + * The driver probe routine has to do several things: |
---|
1333 | 812 | * - drive output discovery via intel_modeset_init() |
---|
1334 | 813 | * - initialize the memory manager |
---|
1335 | 814 | * - allocate initial config memory |
---|
1336 | 815 | * - setup the DRM framebuffer with the allocated memory |
---|
1337 | 816 | */ |
---|
1338 | | -int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) |
---|
| 817 | +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
---|
1339 | 818 | { |
---|
1340 | 819 | const struct intel_device_info *match_info = |
---|
1341 | 820 | (struct intel_device_info *)ent->driver_data; |
---|
1342 | | - struct drm_i915_private *dev_priv; |
---|
| 821 | + struct drm_i915_private *i915; |
---|
1343 | 822 | int ret; |
---|
1344 | 823 | |
---|
1345 | | - /* Enable nuclear pageflip on ILK+ */ |
---|
1346 | | - if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) |
---|
1347 | | - driver.driver_features &= ~DRIVER_ATOMIC; |
---|
| 824 | + i915 = i915_driver_create(pdev, ent); |
---|
| 825 | + if (IS_ERR(i915)) |
---|
| 826 | + return PTR_ERR(i915); |
---|
1348 | 827 | |
---|
1349 | | - ret = -ENOMEM; |
---|
1350 | | - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
---|
1351 | | - if (dev_priv) |
---|
1352 | | - ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); |
---|
1353 | | - if (ret) { |
---|
1354 | | - DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); |
---|
1355 | | - goto out_free; |
---|
| 828 | + /* Disable nuclear pageflip by default on pre-ILK */ |
---|
| 829 | + if (!i915->params.nuclear_pageflip && match_info->gen < 5) |
---|
| 830 | + i915->drm.driver_features &= ~DRIVER_ATOMIC; |
---|
| 831 | + |
---|
| 832 | + /* |
---|
| 833 | + * Check if we support fake LMEM -- for now we only unleash this for |
---|
| 834 | + * the live selftests (test-and-exit). |
---|

| 835 | + */ |
---|
| 836 | +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
---|
| 837 | + if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) { |
---|
| 838 | + if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 && |
---|
| 839 | + i915->params.fake_lmem_start) { |
---|
| 840 | + mkwrite_device_info(i915)->memory_regions = |
---|
| 841 | + REGION_SMEM | REGION_LMEM | REGION_STOLEN; |
---|
| 842 | + mkwrite_device_info(i915)->is_dgfx = true; |
---|
| 843 | + GEM_BUG_ON(!HAS_LMEM(i915)); |
---|
| 844 | + GEM_BUG_ON(!IS_DGFX(i915)); |
---|
| 845 | + } |
---|
1356 | 846 | } |
---|
1357 | | - |
---|
1358 | | - dev_priv->drm.pdev = pdev; |
---|
1359 | | - dev_priv->drm.dev_private = dev_priv; |
---|
| 847 | +#endif |
---|
1360 | 848 | |
---|
1361 | 849 | ret = pci_enable_device(pdev); |
---|
1362 | 850 | if (ret) |
---|
1363 | 851 | goto out_fini; |
---|
1364 | 852 | |
---|
1365 | | - pci_set_drvdata(pdev, &dev_priv->drm); |
---|
1366 | | - /* |
---|
1367 | | - * Disable the system suspend direct complete optimization, which can |
---|
1368 | | - * leave the device suspended skipping the driver's suspend handlers |
---|
1369 | | - * if the device was already runtime suspended. This is needed due to |
---|
1370 | | - * the difference in our runtime and system suspend sequence and |
---|
1371 | | - * becaue the HDA driver may require us to enable the audio power |
---|
1372 | | - * domain during system suspend. |
---|
1373 | | - */ |
---|
1374 | | - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); |
---|
1375 | | - |
---|
1376 | | - ret = i915_driver_init_early(dev_priv, ent); |
---|
| 853 | + ret = i915_driver_early_probe(i915); |
---|
1377 | 854 | if (ret < 0) |
---|
1378 | 855 | goto out_pci_disable; |
---|
1379 | 856 | |
---|
1380 | | - intel_runtime_pm_get(dev_priv); |
---|
| 857 | + disable_rpm_wakeref_asserts(&i915->runtime_pm); |
---|
1381 | 858 | |
---|
1382 | | - ret = i915_driver_init_mmio(dev_priv); |
---|
| 859 | + intel_vgpu_detect(i915); |
---|
| 860 | + |
---|
| 861 | + ret = i915_driver_mmio_probe(i915); |
---|
1383 | 862 | if (ret < 0) |
---|
1384 | 863 | goto out_runtime_pm_put; |
---|
1385 | 864 | |
---|
1386 | | - ret = i915_driver_init_hw(dev_priv); |
---|
| 865 | + ret = i915_driver_hw_probe(i915); |
---|
1387 | 866 | if (ret < 0) |
---|
1388 | 867 | goto out_cleanup_mmio; |
---|
1389 | 868 | |
---|
1390 | | - /* |
---|
1391 | | - * TODO: move the vblank init and parts of modeset init steps into one |
---|
1392 | | - * of the i915_driver_init_/i915_driver_register functions according |
---|
1393 | | - * to the role/effect of the given init step. |
---|
1394 | | - */ |
---|
1395 | | - if (INTEL_INFO(dev_priv)->num_pipes) { |
---|
1396 | | - ret = drm_vblank_init(&dev_priv->drm, |
---|
1397 | | - INTEL_INFO(dev_priv)->num_pipes); |
---|
1398 | | - if (ret) |
---|
1399 | | - goto out_cleanup_hw; |
---|
1400 | | - } |
---|
1401 | | - |
---|
1402 | | - ret = i915_load_modeset_init(&dev_priv->drm); |
---|
| 869 | + ret = intel_modeset_init_noirq(i915); |
---|
1403 | 870 | if (ret < 0) |
---|
1404 | 871 | goto out_cleanup_hw; |
---|
1405 | 872 | |
---|
1406 | | - i915_driver_register(dev_priv); |
---|
| 873 | + ret = intel_irq_install(i915); |
---|
| 874 | + if (ret) |
---|
| 875 | + goto out_cleanup_modeset; |
---|
1407 | 876 | |
---|
1408 | | - intel_runtime_pm_enable(dev_priv); |
---|
| 877 | + ret = intel_modeset_init_nogem(i915); |
---|
| 878 | + if (ret) |
---|
| 879 | + goto out_cleanup_irq; |
---|
1409 | 880 | |
---|
1410 | | - intel_init_ipc(dev_priv); |
---|
| 881 | + ret = i915_gem_init(i915); |
---|
| 882 | + if (ret) |
---|
| 883 | + goto out_cleanup_modeset2; |
---|
1411 | 884 | |
---|
1412 | | - intel_runtime_pm_put(dev_priv); |
---|
| 885 | + ret = intel_modeset_init(i915); |
---|
| 886 | + if (ret) |
---|
| 887 | + goto out_cleanup_gem; |
---|
1413 | 888 | |
---|
1414 | | - i915_welcome_messages(dev_priv); |
---|
| 889 | + i915_driver_register(i915); |
---|
| 890 | + |
---|
| 891 | + enable_rpm_wakeref_asserts(&i915->runtime_pm); |
---|
| 892 | + |
---|
| 893 | + i915_welcome_messages(i915); |
---|
| 894 | + |
---|
| 895 | + i915->do_release = true; |
---|
1415 | 896 | |
---|
1416 | 897 | return 0; |
---|
1417 | 898 | |
---|
| 899 | +out_cleanup_gem: |
---|
| 900 | + i915_gem_suspend(i915); |
---|
| 901 | + i915_gem_driver_remove(i915); |
---|
| 902 | + i915_gem_driver_release(i915); |
---|
| 903 | +out_cleanup_modeset2: |
---|
| 904 | + /* FIXME clean up the error path */ |
---|
| 905 | + intel_modeset_driver_remove(i915); |
---|
| 906 | + intel_irq_uninstall(i915); |
---|
| 907 | + intel_modeset_driver_remove_noirq(i915); |
---|
| 908 | + goto out_cleanup_modeset; |
---|
| 909 | +out_cleanup_irq: |
---|
| 910 | + intel_irq_uninstall(i915); |
---|
| 911 | +out_cleanup_modeset: |
---|
| 912 | + intel_modeset_driver_remove_nogem(i915); |
---|
1418 | 913 | out_cleanup_hw: |
---|
1419 | | - i915_driver_cleanup_hw(dev_priv); |
---|
| 914 | + i915_driver_hw_remove(i915); |
---|
| 915 | + intel_memory_regions_driver_release(i915); |
---|
| 916 | + i915_ggtt_driver_release(i915); |
---|
1420 | 917 | out_cleanup_mmio: |
---|
1421 | | - i915_driver_cleanup_mmio(dev_priv); |
---|
| 918 | + i915_driver_mmio_release(i915); |
---|
1422 | 919 | out_runtime_pm_put: |
---|
1423 | | - intel_runtime_pm_put(dev_priv); |
---|
1424 | | - i915_driver_cleanup_early(dev_priv); |
---|
| 920 | + enable_rpm_wakeref_asserts(&i915->runtime_pm); |
---|
| 921 | + i915_driver_late_release(i915); |
---|
1425 | 922 | out_pci_disable: |
---|
1426 | 923 | pci_disable_device(pdev); |
---|
1427 | 924 | out_fini: |
---|
1428 | | - i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); |
---|
1429 | | - drm_dev_fini(&dev_priv->drm); |
---|
1430 | | -out_free: |
---|
1431 | | - kfree(dev_priv); |
---|
1432 | | - pci_set_drvdata(pdev, NULL); |
---|
| 925 | + i915_probe_error(i915, "Device initialization failed (%d)\n", ret); |
---|
1433 | 926 | return ret; |
---|
1434 | 927 | } |
---|
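The renamed i915_driver_probe() keeps the same role as the old i915_driver_load(): it is presumably invoked from the PCI .probe callback in i915_pci.c, which is not part of this hunk. Conceptually the call site looks something like the sketch below (the surrounding checks are only assumed, not taken from the source):

    static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            int err;

            /* ... device-ID and modeset-parameter checks elided ... */

            err = i915_driver_probe(pdev, ent);
            if (err)
                    return err;

            return 0;
    }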
1435 | 928 | |
---|
1436 | | -void i915_driver_unload(struct drm_device *dev) |
---|
| 929 | +void i915_driver_remove(struct drm_i915_private *i915) |
---|
1437 | 930 | { |
---|
1438 | | - struct drm_i915_private *dev_priv = to_i915(dev); |
---|
1439 | | - struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
| 931 | + disable_rpm_wakeref_asserts(&i915->runtime_pm); |
---|
1440 | 932 | |
---|
1441 | | - i915_driver_unregister(dev_priv); |
---|
| 933 | + i915_driver_unregister(i915); |
---|
1442 | 934 | |
---|
1443 | | - if (i915_gem_suspend(dev_priv)) |
---|
1444 | | - DRM_ERROR("failed to idle hardware; continuing to unload!\n"); |
---|
| 935 | + /* Flush any external code that still may be under the RCU lock */ |
---|
| 936 | + synchronize_rcu(); |
---|
1445 | 937 | |
---|
1446 | | - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
---|
| 938 | + i915_gem_suspend(i915); |
---|
1447 | 939 | |
---|
1448 | | - drm_atomic_helper_shutdown(dev); |
---|
| 940 | + drm_atomic_helper_shutdown(&i915->drm); |
---|
1449 | 941 | |
---|
1450 | | - intel_gvt_cleanup(dev_priv); |
---|
| 942 | + intel_gvt_driver_remove(i915); |
---|
1451 | 943 | |
---|
1452 | | - intel_modeset_cleanup(dev); |
---|
| 944 | + intel_modeset_driver_remove(i915); |
---|
1453 | 945 | |
---|
1454 | | - intel_bios_cleanup(dev_priv); |
---|
| 946 | + intel_irq_uninstall(i915); |
---|
1455 | 947 | |
---|
1456 | | - vga_switcheroo_unregister_client(pdev); |
---|
1457 | | - vga_client_register(pdev, NULL, NULL, NULL); |
---|
| 948 | + intel_modeset_driver_remove_noirq(i915); |
---|
1458 | 949 | |
---|
1459 | | - intel_csr_ucode_fini(dev_priv); |
---|
| 950 | + i915_reset_error_state(i915); |
---|
| 951 | + i915_gem_driver_remove(i915); |
---|
1460 | 952 | |
---|
1461 | | - /* Free error state after interrupts are fully disabled. */ |
---|
1462 | | - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); |
---|
1463 | | - i915_reset_error_state(dev_priv); |
---|
| 953 | + intel_modeset_driver_remove_nogem(i915); |
---|
1464 | 954 | |
---|
1465 | | - i915_gem_fini(dev_priv); |
---|
1466 | | - intel_fbc_cleanup_cfb(dev_priv); |
---|
| 955 | + i915_driver_hw_remove(i915); |
---|
1467 | 956 | |
---|
1468 | | - intel_power_domains_fini(dev_priv); |
---|
1469 | | - |
---|
1470 | | - i915_driver_cleanup_hw(dev_priv); |
---|
1471 | | - i915_driver_cleanup_mmio(dev_priv); |
---|
1472 | | - |
---|
1473 | | - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); |
---|
| 957 | + enable_rpm_wakeref_asserts(&i915->runtime_pm); |
---|
1474 | 958 | } |
---|
1475 | 959 | |
---|
1476 | 960 | static void i915_driver_release(struct drm_device *dev) |
---|
1477 | 961 | { |
---|
1478 | 962 | struct drm_i915_private *dev_priv = to_i915(dev); |
---|
| 963 | + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
---|
1479 | 964 | |
---|
1480 | | - i915_driver_cleanup_early(dev_priv); |
---|
1481 | | - drm_dev_fini(&dev_priv->drm); |
---|
| 965 | + if (!dev_priv->do_release) |
---|
| 966 | + return; |
---|
1482 | 967 | |
---|
1483 | | - kfree(dev_priv); |
---|
| 968 | + disable_rpm_wakeref_asserts(rpm); |
---|
| 969 | + |
---|
| 970 | + i915_gem_driver_release(dev_priv); |
---|
| 971 | + |
---|
| 972 | + intel_memory_regions_driver_release(dev_priv); |
---|
| 973 | + i915_ggtt_driver_release(dev_priv); |
---|
| 974 | + i915_gem_drain_freed_objects(dev_priv); |
---|
| 975 | + |
---|
| 976 | + i915_driver_mmio_release(dev_priv); |
---|
| 977 | + |
---|
| 978 | + enable_rpm_wakeref_asserts(rpm); |
---|
| 979 | + intel_runtime_pm_driver_release(rpm); |
---|
| 980 | + |
---|
| 981 | + i915_driver_late_release(dev_priv); |
---|
1484 | 982 | } |
---|
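i915_driver_release() now returns early unless do_release was set at the very end of a successful probe, which keeps a device that failed partway through i915_driver_probe() from being torn down a second time by the release path. The function is presumably reached through the drm_driver .release hook when the final drm_dev_put() drops the device; schematically (only this one field shown, the rest of the driver structure omitted and not claimed to match i915's actual initializer):

    static struct drm_driver driver = {
            /* ... features, ioctls, fops ... */
            .release = i915_driver_release,
    };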
1485 | 983 | |
---|
1486 | 984 | static int i915_driver_open(struct drm_device *dev, struct drm_file *file) |
---|
.. | .. |
---|
1517 | 1015 | { |
---|
1518 | 1016 | struct drm_i915_file_private *file_priv = file->driver_priv; |
---|
1519 | 1017 | |
---|
1520 | | - mutex_lock(&dev->struct_mutex); |
---|
1521 | 1018 | i915_gem_context_close(file); |
---|
1522 | | - i915_gem_release(dev, file); |
---|
1523 | | - mutex_unlock(&dev->struct_mutex); |
---|
1524 | 1019 | |
---|
1525 | | - kfree(file_priv); |
---|
| 1020 | + kfree_rcu(file_priv, rcu); |
---|
| 1021 | + |
---|
| 1022 | + /* Catch up with all the deferred frees from "this" client */ |
---|
| 1023 | + i915_gem_flush_free_objects(to_i915(dev)); |
---|
1526 | 1024 | } |
---|
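i915_driver_postclose() now frees the per-file state with kfree_rcu() instead of an immediate kfree() under struct_mutex, so lookups still inside an RCU read-side critical section cannot dereference freed memory. The diff implies drm_i915_file_private gained a struct rcu_head member named rcu; the general shape of the pattern, with otherwise made-up names, is:

    struct my_file_private {
            /* ... per-client state ... */
            struct rcu_head rcu;    /* reclaim deferred past an RCU grace period */
    };

    static void my_postclose(struct drm_device *dev, struct drm_file *file)
    {
            struct my_file_private *fpriv = file->driver_priv;

            /* Queues the free; readers under rcu_read_lock() stay safe. */
            kfree_rcu(fpriv, rcu);
    }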
1527 | 1025 | |
---|
1528 | 1026 | static void intel_suspend_encoders(struct drm_i915_private *dev_priv) |
---|
.. | .. |
---|
1537 | 1035 | drm_modeset_unlock_all(dev); |
---|
1538 | 1036 | } |
---|
1539 | 1037 | |
---|
1540 | | -static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
---|
1541 | | - bool rpm_resume); |
---|
1542 | | -static int vlv_suspend_complete(struct drm_i915_private *dev_priv); |
---|
1543 | | - |
---|
1544 | 1038 | static bool suspend_to_idle(struct drm_i915_private *dev_priv) |
---|
1545 | 1039 | { |
---|
1546 | 1040 | #if IS_ENABLED(CONFIG_ACPI_SLEEP) |
---|
.. | .. |
---|
1553 | 1047 | static int i915_drm_prepare(struct drm_device *dev) |
---|
1554 | 1048 | { |
---|
1555 | 1049 | struct drm_i915_private *i915 = to_i915(dev); |
---|
1556 | | - int err; |
---|
1557 | 1050 | |
---|
1558 | 1051 | /* |
---|
1559 | 1052 | * NB intel_display_suspend() may issue new requests after we've |
---|
.. | .. |
---|
1561 | 1054 | * split out that work and pull it forward so that after point, |
---|
1562 | 1055 | * the GPU is not woken again. |
---|
1563 | 1056 | */ |
---|
1564 | | - err = i915_gem_suspend(i915); |
---|
1565 | | - if (err) |
---|
1566 | | - dev_err(&i915->drm.pdev->dev, |
---|
1567 | | - "GEM idle failed, suspend/resume might fail\n"); |
---|
| 1057 | + i915_gem_suspend(i915); |
---|
1568 | 1058 | |
---|
1569 | | - return err; |
---|
| 1059 | + return 0; |
---|
1570 | 1060 | } |
---|
1571 | 1061 | |
---|
1572 | 1062 | static int i915_drm_suspend(struct drm_device *dev) |
---|
.. | .. |
---|
1575 | 1065 | struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
1576 | 1066 | pci_power_t opregion_target_state; |
---|
1577 | 1067 | |
---|
1578 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
| 1068 | + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
---|
1579 | 1069 | |
---|
1580 | 1070 | /* We do a lot of poking in a lot of registers, make sure they work |
---|
1581 | 1071 | * properly. */ |
---|
1582 | | - intel_display_set_init_power(dev_priv, true); |
---|
| 1072 | + intel_power_domains_disable(dev_priv); |
---|
1583 | 1073 | |
---|
1584 | 1074 | drm_kms_helper_poll_disable(dev); |
---|
1585 | 1075 | |
---|
.. | .. |
---|
1596 | 1086 | |
---|
1597 | 1087 | intel_suspend_hw(dev_priv); |
---|
1598 | 1088 | |
---|
1599 | | - i915_gem_suspend_gtt_mappings(dev_priv); |
---|
| 1089 | + i915_ggtt_suspend(&dev_priv->ggtt); |
---|
1600 | 1090 | |
---|
1601 | 1091 | i915_save_state(dev_priv); |
---|
1602 | 1092 | |
---|
1603 | 1093 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
---|
1604 | | - intel_opregion_notify_adapter(dev_priv, opregion_target_state); |
---|
1605 | | - |
---|
1606 | | - intel_opregion_unregister(dev_priv); |
---|
| 1094 | + intel_opregion_suspend(dev_priv, opregion_target_state); |
---|
1607 | 1095 | |
---|
1608 | 1096 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
---|
1609 | 1097 | |
---|
.. | .. |
---|
1611 | 1099 | |
---|
1612 | 1100 | intel_csr_ucode_suspend(dev_priv); |
---|
1613 | 1101 | |
---|
1614 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
| 1102 | + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
---|
1615 | 1103 | |
---|
1616 | 1104 | return 0; |
---|
| 1105 | +} |
---|
| 1106 | + |
---|
| 1107 | +static enum i915_drm_suspend_mode |
---|
| 1108 | +get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) |
---|
| 1109 | +{ |
---|
| 1110 | + if (hibernate) |
---|
| 1111 | + return I915_DRM_SUSPEND_HIBERNATE; |
---|
| 1112 | + |
---|
| 1113 | + if (suspend_to_idle(dev_priv)) |
---|
| 1114 | + return I915_DRM_SUSPEND_IDLE; |
---|
| 1115 | + |
---|
| 1116 | + return I915_DRM_SUSPEND_MEM; |
---|
1617 | 1117 | } |
---|
1618 | 1118 | |
---|
1619 | 1119 | static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) |
---|
1620 | 1120 | { |
---|
1621 | 1121 | struct drm_i915_private *dev_priv = to_i915(dev); |
---|
1622 | 1122 | struct pci_dev *pdev = dev_priv->drm.pdev; |
---|
| 1123 | + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
---|
1623 | 1124 | int ret; |
---|
1624 | 1125 | |
---|
1625 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
| 1126 | + disable_rpm_wakeref_asserts(rpm); |
---|
1626 | 1127 | |
---|
1627 | 1128 | i915_gem_suspend_late(dev_priv); |
---|
1628 | 1129 | |
---|
1629 | | - intel_display_set_init_power(dev_priv, false); |
---|
1630 | | - i915_rc6_ctx_wa_suspend(dev_priv); |
---|
1631 | | - intel_uncore_suspend(dev_priv); |
---|
| 1130 | + intel_uncore_suspend(&dev_priv->uncore); |
---|
1632 | 1131 | |
---|
1633 | | - /* |
---|
1634 | | - * In case of firmware assisted context save/restore don't manually |
---|
1635 | | - * deinit the power domains. This also means the CSR/DMC firmware will |
---|
1636 | | - * stay active, it will power down any HW resources as required and |
---|
1637 | | - * also enable deeper system power states that would be blocked if the |
---|
1638 | | - * firmware was inactive. |
---|
1639 | | - */ |
---|
1640 | | - if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) || |
---|
1641 | | - dev_priv->csr.dmc_payload == NULL) { |
---|
1642 | | - intel_power_domains_suspend(dev_priv); |
---|
1643 | | - dev_priv->power_domains_suspended = true; |
---|
1644 | | - } |
---|
| 1132 | + intel_power_domains_suspend(dev_priv, |
---|
| 1133 | + get_suspend_mode(dev_priv, hibernation)); |
---|
1645 | 1134 | |
---|
1646 | | - ret = 0; |
---|
1647 | | - if (IS_GEN9_LP(dev_priv)) |
---|
1648 | | - bxt_enable_dc9(dev_priv); |
---|
1649 | | - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
---|
1650 | | - hsw_enable_pc8(dev_priv); |
---|
1651 | | - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
---|
1652 | | - ret = vlv_suspend_complete(dev_priv); |
---|
| 1135 | + intel_display_power_suspend_late(dev_priv); |
---|
1653 | 1136 | |
---|
| 1137 | + ret = vlv_suspend_complete(dev_priv); |
---|
1654 | 1138 | if (ret) { |
---|
1655 | | - DRM_ERROR("Suspend complete failed: %d\n", ret); |
---|
1656 | | - if (dev_priv->power_domains_suspended) { |
---|
1657 | | - intel_power_domains_init_hw(dev_priv, true); |
---|
1658 | | - dev_priv->power_domains_suspended = false; |
---|
1659 | | - } |
---|
| 1139 | + drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); |
---|
| 1140 | + intel_power_domains_resume(dev_priv); |
---|
1660 | 1141 | |
---|
1661 | 1142 | goto out; |
---|
1662 | 1143 | } |
---|
.. | .. |
---|
1678 | 1159 | pci_set_power_state(pdev, PCI_D3hot); |
---|
1679 | 1160 | |
---|
1680 | 1161 | out: |
---|
1681 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
| 1162 | + enable_rpm_wakeref_asserts(rpm); |
---|
| 1163 | + if (!dev_priv->uncore.user_forcewake_count) |
---|
| 1164 | + intel_runtime_pm_driver_release(rpm); |
---|
1682 | 1165 | |
---|
1683 | 1166 | return ret; |
---|
1684 | 1167 | } |
---|
1685 | 1168 | |
---|
1686 | | -static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) |
---|
| 1169 | +int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) |
---|
1687 | 1170 | { |
---|
1688 | 1171 | int error; |
---|
1689 | 1172 | |
---|
1690 | | - if (!dev) { |
---|
1691 | | - DRM_ERROR("dev: %p\n", dev); |
---|
1692 | | - DRM_ERROR("DRM not initialized, aborting suspend.\n"); |
---|
1693 | | - return -ENODEV; |
---|
1694 | | - } |
---|
1695 | | - |
---|
1696 | | - if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && |
---|
1697 | | - state.event != PM_EVENT_FREEZE)) |
---|
| 1173 | + if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND && |
---|
| 1174 | + state.event != PM_EVENT_FREEZE)) |
---|
1698 | 1175 | return -EINVAL; |
---|
1699 | 1176 | |
---|
1700 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1177 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
1701 | 1178 | return 0; |
---|
1702 | 1179 | |
---|
1703 | | - error = i915_drm_suspend(dev); |
---|
| 1180 | + error = i915_drm_suspend(&i915->drm); |
---|
1704 | 1181 | if (error) |
---|
1705 | 1182 | return error; |
---|
1706 | 1183 | |
---|
1707 | | - return i915_drm_suspend_late(dev, false); |
---|
| 1184 | + return i915_drm_suspend_late(&i915->drm, false); |
---|
1708 | 1185 | } |
---|
1709 | 1186 | |
---|
1710 | 1187 | static int i915_drm_resume(struct drm_device *dev) |
---|
.. | .. |
---|
1712 | 1189 | struct drm_i915_private *dev_priv = to_i915(dev); |
---|
1713 | 1190 | int ret; |
---|
1714 | 1191 | |
---|
1715 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
1716 | | - intel_sanitize_gt_powersave(dev_priv); |
---|
| 1192 | + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
---|
1717 | 1193 | |
---|
1718 | | - i915_gem_sanitize(dev_priv); |
---|
| 1194 | + sanitize_gpu(dev_priv); |
---|
1719 | 1195 | |
---|
1720 | 1196 | ret = i915_ggtt_enable_hw(dev_priv); |
---|
1721 | 1197 | if (ret) |
---|
1722 | | - DRM_ERROR("failed to re-enable GGTT\n"); |
---|
| 1198 | + drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); |
---|
| 1199 | + |
---|
| 1200 | + i915_ggtt_resume(&dev_priv->ggtt); |
---|
1723 | 1201 | |
---|
1724 | 1202 | intel_csr_ucode_resume(dev_priv); |
---|
1725 | 1203 | |
---|
1726 | 1204 | i915_restore_state(dev_priv); |
---|
1727 | 1205 | intel_pps_unlock_regs_wa(dev_priv); |
---|
1728 | | - intel_opregion_setup(dev_priv); |
---|
1729 | 1206 | |
---|
1730 | 1207 | intel_init_pch_refclk(dev_priv); |
---|
1731 | 1208 | |
---|
.. | .. |
---|
1745 | 1222 | |
---|
1746 | 1223 | i915_gem_resume(dev_priv); |
---|
1747 | 1224 | |
---|
1748 | | - intel_modeset_init_hw(dev); |
---|
| 1225 | + intel_modeset_init_hw(dev_priv); |
---|
1749 | 1226 | intel_init_clock_gating(dev_priv); |
---|
1750 | 1227 | |
---|
1751 | 1228 | spin_lock_irq(&dev_priv->irq_lock); |
---|
.. | .. |
---|
1762 | 1239 | /* |
---|
1763 | 1240 | * ... but also need to make sure that hotplug processing |
---|
1764 | 1241 | * doesn't cause havoc. Like in the driver load code we don't |
---|
1765 | | - * bother with the tiny race here where we might loose hotplug |
---|
| 1242 | + * bother with the tiny race here where we might lose hotplug |
---|
1766 | 1243 | * notifications. |
---|
1767 | 1244 | * */ |
---|
1768 | 1245 | intel_hpd_init(dev_priv); |
---|
1769 | 1246 | |
---|
1770 | | - intel_opregion_register(dev_priv); |
---|
| 1247 | + intel_opregion_resume(dev_priv); |
---|
1771 | 1248 | |
---|
1772 | 1249 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
---|
1773 | 1250 | |
---|
1774 | | - intel_opregion_notify_adapter(dev_priv, PCI_D0); |
---|
| 1251 | + intel_power_domains_enable(dev_priv); |
---|
1775 | 1252 | |
---|
1776 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
| 1253 | + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
---|
1777 | 1254 | |
---|
1778 | 1255 | return 0; |
---|
1779 | 1256 | } |
---|
.. | .. |
---|
1806 | 1283 | */ |
---|
1807 | 1284 | ret = pci_set_power_state(pdev, PCI_D0); |
---|
1808 | 1285 | if (ret) { |
---|
1809 | | - DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); |
---|
1810 | | - goto out; |
---|
| 1286 | + drm_err(&dev_priv->drm, |
---|
| 1287 | + "failed to set PCI D0 power state (%d)\n", ret); |
---|
| 1288 | + return ret; |
---|
1811 | 1289 | } |
---|
1812 | 1290 | |
---|
1813 | 1291 | /* |
---|
.. | .. |
---|
1823 | 1301 | * depend on the device enable refcount we can't anyway depend on them |
---|
1824 | 1302 | * disabling/enabling the device. |
---|
1825 | 1303 | */ |
---|
1826 | | - if (pci_enable_device(pdev)) { |
---|
1827 | | - ret = -EIO; |
---|
1828 | | - goto out; |
---|
1829 | | - } |
---|
| 1304 | + if (pci_enable_device(pdev)) |
---|
| 1305 | + return -EIO; |
---|
1830 | 1306 | |
---|
1831 | 1307 | pci_set_master(pdev); |
---|
1832 | 1308 | |
---|
1833 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
| 1309 | + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
---|
1834 | 1310 | |
---|
1835 | | - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
---|
1836 | | - ret = vlv_resume_prepare(dev_priv, false); |
---|
| 1311 | + ret = vlv_resume_prepare(dev_priv, false); |
---|
1837 | 1312 | if (ret) |
---|
1838 | | - DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", |
---|
1839 | | - ret); |
---|
| 1313 | + drm_err(&dev_priv->drm, |
---|
| 1314 | + "Resume prepare failed: %d, continuing anyway\n", ret); |
---|
1840 | 1315 | |
---|
1841 | | - intel_uncore_resume_early(dev_priv); |
---|
| 1316 | + intel_uncore_resume_early(&dev_priv->uncore); |
---|
1842 | 1317 | |
---|
1843 | | - if (IS_GEN9_LP(dev_priv)) { |
---|
1844 | | - gen9_sanitize_dc_state(dev_priv); |
---|
1845 | | - bxt_disable_dc9(dev_priv); |
---|
1846 | | - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
---|
1847 | | - hsw_disable_pc8(dev_priv); |
---|
1848 | | - } |
---|
| 1318 | + intel_gt_check_and_clear_faults(&dev_priv->gt); |
---|
1849 | 1319 | |
---|
1850 | | - intel_uncore_sanitize(dev_priv); |
---|
| 1320 | + intel_display_power_resume_early(dev_priv); |
---|
1851 | 1321 | |
---|
1852 | | - if (dev_priv->power_domains_suspended) |
---|
1853 | | - intel_power_domains_init_hw(dev_priv, true); |
---|
1854 | | - else |
---|
1855 | | - intel_display_set_init_power(dev_priv, true); |
---|
| 1322 | + intel_power_domains_resume(dev_priv); |
---|
1856 | 1323 | |
---|
1857 | | - i915_rc6_ctx_wa_resume(dev_priv); |
---|
1858 | | - |
---|
1859 | | - intel_engines_sanitize(dev_priv); |
---|
1860 | | - |
---|
1861 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
1862 | | - |
---|
1863 | | -out: |
---|
1864 | | - dev_priv->power_domains_suspended = false; |
---|
| 1324 | + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); |
---|
1865 | 1325 | |
---|
1866 | 1326 | return ret; |
---|
1867 | 1327 | } |
---|
1868 | 1328 | |
---|
1869 | | -static int i915_resume_switcheroo(struct drm_device *dev) |
---|
| 1329 | +int i915_resume_switcheroo(struct drm_i915_private *i915) |
---|
1870 | 1330 | { |
---|
1871 | 1331 | int ret; |
---|
1872 | 1332 | |
---|
1873 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1333 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
1874 | 1334 | return 0; |
---|
1875 | 1335 | |
---|
1876 | | - ret = i915_drm_resume_early(dev); |
---|
| 1336 | + ret = i915_drm_resume_early(&i915->drm); |
---|
1877 | 1337 | if (ret) |
---|
1878 | 1338 | return ret; |
---|
1879 | 1339 | |
---|
1880 | | - return i915_drm_resume(dev); |
---|
1881 | | -} |
---|
1882 | | - |
---|
1883 | | -/** |
---|
1884 | | - * i915_reset - reset chip after a hang |
---|
1885 | | - * @i915: #drm_i915_private to reset |
---|
1886 | | - * @stalled_mask: mask of the stalled engines with the guilty requests |
---|
1887 | | - * @reason: user error message for why we are resetting |
---|
1888 | | - * |
---|
1889 | | - * Reset the chip. Useful if a hang is detected. Marks the device as wedged |
---|
1890 | | - * on failure. |
---|
1891 | | - * |
---|
1892 | | - * Caller must hold the struct_mutex. |
---|
1893 | | - * |
---|
1894 | | - * Procedure is fairly simple: |
---|
1895 | | - * - reset the chip using the reset reg |
---|
1896 | | - * - re-init context state |
---|
1897 | | - * - re-init hardware status page |
---|
1898 | | - * - re-init ring buffer |
---|
1899 | | - * - re-init interrupt state |
---|
1900 | | - * - re-init display |
---|
1901 | | - */ |
---|
1902 | | -void i915_reset(struct drm_i915_private *i915, |
---|
1903 | | - unsigned int stalled_mask, |
---|
1904 | | - const char *reason) |
---|
1905 | | -{ |
---|
1906 | | - struct i915_gpu_error *error = &i915->gpu_error; |
---|
1907 | | - int ret; |
---|
1908 | | - int i; |
---|
1909 | | - |
---|
1910 | | - GEM_TRACE("flags=%lx\n", error->flags); |
---|
1911 | | - |
---|
1912 | | - might_sleep(); |
---|
1913 | | - lockdep_assert_held(&i915->drm.struct_mutex); |
---|
1914 | | - GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); |
---|
1915 | | - |
---|
1916 | | - if (!test_bit(I915_RESET_HANDOFF, &error->flags)) |
---|
1917 | | - return; |
---|
1918 | | - |
---|
1919 | | - /* Clear any previous failed attempts at recovery. Time to try again. */ |
---|
1920 | | - if (!i915_gem_unset_wedged(i915)) |
---|
1921 | | - goto wakeup; |
---|
1922 | | - |
---|
1923 | | - if (reason) |
---|
1924 | | - dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); |
---|
1925 | | - error->reset_count++; |
---|
1926 | | - |
---|
1927 | | - disable_irq(i915->drm.irq); |
---|
1928 | | - ret = i915_gem_reset_prepare(i915); |
---|
1929 | | - if (ret) { |
---|
1930 | | - dev_err(i915->drm.dev, "GPU recovery failed\n"); |
---|
1931 | | - goto taint; |
---|
1932 | | - } |
---|
1933 | | - |
---|
1934 | | - if (!intel_has_gpu_reset(i915)) { |
---|
1935 | | - if (i915_modparams.reset) |
---|
1936 | | - dev_err(i915->drm.dev, "GPU reset not supported\n"); |
---|
1937 | | - else |
---|
1938 | | - DRM_DEBUG_DRIVER("GPU reset disabled\n"); |
---|
1939 | | - goto error; |
---|
1940 | | - } |
---|
1941 | | - |
---|
1942 | | - for (i = 0; i < 3; i++) { |
---|
1943 | | - ret = intel_gpu_reset(i915, ALL_ENGINES); |
---|
1944 | | - if (ret == 0) |
---|
1945 | | - break; |
---|
1946 | | - |
---|
1947 | | - msleep(100); |
---|
1948 | | - } |
---|
1949 | | - if (ret) { |
---|
1950 | | - dev_err(i915->drm.dev, "Failed to reset chip\n"); |
---|
1951 | | - goto taint; |
---|
1952 | | - } |
---|
1953 | | - |
---|
1954 | | - /* Ok, now get things going again... */ |
---|
1955 | | - |
---|
1956 | | - /* |
---|
1957 | | - * Everything depends on having the GTT running, so we need to start |
---|
1958 | | - * there. |
---|
1959 | | - */ |
---|
1960 | | - ret = i915_ggtt_enable_hw(i915); |
---|
1961 | | - if (ret) { |
---|
1962 | | - DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", |
---|
1963 | | - ret); |
---|
1964 | | - goto error; |
---|
1965 | | - } |
---|
1966 | | - |
---|
1967 | | - i915_gem_reset(i915, stalled_mask); |
---|
1968 | | - intel_overlay_reset(i915); |
---|
1969 | | - |
---|
1970 | | - /* |
---|
1971 | | - * Next we need to restore the context, but we don't use those |
---|
1972 | | - * yet either... |
---|
1973 | | - * |
---|
1974 | | - * Ring buffer needs to be re-initialized in the KMS case, or if X |
---|
1975 | | - * was running at the time of the reset (i.e. we weren't VT |
---|
1976 | | - * switched away). |
---|
1977 | | - */ |
---|
1978 | | - ret = i915_gem_init_hw(i915); |
---|
1979 | | - if (ret) { |
---|
1980 | | - DRM_ERROR("Failed to initialise HW following reset (%d)\n", |
---|
1981 | | - ret); |
---|
1982 | | - goto error; |
---|
1983 | | - } |
---|
1984 | | - |
---|
1985 | | - i915_queue_hangcheck(i915); |
---|
1986 | | - |
---|
1987 | | -finish: |
---|
1988 | | - i915_gem_reset_finish(i915); |
---|
1989 | | - enable_irq(i915->drm.irq); |
---|
1990 | | - |
---|
1991 | | -wakeup: |
---|
1992 | | - clear_bit(I915_RESET_HANDOFF, &error->flags); |
---|
1993 | | - wake_up_bit(&error->flags, I915_RESET_HANDOFF); |
---|
1994 | | - return; |
---|
1995 | | - |
---|
1996 | | -taint: |
---|
1997 | | - /* |
---|
1998 | | - * History tells us that if we cannot reset the GPU now, we |
---|
1999 | | - * never will. This then impacts everything that is run |
---|
2000 | | - * subsequently. On failing the reset, we mark the driver |
---|
2001 | | - * as wedged, preventing further execution on the GPU. |
---|
2002 | | - * We also want to go one step further and add a taint to the |
---|
2003 | | - * kernel so that any subsequent faults can be traced back to |
---|
2004 | | - * this failure. This is important for CI, where if the |
---|
2005 | | - * GPU/driver fails we would like to reboot and restart testing |
---|
2006 | | - * rather than continue on into oblivion. For everyone else, |
---|
2007 | | - * the system should still plod along, but they have been warned! |
---|
2008 | | - */ |
---|
2009 | | - add_taint(TAINT_WARN, LOCKDEP_STILL_OK); |
---|
2010 | | -error: |
---|
2011 | | - i915_gem_set_wedged(i915); |
---|
2012 | | - i915_retire_requests(i915); |
---|
2013 | | - goto finish; |
---|
2014 | | -} |
---|
2015 | | - |
---|
2016 | | -static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv, |
---|
2017 | | - struct intel_engine_cs *engine) |
---|
2018 | | -{ |
---|
2019 | | - return intel_gpu_reset(dev_priv, intel_engine_flag(engine)); |
---|
2020 | | -} |
---|
2021 | | - |
---|
2022 | | -/** |
---|
2023 | | - * i915_reset_engine - reset GPU engine to recover from a hang |
---|
2024 | | - * @engine: engine to reset |
---|
2025 | | - * @msg: reason for GPU reset; or NULL for no dev_notice() |
---|
2026 | | - * |
---|
2027 | | - * Reset a specific GPU engine. Useful if a hang is detected. |
---|
2028 | | - * Returns zero on successful reset or otherwise an error code. |
---|
2029 | | - * |
---|
2030 | | - * Procedure is: |
---|
2031 | | - * - identifies the request that caused the hang and it is dropped |
---|
2032 | | - * - reset engine (which will force the engine to idle) |
---|
2033 | | - * - re-init/configure engine |
---|
2034 | | - */ |
---|
2035 | | -int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) |
---|
2036 | | -{ |
---|
2037 | | - struct i915_gpu_error *error = &engine->i915->gpu_error; |
---|
2038 | | - struct i915_request *active_request; |
---|
2039 | | - int ret; |
---|
2040 | | - |
---|
2041 | | - GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); |
---|
2042 | | - GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); |
---|
2043 | | - |
---|
2044 | | - active_request = i915_gem_reset_prepare_engine(engine); |
---|
2045 | | - if (IS_ERR_OR_NULL(active_request)) { |
---|
2046 | | - /* Either the previous reset failed, or we pardon the reset. */ |
---|
2047 | | - ret = PTR_ERR(active_request); |
---|
2048 | | - goto out; |
---|
2049 | | - } |
---|
2050 | | - |
---|
2051 | | - if (msg) |
---|
2052 | | - dev_notice(engine->i915->drm.dev, |
---|
2053 | | - "Resetting %s for %s\n", engine->name, msg); |
---|
2054 | | - error->reset_engine_count[engine->id]++; |
---|
2055 | | - |
---|
2056 | | - if (!engine->i915->guc.execbuf_client) |
---|
2057 | | - ret = intel_gt_reset_engine(engine->i915, engine); |
---|
2058 | | - else |
---|
2059 | | - ret = intel_guc_reset_engine(&engine->i915->guc, engine); |
---|
2060 | | - if (ret) { |
---|
2061 | | - /* If we fail here, we expect to fallback to a global reset */ |
---|
2062 | | - DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", |
---|
2063 | | - engine->i915->guc.execbuf_client ? "GuC " : "", |
---|
2064 | | - engine->name, ret); |
---|
2065 | | - goto out; |
---|
2066 | | - } |
---|
2067 | | - |
---|
2068 | | - /* |
---|
2069 | | - * The request that caused the hang is stuck on elsp, we know the |
---|
2070 | | - * active request and can drop it, adjust head to skip the offending |
---|
2071 | | - * request to resume executing remaining requests in the queue. |
---|
2072 | | - */ |
---|
2073 | | - i915_gem_reset_engine(engine, active_request, true); |
---|
2074 | | - |
---|
2075 | | - /* |
---|
2076 | | - * The engine and its registers (and workarounds in case of render) |
---|
2077 | | - * have been reset to their default values. Follow the init_ring |
---|
2078 | | - * process to program RING_MODE, HWSP and re-enable submission. |
---|
2079 | | - */ |
---|
2080 | | - ret = engine->init_hw(engine); |
---|
2081 | | - if (ret) |
---|
2082 | | - goto out; |
---|
2083 | | - |
---|
2084 | | -out: |
---|
2085 | | - i915_gem_reset_finish_engine(engine); |
---|
2086 | | - return ret; |
---|
| 1340 | + return i915_drm_resume(&i915->drm); |
---|
2087 | 1341 | } |
---|
2088 | 1342 | |
---|
2089 | 1343 | static int i915_pm_prepare(struct device *kdev) |
---|
2090 | 1344 | { |
---|
2091 | | - struct pci_dev *pdev = to_pci_dev(kdev); |
---|
2092 | | - struct drm_device *dev = pci_get_drvdata(pdev); |
---|
| 1345 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2093 | 1346 | |
---|
2094 | | - if (!dev) { |
---|
| 1347 | + if (!i915) { |
---|
2095 | 1348 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); |
---|
2096 | 1349 | return -ENODEV; |
---|
2097 | 1350 | } |
---|
2098 | 1351 | |
---|
2099 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1352 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
2100 | 1353 | return 0; |
---|
2101 | 1354 | |
---|
2102 | | - return i915_drm_prepare(dev); |
---|
| 1355 | + return i915_drm_prepare(&i915->drm); |
---|
2103 | 1356 | } |
---|
2104 | 1357 | |
---|
2105 | 1358 | static int i915_pm_suspend(struct device *kdev) |
---|
2106 | 1359 | { |
---|
2107 | | - struct pci_dev *pdev = to_pci_dev(kdev); |
---|
2108 | | - struct drm_device *dev = pci_get_drvdata(pdev); |
---|
| 1360 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2109 | 1361 | |
---|
2110 | | - if (!dev) { |
---|
| 1362 | + if (!i915) { |
---|
2111 | 1363 | dev_err(kdev, "DRM not initialized, aborting suspend.\n"); |
---|
2112 | 1364 | return -ENODEV; |
---|
2113 | 1365 | } |
---|
2114 | 1366 | |
---|
2115 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1367 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
2116 | 1368 | return 0; |
---|
2117 | 1369 | |
---|
2118 | | - return i915_drm_suspend(dev); |
---|
| 1370 | + return i915_drm_suspend(&i915->drm); |
---|
2119 | 1371 | } |
---|
2120 | 1372 | |
---|
2121 | 1373 | static int i915_pm_suspend_late(struct device *kdev) |
---|
2122 | 1374 | { |
---|
2123 | | - struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
---|
| 1375 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2124 | 1376 | |
---|
2125 | 1377 | /* |
---|
2126 | 1378 | * We have a suspend ordering issue with the snd-hda driver also |
---|
.. | .. |
---|
2131 | 1383 | * FIXME: This should be solved with a special hdmi sink device or |
---|
2132 | 1384 | * similar so that power domains can be employed. |
---|
2133 | 1385 | */ |
---|
2134 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1386 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
2135 | 1387 | return 0; |
---|
2136 | 1388 | |
---|
2137 | | - return i915_drm_suspend_late(dev, false); |
---|
| 1389 | + return i915_drm_suspend_late(&i915->drm, false); |
---|
2138 | 1390 | } |
---|
2139 | 1391 | |
---|
2140 | 1392 | static int i915_pm_poweroff_late(struct device *kdev) |
---|
2141 | 1393 | { |
---|
2142 | | - struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
---|
| 1394 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2143 | 1395 | |
---|
2144 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1396 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
2145 | 1397 | return 0; |
---|
2146 | 1398 | |
---|
2147 | | - return i915_drm_suspend_late(dev, true); |
---|
| 1399 | + return i915_drm_suspend_late(&i915->drm, true); |
---|
2148 | 1400 | } |
---|
2149 | 1401 | |
---|
2150 | 1402 | static int i915_pm_resume_early(struct device *kdev) |
---|
2151 | 1403 | { |
---|
2152 | | - struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
---|
| 1404 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2153 | 1405 | |
---|
2154 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1406 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
2155 | 1407 | return 0; |
---|
2156 | 1408 | |
---|
2157 | | - return i915_drm_resume_early(dev); |
---|
| 1409 | + return i915_drm_resume_early(&i915->drm); |
---|
2158 | 1410 | } |
---|
2159 | 1411 | |
---|
2160 | 1412 | static int i915_pm_resume(struct device *kdev) |
---|
2161 | 1413 | { |
---|
2162 | | - struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
---|
| 1414 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2163 | 1415 | |
---|
2164 | | - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
| 1416 | + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) |
---|
2165 | 1417 | return 0; |
---|
2166 | 1418 | |
---|
2167 | | - return i915_drm_resume(dev); |
---|
| 1419 | + return i915_drm_resume(&i915->drm); |
---|
2168 | 1420 | } |
---|
2169 | 1421 | |
---|
2170 | 1422 | /* freeze: before creating the hibernation_image */ |
---|
2171 | 1423 | static int i915_pm_freeze(struct device *kdev) |
---|
2172 | 1424 | { |
---|
2173 | | - struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
---|
| 1425 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2174 | 1426 | int ret; |
---|
2175 | 1427 | |
---|
2176 | | - if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { |
---|
2177 | | - ret = i915_drm_suspend(dev); |
---|
| 1428 | + if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { |
---|
| 1429 | + ret = i915_drm_suspend(&i915->drm); |
---|
2178 | 1430 | if (ret) |
---|
2179 | 1431 | return ret; |
---|
2180 | 1432 | } |
---|
2181 | 1433 | |
---|
2182 | | - ret = i915_gem_freeze(kdev_to_i915(kdev)); |
---|
| 1434 | + ret = i915_gem_freeze(i915); |
---|
2183 | 1435 | if (ret) |
---|
2184 | 1436 | return ret; |
---|
2185 | 1437 | |
---|
.. | .. |
---|
2188 | 1440 | |
---|
2189 | 1441 | static int i915_pm_freeze_late(struct device *kdev) |
---|
2190 | 1442 | { |
---|
2191 | | - struct drm_device *dev = &kdev_to_i915(kdev)->drm; |
---|
| 1443 | + struct drm_i915_private *i915 = kdev_to_i915(kdev); |
---|
2192 | 1444 | int ret; |
---|
2193 | 1445 | |
---|
2194 | | - if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { |
---|
2195 | | - ret = i915_drm_suspend_late(dev, true); |
---|
| 1446 | + if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { |
---|
| 1447 | + ret = i915_drm_suspend_late(&i915->drm, true); |
---|
2196 | 1448 | if (ret) |
---|
2197 | 1449 | return ret; |
---|
2198 | 1450 | } |
---|
2199 | 1451 | |
---|
2200 | | - ret = i915_gem_freeze_late(kdev_to_i915(kdev)); |
---|
| 1452 | + ret = i915_gem_freeze_late(i915); |
---|
2201 | 1453 | if (ret) |
---|
2202 | 1454 | return ret; |
---|
2203 | 1455 | |
---|
.. | .. |
---|
2226 | 1478 | return i915_pm_resume(kdev); |
---|
2227 | 1479 | } |
---|
2228 | 1480 | |
---|
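The block of i915_pm_* callbacks above now resolves the driver-private pointer with kdev_to_i915() rather than bouncing through the PCI drvdata and a drm_device pointer. These callbacks are presumably collected into a dev_pm_ops table elsewhere in the file; a trimmed, hypothetical sketch of such a table (only a subset of slots, chosen to match the functions shown in this hunk, and not claimed to be the complete i915 table):

    static const struct dev_pm_ops i915_pm_ops = {
            .prepare = i915_pm_prepare,
            .suspend = i915_pm_suspend,
            .suspend_late = i915_pm_suspend_late,
            .resume_early = i915_pm_resume_early,
            .resume = i915_pm_resume,
            .freeze = i915_pm_freeze,
            .freeze_late = i915_pm_freeze_late,
            .poweroff_late = i915_pm_poweroff_late,
    };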
2229 | | -/* |
---|
2230 | | - * Save all Gunit registers that may be lost after a D3 and a subsequent |
---|
2231 | | - * S0i[R123] transition. The list of registers needing a save/restore is |
---|
2232 | | - * defined in the VLV2_S0IXRegs document. This documents marks all Gunit |
---|
2233 | | - * registers in the following way: |
---|
2234 | | - * - Driver: saved/restored by the driver |
---|
2235 | | - * - Punit : saved/restored by the Punit firmware |
---|
2236 | | - * - No, w/o marking: no need to save/restore, since the register is R/O or |
---|
2237 | | - * used internally by the HW in a way that doesn't depend |
---|
2238 | | - * keeping the content across a suspend/resume. |
---|
2239 | | - * - Debug : used for debugging |
---|
2240 | | - * |
---|
2241 | | - * We save/restore all registers marked with 'Driver', with the following |
---|
2242 | | - * exceptions: |
---|
2243 | | - * - Registers out of use, including also registers marked with 'Debug'. |
---|
2244 | | - * These have no effect on the driver's operation, so we don't save/restore |
---|
2245 | | - * them to reduce the overhead. |
---|
2246 | | - * - Registers that are fully setup by an initialization function called from |
---|
2247 | | - * the resume path. For example many clock gating and RPS/RC6 registers. |
---|
2248 | | - * - Registers that provide the right functionality with their reset defaults. |
---|
2249 | | - * |
---|
2250 | | - * TODO: Except for registers that based on the above 3 criteria can be safely |
---|
2251 | | - * ignored, we save/restore all others, practically treating the HW context as |
---|
2252 | | - * a black-box for the driver. Further investigation is needed to reduce the |
---|
2253 | | - * saved/restored registers even further, by following the same 3 criteria. |
---|
2254 | | - */ |
---|
2255 | | -static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
---|
2256 | | -{ |
---|
2257 | | - struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; |
---|
2258 | | - int i; |
---|
2259 | | - |
---|
2260 | | - /* GAM 0x4000-0x4770 */ |
---|
2261 | | - s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); |
---|
2262 | | - s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); |
---|
2263 | | - s->arb_mode = I915_READ(ARB_MODE); |
---|
2264 | | - s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); |
---|
2265 | | - s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); |
---|
2266 | | - |
---|
2267 | | - for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) |
---|
2268 | | - s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); |
---|
2269 | | - |
---|
2270 | | - s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); |
---|
2271 | | - s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); |
---|
2272 | | - |
---|
2273 | | - s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); |
---|
2274 | | - s->ecochk = I915_READ(GAM_ECOCHK); |
---|
2275 | | - s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); |
---|
2276 | | - s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); |
---|
2277 | | - |
---|
2278 | | - s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); |
---|
2279 | | - |
---|
2280 | | - /* MBC 0x9024-0x91D0, 0x8500 */ |
---|
2281 | | - s->g3dctl = I915_READ(VLV_G3DCTL); |
---|
2282 | | - s->gsckgctl = I915_READ(VLV_GSCKGCTL); |
---|
2283 | | - s->mbctl = I915_READ(GEN6_MBCTL); |
---|
2284 | | - |
---|
2285 | | - /* GCP 0x9400-0x9424, 0x8100-0x810C */ |
---|
2286 | | - s->ucgctl1 = I915_READ(GEN6_UCGCTL1); |
---|
2287 | | - s->ucgctl3 = I915_READ(GEN6_UCGCTL3); |
---|
2288 | | - s->rcgctl1 = I915_READ(GEN6_RCGCTL1); |
---|
2289 | | - s->rcgctl2 = I915_READ(GEN6_RCGCTL2); |
---|
2290 | | - s->rstctl = I915_READ(GEN6_RSTCTL); |
---|
2291 | | - s->misccpctl = I915_READ(GEN7_MISCCPCTL); |
---|
2292 | | - |
---|
2293 | | - /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ |
---|
2294 | | - s->gfxpause = I915_READ(GEN6_GFXPAUSE); |
---|
2295 | | - s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); |
---|
2296 | | - s->rpdeuc = I915_READ(GEN6_RPDEUC); |
---|
2297 | | - s->ecobus = I915_READ(ECOBUS); |
---|
2298 | | - s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); |
---|
2299 | | - s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); |
---|
2300 | | - s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); |
---|
2301 | | - s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); |
---|
2302 | | - s->rcedata = I915_READ(VLV_RCEDATA); |
---|
2303 | | - s->spare2gh = I915_READ(VLV_SPAREG2H); |
---|
2304 | | - |
---|
2305 | | - /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ |
---|
2306 | | - s->gt_imr = I915_READ(GTIMR); |
---|
2307 | | - s->gt_ier = I915_READ(GTIER); |
---|
2308 | | - s->pm_imr = I915_READ(GEN6_PMIMR); |
---|
2309 | | - s->pm_ier = I915_READ(GEN6_PMIER); |
---|
2310 | | - |
---|
2311 | | - for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) |
---|
2312 | | - s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); |
---|
2313 | | - |
---|
2314 | | - /* GT SA CZ domain, 0x100000-0x138124 */ |
---|
2315 | | - s->tilectl = I915_READ(TILECTL); |
---|
2316 | | - s->gt_fifoctl = I915_READ(GTFIFOCTL); |
---|
2317 | | - s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); |
---|
2318 | | - s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
---|
2319 | | - s->pmwgicz = I915_READ(VLV_PMWGICZ); |
---|
2320 | | - |
---|
2321 | | - /* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
---|
2322 | | - s->gu_ctl0 = I915_READ(VLV_GU_CTL0); |
---|
2323 | | - s->gu_ctl1 = I915_READ(VLV_GU_CTL1); |
---|
2324 | | - s->pcbr = I915_READ(VLV_PCBR); |
---|
2325 | | - s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
---|
2326 | | - |
---|
2327 | | - /* |
---|
2328 | | - * Not saving any of: |
---|
2329 | | - * DFT, 0x9800-0x9EC0 |
---|
2330 | | - * SARB, 0xB000-0xB1FC |
---|
2331 | | - * GAC, 0x5208-0x524C, 0x14000-0x14C000 |
---|
2332 | | - * PCI CFG |
---|
2333 | | - */ |
---|
2334 | | -} |
---|
2335 | | - |
---|
2336 | | -static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) |
---|
2337 | | -{ |
---|
2338 | | - struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; |
---|
2339 | | - u32 val; |
---|
2340 | | - int i; |
---|
2341 | | - |
---|
2342 | | - /* GAM 0x4000-0x4770 */ |
---|
2343 | | - I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); |
---|
2344 | | - I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); |
---|
2345 | | - I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); |
---|
2346 | | - I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); |
---|
2347 | | - I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); |
---|
2348 | | - |
---|
2349 | | - for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) |
---|
2350 | | - I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); |
---|
2351 | | - |
---|
2352 | | - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); |
---|
2353 | | - I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); |
---|
2354 | | - |
---|
2355 | | - I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); |
---|
2356 | | - I915_WRITE(GAM_ECOCHK, s->ecochk); |
---|
2357 | | - I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); |
---|
2358 | | - I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); |
---|
2359 | | - |
---|
2360 | | - I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); |
---|
2361 | | - |
---|
2362 | | - /* MBC 0x9024-0x91D0, 0x8500 */ |
---|
2363 | | - I915_WRITE(VLV_G3DCTL, s->g3dctl); |
---|
2364 | | - I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); |
---|
2365 | | - I915_WRITE(GEN6_MBCTL, s->mbctl); |
---|
2366 | | - |
---|
2367 | | - /* GCP 0x9400-0x9424, 0x8100-0x810C */ |
---|
2368 | | - I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); |
---|
2369 | | - I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); |
---|
2370 | | - I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); |
---|
2371 | | - I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); |
---|
2372 | | - I915_WRITE(GEN6_RSTCTL, s->rstctl); |
---|
2373 | | - I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); |
---|
2374 | | - |
---|
2375 | | - /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ |
---|
2376 | | - I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); |
---|
2377 | | - I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); |
---|
2378 | | - I915_WRITE(GEN6_RPDEUC, s->rpdeuc); |
---|
2379 | | - I915_WRITE(ECOBUS, s->ecobus); |
---|
2380 | | - I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); |
---|
2381 | | - I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout); |
---|
2382 | | - I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); |
---|
2383 | | - I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); |
---|
2384 | | - I915_WRITE(VLV_RCEDATA, s->rcedata); |
---|
2385 | | - I915_WRITE(VLV_SPAREG2H, s->spare2gh); |
---|
2386 | | - |
---|
2387 | | - /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ |
---|
2388 | | - I915_WRITE(GTIMR, s->gt_imr); |
---|
2389 | | - I915_WRITE(GTIER, s->gt_ier); |
---|
2390 | | - I915_WRITE(GEN6_PMIMR, s->pm_imr); |
---|
2391 | | - I915_WRITE(GEN6_PMIER, s->pm_ier); |
---|
2392 | | - |
---|
2393 | | - for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) |
---|
2394 | | - I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); |
---|
2395 | | - |
---|
2396 | | - /* GT SA CZ domain, 0x100000-0x138124 */ |
---|
2397 | | - I915_WRITE(TILECTL, s->tilectl); |
---|
2398 | | - I915_WRITE(GTFIFOCTL, s->gt_fifoctl); |
---|
2399 | | - /* |
---|
2400 | | - * Preserve the GT allow wake and GFX force clock bit, they are not |
---|
2401 | | - * be restored, as they are used to control the s0ix suspend/resume |
---|
2402 | | - * sequence by the caller. |
---|
2403 | | - */ |
---|
2404 | | - val = I915_READ(VLV_GTLC_WAKE_CTRL); |
---|
2405 | | - val &= VLV_GTLC_ALLOWWAKEREQ; |
---|
2406 | | - val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; |
---|
2407 | | - I915_WRITE(VLV_GTLC_WAKE_CTRL, val); |
---|
2408 | | - |
---|
2409 | | - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
---|
2410 | | - val &= VLV_GFX_CLK_FORCE_ON_BIT; |
---|
2411 | | - val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; |
---|
2412 | | - I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); |
---|
2413 | | - |
---|
2414 | | - I915_WRITE(VLV_PMWGICZ, s->pmwgicz); |
---|
2415 | | - |
---|
2416 | | - /* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
---|
2417 | | - I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); |
---|
2418 | | - I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); |
---|
2419 | | - I915_WRITE(VLV_PCBR, s->pcbr); |
---|
2420 | | - I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
---|
2421 | | -} |
---|
2422 | | - |
---|
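The tail of the restore routine removed above deliberately keeps two live control bits (the allow-wake request and the GFX clock force-on bit) out of the snapshot, because the caller drives the s0ix sequence through them. A minimal sketch of that read-modify-write idiom, with restore_preserving() as a purely hypothetical helper name:

```c
/*
 * Hypothetical helper, not driver API: write back a saved register value
 * while leaving the bits in @keep_mask at their current hardware state.
 */
static void restore_preserving(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, u32 saved, u32 keep_mask)
{
	u32 val = I915_READ(reg) & keep_mask;	/* keep the live control bits */

	val |= saved & ~keep_mask;		/* the rest from the snapshot */
	I915_WRITE(reg, val);
}
```

The removed function open-codes this twice, once for VLV_GTLC_WAKE_CTRL and once for VLV_GTLC_SURVIVABILITY_REG.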
2423 | | -static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, |
---|
2424 | | - u32 mask, u32 val) |
---|
2425 | | -{ |
---|
2426 | | - /* The HW does not like us polling for PW_STATUS frequently, so |
---|
2427 | | - * use the sleeping loop rather than risk the busy spin within |
---|
2428 | | - * intel_wait_for_register(). |
---|
2429 | | - * |
---|
2430 | | - * Transitioning between RC6 states should be at most 2ms (see |
---|
2431 | | - * valleyview_enable_rps) so use a 3ms timeout. |
---|
2432 | | - */ |
---|
2433 | | - return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, |
---|
2434 | | - 3); |
---|
2435 | | -} |
---|
2436 | | - |
---|
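The comment in the helper removed above is the whole point: PW_STATUS dislikes being polled aggressively, so the wait uses the sleeping wait_for() macro with a 3 ms budget instead of a register wait that may busy-spin. A hedged sketch of the same idiom written out directly (example_sleeping_poll() is a placeholder name; the ALLOWWAKEACK condition is simply the case used by the callers below):

```c
static int example_sleeping_poll(struct drm_i915_private *dev_priv)
{
	/*
	 * wait_for(COND, timeout_ms) re-evaluates COND with sleeps in
	 * between, so the 3 ms timeout never becomes a tight MMIO loop.
	 */
	return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) &
			 VLV_GTLC_ALLOWWAKEACK) == VLV_GTLC_ALLOWWAKEACK, 3);
}
```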
2437 | | -int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) |
---|
2438 | | -{ |
---|
2439 | | - u32 val; |
---|
2440 | | - int err; |
---|
2441 | | - |
---|
2442 | | - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
---|
2443 | | - val &= ~VLV_GFX_CLK_FORCE_ON_BIT; |
---|
2444 | | - if (force_on) |
---|
2445 | | - val |= VLV_GFX_CLK_FORCE_ON_BIT; |
---|
2446 | | - I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); |
---|
2447 | | - |
---|
2448 | | - if (!force_on) |
---|
2449 | | - return 0; |
---|
2450 | | - |
---|
2451 | | - err = intel_wait_for_register(dev_priv, |
---|
2452 | | - VLV_GTLC_SURVIVABILITY_REG, |
---|
2453 | | - VLV_GFX_CLK_STATUS_BIT, |
---|
2454 | | - VLV_GFX_CLK_STATUS_BIT, |
---|
2455 | | - 20); |
---|
2456 | | - if (err) |
---|
2457 | | - DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", |
---|
2458 | | - I915_READ(VLV_GTLC_SURVIVABILITY_REG)); |
---|
2459 | | - |
---|
2460 | | - return err; |
---|
2461 | | -} |
---|
2462 | | - |
---|
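vlv_force_gfx_clock() only waits for the clock-status acknowledgement on the force-on path; dropping the forcing is fire-and-forget. A hedged caller sketch of the usual bracketing around Gunit register access (example_gunit_access() and the commented body are placeholders):

```c
static int example_gunit_access(struct drm_i915_private *dev_priv)
{
	int err;

	/* Make sure the GFX clock is running before touching Gunit registers. */
	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		return err;

	/* ... read/write Gunit registers here ... */

	/* Drop the forcing again; this direction does not wait for status. */
	return vlv_force_gfx_clock(dev_priv, false);
}
```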
2463 | | -static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) |
---|
2464 | | -{ |
---|
2465 | | - u32 mask; |
---|
2466 | | - u32 val; |
---|
2467 | | - int err; |
---|
2468 | | - |
---|
2469 | | - val = I915_READ(VLV_GTLC_WAKE_CTRL); |
---|
2470 | | - val &= ~VLV_GTLC_ALLOWWAKEREQ; |
---|
2471 | | - if (allow) |
---|
2472 | | - val |= VLV_GTLC_ALLOWWAKEREQ; |
---|
2473 | | - I915_WRITE(VLV_GTLC_WAKE_CTRL, val); |
---|
2474 | | - POSTING_READ(VLV_GTLC_WAKE_CTRL); |
---|
2475 | | - |
---|
2476 | | - mask = VLV_GTLC_ALLOWWAKEACK; |
---|
2477 | | - val = allow ? mask : 0; |
---|
2478 | | - |
---|
2479 | | - err = vlv_wait_for_pw_status(dev_priv, mask, val); |
---|
2480 | | - if (err) |
---|
2481 | | - DRM_ERROR("timeout disabling GT waking\n"); |
---|
2482 | | - |
---|
2483 | | - return err; |
---|
2484 | | -} |
---|
2485 | | - |
---|
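The allow-wake toggle above is the classic request / flush / wait-for-acknowledge sequence: POSTING_READ() pushes the MMIO write out before the acknowledge bit is polled. The same shape as a generic hedged sketch (the helper name and parameters are placeholders, not driver API):

```c
static int example_request_and_ack(struct drm_i915_private *dev_priv,
				   i915_reg_t ctrl, u32 request,
				   u32 ack_mask, u32 ack_val)
{
	I915_WRITE(ctrl, request);
	POSTING_READ(ctrl);	/* flush the write before polling */

	/* Poll the power-well status until the request is acknowledged. */
	return vlv_wait_for_pw_status(dev_priv, ack_mask, ack_val);
}
```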
2486 | | -static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, |
---|
2487 | | - bool wait_for_on) |
---|
2488 | | -{ |
---|
2489 | | - u32 mask; |
---|
2490 | | - u32 val; |
---|
2491 | | - |
---|
2492 | | - mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; |
---|
2493 | | - val = wait_for_on ? mask : 0; |
---|
2494 | | - |
---|
2495 | | - /* |
---|
2496 | | - * RC6 transitioning can be delayed up to 2 msec (see |
---|
2497 | | - * valleyview_enable_rps); use 3 msec for safety.
---|
2498 | | - * |
---|
2499 | | - * This can fail to turn off the rc6 if the GPU is stuck after a failed |
---|
2500 | | - * reset and we are trying to force the machine to sleep. |
---|
2501 | | - */ |
---|
2502 | | - if (vlv_wait_for_pw_status(dev_priv, mask, val)) |
---|
2503 | | - DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n", |
---|
2504 | | - onoff(wait_for_on)); |
---|
2505 | | -} |
---|
2506 | | - |
---|
2507 | | -static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) |
---|
2508 | | -{ |
---|
2509 | | - if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) |
---|
2510 | | - return; |
---|
2511 | | - |
---|
2512 | | - DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); |
---|
2513 | | - I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); |
---|
2514 | | -} |
---|
2515 | | - |
---|
2516 | | -static int vlv_suspend_complete(struct drm_i915_private *dev_priv) |
---|
2517 | | -{ |
---|
2518 | | - u32 mask; |
---|
2519 | | - int err; |
---|
2520 | | - |
---|
2521 | | - /* |
---|
2522 | | - * Bspec defines the following GT well-on flags as debug only, so
---|
2523 | | - * don't treat them as hard failures. |
---|
2524 | | - */ |
---|
2525 | | - vlv_wait_for_gt_wells(dev_priv, false); |
---|
2526 | | - |
---|
2527 | | - mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; |
---|
2528 | | - WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); |
---|
2529 | | - |
---|
2530 | | - vlv_check_no_gt_access(dev_priv); |
---|
2531 | | - |
---|
2532 | | - err = vlv_force_gfx_clock(dev_priv, true); |
---|
2533 | | - if (err) |
---|
2534 | | - goto err1; |
---|
2535 | | - |
---|
2536 | | - err = vlv_allow_gt_wake(dev_priv, false); |
---|
2537 | | - if (err) |
---|
2538 | | - goto err2; |
---|
2539 | | - |
---|
2540 | | - if (!IS_CHERRYVIEW(dev_priv)) |
---|
2541 | | - vlv_save_gunit_s0ix_state(dev_priv); |
---|
2542 | | - |
---|
2543 | | - err = vlv_force_gfx_clock(dev_priv, false); |
---|
2544 | | - if (err) |
---|
2545 | | - goto err2; |
---|
2546 | | - |
---|
2547 | | - return 0; |
---|
2548 | | - |
---|
2549 | | -err2: |
---|
2550 | | - /* For safety always re-enable waking and disable gfx clock forcing */ |
---|
2551 | | - vlv_allow_gt_wake(dev_priv, true); |
---|
2552 | | -err1: |
---|
2553 | | - vlv_force_gfx_clock(dev_priv, false); |
---|
2554 | | - |
---|
2555 | | - return err; |
---|
2556 | | -} |
---|
2557 | | - |
---|
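The error handling above is the usual goto-unwind idiom: the labels are ordered so that a failure deeper in the sequence falls through every rollback step on the way out (here defensively, undoing even the step that failed, per the "for safety" comment). A self-contained skeleton of the shape, with stand-in step names rather than the real calls:

```c
/* Placeholder steps standing in for the real enable/disable calls. */
static int enable_a(void) { return 0; }		/* e.g. force GFX clock on */
static int enable_b(void) { return 0; }		/* e.g. disallow GT wake */
static void disable_a(void) { }
static void disable_b(void) { }

static int example_unwind(void)
{
	int err;

	err = enable_a();
	if (err)
		goto err_a;

	err = enable_b();
	if (err)
		goto err_b;

	return 0;

err_b:
	disable_b();	/* later labels fall through to earlier rollbacks */
err_a:
	disable_a();
	return err;
}
```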
2558 | | -static int vlv_resume_prepare(struct drm_i915_private *dev_priv, |
---|
2559 | | - bool rpm_resume) |
---|
2560 | | -{ |
---|
2561 | | - int err; |
---|
2562 | | - int ret; |
---|
2563 | | - |
---|
2564 | | - /* |
---|
2565 | | - * If any of the steps fail, just try to continue; that's the best we
---|
2566 | | - * can do at this point. Return the first error code (which will also |
---|
2567 | | - * leave RPM permanently disabled). |
---|
2568 | | - */ |
---|
2569 | | - ret = vlv_force_gfx_clock(dev_priv, true); |
---|
2570 | | - |
---|
2571 | | - if (!IS_CHERRYVIEW(dev_priv)) |
---|
2572 | | - vlv_restore_gunit_s0ix_state(dev_priv); |
---|
2573 | | - |
---|
2574 | | - err = vlv_allow_gt_wake(dev_priv, true); |
---|
2575 | | - if (!ret) |
---|
2576 | | - ret = err; |
---|
2577 | | - |
---|
2578 | | - err = vlv_force_gfx_clock(dev_priv, false); |
---|
2579 | | - if (!ret) |
---|
2580 | | - ret = err; |
---|
2581 | | - |
---|
2582 | | - vlv_check_no_gt_access(dev_priv); |
---|
2583 | | - |
---|
2584 | | - if (rpm_resume) |
---|
2585 | | - intel_init_clock_gating(dev_priv); |
---|
2586 | | - |
---|
2587 | | - return ret; |
---|
2588 | | -} |
---|
2589 | | - |
---|
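The resume path cannot usefully roll anything back, so every step runs regardless and only the first error code is reported (which also leaves runtime PM disabled, as the comment notes). The pattern in isolation, with placeholder steps:

```c
/* Placeholder steps standing in for the real resume calls. */
static int step_one(void) { return 0; }
static int step_two(void) { return 0; }

static int example_first_error_wins(void)
{
	int ret, err;

	ret = step_one();	/* keep going even if this fails */

	err = step_two();
	if (!ret)
		ret = err;	/* remember only the first failure */

	return ret;
}
```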
2590 | 1481 | static int intel_runtime_suspend(struct device *kdev) |
---|
2591 | 1482 | { |
---|
2592 | | - struct pci_dev *pdev = to_pci_dev(kdev); |
---|
2593 | | - struct drm_device *dev = pci_get_drvdata(pdev); |
---|
2594 | | - struct drm_i915_private *dev_priv = to_i915(dev); |
---|
| 1483 | + struct drm_i915_private *dev_priv = kdev_to_i915(kdev); |
---|
| 1484 | + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
---|
2595 | 1485 | int ret; |
---|
2596 | 1486 | |
---|
2597 | | - if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) |
---|
| 1487 | + if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) |
---|
2598 | 1488 | return -ENODEV; |
---|
2599 | 1489 | |
---|
2600 | | - if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
---|
2601 | | - return -ENODEV; |
---|
| 1490 | + drm_dbg_kms(&dev_priv->drm, "Suspending device\n"); |
---|
2602 | 1491 | |
---|
2603 | | - DRM_DEBUG_KMS("Suspending device\n"); |
---|
2604 | | - |
---|
2605 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
| 1492 | + disable_rpm_wakeref_asserts(rpm); |
---|
2606 | 1493 | |
---|
2607 | 1494 | /* |
---|
2608 | 1495 | * We are safe here against re-faults, since the fault handler takes |
---|
.. | .. |
---|
2610 | 1497 | */ |
---|
2611 | 1498 | i915_gem_runtime_suspend(dev_priv); |
---|
2612 | 1499 | |
---|
2613 | | - intel_uc_suspend(dev_priv); |
---|
| 1500 | + intel_gt_runtime_suspend(&dev_priv->gt); |
---|
2614 | 1501 | |
---|
2615 | 1502 | intel_runtime_pm_disable_interrupts(dev_priv); |
---|
2616 | 1503 | |
---|
2617 | | - intel_uncore_suspend(dev_priv); |
---|
| 1504 | + intel_uncore_suspend(&dev_priv->uncore); |
---|
2618 | 1505 | |
---|
2619 | | - ret = 0; |
---|
2620 | | - if (IS_GEN9_LP(dev_priv)) { |
---|
2621 | | - bxt_display_core_uninit(dev_priv); |
---|
2622 | | - bxt_enable_dc9(dev_priv); |
---|
2623 | | - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
---|
2624 | | - hsw_enable_pc8(dev_priv); |
---|
2625 | | - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
---|
2626 | | - ret = vlv_suspend_complete(dev_priv); |
---|
2627 | | - } |
---|
| 1506 | + intel_display_power_suspend(dev_priv); |
---|
2628 | 1507 | |
---|
| 1508 | + ret = vlv_suspend_complete(dev_priv); |
---|
2629 | 1509 | if (ret) { |
---|
2630 | | - DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); |
---|
2631 | | - intel_uncore_runtime_resume(dev_priv); |
---|
| 1510 | + drm_err(&dev_priv->drm, |
---|
| 1511 | + "Runtime suspend failed, disabling it (%d)\n", ret); |
---|
| 1512 | + intel_uncore_runtime_resume(&dev_priv->uncore); |
---|
2632 | 1513 | |
---|
2633 | 1514 | intel_runtime_pm_enable_interrupts(dev_priv); |
---|
2634 | 1515 | |
---|
2635 | | - intel_uc_resume(dev_priv); |
---|
| 1516 | + intel_gt_runtime_resume(&dev_priv->gt); |
---|
2636 | 1517 | |
---|
2637 | | - i915_gem_init_swizzling(dev_priv); |
---|
2638 | | - i915_gem_restore_fences(dev_priv); |
---|
2639 | | - |
---|
2640 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
| 1518 | + enable_rpm_wakeref_asserts(rpm); |
---|
2641 | 1519 | |
---|
2642 | 1520 | return ret; |
---|
2643 | 1521 | } |
---|
2644 | 1522 | |
---|
2645 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
2646 | | - WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); |
---|
| 1523 | + enable_rpm_wakeref_asserts(rpm); |
---|
| 1524 | + intel_runtime_pm_driver_release(rpm); |
---|
2647 | 1525 | |
---|
2648 | | - if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) |
---|
2649 | | - DRM_ERROR("Unclaimed access detected prior to suspending\n"); |
---|
| 1526 | + if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) |
---|
| 1527 | + drm_err(&dev_priv->drm, |
---|
| 1528 | + "Unclaimed access detected prior to suspending\n"); |
---|
2650 | 1529 | |
---|
2651 | | - dev_priv->runtime_pm.suspended = true; |
---|
| 1530 | + rpm->suspended = true; |
---|
2652 | 1531 | |
---|
2653 | 1532 | /* |
---|
2654 | 1533 | * FIXME: We really should find a document that references the arguments |
---|
.. | .. |
---|
2673 | 1552 | intel_opregion_notify_adapter(dev_priv, PCI_D1); |
---|
2674 | 1553 | } |
---|
2675 | 1554 | |
---|
2676 | | - assert_forcewakes_inactive(dev_priv); |
---|
| 1555 | + assert_forcewakes_inactive(&dev_priv->uncore); |
---|
2677 | 1556 | |
---|
2678 | 1557 | if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
---|
2679 | 1558 | intel_hpd_poll_init(dev_priv); |
---|
2680 | 1559 | |
---|
2681 | | - DRM_DEBUG_KMS("Device suspended\n"); |
---|
| 1560 | + drm_dbg_kms(&dev_priv->drm, "Device suspended\n"); |
---|
2682 | 1561 | return 0; |
---|
2683 | 1562 | } |
---|
2684 | 1563 | |
---|
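Note how the callback brackets its body with disable_rpm_wakeref_asserts()/enable_rpm_wakeref_asserts(): runtime-PM callbacks are the one context allowed to touch the hardware without holding a wakeref, so the asserts are suppressed only for their duration, and after this conversion the helpers take the intel_runtime_pm pointer rather than the whole i915. A hedged sketch of the shape (example_runtime_callback() is a placeholder):

```c
static int example_runtime_callback(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	int ret = 0;

	disable_rpm_wakeref_asserts(rpm);

	/* ... the device may legitimately be powered down / up here ... */

	enable_rpm_wakeref_asserts(rpm);

	return ret;
}
```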
2685 | 1564 | static int intel_runtime_resume(struct device *kdev) |
---|
2686 | 1565 | { |
---|
2687 | | - struct pci_dev *pdev = to_pci_dev(kdev); |
---|
2688 | | - struct drm_device *dev = pci_get_drvdata(pdev); |
---|
2689 | | - struct drm_i915_private *dev_priv = to_i915(dev); |
---|
2690 | | - int ret = 0; |
---|
| 1566 | + struct drm_i915_private *dev_priv = kdev_to_i915(kdev); |
---|
| 1567 | + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; |
---|
| 1568 | + int ret; |
---|
2691 | 1569 | |
---|
2692 | | - if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) |
---|
| 1570 | + if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) |
---|
2693 | 1571 | return -ENODEV; |
---|
2694 | 1572 | |
---|
2695 | | - DRM_DEBUG_KMS("Resuming device\n"); |
---|
| 1573 | + drm_dbg_kms(&dev_priv->drm, "Resuming device\n"); |
---|
2696 | 1574 | |
---|
2697 | | - WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); |
---|
2698 | | - disable_rpm_wakeref_asserts(dev_priv); |
---|
| 1575 | + drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); |
---|
| 1576 | + disable_rpm_wakeref_asserts(rpm); |
---|
2699 | 1577 | |
---|
2700 | 1578 | intel_opregion_notify_adapter(dev_priv, PCI_D0); |
---|
2701 | | - dev_priv->runtime_pm.suspended = false; |
---|
2702 | | - if (intel_uncore_unclaimed_mmio(dev_priv)) |
---|
2703 | | - DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); |
---|
| 1579 | + rpm->suspended = false; |
---|
| 1580 | + if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) |
---|
| 1581 | + drm_dbg(&dev_priv->drm, |
---|
| 1582 | + "Unclaimed access during suspend, bios?\n"); |
---|
2704 | 1583 | |
---|
2705 | | - if (IS_GEN9_LP(dev_priv)) { |
---|
2706 | | - bxt_disable_dc9(dev_priv); |
---|
2707 | | - bxt_display_core_init(dev_priv, true); |
---|
2708 | | - if (dev_priv->csr.dmc_payload && |
---|
2709 | | - (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) |
---|
2710 | | - gen9_enable_dc5(dev_priv); |
---|
2711 | | - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
---|
2712 | | - hsw_disable_pc8(dev_priv); |
---|
2713 | | - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
---|
2714 | | - ret = vlv_resume_prepare(dev_priv, true); |
---|
2715 | | - } |
---|
| 1584 | + intel_display_power_resume(dev_priv); |
---|
2716 | 1585 | |
---|
2717 | | - intel_uncore_runtime_resume(dev_priv); |
---|
| 1586 | + ret = vlv_resume_prepare(dev_priv, true); |
---|
| 1587 | + |
---|
| 1588 | + intel_uncore_runtime_resume(&dev_priv->uncore); |
---|
2718 | 1589 | |
---|
2719 | 1590 | intel_runtime_pm_enable_interrupts(dev_priv); |
---|
2720 | | - |
---|
2721 | | - intel_uc_resume(dev_priv); |
---|
2722 | 1591 | |
---|
2723 | 1592 | /* |
---|
2724 | 1593 | * No point in rolling back things in case of an error, as the best
---|
2725 | 1594 | * we can do is to hope that things will still work (and disable RPM). |
---|
2726 | 1595 | */ |
---|
2727 | | - i915_gem_init_swizzling(dev_priv); |
---|
2728 | | - i915_gem_restore_fences(dev_priv); |
---|
| 1596 | + intel_gt_runtime_resume(&dev_priv->gt); |
---|
2729 | 1597 | |
---|
2730 | 1598 | /* |
---|
2731 | 1599 | * On VLV/CHV display interrupts are part of the display |
---|
.. | .. |
---|
2737 | 1605 | |
---|
2738 | 1606 | intel_enable_ipc(dev_priv); |
---|
2739 | 1607 | |
---|
2740 | | - enable_rpm_wakeref_asserts(dev_priv); |
---|
| 1608 | + enable_rpm_wakeref_asserts(rpm); |
---|
2741 | 1609 | |
---|
2742 | 1610 | if (ret) |
---|
2743 | | - DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); |
---|
| 1611 | + drm_err(&dev_priv->drm, |
---|
| 1612 | + "Runtime resume failed, disabling it (%d)\n", ret); |
---|
2744 | 1613 | else |
---|
2745 | | - DRM_DEBUG_KMS("Device resumed\n"); |
---|
| 1614 | + drm_dbg_kms(&dev_priv->drm, "Device resumed\n"); |
---|
2746 | 1615 | |
---|
2747 | 1616 | return ret; |
---|
2748 | 1617 | } |
---|
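Both callbacks also move from the global DRM_DEBUG_KMS()/DRM_ERROR() macros to the drm_device-aware drm_dbg_kms()/drm_err() helpers, which tag each message with the device and keep multi-GPU logs readable. The conversion in miniature (example_log_resume() is a placeholder wrapper, included only so the snippet is self-contained):

```c
static void example_log_resume(struct drm_i915_private *dev_priv)
{
	/* Before: global macro, no device context in the message. */
	DRM_DEBUG_KMS("Resuming device\n");

	/* After: device-scoped helper, message tagged with the drm_device. */
	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
}
```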
.. | .. |
---|
2787 | 1656 | .runtime_resume = intel_runtime_resume, |
---|
2788 | 1657 | }; |
---|
2789 | 1658 | |
---|
2790 | | -static const struct vm_operations_struct i915_gem_vm_ops = { |
---|
2791 | | - .fault = i915_gem_fault, |
---|
2792 | | - .open = drm_gem_vm_open, |
---|
2793 | | - .close = drm_gem_vm_close, |
---|
2794 | | -}; |
---|
2795 | | - |
---|
2796 | 1659 | static const struct file_operations i915_driver_fops = { |
---|
2797 | 1660 | .owner = THIS_MODULE, |
---|
2798 | 1661 | .open = drm_open, |
---|
2799 | | - .release = drm_release, |
---|
| 1662 | + .release = drm_release_noglobal, |
---|
2800 | 1663 | .unlocked_ioctl = drm_ioctl, |
---|
2801 | | - .mmap = drm_gem_mmap, |
---|
| 1664 | + .mmap = i915_gem_mmap, |
---|
2802 | 1665 | .poll = drm_poll, |
---|
2803 | 1666 | .read = drm_read, |
---|
2804 | | - .compat_ioctl = i915_compat_ioctl, |
---|
| 1667 | + .compat_ioctl = i915_ioc32_compat_ioctl, |
---|
2805 | 1668 | .llseek = noop_llseek, |
---|
2806 | 1669 | }; |
---|
2807 | 1670 | |
---|
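i915 open-codes its file_operations because it needs its own mmap path (i915_gem_mmap()), its own 32-bit ioctl translation (i915_ioc32_compat_ioctl()) and the drm_release_noglobal() release hook. For comparison, a GEM driver without those requirements can usually generate an equivalent table with the DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h>; a hedged one-liner for that generic case, not something i915 could use here:

```c
/* Typically expands to drm_open/drm_release/drm_ioctl/drm_compat_ioctl/
 * drm_gem_mmap/drm_poll/drm_read/noop_llseek for a generic GEM driver. */
DEFINE_DRM_GEM_FOPS(example_gem_fops);
```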
.. | .. |
---|
2819 | 1682 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), |
---|
2820 | 1683 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), |
---|
2821 | 1684 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), |
---|
2822 | | - DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), |
---|
| 1685 | + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW), |
---|
2823 | 1686 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
---|
2824 | 1687 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), |
---|
2825 | 1688 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), |
---|
.. | .. |
---|
2832 | 1695 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
---|
2833 | 1696 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
---|
2834 | 1697 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH), |
---|
2835 | | - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), |
---|
| 1698 | + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW), |
---|
2836 | 1699 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
---|
2837 | 1700 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), |
---|
2838 | | - DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), |
---|
| 1701 | + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW), |
---|
2839 | 1702 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), |
---|
2840 | 1703 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), |
---|
2841 | | - DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), |
---|
| 1704 | + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW), |
---|
2842 | 1705 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
---|
2843 | 1706 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
---|
2844 | 1707 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), |
---|
2845 | 1708 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), |
---|
2846 | 1709 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), |
---|
2847 | 1710 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), |
---|
2848 | | - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), |
---|
| 1711 | + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW), |
---|
2849 | 1712 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), |
---|
2850 | 1713 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), |
---|
2851 | 1714 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), |
---|
.. | .. |
---|
2857 | 1720 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), |
---|
2858 | 1721 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), |
---|
2859 | 1722 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), |
---|
2860 | | - DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), |
---|
2861 | | - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), |
---|
| 1723 | + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW), |
---|
| 1724 | + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), |
---|
2862 | 1725 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), |
---|
2863 | 1726 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), |
---|
2864 | 1727 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), |
---|
.. | .. |
---|
2866 | 1729 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), |
---|
2867 | 1730 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), |
---|
2868 | 1731 | DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), |
---|
2869 | | - DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
---|
2870 | | - DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
---|
2871 | | - DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), |
---|
| 1732 | + DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW), |
---|
| 1733 | + DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW), |
---|
| 1734 | + DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW), |
---|
| 1735 | + DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW), |
---|
| 1736 | + DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), |
---|
2872 | 1737 | }; |
---|
2873 | 1738 | |
---|
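Each table entry pairs a uAPI ioctl number with a handler using the standard DRM signature; the flag changes in this hunk drop DRM_AUTH from ioctls that are already DRM_RENDER_ALLOW, since anything reachable through a render node gains nothing from primary-node authentication. A hedged sketch of the handler shape (the ioctl and handler below are hypothetical, not part of the driver):

```c
/*
 * Hypothetical handler: the DRM core has already copied the user's
 * argument struct into *data before calling us.
 */
static int i915_example_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/* Validate the args in *data, do the work against @i915,
	 * and return 0 or a -errno value. */
	return data && i915 ? 0 : -EINVAL;
}

/* Registered with, e.g. (I915_EXAMPLE being a hypothetical number):
 * DRM_IOCTL_DEF_DRV(I915_EXAMPLE, i915_example_ioctl, DRM_RENDER_ALLOW),
 */
```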
2874 | 1739 | static struct drm_driver driver = { |
---|
.. | .. |
---|
2876 | 1741 | * deal with them for Intel hardware. |
---|
2877 | 1742 | */ |
---|
2878 | 1743 | .driver_features = |
---|
2879 | | - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | |
---|
2880 | | - DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ, |
---|
| 1744 | + DRIVER_GEM | |
---|
| 1745 | + DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ | |
---|
| 1746 | + DRIVER_SYNCOBJ_TIMELINE, |
---|
2881 | 1747 | .release = i915_driver_release, |
---|
2882 | 1748 | .open = i915_driver_open, |
---|
2883 | 1749 | .lastclose = i915_driver_lastclose, |
---|
.. | .. |
---|
2885 | 1751 | |
---|
2886 | 1752 | .gem_close_object = i915_gem_close_object, |
---|
2887 | 1753 | .gem_free_object_unlocked = i915_gem_free_object, |
---|
2888 | | - .gem_vm_ops = &i915_gem_vm_ops, |
---|
2889 | 1754 | |
---|
2890 | 1755 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
---|
2891 | 1756 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
---|
.. | .. |
---|
2893 | 1758 | .gem_prime_import = i915_gem_prime_import, |
---|
2894 | 1759 | |
---|
2895 | 1760 | .dumb_create = i915_gem_dumb_create, |
---|
2896 | | - .dumb_map_offset = i915_gem_mmap_gtt, |
---|
| 1761 | + .dumb_map_offset = i915_gem_dumb_mmap_offset, |
---|
| 1762 | + |
---|
2897 | 1763 | .ioctls = i915_ioctls, |
---|
2898 | 1764 | .num_ioctls = ARRAY_SIZE(i915_ioctls), |
---|
2899 | 1765 | .fops = &i915_driver_fops, |
---|
.. | .. |
---|
2904 | 1770 | .minor = DRIVER_MINOR, |
---|
2905 | 1771 | .patchlevel = DRIVER_PATCHLEVEL, |
---|
2906 | 1772 | }; |
---|
2907 | | - |
---|
2908 | | -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
---|
2909 | | -#include "selftests/mock_drm.c" |
---|
2910 | | -#endif |
---|