commit 2f7c68cb55ecb7331f2381deb497c27155f32faf (2024-01-03)
--- a/kernel/drivers/gpu/drm/i915/i915_drv.h
+++ b/kernel/drivers/gpu/drm/i915/i915_drv.h
@@ -45,226 +45,75 @@
 #include <linux/mm_types.h>
 #include <linux/perf_event.h>
 #include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>
+#include <linux/xarray.h>
 
-#include <drm/drmP.h>
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
 #include <drm/drm_auth.h>
 #include <drm/drm_cache.h>
+#include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_mei_hdcp_interface.h>
 
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
 
-#include "intel_bios.h"
+#include "display/intel_bios.h"
+#include "display/intel_display.h"
+#include "display/intel_display_power.h"
+#include "display/intel_dpll_mgr.h"
+#include "display/intel_dsb.h"
+#include "display/intel_frontbuffer.h"
+#include "display/intel_global_state.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_opregion.h"
+
+#include "gem/i915_gem_context_types.h"
+#include "gem/i915_gem_shrinker.h"
+#include "gem/i915_gem_stolen.h"
+
+#include "gt/intel_lrc.h"
+#include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
+#include "gt/intel_workarounds.h"
+#include "gt/uc/intel_uc.h"
+
 #include "intel_device_info.h"
-#include "intel_display.h"
-#include "intel_dpll_mgr.h"
-#include "intel_lrc.h"
-#include "intel_opregion.h"
-#include "intel_ringbuffer.h"
+#include "intel_pch.h"
+#include "intel_runtime_pm.h"
+#include "intel_memory_region.h"
 #include "intel_uncore.h"
+#include "intel_wakeref.h"
 #include "intel_wopcm.h"
-#include "intel_uc.h"
 
 #include "i915_gem.h"
-#include "i915_gem_context.h"
-#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
 #include "i915_gpu_error.h"
+#include "i915_perf_types.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
 #include "i915_vma.h"
+#include "i915_irq.h"
 
-#include "intel_gvt.h"
+#include "intel_region_lmem.h"
 
 /* General customization:
  */
 
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180719"
-#define DRIVER_TIMESTAMP 1532015279
+#define DRIVER_DATE "20200917"
+#define DRIVER_TIMESTAMP 1600375437
 
-/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
- * WARN_ON()) for hw state sanity checks to check for unexpected conditions
- * which may not necessarily be a user visible problem. This will either
- * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
- * enable distros and users to tailor their preferred amount of i915 abrt
- * spam.
- */
-#define I915_STATE_WARN(condition, format...) ({ \
-        int __ret_warn_on = !!(condition); \
-        if (unlikely(__ret_warn_on)) \
-                if (!WARN(i915_modparams.verbose_state_checks, format)) \
-                        DRM_ERROR(format); \
-        unlikely(__ret_warn_on); \
-})
-
-#define I915_STATE_WARN_ON(x) \
-        I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-bool __i915_inject_load_failure(const char *func, int line);
-#define i915_inject_load_failure() \
-        __i915_inject_load_failure(__func__, __LINE__)
-
-bool i915_error_injected(void);
-
-#else
-
-#define i915_inject_load_failure() false
-#define i915_error_injected() false
-
-#endif
-
-#define i915_load_error(i915, fmt, ...) \
-        __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
-                      fmt, ##__VA_ARGS__)
-
-typedef struct {
-        uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
-        uint_fixed_16_16_t fp; \
-        fp.val = UINT_MAX; \
-        fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
-        if (val.val == 0)
-                return true;
-        return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
-        uint_fixed_16_16_t fp;
-
-        WARN_ON(val > U16_MAX);
-
-        fp.val = val << 16;
-        return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
-        return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
-        return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
-                                             uint_fixed_16_16_t min2)
-{
-        uint_fixed_16_16_t min;
-
-        min.val = min(min1.val, min2.val);
-        return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
-                                             uint_fixed_16_16_t max2)
-{
-        uint_fixed_16_16_t max;
-
-        max.val = max(max1.val, max2.val);
-        return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
-        uint_fixed_16_16_t fp;
-        WARN_ON(val > U32_MAX);
-        fp.val = (uint32_t) val;
-        return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
-                                            uint_fixed_16_16_t d)
-{
-        return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
-                                                uint_fixed_16_16_t mul)
-{
-        uint64_t intermediate_val;
-
-        intermediate_val = (uint64_t) val * mul.val;
-        intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
-        WARN_ON(intermediate_val > U32_MAX);
-        return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
-                                             uint_fixed_16_16_t mul)
-{
-        uint64_t intermediate_val;
-
-        intermediate_val = (uint64_t) val.val * mul.val;
-        intermediate_val = intermediate_val >> 16;
-        return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
-        uint64_t interm_val;
-
-        interm_val = (uint64_t)val << 16;
-        interm_val = DIV_ROUND_UP_ULL(interm_val, d);
-        return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
-                                                uint_fixed_16_16_t d)
-{
-        uint64_t interm_val;
-
-        interm_val = (uint64_t)val << 16;
-        interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
-        WARN_ON(interm_val > U32_MAX);
-        return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
-                                                 uint_fixed_16_16_t mul)
-{
-        uint64_t intermediate_val;
-
-        intermediate_val = (uint64_t) val * mul.val;
-        return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
-                                             uint_fixed_16_16_t add2)
-{
-        uint64_t interm_sum;
-
-        interm_sum = (uint64_t) add1.val + add2.val;
-        return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
-                                                 uint32_t add2)
-{
-        uint64_t interm_sum;
-        uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
-        interm_sum = (uint64_t) add1.val + interm_add2.val;
-        return clamp_u64_to_fixed16(interm_sum);
-}
+struct drm_i915_gem_object;
 
 enum hpd_pin {
        HPD_NONE = 0,
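A note on the uint_fixed_16_16_t helpers removed above: in this era of the driver they appear to have moved into a dedicated header (i915_fixed.h) rather than being deleted outright. The convention is worth keeping in mind when reading watermark code: values carry 16 integer and 16 fractional bits, products are widened to 64 bits before shifting back down, and divisions round up. A minimal user-space sketch of the same 16.16 arithmetic (not the driver's code):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16; /* 16.16, mirrors uint_fixed_16_16_t */

/* Caller must keep v <= UINT16_MAX, as the kernel's WARN_ON enforces. */
static fixed16 u32_to_fixed16(uint32_t v) { return (fixed16){ v << 16 }; }

static uint32_t fixed16_to_u32_round_up(fixed16 f)
{
        return (f.val + 0xffff) >> 16;
}

/* Multiply in 64 bits, then drop the extra 16 fractional bits. */
static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
        uint64_t tmp = (uint64_t)a.val * b.val;

        assert((tmp >> 16) <= UINT32_MAX); /* the kernel clamps via WARN_ON */
        return (fixed16){ (uint32_t)(tmp >> 16) };
}

/* (v << 16) / d, rounded up - same shape as div_fixed16() above. */
static fixed16 div_fixed16(uint32_t v, uint32_t d)
{
        uint64_t tmp = ((uint64_t)v << 16) + d - 1;

        return (fixed16){ (uint32_t)(tmp / d) };
}

int main(void)
{
        fixed16 ratio = div_fixed16(3, 2); /* 1.5 in 16.16 */
        fixed16 scaled = mul_fixed16(ratio, u32_to_fixed16(10));

        printf("%u\n", fixed16_to_u32_round_up(scaled)); /* prints 15 */
        return 0;
}
```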
@@ -277,17 +126,26 @@
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
-       HPD_PORT_F,
+       HPD_PORT_TC1,
+       HPD_PORT_TC2,
+       HPD_PORT_TC3,
+       HPD_PORT_TC4,
+       HPD_PORT_TC5,
+       HPD_PORT_TC6,
+
        HPD_NUM_PINS
 };
 
 #define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
 
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
 
 struct i915_hotplug {
-       struct work_struct hotplug_work;
+       struct delayed_work hotplug_work;
+
+       const u32 *hpd, *pch_hpd;
 
        struct {
                unsigned long last_jiffies;
@@ -299,6 +157,7 @@
                } state;
        } stats[HPD_NUM_PINS];
        u32 event_bits;
+       u32 retry_bits;
        struct delayed_work reenable_work;
 
        u32 long_port_mask;
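The stats[] bookkeeping above (last_jiffies, a per-pin count, and a state) drives HPD storm detection, with hpd_storm_threshold now distinguishing long and short IRQs per the new comment. A simplified sketch of that counting scheme, assuming roughly what intel_hpd_irq_storm_detect() does; the helper name and the one-second window are illustrative, not copied from the driver:

```c
/*
 * Sketch: count IRQs per pin inside a quiet-time window; crossing the
 * threshold means the pin is "storming" and should fall back to polling.
 */
static bool example_hpd_storm_detect(struct i915_hotplug *hpd, enum hpd_pin pin)
{
        unsigned long window = msecs_to_jiffies(1000); /* illustrative */

        if (time_after(jiffies, hpd->stats[pin].last_jiffies + window)) {
                /* quiet long enough: restart the window and the count */
                hpd->stats[pin].last_jiffies = jiffies;
                hpd->stats[pin].count = 0;
                return false;
        }

        return ++hpd->stats[pin].count > hpd->hpd_storm_threshold;
}
```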
@@ -309,6 +168,8 @@
        bool poll_enabled;
 
        unsigned int hpd_storm_threshold;
+       /* Whether or not to count short HPD IRQs in HPD storms */
+       u8 hpd_short_storm_enabled;
 
        /*
         * if we get a HPD irq from DP and a HPD irq from non-DP
@@ -333,23 +194,14 @@
 
 struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;
-       struct drm_file *file;
 
-       struct {
-               spinlock_t lock;
-               struct list_head request_list;
-/* 20ms is a fairly arbitrary limit (greater than the average frame time)
- * chosen to prevent the CPU getting more than a frame ahead of the GPU
- * (when using lax throttling for the frontbuffer). We also use it to
- * offer free GPU waitboosts for severely congested workloads.
- */
-#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
-       } mm;
-       struct idr context_idr;
+       union {
+               struct drm_file *file;
+               struct rcu_head rcu;
+       };
 
-       struct intel_rps_client {
-               atomic_t boosts;
-       } rps_client;
+       struct xarray context_xa;
+       struct xarray vm_xa;
 
        unsigned int bsd_engine;
 
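The switch from struct idr context_idr to struct xarray context_xa/vm_xa moves the per-file lookup tables onto the modern XArray API. A hedged sketch of the generic xarray usage pattern this implies (standard xa_* calls, not the driver's actual helpers; assumes the array was set up with xa_init_flags(&xa, XA_FLAGS_ALLOC)):

```c
#include <linux/xarray.h>

/* Illustrative only: allocate an id for a context and look it up again. */
static int example_store_ctx(struct drm_i915_file_private *fpriv, void *ctx)
{
        u32 id;
        int err;

        /* xa_alloc() picks a free index and stores ctx at it. */
        err = xa_alloc(&fpriv->context_xa, &id, ctx, xa_limit_32b, GFP_KERNEL);
        if (err)
                return err;

        /* Later, e.g. from an ioctl handler: constant-time lookup by id. */
        if (xa_load(&fpriv->context_xa, id) != ctx)
                return -EINVAL;

        return 0;
}
```

Unlike the old idr, the xarray embeds its own lock, which is one reason the surrounding mm.lock/request_list plumbing could be dropped from this struct.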
@@ -399,33 +251,35 @@
 struct intel_connector;
 struct intel_encoder;
 struct intel_atomic_state;
-struct intel_crtc_state;
+struct intel_cdclk_config;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
 struct intel_initial_plane_config;
 struct intel_crtc;
 struct intel_limit;
 struct dpll;
-struct intel_cdclk_state;
 
 struct drm_i915_display_funcs {
        void (*get_cdclk)(struct drm_i915_private *dev_priv,
-                         struct intel_cdclk_state *cdclk_state);
+                         struct intel_cdclk_config *cdclk_config);
        void (*set_cdclk)(struct drm_i915_private *dev_priv,
-                         const struct intel_cdclk_state *cdclk_state);
+                         const struct intel_cdclk_config *cdclk_config,
+                         enum pipe pipe);
+       int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
-       int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
-       int (*compute_intermediate_wm)(struct drm_device *dev,
-                                      struct intel_crtc *intel_crtc,
-                                      struct intel_crtc_state *newstate);
+       int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
+       int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
        void (*initial_watermarks)(struct intel_atomic_state *state,
-                                  struct intel_crtc_state *cstate);
+                                  struct intel_crtc *crtc);
        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
-                                        struct intel_crtc_state *cstate);
+                                        struct intel_crtc *crtc);
        void (*optimize_watermarks)(struct intel_atomic_state *state,
-                                   struct intel_crtc_state *cstate);
-       int (*compute_global_watermarks)(struct drm_atomic_state *state);
+                                   struct intel_crtc *crtc);
+       int (*compute_global_watermarks)(struct intel_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
-       int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+       int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
+       u8 (*calc_voltage_level)(int cdclk);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
@@ -434,11 +288,12 @@
                                struct intel_initial_plane_config *);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
-       void (*crtc_enable)(struct intel_crtc_state *pipe_config,
-                           struct drm_atomic_state *old_state);
-       void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
-                            struct drm_atomic_state *old_state);
-       void (*update_crtcs)(struct drm_atomic_state *state);
+       void (*crtc_enable)(struct intel_atomic_state *state,
+                           struct intel_crtc *crtc);
+       void (*crtc_disable)(struct intel_atomic_state *state,
+                            struct intel_crtc *crtc);
+       void (*commit_modeset_enables)(struct intel_atomic_state *state);
+       void (*commit_modeset_disables)(struct intel_atomic_state *state);
        void (*audio_codec_enable)(struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state);
@@ -455,25 +310,39 @@
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
 
-       void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
-       void (*load_luts)(struct drm_crtc_state *crtc_state);
+       int (*color_check)(struct intel_crtc_state *crtc_state);
+       /*
+        * Program double buffered color management registers during
+        * vblank evasion. The registers should then latch during the
+        * next vblank start, alongside any other double buffered registers
+        * involved with the same commit.
+        */
+       void (*color_commit)(const struct intel_crtc_state *crtc_state);
+       /*
+        * Load LUTs (and other single buffered color management
+        * registers). Will (hopefully) be called during the vblank
+        * following the latching of any double buffered registers
+        * involved with the same commit.
+        */
+       void (*load_luts)(const struct intel_crtc_state *crtc_state);
+       void (*read_luts)(struct intel_crtc_state *crtc_state);
 };
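The comments on color_commit and load_luts encode an ordering contract around vblank evasion: double buffered registers are written inside the evasion critical section so they latch at the next vblank start, while single buffered LUT registers are written only after that latch point. A hedged sketch of how a commit might sequence the two hooks (an illustrative wrapper, not the driver's commit code):

```c
/*
 * Illustrative ordering only. Assumes i915->display holds the
 * drm_i915_display_funcs declared above.
 */
static void example_commit_color(struct drm_i915_private *i915,
                                 const struct intel_crtc_state *crtc_state)
{
        if (i915->display.color_commit)
                i915->display.color_commit(crtc_state); /* during evasion */

        /* ... vblank start: double buffered registers latch here ... */

        if (i915->display.load_luts)
                i915->display.load_luts(crtc_state); /* after the latch */
}
```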
-
-#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
-#define CSR_VERSION_MAJOR(version) ((version) >> 16)
-#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
 
 struct intel_csr {
        struct work_struct work;
        const char *fw_path;
-       uint32_t *dmc_payload;
-       uint32_t dmc_fw_size;
-       uint32_t version;
-       uint32_t mmio_count;
-       i915_reg_t mmioaddr[8];
-       uint32_t mmiodata[8];
-       uint32_t dc_state;
-       uint32_t allowed_dc_mask;
+       u32 required_version;
+       u32 max_fw_size; /* bytes */
+       u32 *dmc_payload;
+       u32 dmc_fw_size; /* dwords */
+       u32 version;
+       u32 mmio_count;
+       i915_reg_t mmioaddr[20];
+       u32 mmiodata[20];
+       u32 dc_state;
+       u32 target_dc_state;
+       u32 allowed_dc_mask;
+       intel_wakeref_t wakeref;
 };
 
 enum i915_cache_level {
@@ -488,14 +357,6 @@
 
 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 
-enum fb_op_origin {
-       ORIGIN_GTT,
-       ORIGIN_CPU,
-       ORIGIN_CS,
-       ORIGIN_FLIP,
-       ORIGIN_DIRTYFB,
-};
-
 struct intel_fbc {
        /* This is always the inner lock when overlapping with struct_mutex and
         * it's the outer lock when overlapping with stolen_lock. */
@@ -503,7 +364,6 @@
        unsigned threshold;
        unsigned int possible_framebuffer_bits;
        unsigned int busy_bits;
-       unsigned int visible_pipes_mask;
        struct intel_crtc *crtc;
 
        struct drm_mm_node compressed_fb;
@@ -511,8 +371,8 @@
 
        bool false_color;
 
-       bool enabled;
        bool active;
+       bool activated;
        bool flip_pending;
 
        bool underrun_detected;
@@ -524,12 +384,9 @@
         * these problems.
         */
        struct intel_fbc_state_cache {
-               struct i915_vma *vma;
-               unsigned long flags;
-
                struct {
                        unsigned int mode_flags;
-                       uint32_t hsw_bdw_pixel_rate;
+                       u32 hsw_bdw_pixel_rate;
                } crtc;
 
                struct {
@@ -546,13 +403,19 @@
                        int adjusted_x;
                        int adjusted_y;
 
-                       int y;
+                       u16 pixel_blend_mode;
                } plane;
 
                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
+                       u64 modifier;
                } fb;
+
+               unsigned int fence_y_offset;
+               u16 gen9_wa_cfb_stride;
+               u16 interval;
+               s8 fence_id;
        } state_cache;
 
        /*
@@ -563,22 +426,23 @@
         * are supposed to read from it in order to program the registers.
         */
        struct intel_fbc_reg_params {
-               struct i915_vma *vma;
-               unsigned long flags;
-
                struct {
                        enum pipe pipe;
                        enum i9xx_plane_id i9xx_plane;
-                       unsigned int fence_y_offset;
                } crtc;
 
                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
+                       u64 modifier;
                } fb;
 
                int cfb_size;
-               unsigned int gen9_wa_cfb_stride;
+               unsigned int fence_y_offset;
+               u16 gen9_wa_cfb_stride;
+               u16 interval;
+               s8 fence_id;
+               bool plane_visible;
        } params;
 
        const char *no_fbc_reason;
@@ -613,37 +477,39 @@
 
 struct i915_psr {
        struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_IRQ 0x10
+
+       u32 debug;
        bool sink_support;
-       struct intel_dp *enabled;
+       bool enabled;
+       struct intel_dp *dp;
+       enum pipe pipe;
+       enum transcoder transcoder;
        bool active;
        struct work_struct work;
        unsigned busy_frontbuffer_bits;
        bool sink_psr2_support;
        bool link_standby;
        bool colorimetry_support;
-       bool alpm;
        bool psr2_enabled;
+       bool psr2_sel_fetch_enabled;
        u8 sink_sync_latency;
-       bool debug;
        ktime_t last_entry_attempt;
        ktime_t last_exit;
-};
-
-enum intel_pch {
-       PCH_NONE = 0, /* No PCH present */
-       PCH_IBX, /* Ibexpeak PCH */
-       PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
-       PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
-       PCH_SPT, /* Sunrisepoint PCH */
-       PCH_KBP, /* Kaby Lake PCH */
-       PCH_CNP, /* Cannon Lake PCH */
-       PCH_ICP, /* Ice Lake PCH */
-       PCH_NOP, /* PCH without south display */
-};
-
-enum intel_sbi_destination {
-       SBI_ICLK,
-       SBI_MPHY,
+       bool sink_not_reliable;
+       bool irq_aux_error;
+       u16 su_x_granularity;
+       bool dc3co_enabled;
+       u32 dc3co_exit_delay;
+       struct delayed_work dc3co_work;
+       bool force_mode_changed;
+       struct drm_dp_vsc_sdp vsc;
 };
 
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -668,251 +534,13 @@
 
 struct i915_suspend_saved_registers {
        u32 saveDSPARB;
-       u32 saveFBC_CONTROL;
-       u32 saveCACHE_MODE_0;
-       u32 saveMI_ARB_STATE;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
-       uint64_t saveFENCE[I915_MAX_NUM_FENCES];
-       u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
 };
 
-struct vlv_s0ix_state {
-       /* GAM */
-       u32 wr_watermark;
-       u32 gfx_prio_ctrl;
-       u32 arb_mode;
-       u32 gfx_pend_tlb0;
-       u32 gfx_pend_tlb1;
-       u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
-       u32 media_max_req_count;
-       u32 gfx_max_req_count;
-       u32 render_hwsp;
-       u32 ecochk;
-       u32 bsd_hwsp;
-       u32 blt_hwsp;
-       u32 tlb_rd_addr;
-
-       /* MBC */
-       u32 g3dctl;
-       u32 gsckgctl;
-       u32 mbctl;
-
-       /* GCP */
-       u32 ucgctl1;
-       u32 ucgctl3;
-       u32 rcgctl1;
-       u32 rcgctl2;
-       u32 rstctl;
-       u32 misccpctl;
-
-       /* GPM */
-       u32 gfxpause;
-       u32 rpdeuhwtc;
-       u32 rpdeuc;
-       u32 ecobus;
-       u32 pwrdwnupctl;
-       u32 rp_down_timeout;
-       u32 rp_deucsw;
-       u32 rcubmabdtmr;
-       u32 rcedata;
-       u32 spare2gh;
-
-       /* Display 1 CZ domain */
-       u32 gt_imr;
-       u32 gt_ier;
-       u32 pm_imr;
-       u32 pm_ier;
-       u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
-       /* GT SA CZ domain */
-       u32 tilectl;
-       u32 gt_fifoctl;
-       u32 gtlc_wake_ctrl;
-       u32 gtlc_survive;
-       u32 pmwgicz;
-
-       /* Display 2 CZ domain */
-       u32 gu_ctl0;
-       u32 gu_ctl1;
-       u32 pcbr;
-       u32 clock_gate_dis2;
-};
-
-struct intel_rps_ei {
-       ktime_t ktime;
-       u32 render_c0;
-       u32 media_c0;
-};
-
-struct intel_rps {
-       /*
-        * work, interrupts_enabled and pm_iir are protected by
-        * dev_priv->irq_lock
-        */
-       struct work_struct work;
-       bool interrupts_enabled;
-       u32 pm_iir;
-
-       /* PM interrupt bits that should never be masked */
-       u32 pm_intrmsk_mbz;
-
-       /* Frequencies are stored in potentially platform dependent multiples.
-        * In other words, *_freq needs to be multiplied by X to be interesting.
-        * Soft limits are those which are used for the dynamic reclocking done
-        * by the driver (raise frequencies under heavy loads, and lower for
-        * lighter loads). Hard limits are those imposed by the hardware.
-        *
-        * A distinction is made for overclocking, which is never enabled by
-        * default, and is considered to be above the hard limit if it's
-        * possible at all.
-        */
-       u8 cur_freq; /* Current frequency (cached, may not == HW) */
-       u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
-       u8 max_freq_softlimit; /* Max frequency permitted by the driver */
-       u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
-       u8 min_freq; /* AKA RPn. Minimum frequency */
-       u8 boost_freq; /* Frequency to request when wait boosting */
-       u8 idle_freq; /* Frequency to request when we are idle */
-       u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
-       u8 rp1_freq; /* "less than" RP0 power/frequency */
-       u8 rp0_freq; /* Non-overclocked max frequency. */
-       u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
-
-       int last_adj;
-
-       struct {
-               struct mutex mutex;
-
-               enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
-               unsigned int interactive;
-
-               u8 up_threshold; /* Current %busy required to upclock */
-               u8 down_threshold; /* Current %busy required to downclock */
-       } power;
-
-       bool enabled;
-       atomic_t num_waiters;
-       atomic_t boosts;
-
-       /* manual wa residency calculations */
-       struct intel_rps_ei ei;
-};
-
-struct intel_rc6 {
-       bool enabled;
-       bool ctx_corrupted;
-       u64 prev_hw_residency[4];
-       u64 cur_residency[4];
-};
-
-struct intel_llc_pstate {
-       bool enabled;
-};
-
-struct intel_gen6_power_mgmt {
-       struct intel_rps rps;
-       struct intel_rc6 rc6;
-       struct intel_llc_pstate llc_pstate;
-};
-
-/* defined in intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
-       u8 cur_delay;
-       u8 min_delay;
-       u8 max_delay;
-       u8 fmax;
-       u8 fstart;
-
-       u64 last_count1;
-       unsigned long last_time1;
-       unsigned long chipset_power;
-       u64 last_count2;
-       u64 last_time2;
-       unsigned long gfx_power;
-       u8 corr;
-
-       int c_m;
-       int r_t;
-};
-
-struct drm_i915_private;
-struct i915_power_well;
-
-struct i915_power_well_ops {
-       /*
-        * Synchronize the well's hw state to match the current sw state, for
-        * example enable/disable it based on the current refcount. Called
-        * during driver init and resume time, possibly after first calling
-        * the enable/disable handlers.
-        */
-       void (*sync_hw)(struct drm_i915_private *dev_priv,
-                       struct i915_power_well *power_well);
-       /*
-        * Enable the well and resources that depend on it (for example
-        * interrupts located on the well). Called after the 0->1 refcount
-        * transition.
-        */
-       void (*enable)(struct drm_i915_private *dev_priv,
-                      struct i915_power_well *power_well);
-       /*
-        * Disable the well and resources that depend on it. Called after
-        * the 1->0 refcount transition.
-        */
-       void (*disable)(struct drm_i915_private *dev_priv,
-                       struct i915_power_well *power_well);
-       /* Returns the hw enabled state. */
-       bool (*is_enabled)(struct drm_i915_private *dev_priv,
-                          struct i915_power_well *power_well);
-};
-
-/* Power well structure for haswell */
-struct i915_power_well {
-       const char *name;
-       bool always_on;
-       /* power well enable/disable usage count */
-       int count;
-       /* cached hw enabled state */
-       bool hw_enabled;
-       u64 domains;
-       /* unique identifier for this power well */
-       enum i915_power_well_id id;
-       /*
-        * Arbitrary data associated with this power well. Platform and power
-        * well specific.
-        */
-       union {
-               struct {
-                       enum dpio_phy phy;
-               } bxt;
-               struct {
-                       /* Mask of pipes whose IRQ logic is backed by the pw */
-                       u8 irq_pipe_mask;
-                       /* The pw is backing the VGA functionality */
-                       bool has_vga:1;
-                       bool has_fuses:1;
-               } hsw;
-       };
-       const struct i915_power_well_ops *ops;
-};
-
-struct i915_power_domains {
-       /*
-        * Power wells needed for initialization at driver init and suspend
-        * time are on. They are kept on until after the first modeset.
-        */
-       bool init_power_on;
-       bool initializing;
-       int power_well_count;
-
-       struct mutex lock;
-       int domain_use_count[POWER_DOMAIN_NUM];
-       struct i915_power_well *power_wells;
-};
+struct vlv_s0ix_state;
 
 #define MAX_L3_SLICES 2
 struct intel_l3_parity {
@@ -931,27 +559,21 @@
        /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
        spinlock_t obj_lock;
 
-       /** List of all objects in gtt_space. Used to restore gtt
-        * mappings on resume */
-       struct list_head bound_list;
        /**
-        * List of objects which are not bound to the GTT (thus
-        * are idle and not used by the GPU). These objects may or may
-        * not actually have any pages attached.
+        * List of objects which are purgeable.
         */
-       struct list_head unbound_list;
+       struct list_head purge_list;
 
-       /** List of all objects in gtt_space, currently mmaped by userspace.
-        * All objects within this list must also be on bound_list.
+       /**
+        * List of objects which have allocated pages and are shrinkable.
         */
-       struct list_head userfault_list;
+       struct list_head shrink_list;
 
        /**
         * List of objects which are pending destruction.
         */
        struct llist_head free_list;
        struct work_struct free_work;
-       spinlock_t free_lock;
        /**
         * Count of objects pending destructions. Used to skip needlessly
         * waiting on an RCU barrier if no objects are waiting to be freed.
@@ -959,24 +581,15 @@
        atomic_t free_count;
 
        /**
-        * Small stash of WC pages
-        */
-       struct pagestash wc_stash;
-
-       /**
         * tmpfs instance used for shmem backed objects
         */
        struct vfsmount *gemfs;
 
-       /** PPGTT used for aliasing the PPGTT with the GTT */
-       struct i915_hw_ppgtt *aliasing_ppgtt;
+       struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
 
        struct notifier_block oom_notifier;
        struct notifier_block vmap_notifier;
        struct shrinker shrinker;
-
-       /** LRU list of objects with fence regs on them. */
-       struct list_head fence_list;
 
        /**
         * Workqueue to fault in userptr pages, flushed by the execbuf
@@ -985,64 +598,47 @@
         */
        struct workqueue_struct *userptr_wq;
 
-       u64 unordered_timeline;
-
-       /* the indicator for dispatch video commands on two BSD rings */
-       atomic_t bsd_engine_dispatch_index;
-
-       /** Bit 6 swizzling required for X tiling */
-       uint32_t bit_6_swizzle_x;
-       /** Bit 6 swizzling required for Y tiling */
-       uint32_t bit_6_swizzle_y;
-
-       /* accounting, useful for userland debugging */
-       spinlock_t object_stat_lock;
-       u64 object_memory;
-       u32 object_count;
+       /* shrinker accounting, also useful for userland debugging */
+       u64 shrink_memory;
+       u32 shrink_count;
 };
 
 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
 
-#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
-#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+                                         u64 context);
 
-#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
-#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+       return i915_fence_context_timeout(i915, U64_MAX);
+}
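i915_fence_context_timeout()/i915_fence_timeout() replace the fixed I915_FENCE_TIMEOUT above with a per-context value (U64_MAX selecting the driver default). A hedged usage sketch of how a caller might bound a fence wait with it; the helper is hypothetical, and the wait API is the standard dma_fence one:

```c
/* Illustrative only: bound a wait on some dma_fence by the driver default. */
static int example_wait(struct drm_i915_private *i915, struct dma_fence *fence)
{
        long ret;

        ret = dma_fence_wait_timeout(fence, true, i915_fence_timeout(i915));
        if (ret < 0)
                return ret;              /* e.g. -ERESTARTSYS */

        return ret ? 0 : -ETIME;         /* 0 jiffies left == timed out */
}
```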
 
-#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
-
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B 0x05
-#define DDC_PIN_C 0x04
-#define DDC_PIN_D 0x06
+/* Amount of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
 
 struct ddi_vbt_port_info {
+       /* Non-NULL if port present. */
+       const struct child_device_config *child;
+
        int max_tmds_clock;
 
-       /*
-        * This is an index in the HDMI/DVI DDI buffer translation table.
-        * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
-        * populate this field.
-        */
-#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
-       uint8_t hdmi_level_shift;
+       /* This is an index in the HDMI/DVI DDI buffer translation table. */
+       u8 hdmi_level_shift;
+       u8 hdmi_level_shift_set:1;
 
-       uint8_t supports_dvi:1;
-       uint8_t supports_hdmi:1;
-       uint8_t supports_dp:1;
-       uint8_t supports_edp:1;
+       u8 supports_dvi:1;
+       u8 supports_hdmi:1;
+       u8 supports_dp:1;
+       u8 supports_edp:1;
+       u8 supports_typec_usb:1;
+       u8 supports_tbt:1;
 
-       uint8_t alternate_aux_channel;
-       uint8_t alternate_ddc_pin;
+       u8 alternate_aux_channel;
+       u8 alternate_ddc_pin;
 
-       uint8_t dp_boost_level;
-       uint8_t hdmi_boost_level;
+       u8 dp_boost_level;
+       u8 hdmi_boost_level;
        int dp_max_link_rate; /* 0 for not limited by VBT */
 };
 
@@ -1068,6 +664,7 @@
        unsigned int panel_type:4;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+       enum drm_panel_orientation orientation;
 
        enum drrs_support_type drrs_type;
 
@@ -1080,6 +677,7 @@
                bool initialized;
                int bpp;
                struct edp_power_seq pps;
+               bool hobl;
        } edp;
 
        struct {
@@ -1090,6 +688,7 @@
                enum psr_lines_to_wait lines_to_wait;
                int tp1_wakeup_time_us;
                int tp2_tp3_wakeup_time_us;
+               int psr2_tp2_tp3_wakeup_time_us;
        } psr;
 
        struct {
@@ -1113,12 +712,12 @@
                u8 *data;
                const u8 *sequence[MIPI_SEQ_MAX];
                u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+               enum drm_panel_orientation orientation;
        } dsi;
 
        int crt_ddc_pin;
 
-       int child_dev_num;
-       struct child_device_config *child_dev;
+       struct list_head display_devices;
 
        struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
        struct sdvo_device_mapping sdvo_mappings[2];
@@ -1129,43 +728,34 @@
        INTEL_DDB_PART_5_6, /* IVB+ */
 };
 
-struct intel_wm_level {
-       bool enable;
-       uint32_t pri_val;
-       uint32_t spr_val;
-       uint32_t cur_val;
-       uint32_t fbc_val;
-};
-
 struct ilk_wm_values {
-       uint32_t wm_pipe[3];
-       uint32_t wm_lp[3];
-       uint32_t wm_lp_spr[3];
-       uint32_t wm_linetime[3];
+       u32 wm_pipe[3];
+       u32 wm_lp[3];
+       u32 wm_lp_spr[3];
        bool enable_fbc_wm;
        enum intel_ddb_partitioning partitioning;
 };
 
 struct g4x_pipe_wm {
-       uint16_t plane[I915_MAX_PLANES];
-       uint16_t fbc;
+       u16 plane[I915_MAX_PLANES];
+       u16 fbc;
 };
 
 struct g4x_sr_wm {
-       uint16_t plane;
-       uint16_t cursor;
-       uint16_t fbc;
+       u16 plane;
+       u16 cursor;
+       u16 fbc;
 };
 
 struct vlv_wm_ddl_values {
-       uint8_t plane[I915_MAX_PLANES];
+       u8 plane[I915_MAX_PLANES];
 };
 
 struct vlv_wm_values {
        struct g4x_pipe_wm pipe[3];
        struct g4x_sr_wm sr;
        struct vlv_wm_ddl_values ddl[3];
-       uint8_t level;
+       u8 level;
        bool cxsr;
 };
 
@@ -1179,10 +769,10 @@
 };
 
 struct skl_ddb_entry {
-       uint16_t start, end; /* in number of blocks, 'end' is exclusive */
+       u16 start, end; /* in number of blocks, 'end' is exclusive */
 };
 
-static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
 {
        return entry->end - entry->start;
 }
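skl_ddb_entry describes a half-open block range [start, end): the size is simply end - start, and two entries overlap exactly when each starts before the other ends (the neighbouring overlap helper is only visible here through its trailing "return false;", so this is the general convention rather than a copy of it). A tiny self-contained illustration:

```c
#include <stdbool.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; }; /* [start, end), as above */

static uint16_t entry_size(const struct ddb_entry *e)
{
        return e->end - e->start;
}

/* Half-open intervals overlap iff each begins before the other ends. */
static bool entries_overlap(const struct ddb_entry *a,
                            const struct ddb_entry *b)
{
        return a->start < b->end && b->start < a->end;
}

int main(void)
{
        struct ddb_entry a = { 0, 64 }, b = { 64, 128 };

        /* adjacent but not overlapping: sizes 64 and 64, no shared block */
        return (entry_size(&a) == 64 && !entries_overlap(&a, &b)) ? 0 : 1;
}
```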
@@ -1196,91 +786,6 @@
        return false;
 }
 
-struct skl_ddb_allocation {
-       /* packed/y */
-       struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
-       struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
-       u8 enabled_slices; /* GEN11 has configurable 2 slices */
-};
-
-struct skl_ddb_values {
-       unsigned dirty_pipes;
-       struct skl_ddb_allocation ddb;
-};
-
-struct skl_wm_level {
-       bool plane_en;
-       uint16_t plane_res_b;
-       uint8_t plane_res_l;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
-       bool x_tiled, y_tiled;
-       bool rc_surface;
-       bool is_planar;
-       uint32_t width;
-       uint8_t cpp;
-       uint32_t plane_pixel_rate;
-       uint32_t y_min_scanlines;
-       uint32_t plane_bytes_per_line;
-       uint_fixed_16_16_t plane_blocks_per_line;
-       uint_fixed_16_16_t y_tile_minimum;
-       uint32_t linetime_us;
-       uint32_t dbuf_block_size;
-};
-
-/*
- * This struct helps tracking the state needed for runtime PM, which puts the
- * device in PCI D3 state. Notice that when this happens, nothing on the
- * graphics device works, even register access, so we don't get interrupts nor
- * anything else.
- *
- * Every piece of our code that needs to actually touch the hardware needs to
- * either call intel_runtime_pm_get or call intel_display_power_get with the
- * appropriate power domain.
- *
- * Our driver uses the autosuspend delay feature, which means we'll only really
- * suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_runtime_pm_enable), but
- * it can be changed with the standard runtime PM files from sysfs.
- *
- * The irqs_disabled variable becomes true exactly after we disable the IRQs and
- * goes back to false exactly before we reenable the IRQs. We use this variable
- * to check if someone is trying to enable/disable IRQs while they're supposed
- * to be disabled. This shouldn't happen and we'll print some error messages in
- * case it happens.
- *
- * For more, read the Documentation/power/runtime_pm.txt.
- */
-struct i915_runtime_pm {
-       atomic_t wakeref_count;
-       bool suspended;
-       bool irqs_enabled;
-};
-
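The bookkeeping struct deleted here is superseded by struct intel_runtime_pm from intel_runtime_pm.h (now embedded in drm_i915_private further down), whose API hands back an intel_wakeref_t cookie so unbalanced gets and puts can be traced. A hedged sketch of the acquire/touch/release discipline the deleted comment describes; treat the signatures as approximate for this kernel version:

```c
/* Illustrative only: hardware access bracketed by a runtime PM wakeref. */
static u32 example_read_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        intel_wakeref_t wakeref;
        u32 val;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        val = intel_uncore_read(&i915->uncore, reg); /* device is awake here */
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        return val;
}
```

As the old comment notes, nothing on the device works in D3, register reads included, so every hardware touch needs such a bracket (or an intel_display_power_get() on the right power domain).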
-enum intel_pipe_crc_source {
-       INTEL_PIPE_CRC_SOURCE_NONE,
-       INTEL_PIPE_CRC_SOURCE_PLANE1,
-       INTEL_PIPE_CRC_SOURCE_PLANE2,
-       INTEL_PIPE_CRC_SOURCE_PF,
-       INTEL_PIPE_CRC_SOURCE_PIPE,
-       /* TV/DP on pre-gen5/vlv can't use the pipe source. */
-       INTEL_PIPE_CRC_SOURCE_TV,
-       INTEL_PIPE_CRC_SOURCE_DP_B,
-       INTEL_PIPE_CRC_SOURCE_DP_C,
-       INTEL_PIPE_CRC_SOURCE_DP_D,
-       INTEL_PIPE_CRC_SOURCE_AUTO,
-       INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR 128
-struct intel_pipe_crc {
-       spinlock_t lock;
-       int skipped;
-       enum intel_pipe_crc_source source;
-};
-
 struct i915_frontbuffer_tracking {
        spinlock_t lock;
 
@@ -1292,279 +797,32 @@
        unsigned flip_bits;
 };
 
-struct i915_wa_reg {
-       u32 addr;
-       u32 value;
-       /* bitmask representing WA bits */
-       u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
-       struct i915_wa_reg reg[I915_MAX_WA_REGS];
-       u32 count;
-};
-
 struct i915_virtual_gpu {
+       struct mutex lock; /* serialises sending of g2v_notify command pkts */
        bool active;
        u32 caps;
 };
 
-/* used in computing the new watermarks state */
-struct intel_wm_config {
-       unsigned int num_pipes_active;
-       bool sprites_enabled;
-       bool sprites_scaled;
-};
-
-struct i915_oa_format {
-       u32 format;
-       int size;
-};
-
-struct i915_oa_reg {
-       i915_reg_t addr;
-       u32 value;
-};
-
-struct i915_oa_config {
-       char uuid[UUID_STRING_LEN + 1];
-       int id;
-
-       const struct i915_oa_reg *mux_regs;
-       u32 mux_regs_len;
-       const struct i915_oa_reg *b_counter_regs;
-       u32 b_counter_regs_len;
-       const struct i915_oa_reg *flex_regs;
-       u32 flex_regs_len;
-
-       struct attribute_group sysfs_metric;
-       struct attribute *attrs[2];
-       struct device_attribute sysfs_metric_id;
-
-       atomic_t ref_count;
-};
-
-struct i915_perf_stream;
-
-/**
- * struct i915_perf_stream_ops - the OPs to support a specific stream type
- */
-struct i915_perf_stream_ops {
-       /**
-        * @enable: Enables the collection of HW samples, either in response to
-        * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
-        * without `I915_PERF_FLAG_DISABLED`.
-        */
-       void (*enable)(struct i915_perf_stream *stream);
-
-       /**
-        * @disable: Disables the collection of HW samples, either in response
-        * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
-        * the stream.
-        */
-       void (*disable)(struct i915_perf_stream *stream);
-
-       /**
-        * @poll_wait: Call poll_wait, passing a wait queue that will be woken
-        * once there is something ready to read() for the stream
-        */
-       void (*poll_wait)(struct i915_perf_stream *stream,
-                         struct file *file,
-                         poll_table *wait);
-
-       /**
-        * @wait_unlocked: For handling a blocking read, wait until there is
-        * something ready to read() for the stream. E.g. wait on the same
-        * wait queue that would be passed to poll_wait().
-        */
-       int (*wait_unlocked)(struct i915_perf_stream *stream);
-
-       /**
-        * @read: Copy buffered metrics as records to userspace
-        * **buf**: the userspace, destination buffer
-        * **count**: the number of bytes to copy, requested by userspace
-        * **offset**: zero at the start of the read, updated as the read
-        * proceeds, it represents how many bytes have been copied so far and
-        * the buffer offset for copying the next record.
-        *
-        * Copy as many buffered i915 perf samples and records for this stream
-        * to userspace as will fit in the given buffer.
-        *
-        * Only write complete records; returning -%ENOSPC if there isn't room
-        * for a complete record.
-        *
-        * Return any error condition that results in a short read such as
-        * -%ENOSPC or -%EFAULT, even though these may be squashed before
-        * returning to userspace.
-        */
-       int (*read)(struct i915_perf_stream *stream,
-                   char __user *buf,
-                   size_t count,
-                   size_t *offset);
-
-       /**
-        * @destroy: Cleanup any stream specific resources.
-        *
-        * The stream will always be disabled before this is called.
-        */
-       void (*destroy)(struct i915_perf_stream *stream);
-};
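The @read kerneldoc above specifies a records-only contract: copy whole records, advance *offset as you go, and report -ENOSPC only when not even one record fits. A self-contained user-space analogue of that loop (hypothetical fixed-size records standing in for OA reports, whose real sizes vary by format):

```c
#include <errno.h>
#include <stddef.h>
#include <string.h>

#define RECORD_SIZE 64 /* hypothetical record size */

/*
 * Copy as many complete RECORD_SIZE records from src as fit in count
 * bytes of buf, advancing *offset (zero at the start of the read).
 * Returns -ENOSPC if no whole record fits, mirroring the contract
 * documented for i915_perf_stream_ops.read().
 */
static int read_records(const unsigned char *src, size_t src_len,
                        char *buf, size_t count, size_t *offset)
{
        while (src_len >= RECORD_SIZE) {
                if (count - *offset < RECORD_SIZE)
                        return *offset ? 0 : -ENOSPC; /* never split a record */

                memcpy(buf + *offset, src, RECORD_SIZE);
                *offset += RECORD_SIZE;
                src += RECORD_SIZE;
                src_len -= RECORD_SIZE;
        }

        return 0;
}
```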
-
-/**
- * struct i915_perf_stream - state for a single open stream FD
- */
-struct i915_perf_stream {
-       /**
-        * @dev_priv: i915 drm device
-        */
-       struct drm_i915_private *dev_priv;
-
-       /**
-        * @link: Links the stream into ``&drm_i915_private->streams``
-        */
-       struct list_head link;
-
-       /**
-        * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
-        * properties given when opening a stream, representing the contents
-        * of a single sample as read() by userspace.
-        */
-       u32 sample_flags;
-
-       /**
-        * @sample_size: Considering the configured contents of a sample
-        * combined with the required header size, this is the total size
-        * of a single sample record.
-        */
-       int sample_size;
-
-       /**
-        * @ctx: %NULL if measuring system-wide across all contexts or a
-        * specific context that is being monitored.
-        */
-       struct i915_gem_context *ctx;
-
-       /**
-        * @enabled: Whether the stream is currently enabled, considering
-        * whether the stream was opened in a disabled state and based
-        * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
-        */
-       bool enabled;
-
-       /**
-        * @ops: The callbacks providing the implementation of this specific
-        * type of configured stream.
-        */
-       const struct i915_perf_stream_ops *ops;
-
-       /**
-        * @oa_config: The OA configuration used by the stream.
-        */
-       struct i915_oa_config *oa_config;
-};
-
-/**
- * struct i915_oa_ops - Gen specific implementation of an OA unit stream
- */
-struct i915_oa_ops {
-       /**
-        * @is_valid_b_counter_reg: Validates register's address for
-        * programming boolean counters for a particular platform.
-        */
-       bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
-                                      u32 addr);
-
-       /**
-        * @is_valid_mux_reg: Validates register's address for programming mux
-        * for a particular platform.
-        */
-       bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
-       /**
-        * @is_valid_flex_reg: Validates register's address for programming
-        * flex EU filtering for a particular platform.
-        */
-       bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
-       /**
-        * @init_oa_buffer: Resets the head and tail pointers of the
-        * circular buffer for periodic OA reports.
-        *
-        * Called when first opening a stream for OA metrics, but also may be
-        * called in response to an OA buffer overflow or other error
-        * condition.
-        *
-        * Note it may be necessary to clear the full OA buffer here as part of
-        * maintaining the invariant that new reports must be written to
-        * zeroed memory for us to be able to reliably detect if an expected
-        * report has not yet landed in memory. (At least on Haswell the OA
-        * buffer tail pointer is not synchronized with reports being visible
-        * to the CPU)
-        */
-       void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
-
-       /**
-        * @enable_metric_set: Selects and applies any MUX configuration to set
-        * up the Boolean and Custom (B/C) counters that are part of the
-        * counter reports being sampled. May apply system constraints such as
-        * disabling EU clock gating as required.
-        */
-       int (*enable_metric_set)(struct drm_i915_private *dev_priv,
-                                const struct i915_oa_config *oa_config);
-
-       /**
-        * @disable_metric_set: Remove system constraints associated with using
-        * the OA unit.
-        */
-       void (*disable_metric_set)(struct drm_i915_private *dev_priv);
-
-       /**
-        * @oa_enable: Enable periodic sampling
-        */
-       void (*oa_enable)(struct drm_i915_private *dev_priv);
-
-       /**
-        * @oa_disable: Disable periodic sampling
-        */
-       void (*oa_disable)(struct drm_i915_private *dev_priv);
-
-       /**
-        * @read: Copy data from the circular OA buffer into a given userspace
-        * buffer.
-        */
-       int (*read)(struct i915_perf_stream *stream,
-                   char __user *buf,
-                   size_t count,
-                   size_t *offset);
-
-       /**
-        * @oa_hw_tail_read: read the OA tail pointer register
-        *
-        * In particular this enables us to share all the fiddly code for
-        * handling the OA unit tail pointer race that affects multiple
-        * generations.
-        */
-       u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
-};
-
-struct intel_cdclk_state {
+struct intel_cdclk_config {
        unsigned int cdclk, vco, ref, bypass;
        u8 voltage_level;
+};
+
+struct i915_selftest_stash {
+       atomic_t counter;
 };
 
 struct drm_i915_private {
        struct drm_device drm;
 
-       struct kmem_cache *objects;
-       struct kmem_cache *vmas;
-       struct kmem_cache *luts;
-       struct kmem_cache *requests;
-       struct kmem_cache *dependencies;
-       struct kmem_cache *priorities;
+       /* FIXME: Device release actions should all be moved to drmm_ */
+       bool do_release;
 
-       const struct intel_device_info info;
+       /* i915 device parameters */
+       struct i915_params params;
+
+       const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+       struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
 
        /**
@@ -1591,20 +849,14 @@
         */
        resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
 
-       void __iomem *regs;
-
        struct intel_uncore uncore;
-
-       struct mutex tlb_invalidate_lock;
+       struct intel_uncore_mmio_debug mmio_debug;
 
        struct i915_virtual_gpu vgpu;
 
        struct intel_gvt *gvt;
 
        struct intel_wopcm wopcm;
-
-       struct intel_huc huc;
-       struct intel_guc guc;
 
        struct intel_csr csr;
 
@@ -1615,29 +867,24 @@
        struct mutex gmbus_mutex;
 
        /**
-        * Base address of the gmbus and gpio block.
+        * Base address of where the gmbus and gpio blocks are located (either
+        * on PCH or on SoC for platforms without PCH).
         */
-       uint32_t gpio_mmio_base;
+       u32 gpio_mmio_base;
+
+       u32 hsw_psr_mmio_adjust;
 
        /* MMIO base address for MIPI regs */
-       uint32_t mipi_mmio_base;
+       u32 mipi_mmio_base;
 
-       uint32_t psr_mmio_base;
-
-       uint32_t pps_mmio_base;
+       u32 pps_mmio_base;
 
        wait_queue_head_t gmbus_wait_queue;
 
        struct pci_dev *bridge_dev;
-       struct intel_engine_cs *engine[I915_NUM_ENGINES];
-       /* Context used internally to idle the GPU and setup initial state */
-       struct i915_gem_context *kernel_context;
-       /* Context only to be used for injecting preemption commands */
-       struct i915_gem_context *preempt_context;
-       struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
-                                           [MAX_ENGINE_INSTANCE + 1];
 
-       struct drm_dma_handle *status_page_dmah;
+       struct rb_root uabi_engines;
+
        struct resource mch_res;
 
        /* protects the irq masks */
@@ -1645,22 +892,15 @@
 
        bool display_irqs_enabled;
 
-       /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
-       struct pm_qos_request pm_qos;
-
        /* Sideband mailbox protection */
        struct mutex sb_lock;
+       struct pm_qos_request sb_qos;
 
        /** Cached value of IMR to avoid reads in updating the bitfield */
        union {
                u32 irq_mask;
                u32 de_irq_mask[I915_MAX_PIPES];
        };
-       u32 gt_irq_mask;
-       u32 pm_imr;
-       u32 pm_ier;
-       u32 pm_rps_events;
-       u32 pm_guc_events;
        u32 pipestat_irq_mask[I915_MAX_PIPES];
 
        struct i915_hotplug hotplug;
@@ -1677,42 +917,34 @@
        /* backlight registers and fields in struct intel_panel */
        struct mutex backlight_lock;
 
-       /* LVDS info */
-       bool no_aux_handshake;
-
        /* protects panel power sequencer state */
        struct mutex pps_mutex;
-
-       struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
-       int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_preferred_vco_freq;
        unsigned int max_cdclk_freq;
 
        unsigned int max_dotclk_freq;
-       unsigned int rawclk_freq;
        unsigned int hpll_freq;
        unsigned int fdi_pll_freq;
        unsigned int czclk_freq;
 
        struct {
-               /*
-                * The current logical cdclk state.
-                * See intel_atomic_state.cdclk.logical
-                *
-                * For reading holding any crtc lock is sufficient,
-                * for writing must hold all of them.
-                */
-               struct intel_cdclk_state logical;
-               /*
-                * The current actual cdclk state.
-                * See intel_atomic_state.cdclk.actual
-                */
-               struct intel_cdclk_state actual;
-               /* The current hardware cdclk state */
-               struct intel_cdclk_state hw;
+               /* The current hardware cdclk configuration */
+               struct intel_cdclk_config hw;
+
+               /* cdclk, divider, and ratio table from bspec */
+               const struct intel_cdclk_vals *table;
+
+               struct intel_global_obj obj;
        } cdclk;
+
+       struct {
+               /* The current hardware dbuf configuration */
+               u8 enabled_slices;
+
+               struct intel_global_obj obj;
+       } dbuf;
 
        /**
         * wq - Driver workqueue for GEM.
@@ -1725,6 +957,8 @@
 
        /* ordered wq for modesets */
        struct workqueue_struct *modeset_wq;
+       /* unbound hipri wq for page flips/plane updates */
+       struct workqueue_struct *flip_wq;
 
        /* Display functions */
        struct drm_i915_display_funcs display;
@@ -1742,40 +976,41 @@
 
        struct i915_gem_mm mm;
        DECLARE_HASHTABLE(mm_structs, 7);
-       struct mutex mm_lock;
-
-       struct intel_ppat ppat;
+       spinlock_t mm_lock;
 
        /* Kernel Modesetting */
 
        struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
        struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 
-#ifdef CONFIG_DEBUG_FS
-       struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
+       /**
+        * dpll and cdclk state is protected by connection_mutex
+        * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+        * Must be global rather than per dpll, because on some platforms plls
+        * share registers.
+        */
+       struct {
+               struct mutex lock;
 
-       /* dpll and cdclk state is protected by connection_mutex */
-       int num_shared_dpll;
-       struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-       const struct intel_dpll_mgr *dpll_mgr;
+               int num_shared_dpll;
+               struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+               const struct intel_dpll_mgr *mgr;
+
+               struct {
+                       int nssc;
+                       int ssc;
+               } ref_clks;
+       } dpll;
+
+       struct list_head global_obj_list;
 
        /*
-        * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
-        * Must be global rather than per dpll, because on some platforms
-        * plls share registers.
+        * For reading active_pipes holding any crtc lock is
+        * sufficient, for writing must hold all of them.
         */
-       struct mutex dpll_lock;
+       u8 active_pipes;
 
-       unsigned int active_crtcs;
-       /* minimum acceptable cdclk for each pipe */
-       int min_cdclk[I915_MAX_PIPES];
-       /* minimum acceptable voltage level for each pipe */
-       u8 min_voltage_level[I915_MAX_PIPES];
-
-       int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
-
-       struct i915_workarounds workarounds;
+       struct i915_wa_list gt_wa_list;
 
        struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1784,29 +1019,23 @@
                struct work_struct free_work;
        } atomic_helper;
 
-       u16 orig_clock;
-
        bool mchbar_need_disable;
 
        struct intel_l3_parity l3_parity;
 
-       /* Cannot be determined by PCIID. You must always read a register. */
-       u32 edram_cap;
+       /*
+        * HTI (aka HDPORT) state read during initial hw readout. Most
+        * platforms don't have HTI, so this will just stay 0. Those that do
+        * will use this later to figure out which PLLs and PHYs are unavailable
+        * for driver usage.
+        */
+       u32 hti_state;
 
        /*
-        * Protects RPS/RC6 register access and PCU communication.
-        * Must be taken after struct_mutex if nested. Note that
-        * this lock may be held for long periods of time when
-        * talking to hw - so only take it when talking to hw!
+        * edram size in MB.
+        * Cannot be determined by PCIID. You must always read a register.
         */
-       struct mutex pcu_lock;
-
-       /* gen6+ GT PM state */
-       struct intel_gen6_power_mgmt gt_pm;
-
-       /* ilk-only ips/rps state. Everything in here is protected by the global
-        * mchdev_lock in intel_pm.c */
-       struct intel_ilk_power_mgmt ips;
+       u32 edram_size_mb;
 
        struct i915_power_domains power_domains;
 
@@ -1831,21 +1060,8 @@
         *
         */
        struct mutex av_mutex;
-
-       struct {
-               struct list_head list;
-               struct llist_head free_list;
-               struct work_struct free_work;
-
-               /* The hw wants to have a stable context identifier for the
-                * lifetime of the context (for OA, PASID, faults, etc).
-                * This is limited in execlists to 21 bits.
-                */
-               struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-       } contexts;
+       int audio_power_refcount;
+       u32 audio_freq_cntrl;
 
        u32 fdi_rx_config;
 
@@ -1862,7 +1078,7 @@
        u32 suspend_count;
        bool power_domains_suspended;
        struct i915_suspend_saved_registers regfile;
-       struct vlv_s0ix_state vlv_s0ix_state;
+       struct vlv_s0ix_state *vlv_s0ix_state;
 
        enum {
                I915_SAGV_UNKNOWN = 0,
@@ -1871,6 +1087,8 @@
                I915_SAGV_NOT_CONTROLLED
        } sagv_status;
 
+       u32 sagv_block_time_us;
+
        struct {
                /*
                 * Raw watermark latency values:
@@ -1878,32 +1096,31 @@
                 * in 0.5us units for WM1+.
                 */
                /* primary */
-               uint16_t pri_latency[5];
+               u16 pri_latency[5];
                /* sprite */
-               uint16_t spr_latency[5];
+               u16 spr_latency[5];
                /* cursor */
-               uint16_t cur_latency[5];
+               u16 cur_latency[5];
                /*
                 * Raw watermark memory latency values
                 * for SKL for all 8 levels
                 * in 1us units.
                 */
-               uint16_t skl_latency[8];
+               u16 skl_latency[8];
 
                /* current hardware state */
                union {
                        struct ilk_wm_values hw;
-                       struct skl_ddb_values skl_hw;
                        struct vlv_wm_values vlv;
                        struct g4x_wm_values g4x;
                };
 
-               uint8_t max_level;
+               u8 max_level;
 
                /*
                 * Should be held around atomic WM register writing; also
                 * protects * intel_crtc->wm.active and
-                * cstate->wm.need_postvbl_update.
+                * crtc_state->wm.need_postvbl_update.
                 */
                struct mutex wm_mutex;
 
....@@ -1911,200 +1128,69 @@
19111128 * Set during HW readout of watermarks/DDB. Some platforms
19121129 * need to know when we're still using BIOS-provided values
19131130 * (which we don't fully trust).
1131
+ *
1132
+ * FIXME get rid of this.
19141133 */
19151134 bool distrust_bios_wm;
19161135 } wm;
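/*
 * Worked example (editorial): the latency values above are unit
 * encoded, so a pri_latency[] entry of 8 for WM1+ means
 * 8 * 0.5us == 4us, while a skl_latency[] entry of 8 means
 * 8 * 1us == 8us.
 */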
19171136
1918
- struct i915_runtime_pm runtime_pm;
1137
+ struct dram_info {
1138
+ bool valid;
1139
+ bool is_16gb_dimm;
1140
+ u8 num_channels;
1141
+ u8 ranks;
1142
+ u32 bandwidth_kbps;
1143
+ bool symmetric_memory;
1144
+ enum intel_dram_type {
1145
+ INTEL_DRAM_UNKNOWN,
1146
+ INTEL_DRAM_DDR3,
1147
+ INTEL_DRAM_DDR4,
1148
+ INTEL_DRAM_LPDDR3,
1149
+ INTEL_DRAM_LPDDR4
1150
+ } type;
1151
+ u8 num_qgv_points;
1152
+ } dram_info;
19191153
1920
- struct {
1921
- bool initialized;
1154
+ struct intel_bw_info {
1155
+ /* for each QGV point */
1156
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
1157
+ u8 num_qgv_points;
1158
+ u8 num_planes;
1159
+ } max_bw[6];
19221160
1923
- struct kobject *metrics_kobj;
1924
- struct ctl_table_header *sysctl_header;
1161
+ struct intel_global_obj bw_obj;
19251162
1926
- /*
1927
- * Lock associated with adding/modifying/removing OA configs
1928
- * in dev_priv->perf.metrics_idr.
1929
- */
1930
- struct mutex metrics_lock;
1163
+ struct intel_runtime_pm runtime_pm;
19311164
1932
- /*
1933
- * List of dynamic configurations, you need to hold
1934
- * dev_priv->perf.metrics_lock to access it.
1935
- */
1936
- struct idr metrics_idr;
1937
-
1938
- /*
1939
- * Lock associated with anything below within this structure
1940
- * except exclusive_stream.
1941
- */
1942
- struct mutex lock;
1943
- struct list_head streams;
1944
-
1945
- struct {
1946
- /*
1947
- * The stream currently using the OA unit. If accessed
1948
- * outside a syscall associated to its file
1949
- * descriptor, you need to hold
1950
- * dev_priv->drm.struct_mutex.
1951
- */
1952
- struct i915_perf_stream *exclusive_stream;
1953
-
1954
- struct intel_context *pinned_ctx;
1955
- u32 specific_ctx_id;
1956
- u32 specific_ctx_id_mask;
1957
-
1958
- struct hrtimer poll_check_timer;
1959
- wait_queue_head_t poll_wq;
1960
- bool pollin;
1961
-
1962
- /**
1963
- * For rate limiting any notifications of spurious
1964
- * invalid OA reports
1965
- */
1966
- struct ratelimit_state spurious_report_rs;
1967
-
1968
- bool periodic;
1969
- int period_exponent;
1970
-
1971
- struct i915_oa_config test_config;
1972
-
1973
- struct {
1974
- struct i915_vma *vma;
1975
- u8 *vaddr;
1976
- u32 last_ctx_id;
1977
- int format;
1978
- int format_size;
1979
-
1980
- /**
1981
- * Locks reads and writes to all head/tail state
1982
- *
1983
- * Consider: the head and tail pointer state
1984
- * needs to be read consistently from a hrtimer
1985
- * callback (atomic context) and read() fop
1986
- * (user context) with tail pointer updates
1987
- * happening in atomic context and head updates
1988
- * in user context and the (unlikely)
1989
- * possibility of read() errors needing to
1990
- * reset all head/tail state.
1991
- *
1992
- * Note: Contention or performance aren't
1993
- * currently a significant concern here
1994
- * considering the relatively low frequency of
1995
- * hrtimer callbacks (5ms period) and that
1996
- * reads typically only happen in response to a
1997
- * hrtimer event and likely complete before the
1998
- * next callback.
1999
- *
2000
- * Note: This lock is not held *while* reading
2001
- * and copying data to userspace so the value
2002
- * of head observed in hrtimer callbacks won't
2003
- * represent any partial consumption of data.
2004
- */
2005
- spinlock_t ptr_lock;
2006
-
2007
- /**
2008
- * One 'aging' tail pointer and one 'aged'
2009
- * tail pointer ready to used for reading.
2010
- *
2011
- * Initial values of 0xffffffff are invalid
2012
- * and imply that an update is required
2013
- * (and should be ignored by an attempted
2014
- * read)
2015
- */
2016
- struct {
2017
- u32 offset;
2018
- } tails[2];
2019
-
2020
- /**
2021
- * Index for the aged tail ready to read()
2022
- * data up to.
2023
- */
2024
- unsigned int aged_tail_idx;
2025
-
2026
- /**
2027
- * A monotonic timestamp for when the current
2028
- * aging tail pointer was read; used to
2029
- * determine when it is old enough to trust.
2030
- */
2031
- u64 aging_timestamp;
2032
-
2033
- /**
2034
- * Although we can always read back the head
2035
- * pointer register, we prefer to avoid
2036
- * trusting the HW state, just to avoid any
2037
- * risk that some hardware condition could
2038
- * somehow bump the head pointer unpredictably
2039
- * and cause us to forward the wrong OA buffer
2040
- * data to userspace.
2041
- */
2042
- u32 head;
2043
- } oa_buffer;
2044
-
2045
- u32 gen7_latched_oastatus1;
2046
- u32 ctx_oactxctrl_offset;
2047
- u32 ctx_flexeu0_offset;
2048
-
2049
- /**
2050
- * The RPT_ID/reason field for Gen8+ includes a bit
2051
- * to determine if the CTX ID in the report is valid
2052
- * but the specific bit differs between Gen 8 and 9
2053
- */
2054
- u32 gen8_valid_ctx_bit;
2055
-
2056
- struct i915_oa_ops ops;
2057
- const struct i915_oa_format *oa_formats;
2058
- } oa;
2059
- } perf;
1165
+ struct i915_perf perf;
20601166
20611167 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1168
+ struct intel_gt gt;
1169
+
20621170 struct {
2063
- void (*resume)(struct drm_i915_private *);
2064
- void (*cleanup_engine)(struct intel_engine_cs *engine);
1171
+ struct i915_gem_contexts {
1172
+ spinlock_t lock; /* locks list */
1173
+ struct list_head list;
20651174
2066
- struct list_head timelines;
1175
+ struct llist_head free_list;
1176
+ struct work_struct free_work;
1177
+ } contexts;
20671178
2068
- struct list_head active_rings;
2069
- struct list_head closed_vma;
2070
- u32 active_requests;
2071
- u32 request_serial;
2072
-
2073
- /**
2074
- * Is the GPU currently considered idle, or busy executing
2075
- * userspace requests? Whilst idle, we allow runtime power
2076
- * management to power down the hardware and display clocks.
2077
- * In order to reduce the effect on performance, there
2078
- * is a slight delay before we do so.
1179
+ /*
1180
+ * We replace the local file with a global mapping, as the
1181
+ * backing storage for the mmap is on the device and not
1182
+ * on the struct file, and we do not want to prolong the
1183
+ * lifetime of the local fd. To minimise the number of
1184
+ * anonymous inodes we create, we use a global singleton to
1185
+ * share the global mapping.
20791186 */
2080
- bool awake;
1187
+ struct file *mmap_singleton;
1188
+ } gem;
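/*
 * A minimal lookup sketch for the singleton described above
 * (hypothetical helper; the real code must also create and publish
 * the anonymous file on first use and cope with teardown races):
 */
static inline struct file *example_mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;	/* raced against the final fput() */
	rcu_read_unlock();

	return file;	/* NULL => caller creates and publishes one */
}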
20811189
2082
- /**
2083
- * The number of times we have woken up.
2084
- */
2085
- unsigned int epoch;
2086
-#define I915_EPOCH_INVALID 0
1190
+ u8 pch_ssc_use;
20871191
2088
- /**
2089
- * We leave the user IRQ off as much as possible,
2090
- * but this means that requests will finish and never
2091
- * be retired once the system goes idle. Set a timer to
2092
- * fire periodically while the ring is running. When it
2093
- * fires, go retire requests.
2094
- */
2095
- struct delayed_work retire_work;
2096
-
2097
- /**
2098
- * When we detect an idle GPU, we want to turn on
2099
- * powersaving features. So once we see that there
2100
- * are no more requests outstanding and no more
2101
- * arrive within a small period of time, we fire
2102
- * off the idle_work.
2103
- */
2104
- struct delayed_work idle_work;
2105
-
2106
- ktime_t last_init_time;
2107
- } gt;
1192
+ /* For i915gm/i945gm vblank irq workaround */
1193
+ u8 vblank_enabled;
21081194
21091195 /* perform PHY state sanity checks? */
21101196 bool chv_phy_assert[2];
....@@ -2122,6 +1208,14 @@
21221208
21231209 struct i915_pmu pmu;
21241210
1211
+ struct i915_hdcp_comp_master *hdcp_master;
1212
+ bool hdcp_comp_added;
1213
+
1214
+ /* Mutex to protect the above hdcp component related values. */
1215
+ struct mutex hdcp_comp_mutex;
1216
+
1217
+ I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
1218
+
21251219 /*
21261220 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
21271221 * will be rejected. Instead look for a better place.
....@@ -2135,22 +1229,12 @@
21351229
21361230 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
21371231 {
2138
- return to_i915(dev_get_drvdata(kdev));
1232
+ return dev_get_drvdata(kdev);
21391233 }
21401234
2141
-static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
1235
+static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
21421236 {
2143
- return container_of(wopcm, struct drm_i915_private, wopcm);
2144
-}
2145
-
2146
-static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2147
-{
2148
- return container_of(guc, struct drm_i915_private, guc);
2149
-}
2150
-
2151
-static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
2152
-{
2153
- return container_of(huc, struct drm_i915_private, huc);
1237
+ return pci_get_drvdata(pdev);
21541238 }
21551239
21561240 /* Simple iterator over all initialised engines */
....@@ -2161,18 +1245,24 @@
21611245 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
21621246
21631247 /* Iterator over subset of engines selected by mask */
2164
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2165
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
1248
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
1249
+ for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
21661250 (tmp__) ? \
2167
- ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
1251
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
21681252 0;)
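/*
 * A minimal usage sketch for for_each_engine_masked() (hypothetical
 * caller; assumes an initialised struct intel_gt). The tmp__ argument
 * is scratch storage consumed by __mask_next_bit(), so the caller
 * must supply it:
 */
static inline void example_log_masked_engines(struct intel_gt *gt,
					      intel_engine_mask_t mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	for_each_engine_masked(engine, gt, mask, tmp)
		drm_dbg(&gt->i915->drm, "engine %s selected\n", engine->name);
}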
21691253
2170
-enum hdmi_force_audio {
2171
- HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
2172
- HDMI_AUDIO_OFF, /* force turn off HDMI audio */
2173
- HDMI_AUDIO_AUTO, /* trust EDID */
2174
- HDMI_AUDIO_ON, /* force turn on HDMI audio */
2175
-};
1254
+#define rb_to_uabi_engine(rb) \
1255
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
1256
+
1257
+#define for_each_uabi_engine(engine__, i915__) \
1258
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
1259
+ (engine__); \
1260
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
1261
+
1262
+#define for_each_uabi_class_engine(engine__, class__, i915__) \
1263
+ for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
1264
+ (engine__) && (engine__)->uabi_class == (class__); \
1265
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
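/*
 * A minimal sketch of walking the userspace-visible engines
 * (hypothetical caller; counts the engines exposed for one uabi
 * class, e.g. I915_ENGINE_CLASS_VIDEO):
 */
static inline unsigned int
example_count_uabi_class(struct drm_i915_private *i915, unsigned int class)
{
	struct intel_engine_cs *engine;
	unsigned int count = 0;

	for_each_uabi_class_engine(engine, class, i915)
		count++;

	return count;
}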
21761266
21771267 #define I915_GTT_OFFSET_NONE ((u32)-1)
21781268
....@@ -2196,140 +1286,30 @@
21961286 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
21971287 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
21981288
2199
-/*
2200
- * Optimised SGL iterator for GEM objects
2201
- */
2202
-static __always_inline struct sgt_iter {
2203
- struct scatterlist *sgp;
2204
- union {
2205
- unsigned long pfn;
2206
- dma_addr_t dma;
2207
- };
2208
- unsigned int curr;
2209
- unsigned int max;
2210
-} __sgt_iter(struct scatterlist *sgl, bool dma) {
2211
- struct sgt_iter s = { .sgp = sgl };
2212
-
2213
- if (s.sgp) {
2214
- s.max = s.curr = s.sgp->offset;
2215
- s.max += s.sgp->length;
2216
- if (dma)
2217
- s.dma = sg_dma_address(s.sgp);
2218
- else
2219
- s.pfn = page_to_pfn(sg_page(s.sgp));
2220
- }
2221
-
2222
- return s;
2223
-}
2224
-
2225
-static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2226
-{
2227
- ++sg;
2228
- if (unlikely(sg_is_chain(sg)))
2229
- sg = sg_chain_ptr(sg);
2230
- return sg;
2231
-}
2232
-
2233
-/**
2234
- * __sg_next - return the next scatterlist entry in a list
2235
- * @sg: The current sg entry
2236
- *
2237
- * Description:
2238
- * If the entry is the last, return NULL; otherwise, step to the next
2239
- * element in the array (@sg@+1). If that's a chain pointer, follow it;
2240
- * otherwise just return the pointer to the current element.
2241
- **/
2242
-static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2243
-{
2244
- return sg_is_last(sg) ? NULL : ____sg_next(sg);
2245
-}
2246
-
2247
-/**
2248
- * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2249
- * @__dmap: DMA address (output)
2250
- * @__iter: 'struct sgt_iter' (iterator state, internal)
2251
- * @__sgt: sg_table to iterate over (input)
2252
- */
2253
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2254
- for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2255
- ((__dmap) = (__iter).dma + (__iter).curr); \
2256
- (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
2257
- (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
2258
-
2259
-/**
2260
- * for_each_sgt_page - iterate over the pages of the given sg_table
2261
- * @__pp: page pointer (output)
2262
- * @__iter: 'struct sgt_iter' (iterator state, internal)
2263
- * @__sgt: sg_table to iterate over (input)
2264
- */
2265
-#define for_each_sgt_page(__pp, __iter, __sgt) \
2266
- for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2267
- ((__pp) = (__iter).pfn == 0 ? NULL : \
2268
- pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2269
- (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
2270
- (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
2271
-
2272
-static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
2273
-{
2274
- unsigned int page_sizes;
2275
-
2276
- page_sizes = 0;
2277
- while (sg) {
2278
- GEM_BUG_ON(sg->offset);
2279
- GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
2280
- page_sizes |= sg->length;
2281
- sg = __sg_next(sg);
2282
- }
2283
-
2284
- return page_sizes;
2285
-}
2286
-
2287
-static inline unsigned int i915_sg_segment_size(void)
2288
-{
2289
- unsigned int size = swiotlb_max_segment();
2290
-
2291
- if (size == 0)
2292
- return SCATTERLIST_MAX_SEGMENT;
2293
-
2294
- size = rounddown(size, PAGE_SIZE);
2295
- /* swiotlb_max_segment_size can return 1 byte when it means one page. */
2296
- if (size < PAGE_SIZE)
2297
- size = PAGE_SIZE;
2298
-
2299
- return size;
2300
-}
2301
-
2302
-static inline const struct intel_device_info *
2303
-intel_info(const struct drm_i915_private *dev_priv)
2304
-{
2305
- return &dev_priv->info;
2306
-}
2307
-
2308
-#define INTEL_INFO(dev_priv) intel_info((dev_priv))
1289
+#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
1290
+#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
23091291 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
23101292
2311
-#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
2312
-#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
1293
+#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
1294
+#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
23131295
23141296 #define REVID_FOREVER 0xff
23151297 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
23161298
2317
-#define GEN_FOREVER (0)
2318
-
23191299 #define INTEL_GEN_MASK(s, e) ( \
23201300 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
23211301 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
2322
- GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
2323
- (s) != GEN_FOREVER ? (s) - 1 : 0) \
2324
-)
1302
+ GENMASK((e) - 1, (s) - 1))
23251303
2326
-/*
2327
- * Returns true if Gen is in inclusive range [Start, End].
2328
- *
2329
- * Use GEN_FOREVER for unbound start and or end.
2330
- */
2331
-#define IS_GEN(dev_priv, s, e) \
2332
- (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
1304
+/* Returns true if Gen is in inclusive range [Start, End] */
1305
+#define IS_GEN_RANGE(dev_priv, s, e) \
1306
+ (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
1307
+
1308
+#define IS_GEN(dev_priv, n) \
1309
+ (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
1310
+ INTEL_INFO(dev_priv)->gen == (n))
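/*
 * Worked example (editorial): gen_mask stores BIT(gen - 1), so
 * IS_GEN_RANGE(i915, 9, 11) reduces to a single test against
 * GENMASK(10, 8), i.e. BIT(8) | BIT(9) | BIT(10) for gen9..gen11
 * inclusive. Both bounds must be compile-time constants or the
 * BUILD_BUG_ON_ZERO() in INTEL_GEN_MASK() breaks the build.
 */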
1311
+
1312
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
23331313
23341314 /*
23351315 * Return true if revision is in range [since,until] inclusive.
....@@ -2339,7 +1319,70 @@
23391319 #define IS_REVID(p, since, until) \
23401320 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
23411321
2342
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
1322
+static __always_inline unsigned int
1323
+__platform_mask_index(const struct intel_runtime_info *info,
1324
+ enum intel_platform p)
1325
+{
1326
+ const unsigned int pbits =
1327
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
1328
+
1329
+ /* Expand the platform_mask array if this fails. */
1330
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
1331
+ pbits * ARRAY_SIZE(info->platform_mask));
1332
+
1333
+ return p / pbits;
1334
+}
1335
+
1336
+static __always_inline unsigned int
1337
+__platform_mask_bit(const struct intel_runtime_info *info,
1338
+ enum intel_platform p)
1339
+{
1340
+ const unsigned int pbits =
1341
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
1342
+
1343
+ return p % pbits + INTEL_SUBPLATFORM_BITS;
1344
+}
1345
+
1346
+static inline u32
1347
+intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
1348
+{
1349
+ const unsigned int pi = __platform_mask_index(info, p);
1350
+
1351
+ return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
1352
+}
1353
+
1354
+static __always_inline bool
1355
+IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
1356
+{
1357
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1358
+ const unsigned int pi = __platform_mask_index(info, p);
1359
+ const unsigned int pb = __platform_mask_bit(info, p);
1360
+
1361
+ BUILD_BUG_ON(!__builtin_constant_p(p));
1362
+
1363
+ return info->platform_mask[pi] & BIT(pb);
1364
+}
1365
+
1366
+static __always_inline bool
1367
+IS_SUBPLATFORM(const struct drm_i915_private *i915,
1368
+ enum intel_platform p, unsigned int s)
1369
+{
1370
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1371
+ const unsigned int pi = __platform_mask_index(info, p);
1372
+ const unsigned int pb = __platform_mask_bit(info, p);
1373
+ const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
1374
+ const u32 mask = info->platform_mask[pi];
1375
+
1376
+ BUILD_BUG_ON(!__builtin_constant_p(p));
1377
+ BUILD_BUG_ON(!__builtin_constant_p(s));
1378
+ BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
1379
+
1380
+ /* Shift and test on the MSB position so sign flag can be used. */
1381
+ return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
1382
+}
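/*
 * Worked example (editorial) of the MSB trick above: each u32
 * platform_mask word keeps its low INTEL_SUBPLATFORM_BITS bits for
 * subplatform flags and stores platform bits above them. With
 * msb == 31, shifting the word left by (msb - pb) parks the platform
 * bit at bit 31, and shifting by (msb - s) parks subplatform bit s
 * there as well; ANDing the two shifted copies and testing BIT(msb)
 * is therefore true only when both bits are set in the same word.
 */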
1383
+
1384
+#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
1385
+#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
23431386
23441387 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
23451388 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
....@@ -2354,14 +1397,14 @@
23541397 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
23551398 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
23561399 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
2357
-#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
2358
-#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
23591400 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
23601401 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
2361
-#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
1402
+#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
1403
+#define IS_IRONLAKE_M(dev_priv) \
1404
+ (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
23621405 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
23631406 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
2364
- (dev_priv)->info.gt == 1)
1407
+ INTEL_INFO(dev_priv)->gt == 1)
23651408 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
23661409 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
23671410 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
....@@ -2371,63 +1414,74 @@
23711414 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
23721415 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
23731416 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
1417
+#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
23741418 #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
23751419 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
2376
-#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
1420
+#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
1421
+#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
1422
+#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
1423
+#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
23771424 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
23781425 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2379
-#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
2380
- ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
2381
- (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
2382
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
2383
-/* ULX machines are also considered ULT. */
2384
-#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
2385
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
1426
+#define IS_BDW_ULT(dev_priv) \
1427
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
1428
+#define IS_BDW_ULX(dev_priv) \
1429
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
23861430 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
2387
- (dev_priv)->info.gt == 3)
2388
-#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
2389
- (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
1431
+ INTEL_INFO(dev_priv)->gt == 3)
1432
+#define IS_HSW_ULT(dev_priv) \
1433
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
23901434 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
2391
- (dev_priv)->info.gt == 3)
1435
+ INTEL_INFO(dev_priv)->gt == 3)
1436
+#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
1437
+ INTEL_INFO(dev_priv)->gt == 1)
23921438 /* ULX machines are also considered ULT. */
2393
-#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
2394
- INTEL_DEVID(dev_priv) == 0x0A1E)
2395
-#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
2396
- INTEL_DEVID(dev_priv) == 0x1913 || \
2397
- INTEL_DEVID(dev_priv) == 0x1916 || \
2398
- INTEL_DEVID(dev_priv) == 0x1921 || \
2399
- INTEL_DEVID(dev_priv) == 0x1926)
2400
-#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
2401
- INTEL_DEVID(dev_priv) == 0x1915 || \
2402
- INTEL_DEVID(dev_priv) == 0x191E)
2403
-#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
2404
- INTEL_DEVID(dev_priv) == 0x5913 || \
2405
- INTEL_DEVID(dev_priv) == 0x5916 || \
2406
- INTEL_DEVID(dev_priv) == 0x5921 || \
2407
- INTEL_DEVID(dev_priv) == 0x5926)
2408
-#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
2409
- INTEL_DEVID(dev_priv) == 0x5915 || \
2410
- INTEL_DEVID(dev_priv) == 0x591E)
1439
+#define IS_HSW_ULX(dev_priv) \
1440
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
1441
+#define IS_SKL_ULT(dev_priv) \
1442
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
1443
+#define IS_SKL_ULX(dev_priv) \
1444
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
1445
+#define IS_KBL_ULT(dev_priv) \
1446
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
1447
+#define IS_KBL_ULX(dev_priv) \
1448
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
24111449 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
2412
- (dev_priv)->info.gt == 2)
1450
+ INTEL_INFO(dev_priv)->gt == 2)
24131451 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
2414
- (dev_priv)->info.gt == 3)
1452
+ INTEL_INFO(dev_priv)->gt == 3)
24151453 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
2416
- (dev_priv)->info.gt == 4)
1454
+ INTEL_INFO(dev_priv)->gt == 4)
24171455 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
2418
- (dev_priv)->info.gt == 2)
1456
+ INTEL_INFO(dev_priv)->gt == 2)
24191457 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
2420
- (dev_priv)->info.gt == 3)
2421
-#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
2422
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
1458
+ INTEL_INFO(dev_priv)->gt == 3)
1459
+#define IS_CFL_ULT(dev_priv) \
1460
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
1461
+#define IS_CFL_ULX(dev_priv) \
1462
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
24231463 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
2424
- (dev_priv)->info.gt == 2)
1464
+ INTEL_INFO(dev_priv)->gt == 2)
24251465 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
2426
- (dev_priv)->info.gt == 3)
2427
-#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
2428
- (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
1466
+ INTEL_INFO(dev_priv)->gt == 3)
24291467
2430
-#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
1468
+#define IS_CML_ULT(dev_priv) \
1469
+ IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
1470
+#define IS_CML_ULX(dev_priv) \
1471
+ IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
1472
+#define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
1473
+ INTEL_INFO(dev_priv)->gt == 2)
1474
+
1475
+#define IS_CNL_WITH_PORT_F(dev_priv) \
1476
+ IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
1477
+#define IS_ICL_WITH_PORT_F(dev_priv) \
1478
+ IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
1479
+
1480
+#define IS_TGL_U(dev_priv) \
1481
+ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)
1482
+
1483
+#define IS_TGL_Y(dev_priv) \
1484
+ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
24311485
24321486 #define SKL_REVID_A0 0x0
24331487 #define SKL_REVID_B0 0x1
....@@ -2449,17 +1503,39 @@
24491503 #define IS_BXT_REVID(dev_priv, since, until) \
24501504 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
24511505
2452
-#define KBL_REVID_A0 0x0
2453
-#define KBL_REVID_B0 0x1
2454
-#define KBL_REVID_C0 0x2
2455
-#define KBL_REVID_D0 0x3
2456
-#define KBL_REVID_E0 0x4
1506
+enum {
1507
+ KBL_REVID_A0,
1508
+ KBL_REVID_B0,
1509
+ KBL_REVID_B1,
1510
+ KBL_REVID_C0,
1511
+ KBL_REVID_D0,
1512
+ KBL_REVID_D1,
1513
+ KBL_REVID_E0,
1514
+ KBL_REVID_F0,
1515
+ KBL_REVID_G0,
1516
+};
24571517
2458
-#define IS_KBL_REVID(dev_priv, since, until) \
2459
- (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
1518
+struct i915_rev_steppings {
1519
+ u8 gt_stepping;
1520
+ u8 disp_stepping;
1521
+};
1522
+
1523
+/* Defined in intel_workarounds.c */
1524
+extern const struct i915_rev_steppings kbl_revids[];
1525
+
1526
+#define IS_KBL_GT_REVID(dev_priv, since, until) \
1527
+ (IS_KABYLAKE(dev_priv) && \
1528
+ kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
1529
+ kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
1530
+#define IS_KBL_DISP_REVID(dev_priv, since, until) \
1531
+ (IS_KABYLAKE(dev_priv) && \
1532
+ kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
1533
+ kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
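/*
 * A minimal usage sketch (hypothetical workaround guard): apply a
 * fix only on Kabylake parts whose GT stepping is B0 or earlier:
 */
static inline bool example_needs_kbl_early_gt_wa(struct drm_i915_private *i915)
{
	return IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_B0);
}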
24601534
24611535 #define GLK_REVID_A0 0x0
24621536 #define GLK_REVID_A1 0x1
1537
+#define GLK_REVID_A2 0x2
1538
+#define GLK_REVID_B0 0x3
24631539
24641540 #define IS_GLK_REVID(dev_priv, since, until) \
24651541 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
....@@ -2480,94 +1556,130 @@
24801556 #define IS_ICL_REVID(p, since, until) \
24811557 (IS_ICELAKE(p) && IS_REVID(p, since, until))
24821558
2483
-/*
2484
- * The genX designation typically refers to the render engine, so render
2485
- * capability related checks should use IS_GEN, while display and other checks
2486
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2487
- * chips, etc.).
2488
- */
2489
-#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
2490
-#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
2491
-#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
2492
-#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
2493
-#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
2494
-#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
2495
-#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
2496
-#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
2497
-#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
2498
-#define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10)))
1559
+#define EHL_REVID_A0 0x0
1560
+
1561
+#define IS_EHL_REVID(p, since, until) \
1562
+ (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
1563
+
1564
+enum {
1565
+ TGL_REVID_A0,
1566
+ TGL_REVID_B0,
1567
+ TGL_REVID_B1,
1568
+ TGL_REVID_C0,
1569
+ TGL_REVID_D0,
1570
+};
1571
+
1572
+extern const struct i915_rev_steppings tgl_uy_revids[];
1573
+extern const struct i915_rev_steppings tgl_revids[];
1574
+
1575
+static inline const struct i915_rev_steppings *
1576
+tgl_revids_get(struct drm_i915_private *dev_priv)
1577
+{
1578
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
1579
+ return tgl_uy_revids;
1580
+ else
1581
+ return tgl_revids;
1582
+}
1583
+
1584
+#define IS_TGL_DISP_REVID(p, since, until) \
1585
+ (IS_TIGERLAKE(p) && \
1586
+ tgl_revids_get(p)->disp_stepping >= (since) && \
1587
+ tgl_revids_get(p)->disp_stepping <= (until))
1588
+
1589
+#define IS_TGL_UY_GT_REVID(p, since, until) \
1590
+ ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
1591
+ tgl_uy_revids->gt_stepping >= (since) && \
1592
+ tgl_uy_revids->gt_stepping <= (until))
1593
+
1594
+#define IS_TGL_GT_REVID(p, since, until) \
1595
+ (IS_TIGERLAKE(p) && \
1596
+ !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
1597
+ tgl_revids->gt_stepping >= (since) && \
1598
+ tgl_revids->gt_stepping <= (until))
1599
+
1600
+#define RKL_REVID_A0 0x0
1601
+#define RKL_REVID_B0 0x1
1602
+#define RKL_REVID_C0 0x4
1603
+
1604
+#define IS_RKL_REVID(p, since, until) \
1605
+ (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
1606
+
1607
+#define DG1_REVID_A0 0x0
1608
+#define DG1_REVID_B0 0x1
1609
+
1610
+#define IS_DG1_REVID(p, since, until) \
1611
+ (IS_DG1(p) && IS_REVID(p, since, until))
24991612
25001613 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
2501
-#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
2502
-#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
1614
+#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
1615
+#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
1616
+
1617
+#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
1618
+#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
1619
+
1620
+#define ENGINE_INSTANCES_MASK(gt, first, count) ({ \
1621
+ unsigned int first__ = (first); \
1622
+ unsigned int count__ = (count); \
1623
+ ((gt)->info.engine_mask & \
1624
+ GENMASK(first__ + count__ - 1, first__)) >> first__; \
1625
+})
1626
+#define VDBOX_MASK(gt) \
1627
+ ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
1628
+#define VEBOX_MASK(gt) \
1629
+ ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
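/*
 * A minimal usage sketch (hypothetical): VDBOX_MASK() yields a
 * zero-based bitmask of the populated video engines, e.g. 0b0101 for
 * a part with only VCS0 and VCS2, so counting them is a popcount:
 */
static inline unsigned int example_num_vdboxes(struct intel_gt *gt)
{
	return hweight8(VDBOX_MASK(gt));	/* count the set VCS bits */
}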
25031630
25041631 /*
25061633 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
25071634 * All later gens can run the final buffer from the ppgtt.
25071634 */
2508
-#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv)
1635
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
25091636
2510
-#define ENGINE_MASK(id) BIT(id)
2511
-#define RENDER_RING ENGINE_MASK(RCS)
2512
-#define BSD_RING ENGINE_MASK(VCS)
2513
-#define BLT_RING ENGINE_MASK(BCS)
2514
-#define VEBOX_RING ENGINE_MASK(VECS)
2515
-#define BSD2_RING ENGINE_MASK(VCS2)
2516
-#define BSD3_RING ENGINE_MASK(VCS3)
2517
-#define BSD4_RING ENGINE_MASK(VCS4)
2518
-#define VEBOX2_RING ENGINE_MASK(VECS2)
2519
-#define ALL_ENGINES (~0)
2520
-
2521
-#define HAS_ENGINE(dev_priv, id) \
2522
- (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
2523
-
2524
-#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2525
-#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2526
-#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2527
-#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2528
-
2529
-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
2530
-
1637
+#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
1638
+#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
1639
+#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
25311640 #define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
2532
-
2533
-#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
2534
-#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
2535
-#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
25361641 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
25371642 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
25381643
2539
-#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
1644
+#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
25401645
25411646 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2542
- ((dev_priv)->info.has_logical_ring_contexts)
1647
+ (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
25431648 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
2544
- ((dev_priv)->info.has_logical_ring_elsq)
1649
+ (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
25451650 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
2546
- ((dev_priv)->info.has_logical_ring_preemption)
1651
+ (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
1652
+
1653
+#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
25471654
25481655 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
25491656
2550
-#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
2551
-#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
2552
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
1657
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
1658
+#define HAS_PPGTT(dev_priv) \
1659
+ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
1660
+#define HAS_FULL_PPGTT(dev_priv) \
1661
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
1662
+
25531663 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
25541664 GEM_BUG_ON((sizes) == 0); \
2555
- ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
1665
+ ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
25561666 })
25571667
2558
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
1668
+#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
25591669 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
2560
- ((dev_priv)->info.overlay_needs_physical)
1670
+ (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
25611671
25621672 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
25631673 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
25641674
25651675 #define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
2566
- (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) == 9)
1676
+ (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
25671677
25681678 /* WaRsDisableCoarsePowerGating:skl,cnl */
2569
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2570
- (IS_CANNONLAKE(dev_priv) || INTEL_GEN(dev_priv) == 9)
1679
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
1680
+ (IS_CANNONLAKE(dev_priv) || \
1681
+ IS_SKL_GT3(dev_priv) || \
1682
+ IS_SKL_GT4(dev_priv))
25711683
25721684 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
25731685 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
....@@ -2577,109 +1689,70 @@
25771689 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
25781690 * rows, which changed the alignment requirements and fence programming.
25791691 */
2580
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
1692
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
25811693 !(IS_I915G(dev_priv) || \
25821694 IS_I915GM(dev_priv)))
2583
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
2584
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
1695
+#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
1696
+#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
25851697
25861698 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
2587
-#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
2588
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
1699
+#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
1700
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
25891701
25901702 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
25911703
2592
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
1704
+#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
25931705
2594
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
2595
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
2596
-#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
1706
+#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
1707
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
1708
+#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
1709
+#define HAS_PSR_HW_TRACKING(dev_priv) \
1710
+ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
1711
+#define HAS_PSR2_SEL_FETCH(dev_priv) (INTEL_GEN(dev_priv) >= 12)
1712
+#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
25971713
2598
-#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
2599
-#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
1714
+#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
1715
+#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
26001716 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
26011717
2602
-#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
1718
+#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
26031719
2604
-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
2605
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
1720
+#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
26061721
2607
-#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
1722
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
1723
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
26081724
2609
-/*
2610
- * For now, anything with a GuC requires uCode loading, and then supports
2611
- * command submission once loaded. But these are logically independent
2612
- * properties, so we have separate macros to test them.
2613
- */
2614
-#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
2615
-#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
2616
-#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
2617
-#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
1725
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
26181726
2619
-/* For now, anything with a GuC has also HuC */
2620
-#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
2621
-#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
1727
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
1728
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
26221729
2623
-/* Having a GuC is not the same as using a GuC */
2624
-#define USES_GUC(dev_priv) intel_uc_is_using_guc()
2625
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
2626
-#define USES_HUC(dev_priv) intel_uc_is_using_huc()
1730
+#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
26271731
2628
-#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
1732
+#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
26291733
2630
-#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
1734
+#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
26311735
2632
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
2633
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2634
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
2635
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
2636
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
2637
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
2638
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
2639
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
2640
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2641
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
2642
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
2643
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
2644
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
2645
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
2646
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
2647
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
2648
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
26491736
2650
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2651
-#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
2652
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
2653
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
2654
-#define HAS_PCH_CNP_LP(dev_priv) \
2655
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
2656
-#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2657
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2658
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
2659
-#define HAS_PCH_LPT_LP(dev_priv) \
2660
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
2661
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
2662
-#define HAS_PCH_LPT_H(dev_priv) \
2663
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
2664
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
2665
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2666
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2667
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2668
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
2669
-
2670
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
1737
+#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
26711738
26721739 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
26731740
26741741 /* DPF == dynamic parity feature */
2675
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
1742
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
26761743 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
26771744 2 : HAS_L3_DPF(dev_priv))
26781745
26791746 #define GT_FREQUENCY_MULTIPLIER 50
26801747 #define GEN9_FREQ_SCALER 3
26811748
2682
-#include "i915_trace.h"
1749
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
1750
+
1751
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
1752
+
1753
+/* Only valid when HAS_DISPLAY() is true */
1754
+#define INTEL_DISPLAY_ENABLED(dev_priv) \
1755
+ (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
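/*
 * Note (illustrative): the comma expression above makes the macro
 * warn when queried on a display-less part and then still evaluate
 * to the modparam, roughly equivalent to:
 *
 *	({ drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
 *	   !i915->params.disable_display; })
 */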
26831756
26841757 static inline bool intel_vtd_active(void)
26851758 {
....@@ -2703,235 +1776,41 @@
27031776 return IS_BROXTON(dev_priv) && intel_vtd_active();
27041777 }
27051778
2706
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2707
- int enable_ppgtt);
2708
-
27091779 /* i915_drv.c */
2710
-void __printf(3, 4)
2711
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
2712
- const char *fmt, ...);
2713
-
2714
-#define i915_report_error(dev_priv, fmt, ...) \
2715
- __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2716
-
2717
-#ifdef CONFIG_COMPAT
2718
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2719
- unsigned long arg);
2720
-#else
2721
-#define i915_compat_ioctl NULL
2722
-#endif
27231780 extern const struct dev_pm_ops i915_pm_ops;
27241781
2725
-extern int i915_driver_load(struct pci_dev *pdev,
2726
- const struct pci_device_id *ent);
2727
-extern void i915_driver_unload(struct drm_device *dev);
2728
-extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2729
-extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
1782
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
1783
+void i915_driver_remove(struct drm_i915_private *i915);
27301784
2731
-extern void i915_reset(struct drm_i915_private *i915,
2732
- unsigned int stalled_mask,
2733
- const char *reason);
2734
-extern int i915_reset_engine(struct intel_engine_cs *engine,
2735
- const char *reason);
1785
+int i915_resume_switcheroo(struct drm_i915_private *i915);
1786
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
27361787
2737
-extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
2738
-extern int intel_reset_guc(struct drm_i915_private *dev_priv);
2739
-extern int intel_guc_reset_engine(struct intel_guc *guc,
2740
- struct intel_engine_cs *engine);
2741
-extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2742
-extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
2743
-extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2744
-extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2745
-extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2746
-extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2747
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2748
-
2749
-int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
2750
-int intel_engines_init(struct drm_i915_private *dev_priv);
2751
-
2752
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
2753
-
2754
-/* intel_hotplug.c */
2755
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2756
- u32 pin_mask, u32 long_mask);
2757
-void intel_hpd_init(struct drm_i915_private *dev_priv);
2758
-void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2759
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2760
-enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
2761
- enum port port);
2762
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2763
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2764
-
2765
-/* i915_irq.c */
2766
-static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2767
-{
2768
- unsigned long delay;
2769
-
2770
- if (unlikely(!i915_modparams.enable_hangcheck))
2771
- return;
2772
-
2773
- /* Don't continually defer the hangcheck so that it is always run at
2774
- * least once after work has been scheduled on any ring. Otherwise,
2775
- * we will ignore a hung ring if a second ring is kept busy.
2776
- */
2777
-
2778
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2779
- queue_delayed_work(system_long_wq,
2780
- &dev_priv->gpu_error.hangcheck_work, delay);
2781
-}
2782
-
2783
-__printf(4, 5)
2784
-void i915_handle_error(struct drm_i915_private *dev_priv,
2785
- u32 engine_mask,
2786
- unsigned long flags,
2787
- const char *fmt, ...);
2788
-#define I915_ERROR_CAPTURE BIT(0)
2789
-
2790
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
2791
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
2792
-int intel_irq_install(struct drm_i915_private *dev_priv);
2793
-void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2794
-
2795
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2796
-{
2797
- return dev_priv->gvt;
2798
-}
2799
-
2800
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2801
-{
2802
- return dev_priv->vgpu.active;
2803
-}
2804
-
2805
-u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
2806
- enum pipe pipe);
2807
-void
2808
-i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2809
- u32 status_mask);
2810
-
2811
-void
2812
-i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2813
- u32 status_mask);
2814
-
2815
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
2816
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
2817
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
2818
- uint32_t mask,
2819
- uint32_t bits);
2820
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
2821
- uint32_t interrupt_mask,
2822
- uint32_t enabled_irq_mask);
2823
-static inline void
2824
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
2825
-{
2826
- ilk_update_display_irq(dev_priv, bits, bits);
2827
-}
2828
-static inline void
2829
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
2830
-{
2831
- ilk_update_display_irq(dev_priv, bits, 0);
2832
-}
2833
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
2834
- enum pipe pipe,
2835
- uint32_t interrupt_mask,
2836
- uint32_t enabled_irq_mask);
2837
-static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
2838
- enum pipe pipe, uint32_t bits)
2839
-{
2840
- bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
2841
-}
2842
-static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
2843
- enum pipe pipe, uint32_t bits)
2844
-{
2845
- bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
2846
-}
2847
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
2848
- uint32_t interrupt_mask,
2849
- uint32_t enabled_irq_mask);
2850
-static inline void
2851
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
2852
-{
2853
- ibx_display_interrupt_update(dev_priv, bits, bits);
2854
-}
2855
-static inline void
2856
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
2857
-{
2858
- ibx_display_interrupt_update(dev_priv, bits, 0);
2859
-}
1788
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
1789
+ struct drm_file *file_priv);
28601790
28611791 /* i915_gem.c */
2862
-int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2863
- struct drm_file *file_priv);
2864
-int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
2865
- struct drm_file *file_priv);
2866
-int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2867
- struct drm_file *file_priv);
2868
-int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2869
- struct drm_file *file_priv);
2870
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2871
- struct drm_file *file_priv);
2872
-int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2873
- struct drm_file *file_priv);
2874
-int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
2875
- struct drm_file *file_priv);
2876
-int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
2877
- struct drm_file *file_priv);
2878
-int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
2879
- struct drm_file *file_priv);
2880
-int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2881
- struct drm_file *file_priv);
2882
-int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2883
- struct drm_file *file);
2884
-int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2885
- struct drm_file *file);
2886
-int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2887
- struct drm_file *file_priv);
2888
-int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2889
- struct drm_file *file_priv);
2890
-int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
2891
- struct drm_file *file_priv);
2892
-int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
2893
- struct drm_file *file_priv);
28941792 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
28951793 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
2896
-int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2897
- struct drm_file *file);
2898
-int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2899
- struct drm_file *file_priv);
2900
-int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2901
- struct drm_file *file_priv);
2902
-void i915_gem_sanitize(struct drm_i915_private *i915);
2903
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
1794
+void i915_gem_init_early(struct drm_i915_private *dev_priv);
29041795 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
2905
-void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
29061796 int i915_gem_freeze(struct drm_i915_private *dev_priv);
29071797 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
29081798
2909
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
2910
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
2911
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
2912
- const struct drm_i915_gem_object_ops *ops);
2913
-struct drm_i915_gem_object *
2914
-i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
2915
-struct drm_i915_gem_object *
2916
-i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
2917
- const void *data, size_t size);
2918
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
2919
-void i915_gem_free_object(struct drm_gem_object *obj);
1799
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
29201800
29211801 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
29221802 {
2923
- if (!atomic_read(&i915->mm.free_count))
2924
- return;
2925
-
2926
- /* A single pass should suffice to release all the freed objects (along
1803
+ /*
1804
+ * A single pass should suffice to release all the freed objects (along
29271805 * most call paths), but be a little more paranoid in that freeing
29281806 * the objects does take a small amount of time, during which the rcu
29291807 * callbacks could have added new objects into the freed list, and
29301808 * armed the work again.
29311809 */
2932
- do {
1810
+ while (atomic_read(&i915->mm.free_count)) {
1811
+ flush_work(&i915->mm.free_work);
29331812 rcu_barrier();
2934
- } while (flush_work(&i915->mm.free_work));
1813
+ }
29351814 }
29361815
29371816 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
....@@ -2944,270 +1823,67 @@
29441823 * grace period so that we catch work queued via RCU from the first
29451824 * pass. As neither drain_workqueue() nor flush_workqueue() report
29461825 * a result, we make the assumption that we don't require more
2947
- * than 2 passes to catch all recursive RCU delayed work.
1826
+ * than 3 passes to catch all _recursive_ RCU delayed work.
29481827 *
29491828 */
2950
- int pass = 2;
1829
+ int pass = 3;
29511830 do {
1831
+ flush_workqueue(i915->wq);
29521832 rcu_barrier();
2953
- drain_workqueue(i915->wq);
1833
+ i915_gem_drain_freed_objects(i915);
29541834 } while (--pass);
1835
+ drain_workqueue(i915->wq);
29551836 }
29561837
29571838 struct i915_vma * __must_check
1839
+i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1840
+ struct i915_gem_ww_ctx *ww,
1841
+ const struct i915_ggtt_view *view,
1842
+ u64 size, u64 alignment, u64 flags);
1843
+
1844
+static inline struct i915_vma * __must_check
29581845 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
29591846 const struct i915_ggtt_view *view,
2960
- u64 size,
2961
- u64 alignment,
2962
- u64 flags);
1847
+ u64 size, u64 alignment, u64 flags)
1848
+{
1849
+ return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
1850
+}
29631851
2964
-struct i915_vma * __must_check
2965
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
2966
- struct i915_address_space *vm,
2967
- const struct i915_ggtt_view *view,
2968
- u64 size,
2969
- u64 alignment,
2970
- u64 flags);
2971
-
2972
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
2973
-void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1852
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
1853
+ unsigned long flags);
1854
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
1855
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
1856
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
29741857
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

-static inline int __sg_page_count(const struct scatterlist *sg)
-{
-	return sg->length >> PAGE_SHIFT;
-}
-
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-		       unsigned int n, unsigned int *offset);
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
-			 unsigned int n);
-
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-			       unsigned int n);
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-				unsigned long n);
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages,
-				 unsigned int sg_page_sizes);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
-	might_lock(&obj->mm.lock);
-
-	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
-		return 0;
-
-	return __i915_gem_object_get_pages(obj);
-}
-
-static inline bool
-i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
-{
-	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
-}
-
-static inline void
-__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
-	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
-	atomic_inc(&obj->mm.pages_pin_count);
-}
-
-static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
-{
-	return atomic_read(&obj->mm.pages_pin_count);
-}
-
-static inline void
-__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
-	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
-	atomic_dec(&obj->mm.pages_pin_count);
-}
-
-static inline void
-i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
-	__i915_gem_object_unpin_pages(obj);
-}
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
-	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER
-};
-
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass);
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
-
-enum i915_map_type {
-	I915_MAP_WB = 0,
-	I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
-	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
-	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
-/**
- * i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj: the object to map into kernel address space
- * @type: the type of mapping, used to select pgprot_t
- *
- * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
- * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space. Based on the @type of mapping, the PTE will be
- * set to either WriteBack or WriteCombine (via pgprot_t).
- *
- * The caller is responsible for calling i915_gem_object_unpin_map() when the
- * mapping is no longer required.
- *
- * Returns the pointer through which to access the mapped object, or an
- * ERR_PTR() on error.
- */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
-					   enum i915_map_type type);
-
-/**
- * i915_gem_object_unpin_map - releases an earlier mapping
- * @obj: the object to unmap
- *
- * After pinning the object and mapping its pages, once you are finished
- * with your access, call i915_gem_object_unpin_map() to release the pin
- * upon the mapping. Once the pin count reaches zero, that mapping may be
- * removed.
- */
-static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
-
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
-				    unsigned int *needs_clflush);
-int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
-				     unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE	BIT(0)
-#define CLFLUSH_AFTER	BIT(1)
-#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
-
-static inline void
-i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
-{
-	i915_gem_object_unpin_pages(obj);
-}
-
-int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-		      uint32_t handle, uint64_t *offset);
-int i915_gem_mmap_gtt_version(void);
-
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-		       struct drm_i915_gem_object *new,
-		       unsigned frontbuffer_bits);

 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
-
-static inline bool i915_reset_backoff(struct i915_gpu_error *error)
-{
-	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
-}
-
-static inline bool i915_reset_handoff(struct i915_gpu_error *error)
-{
-	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
-}
-
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
-{
-	return unlikely(test_bit(I915_WEDGED, &error->flags));
-}
-
-static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
-{
-	return i915_reset_backoff(error) | i915_terminally_wedged(error);
-}
-
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-	return READ_ONCE(error->reset_count);
+	return atomic_read(&error->reset_count);
 }

 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
-					  struct intel_engine_cs *engine)
+					  const struct intel_engine_cs *engine)
 {
-	return READ_ONCE(error->reset_engine_count[engine->id]);
+	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
 }

-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv,
-		    unsigned int stalled_mask);
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
-			   struct i915_request *request,
-			   bool stalled);
-
-void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_fini(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   unsigned int flags, long timeout);
-int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
+void i915_gem_driver_release(struct drm_i915_private *dev_priv);
+void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
-int i915_gem_object_wait(struct drm_i915_gem_object *obj,
-			 unsigned int flags,
-			 long timeout,
-			 struct intel_rps_client *rps);
-int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
-				  unsigned int flags,
-				  const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX

-int __must_check
-i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-struct i915_vma * __must_check
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
-				     u32 alignment,
-				     const struct i915_ggtt_view *view,
-				     unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
-				int align);
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-void i915_gem_release(struct drm_device *dev, struct drm_file *file);

 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
@@ -3215,33 +1891,12 @@
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 					     struct dma_buf *dma_buf);

-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-				      struct drm_gem_object *gem_obj, int flags);
-
-static inline struct i915_hw_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
-	return container_of(vm, struct i915_hw_ppgtt, vm);
-}
-
-/* i915_gem_fence_reg.c */
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv);
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-
-void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
-				       struct sg_table *pages);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
-					 struct sg_table *pages);
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

 static inline struct i915_gem_context *
 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
 {
-	return idr_find(&file_priv->context_idr, id);
+	return xa_load(&file_priv->context_xa, id);
 }

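Because the per-file contexts now live in an xarray, xa_load() needs only rcu_read_lock() on the lookup side. A sketch of the lookup-and-reference pattern the surrounding (elided) lookup helper presumably follows, assuming the context embeds a kref named ref:

struct i915_gem_context *ctx;

rcu_read_lock();
ctx = __i915_gem_context_lookup_rcu(file_priv, id);
if (ctx && !kref_get_unless_zero(&ctx->ref))
	ctx = NULL;	/* raced with destruction: treat as not found */
rcu_read_unlock();
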
 static inline struct i915_gem_context *
@@ -3258,20 +1913,10 @@
 	return ctx;
 }

-int i915_perf_open_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file);
-int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
-			       struct drm_file *file);
-int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
-			    struct i915_gem_context *ctx,
-			    uint32_t *reg_state);
-
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  u64 min_size, u64 alignment,
-					  unsigned cache_level,
+					  unsigned long color,
 					  u64 start, u64 end,
 					  unsigned flags);
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
@@ -3279,63 +1924,17 @@
 					 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm);

-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
-
-/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
-{
-	wmb();
-	if (INTEL_GEN(dev_priv) < 6)
-		intel_gtt_chipset_flush();
-}
-
-/* i915_gem_stolen.c */
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
-				struct drm_mm_node *node, u64 size,
-				unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
-					 struct drm_mm_node *node, u64 size,
-					 unsigned alignment, u64 start,
-					 u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
-				 struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_device *dev);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
-			      resource_size_t size);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
-					       resource_size_t stolen_offset,
-					       resource_size_t gtt_offset,
-					       resource_size_t size);
-
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
 				phys_addr_t size);

-/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
-			      unsigned long target,
-			      unsigned long *nr_scanned,
-			      unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
-#define I915_SHRINK_ACTIVE 0x8
-#define I915_SHRINK_VMAPS 0x10
-unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
-void i915_gem_shrinker_register(struct drm_i915_private *i915);
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
-
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);

-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+	return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 	       i915_gem_object_is_tiled(obj);
 }

@@ -3344,288 +1943,37 @@
 u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
 			     unsigned int tiling, unsigned int stride);

-/* i915_debugfs.c */
-#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
-#else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
-static inline int i915_debugfs_connector_add(struct drm_connector *connector)
-{ return 0; }
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
-#endif
-
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
-void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct i915_gem_context *cxt,
-			    struct intel_engine_cs *engine,
-			    struct drm_i915_gem_object *batch_obj,
-			    u64 user_batch_start,
-			    u32 batch_start_offset,
-			    u32 batch_len,
-			    struct drm_i915_gem_object *shadow_batch_obj,
-			    u64 shadow_batch_start);
-
-/* i915_perf.c */
-extern void i915_perf_init(struct drm_i915_private *dev_priv);
-extern void i915_perf_fini(struct drm_i915_private *dev_priv);
-extern void i915_perf_register(struct drm_i915_private *dev_priv);
-extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
-
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_i915_private *dev_priv);
-extern int i915_restore_state(struct drm_i915_private *dev_priv);
-
-/* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_i915_private *dev_priv);
-void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
-
-/* intel_lpe_audio.c */
-int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
-void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
-			    enum pipe pipe, enum port port,
-			    const void *eld, int ls_clock, bool dp_output);
-
-/* intel_i2c.c */
-extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
-extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
-extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
-				     unsigned int pin);
-extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
-
-extern struct i2c_adapter *
-intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
-extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
-extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
-static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
-{
-	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
-}
-extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
-
-/* intel_bios.c */
-void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(const void *buf, size_t size);
-bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
-bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
-bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
-bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
-				     enum port port);
-bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
-				  enum port port);
-
-/* intel_acpi.c */
-#ifdef CONFIG_ACPI
-extern void intel_register_dsm_handler(void);
-extern void intel_unregister_dsm_handler(void);
-#else
-static inline void intel_register_dsm_handler(void) { return; }
-static inline void intel_unregister_dsm_handler(void) { return; }
-#endif /* CONFIG_ACPI */
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+			    struct i915_vma *batch,
+			    unsigned long batch_offset,
+			    unsigned long batch_length,
+			    struct i915_vma *shadow,
+			    bool trampoline);
+#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8

 /* intel_device_info.c */
 static inline struct intel_device_info *
 mkwrite_device_info(struct drm_i915_private *dev_priv)
 {
-	return (struct intel_device_info *)&dev_priv->info;
+	return (struct intel_device_info *)INTEL_INFO(dev_priv);
 }
-
-/* modesetting */
-extern void intel_modeset_init_hw(struct drm_device *dev);
-extern int intel_modeset_init(struct drm_device *dev);
-extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_connector_register(struct drm_connector *);
-extern void intel_connector_unregister(struct drm_connector *);
-extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
-				       bool state);
-extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
-extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
-				       bool interactive);
-extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
-				  bool enable);

 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);

-/* overlay */
-extern struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
-					    struct intel_overlay_error_state *error);
+#define __I915_REG_OP(op__, dev_priv__, ...) \
+	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

-extern struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
-					    struct intel_display_error_state *error);
+#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
+#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))

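__I915_REG_OP() is a token-pasting shim onto the intel_uncore_*() accessors, so the legacy macro names survive while the implementation moves behind struct intel_uncore. An illustrative expansion (GEN6_RPSTAT1 stands in for any register; a local dev_priv is still required, as before):

u32 val;

val = I915_READ(GEN6_RPSTAT1);
/* expands to: intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1) */

I915_WRITE(GEN6_RPSTAT1, val);
/* expands to: intel_uncore_write(&dev_priv->uncore, GEN6_RPSTAT1, val) */
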
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
-				    u32 val, int fast_timeout_us,
-				    int slow_timeout_ms);
-#define sandybridge_pcode_write(dev_priv, mbox, val)	\
-	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-		      u32 reply_mask, u32 reply, int timeout_base_ms);
-
-/* intel_sideband.c */
-u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
-int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
-u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
-void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
-u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
-u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
-		   enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
-		     enum intel_sbi_destination destination);
-u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-
-/* intel_dpio_phy.c */
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
-			     enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
-				  enum port port, u32 margin, u32 scale,
-				  u32 enable, u32 deemphasis);
-void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
-			    enum dpio_phy phy);
-bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
-			      enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
-void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
-				     uint8_t lane_lat_optim_mask);
-uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
-
-void chv_set_phy_signal_level(struct intel_encoder *encoder,
-			      u32 deemph_reg_value, u32 margin_reg_value,
-			      bool uniq_trans_scale);
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
-			      const struct intel_crtc_state *crtc_state,
-			      bool reset);
-void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
-			    const struct intel_crtc_state *crtc_state);
-void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
-				const struct intel_crtc_state *crtc_state);
-void chv_phy_release_cl2_override(struct intel_encoder *encoder);
-void chv_phy_post_pll_disable(struct intel_encoder *encoder,
-			      const struct intel_crtc_state *old_crtc_state);
-
-void vlv_set_phy_signal_level(struct intel_encoder *encoder,
-			      u32 demph_reg_value, u32 preemph_reg_value,
-			      u32 uniqtranscale_reg_value, u32 tx3_demph);
-void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
-			    const struct intel_crtc_state *crtc_state);
-void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
-				const struct intel_crtc_state *crtc_state);
-void vlv_phy_reset_lanes(struct intel_encoder *encoder,
-			 const struct intel_crtc_state *old_crtc_state);
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
-			   const i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
-					 const i915_reg_t reg)
-{
-	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
-#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
-#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
-
-/* Be very careful with read/write 64-bit values. On 32-bit machines, they
- * will be implemented using 2 32-bit writes in an arbitrary order with
- * an arbitrary delay between them. This can cause the hardware to
- * act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * dev_priv->uncore.funcs.mmio_writeq.
- *
- * When reading a 64-bit value as two 32-bit values, the delay may cause
- * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
- * occasionally a 64-bit register does not actually support a full readq
- * and must be read using two 32-bit reads.
- *
- * You have been warned.
- */
-#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
-
-#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
-	u32 upper, lower, old_upper, loop = 0;				\
-	upper = I915_READ(upper_reg);					\
-	do {								\
-		old_upper = upper;					\
-		lower = I915_READ(lower_reg);				\
-		upper = I915_READ(upper_reg);				\
-	} while (upper != old_upper && loop++ < 2);			\
-	(u64)upper << 32 | lower; })
-
-#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
-#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
-
-#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
-					     i915_reg_t reg) \
-{ \
-	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-
-#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
-				       i915_reg_t reg, uint##x##_t val) \
-{ \
-	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-__raw_read(8, b)
-__raw_read(16, w)
-__raw_read(32, l)
-__raw_read(64, q)
-
-__raw_write(8, b)
-__raw_write(16, w)
-__raw_write(32, l)
-__raw_write(64, q)
-
-#undef __raw_read
-#undef __raw_write
+#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))

 /* These are untraced mmio-accessors that are only valid to be used inside
  * critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3653,177 +2001,16 @@
  * therefore generally be serialised, by either the dev_priv->uncore.lock or
  * a more localised lock guarding all access to that bank of registers.
  */
-#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
-#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
-
-/* "Broadcast RGB" property */
-#define INTEL_BROADCAST_RGB_AUTO 0
-#define INTEL_BROADCAST_RGB_FULL 1
-#define INTEL_BROADCAST_RGB_LIMITED 2
-
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return VLV_VGACNTRL;
-	else if (INTEL_GEN(dev_priv) >= 5)
-		return CPU_VGACNTRL;
-	else
-		return VGACNTRL;
-}
-
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
-	unsigned long j = msecs_to_jiffies(m);
-
-	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
-{
-	/* nsecs_to_jiffies64() does not guard against overflow */
-	if (NSEC_PER_SEC % HZ &&
-	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
-		return MAX_JIFFY_OFFSET;
-
-	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
-	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
-	/*
-	 * Don't re-read the value of "jiffies" every time since it may change
-	 * behind our back and break the math.
-	 */
-	tmp_jiffies = jiffies;
-	target_jiffies = timestamp_jiffies +
-			 msecs_to_jiffies_timeout(to_wait_ms);
-
-	if (time_after(target_jiffies, tmp_jiffies)) {
-		remaining_jiffies = target_jiffies - tmp_jiffies;
-		while (remaining_jiffies)
-			remaining_jiffies =
-			    schedule_timeout_uninterruptible(remaining_jiffies);
-	}
-}
-
-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
-	struct intel_engine_cs *engine = rq->engine;
-	u32 seqno;
-
-	/* Note that the engine may have wrapped around the seqno, and
-	 * so our request->global_seqno will be ahead of the hardware,
-	 * even though it completed the request before wrapping. We catch
-	 * this by kicking all the waiters before resetting the seqno
-	 * in hardware, and also signal the fence.
-	 */
-	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
-		return true;
-
-	/* The request was dequeued before we were awoken. We check after
-	 * inspecting the hw to confirm that this was the same request
-	 * that generated the HWS update. The memory barriers within
-	 * the request execution are sufficient to ensure that a check
-	 * after reading the value from hw matches this request.
-	 */
-	seqno = i915_request_global_seqno(rq);
-	if (!seqno)
-		return false;
-
-	/* Before we do the heavier coherent read of the seqno,
-	 * check the value (hopefully) in the CPU cacheline.
-	 */
-	if (__i915_request_completed(rq, seqno))
-		return true;
-
-	/* Ensure our read of the seqno is coherent so that we
-	 * do not "miss an interrupt" (i.e. if this is the last
-	 * request and the seqno write from the GPU is not visible
-	 * by the time the interrupt fires, we will see that the
-	 * request is incomplete and go back to sleep awaiting
-	 * another interrupt that will never come.)
-	 *
-	 * Strictly, we only need to do this once after an interrupt,
-	 * but it is easier and safer to do it every time the waiter
-	 * is woken.
-	 */
-	if (engine->irq_seqno_barrier &&
-	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
-		struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
-		/* The ordering of irq_posted versus applying the barrier
-		 * is crucial. The clearing of the current irq_posted must
-		 * be visible before we perform the barrier operation,
-		 * such that if a subsequent interrupt arrives, irq_posted
-		 * is reasserted and our task rewoken (which causes us to
-		 * do another __i915_request_irq_complete() immediately
-		 * and reapply the barrier). Conversely, if the clear
-		 * occurs after the barrier, then an interrupt that arrived
-		 * whilst we waited on the barrier would not trigger a
-		 * barrier on the next pass, and the read may not see the
-		 * seqno update.
-		 */
-		engine->irq_seqno_barrier(engine);
-
-		/* If we consume the irq, but we are no longer the bottom-half,
-		 * the real bottom-half may not have serialised their own
-		 * seqno check with the irq-barrier (i.e. may have inspected
-		 * the seqno before we believe it coherent since they see
-		 * irq_posted == false but we are still running).
-		 */
-		spin_lock_irq(&b->irq_lock);
-		if (b->irq_wait && b->irq_wait->tsk != current)
-			/* Note that if the bottom-half is changed as we
-			 * are sending the wake-up, the new bottom-half will
-			 * be woken by whomever made the change. We only have
-			 * to worry about when we steal the irq-posted for
-			 * ourself.
-			 */
-			wake_up_process(b->irq_wait->tsk);
-		spin_unlock_irq(&b->irq_lock);
-
-		if (__i915_request_completed(rq, seqno))
-			return true;
-	}
-
-	return false;
-}
-
-void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
-bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
-
-/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
- * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
- * perform the operation. To check beforehand, pass in the parameters to
- * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
- * you only need to pass in the minor offsets, page-aligned pointers are
- * always valid.
- *
- * For just checking for SSE4.1, in the foreknowledge that the future use
- * will be correctly aligned, just use i915_has_memcpy_from_wc().
- */
-#define i915_can_memcpy_from_wc(dst, src, len) \
-	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
-
-#define i915_has_memcpy_from_wc() \
-	i915_memcpy_from_wc(NULL, NULL, 0)
+#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))

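A sketch of the serialisation the comment above demands around the _FW accessors, assuming an explicit forcewake grab under the uncore lock (the register and wake domain are illustrative):

u32 val;

spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);

val = I915_READ_FW(reg);
I915_WRITE_FW(reg, val | BIT(0));

intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
spin_unlock_irq(&dev_priv->uncore.lock);
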
 /* i915_mm.c */
 int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
 		     struct io_mapping *iomap);
+int remap_io_sg(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long size,
+		struct scatterlist *sgl, resource_size_t iobase);

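remap_io_sg() complements remap_io_mapping() for objects whose backing store is described by a scatterlist rather than one contiguous io_mapping. A hedged sketch of a fault handler using it (area, obj and iobase are assumed locals, not from this diff; iobase is the device base for I/O memory):

/* Insert PTEs covering the whole VMA from the object's sg list. */
err = remap_io_sg(area, area->vm_start,
		  area->vm_end - area->vm_start,
		  obj->mm.pages->sgl, iobase);
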
 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 {
@@ -3833,4 +2020,22 @@
 	return I915_HWS_CSB_WRITE_INDEX;
 }

+static inline enum i915_map_type
+i915_coherent_map_type(struct drm_i915_private *i915)
+{
+	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+}
+
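i915_coherent_map_type() picks write-back where the LLC keeps CPU mappings coherent and write-combining everywhere else; a sketch of pairing it with the object mapping API (i915_gem_object_pin_map() now lives in the gem object headers):

void *vaddr;

vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
if (IS_ERR(vaddr))
	return PTR_ERR(vaddr);
/* ... CPU access through vaddr ... */
i915_gem_object_unpin_map(obj);
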
+static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
+{
+	return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
+				1000000000);
+}
+
+static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
+{
+	return div_u64(val * 1000000000,
+		       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
+}
+
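A worked round trip through the two converters, assuming a hypothetical 12 MHz command-streamer timestamp clock:

/*
 * ns -> ticks rounds up:  DIV_ROUND_UP_ULL(1000 * 12000000, 1000000000) = 12
 * ticks -> ns truncates:  (12 * 1000000000) / 12000000 = 1000
 * so 1000 ns converts to 12 ticks and back to exactly 1000 ns on this clock.
 */
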
38362041 #endif