forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,11 +26,28 @@
  *
  */
 
-#include <linux/debugfs.h>
-#include <linux/sort.h>
 #include <linux/sched/mm.h>
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
+
+#include <linux/sort.h>
+
+#include <drm/drm_debugfs.h>
+
+#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_buffer_pool.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
+#include "gt/intel_reset.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+#include "gt/intel_sseu_debugfs.h"
+
+#include "i915_debugfs.h"
+#include "i915_debugfs_params.h"
+#include "i915_irq.h"
+#include "i915_trace.h"
+#include "intel_pm.h"
+#include "intel_sideband.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 {
@@ -39,33 +56,21 @@
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
-	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
-	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
 
-	intel_device_info_dump_flags(info, &p);
-	intel_device_info_dump_runtime(info, &p);
-	intel_driver_caps_print(&dev_priv->caps, &p);
+	intel_device_info_print_static(INTEL_INFO(i915), &p);
+	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+	intel_gt_info_print(&i915->gt.info, &p);
+	intel_driver_caps_print(&i915->caps, &p);
 
 	kernel_param_lock(THIS_MODULE);
-	i915_params_dump(&i915_modparams, &p);
+	i915_params_dump(&i915->params, &p);
 	kernel_param_unlock(THIS_MODULE);
 
 	return 0;
-}
-
-static char get_active_flag(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_object_is_active(obj) ? '*' : ' ';
-}
-
-static char get_pin_flag(struct drm_i915_gem_object *obj)
-{
-	return obj->pin_global ? 'p' : ' ';
 }
 
 static char get_tiling_flag(struct drm_i915_gem_object *obj)
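
A note on the i915_capabilities() rewrite above: the seq_file is wrapped in a struct drm_printer, so the same intel_device_info_print_*() helpers can target debugfs, dmesg or an error-state dump unchanged. A minimal sketch of that bridging pattern, assuming only the upstream <drm/drm_print.h> API (the show function and its output value are hypothetical):

#include <drm/drm_print.h>
#include <linux/seq_file.h>

static int example_caps_show(struct seq_file *m, void *unused)
{
	/* adapt the seq_file into the drm_printf() interface */
	struct drm_printer p = drm_seq_file_printer(m);

	/* any helper written against drm_printf() now prints into m */
	drm_printf(&p, "pch: %d\n", 0);	/* placeholder value */

	return 0;
}
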
@@ -80,25 +85,12 @@
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->userfault_count ? 'g' : ' ';
+	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 {
 	return obj->mm.mapping ? 'M' : ' ';
-}
-
-static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
-{
-	u64 size = 0;
-	struct i915_vma *vma;
-
-	for_each_ggtt_vma(vma, obj) {
-		if (drm_mm_node_allocated(&vma->node))
-			size += vma->node.size;
-	}
-
-	return size;
 }
 
 static const char *
@@ -131,21 +123,16 @@
 	}
 }
 
-static void
-describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+void
+i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
-	unsigned int frontbuffer_bits;
 	int pin_count = 0;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
+	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
 		   &obj->base,
-		   get_active_flag(obj),
-		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   get_global_flag(obj),
 		   get_pin_mapped_flag(obj),
@@ -157,16 +144,16 @@
 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (i915_vma_is_pinned(vma))
-			pin_count++;
-	}
-	seq_printf(m, " (pinned x %d)", pin_count);
-	if (obj->pin_global)
-		seq_printf(m, " (global)");
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+
+	spin_lock(&obj->vma.lock);
+	list_for_each_entry(vma, &obj->vma.list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
+
+		spin_unlock(&obj->vma.lock);
+
+		if (i915_vma_is_pinned(vma))
+			pin_count++;
 
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
@@ -196,112 +183,48 @@
 				   vma->ggtt_view.rotated.plane[1].offset);
 			break;
 
+		case I915_GGTT_VIEW_REMAPPED:
+			seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+				   vma->ggtt_view.remapped.plane[0].width,
+				   vma->ggtt_view.remapped.plane[0].height,
+				   vma->ggtt_view.remapped.plane[0].stride,
+				   vma->ggtt_view.remapped.plane[0].offset,
+				   vma->ggtt_view.remapped.plane[1].width,
+				   vma->ggtt_view.remapped.plane[1].height,
+				   vma->ggtt_view.remapped.plane[1].stride,
+				   vma->ggtt_view.remapped.plane[1].offset);
+			break;
+
 		default:
 			MISSING_CASE(vma->ggtt_view.type);
 			break;
 		}
 		}
 		if (vma->fence)
-			seq_printf(m, " , fence: %d%s",
-				   vma->fence->id,
-				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
+			seq_printf(m, " , fence: %d", vma->fence->id);
 		seq_puts(m, ")");
+
+		spin_lock(&obj->vma.lock);
 	}
+	spin_unlock(&obj->vma.lock);
+
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
+	if (i915_gem_object_is_framebuffer(obj))
+		seq_printf(m, " (fb)");
 
 	engine = i915_gem_object_last_write_engine(obj);
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
-
-	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
-	if (frontbuffer_bits)
-		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
-}
-
-static int obj_rank_by_stolen(const void *A, const void *B)
-{
-	const struct drm_i915_gem_object *a =
-		*(const struct drm_i915_gem_object **)A;
-	const struct drm_i915_gem_object *b =
-		*(const struct drm_i915_gem_object **)B;
-
-	if (a->stolen->start < b->stolen->start)
-		return -1;
-	if (a->stolen->start > b->stolen->start)
-		return 1;
-	return 0;
-}
-
-static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_object **objects;
-	struct drm_i915_gem_object *obj;
-	u64 total_obj_size, total_gtt_size;
-	unsigned long total, count, n;
-	int ret;
-
-	total = READ_ONCE(dev_priv->mm.object_count);
-	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
-	if (!objects)
-		return -ENOMEM;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		goto out;
-
-	total_obj_size = total_gtt_size = count = 0;
-
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
-		if (count == total)
-			break;
-
-		if (obj->stolen == NULL)
-			continue;
-
-		objects[count++] = obj;
-		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-
-	}
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
-		if (count == total)
-			break;
-
-		if (obj->stolen == NULL)
-			continue;
-
-		objects[count++] = obj;
-		total_obj_size += obj->base.size;
-	}
-	spin_unlock(&dev_priv->mm.obj_lock);
-
-	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
-
-	seq_puts(m, "Stolen:\n");
-	for (n = 0; n < count; n++) {
-		seq_puts(m, " ");
-		describe_obj(m, objects[n]);
-		seq_putc(m, '\n');
-	}
-	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
-		   count, total_obj_size, total_gtt_size);
-
-	mutex_unlock(&dev->struct_mutex);
-out:
-	kvfree(objects);
-	return ret;
 }
 
 struct file_stats {
-	struct drm_i915_file_private *file_priv;
+	struct i915_address_space *vm;
 	unsigned long count;
-	u64 total, unbound;
-	u64 global, shared;
+	u64 total;
 	u64 active, inactive;
+	u64 closed;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
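
The reworked i915_debugfs_describe_obj() above walks obj->vma.list under obj->vma.lock but drops the spinlock around the seq_printf() body, re-taking it before advancing to the next element. A minimal self-contained sketch of that drop-and-reacquire shape, with hypothetical types (the lock/list helpers are the real kernel APIs); it is only safe when, as in the diff, a held reference pins the elements on the list while the lock is released:

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct item { struct list_head link; int payload; };	/* hypothetical */

static void walk_items(struct list_head *head, spinlock_t *lock,
		       struct seq_file *m)
{
	struct item *it;

	spin_lock(lock);
	list_for_each_entry(it, head, link) {
		/*
		 * Drop the lock for the potentially sleeping body; the
		 * caller's reference must keep 'it' on the list so the
		 * iteration can continue once the lock is re-taken.
		 */
		spin_unlock(lock);
		seq_printf(m, "payload: %d\n", it->payload);
		spin_lock(lock);
	}
	spin_unlock(lock);
}
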
@@ -310,341 +233,147 @@
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
 
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
+		return 0;
 
 	stats->count++;
 	stats->total += obj->base.size;
-	if (!obj->bind_count)
-		stats->unbound += obj->base.size;
-	if (obj->base.name || obj->base.dma_buf)
-		stats->shared += obj->base.size;
 
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
-
-		if (i915_vma_is_ggtt(vma)) {
-			stats->global += vma->node.size;
-		} else {
-			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
-			if (ppgtt->vm.file != stats->file_priv)
+	spin_lock(&obj->vma.lock);
+	if (!stats->vm) {
+		for_each_ggtt_vma(vma, obj) {
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
+
+			if (i915_vma_is_active(vma))
+				stats->active += vma->node.size;
+			else
+				stats->inactive += vma->node.size;
+
+			if (i915_vma_is_closed(vma))
+				stats->closed += vma->node.size;
 		}
+	} else {
+		struct rb_node *p = obj->vma.tree.rb_node;
 
-		if (i915_vma_is_active(vma))
-			stats->active += vma->node.size;
-		else
-			stats->inactive += vma->node.size;
+		while (p) {
+			long cmp;
+
+			vma = rb_entry(p, typeof(*vma), obj_node);
+			cmp = i915_vma_compare(vma, stats->vm, NULL);
+			if (cmp == 0) {
+				if (drm_mm_node_allocated(&vma->node)) {
+					if (i915_vma_is_active(vma))
+						stats->active += vma->node.size;
+					else
+						stats->inactive += vma->node.size;
+
+					if (i915_vma_is_closed(vma))
+						stats->closed += vma->node.size;
+				}
+				break;
+			}
+			if (cmp < 0)
+				p = p->rb_right;
+			else
+				p = p->rb_left;
+		}
 	}
+	spin_unlock(&obj->vma.lock);
 
+	i915_gem_object_put(obj);
 	return 0;
 }
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
 			   stats.active, \
 			   stats.inactive, \
-			   stats.global, \
-			   stats.shared, \
-			   stats.unbound); \
+			   stats.closed); \
 } while (0)
 
-static void print_batch_pool_stats(struct seq_file *m,
-				   struct drm_i915_private *dev_priv)
-{
-	struct drm_i915_gem_object *obj;
-	struct file_stats stats;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int j;
-
-	memset(&stats, 0, sizeof(stats));
-
-	for_each_engine(engine, dev_priv, id) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				per_file_stats(0, obj, &stats);
-		}
-	}
-
-	print_file_stats(m, "[k]batch pool", stats);
-}
-
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
-{
-	struct i915_gem_context *ctx = ptr;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, ctx->i915, id) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
-
-		if (ce->state)
-			per_file_stats(0, ce->state->obj, data);
-		if (ce->ring)
-			per_file_stats(0, ce->ring->vma->obj, data);
-	}
-
-	return 0;
-}
-
 static void print_context_stats(struct seq_file *m,
-				struct drm_i915_private *dev_priv)
+				struct drm_i915_private *i915)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct file_stats stats;
-	struct drm_file *file;
+	struct file_stats kstats = {};
+	struct i915_gem_context *ctx, *cn;
 
-	memset(&stats, 0, sizeof(stats));
+	spin_lock(&i915->gem.contexts.lock);
+	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;
 
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->kernel_context)
-		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+		if (!kref_get_unless_zero(&ctx->ref))
+			continue;
 
-	list_for_each_entry(file, &dev->filelist, lhead) {
-		struct drm_i915_file_private *fpriv = file->driver_priv;
-		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+		spin_unlock(&i915->gem.contexts.lock);
+
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx), it) {
+			if (intel_context_pin_if_active(ce)) {
+				rcu_read_lock();
+				if (ce->state)
+					per_file_stats(0,
+						       ce->state->obj, &kstats);
+				per_file_stats(0, ce->ring->vma->obj, &kstats);
+				rcu_read_unlock();
+				intel_context_unpin(ce);
+			}
+		}
+		i915_gem_context_unlock_engines(ctx);
+
+		mutex_lock(&ctx->mutex);
+		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+			struct file_stats stats = {
+				.vm = rcu_access_pointer(ctx->vm),
+			};
+			struct drm_file *file = ctx->file_priv->file;
+			struct task_struct *task;
+			char name[80];
+
+			rcu_read_lock();
+			idr_for_each(&file->object_idr, per_file_stats, &stats);
+			rcu_read_unlock();
+
+			rcu_read_lock();
+			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
+			rcu_read_unlock();
+
+			print_file_stats(m, name, stats);
+		}
+		mutex_unlock(&ctx->mutex);
+
+		spin_lock(&i915->gem.contexts.lock);
+		list_safe_reset_next(ctx, cn, link);
+		i915_gem_context_put(ctx);
 	}
-	mutex_unlock(&dev->struct_mutex);
+	spin_unlock(&i915->gem.contexts.lock);
 
-	print_file_stats(m, "[k]contexts", stats);
+	print_file_stats(m, "[k]contexts", kstats);
 }
 
 static int i915_gem_object_info(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
-	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
-	struct drm_i915_gem_object *obj;
-	unsigned int page_sizes = 0;
-	struct drm_file *file;
-	char buf[80];
-	int ret;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	seq_printf(m, "%u objects, %llu bytes\n",
-		   dev_priv->mm.object_count,
-		   dev_priv->mm.object_memory);
-
-	size = count = 0;
-	mapped_size = mapped_count = 0;
-	purgeable_size = purgeable_count = 0;
-	huge_size = huge_count = 0;
-
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
-		size += obj->base.size;
-		++count;
-
-		if (obj->mm.madv == I915_MADV_DONTNEED) {
-			purgeable_size += obj->base.size;
-			++purgeable_count;
-		}
-
-		if (obj->mm.mapping) {
-			mapped_count++;
-			mapped_size += obj->base.size;
-		}
-
-		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
-			huge_count++;
-			huge_size += obj->base.size;
-			page_sizes |= obj->mm.page_sizes.sg;
-		}
-	}
-	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
-
-	size = count = dpy_size = dpy_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
-		size += obj->base.size;
-		++count;
-
-		if (obj->pin_global) {
-			dpy_size += obj->base.size;
-			++dpy_count;
-		}
-
-		if (obj->mm.madv == I915_MADV_DONTNEED) {
-			purgeable_size += obj->base.size;
-			++purgeable_count;
-		}
-
-		if (obj->mm.mapping) {
-			mapped_count++;
-			mapped_size += obj->base.size;
-		}
-
-		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
-			huge_count++;
-			huge_size += obj->base.size;
-			page_sizes |= obj->mm.page_sizes.sg;
-		}
-	}
-	spin_unlock(&dev_priv->mm.obj_lock);
-
-	seq_printf(m, "%u bound objects, %llu bytes\n",
-		   count, size);
-	seq_printf(m, "%u purgeable objects, %llu bytes\n",
-		   purgeable_count, purgeable_size);
-	seq_printf(m, "%u mapped objects, %llu bytes\n",
-		   mapped_count, mapped_size);
-	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
-		   huge_count,
-		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
-		   huge_size);
-	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
-		   dpy_count, dpy_size);
-
-	seq_printf(m, "%llu [%pa] gtt total\n",
-		   ggtt->vm.total, &ggtt->mappable_end);
-	seq_printf(m, "Supported page sizes: %s\n",
-		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
-					buf, sizeof(buf)));
-
+	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
+		   i915->mm.shrink_count,
+		   atomic_read(&i915->mm.free_count),
+		   i915->mm.shrink_memory);
+	for_each_memory_region(mr, i915, id)
+		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
+			   mr->name, &mr->total, &mr->avail);
 	seq_putc(m, '\n');
-	print_batch_pool_stats(m, dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
-	mutex_lock(&dev->filelist_mutex);
-	print_context_stats(m, dev_priv);
-	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-		struct file_stats stats;
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-		struct i915_request *request;
-		struct task_struct *task;
-
-		mutex_lock(&dev->struct_mutex);
-
-		memset(&stats, 0, sizeof(stats));
-		stats.file_priv = file->driver_priv;
-		spin_lock(&file->table_lock);
-		idr_for_each(&file->object_idr, per_file_stats, &stats);
-		spin_unlock(&file->table_lock);
-		/*
-		 * Although we have a valid reference on file->pid, that does
-		 * not guarantee that the task_struct who called get_pid() is
-		 * still alive (e.g. get_pid(current) => fork() => exit()).
-		 * Therefore, we need to protect this ->comm access using RCU.
-		 */
-		request = list_first_entry_or_null(&file_priv->mm.request_list,
-						   struct i915_request,
-						   client_link);
-		rcu_read_lock();
-		task = pid_task(request && request->gem_context->pid ?
-				request->gem_context->pid : file->pid,
-				PIDTYPE_PID);
-		print_file_stats(m, task ? task->comm : "<unknown>", stats);
-		rcu_read_unlock();
-
-		mutex_unlock(&dev->struct_mutex);
-	}
-	mutex_unlock(&dev->filelist_mutex);
-
-	return 0;
-}
-
-static int i915_gem_gtt_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_i915_private *dev_priv = node_to_i915(node);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_object **objects;
-	struct drm_i915_gem_object *obj;
-	u64 total_obj_size, total_gtt_size;
-	unsigned long nobject, n;
-	int count, ret;
-
-	nobject = READ_ONCE(dev_priv->mm.object_count);
-	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
-	if (!objects)
-		return -ENOMEM;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	count = 0;
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
-		objects[count++] = obj;
-		if (count == nobject)
-			break;
-	}
-	spin_unlock(&dev_priv->mm.obj_lock);
-
-	total_obj_size = total_gtt_size = 0;
-	for (n = 0; n < count; n++) {
-		obj = objects[n];
-
-		seq_puts(m, " ");
-		describe_obj(m, obj);
-		seq_putc(m, '\n');
-		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
-		   count, total_obj_size, total_gtt_size);
-	kvfree(objects);
-
-	return 0;
-}
-
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int total = 0;
-	int ret, j;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	for_each_engine(engine, dev_priv, id) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			int count;
-
-			count = 0;
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				count++;
-			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   engine->name, j, count);
-
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link) {
-				seq_puts(m, " ");
-				describe_obj(m, obj);
-				seq_putc(m, '\n');
-			}
-
-			total += count;
-		}
-	}
-
-	seq_printf(m, "total: %d\n", total);
-
-	mutex_unlock(&dev->struct_mutex);
+	print_context_stats(m, i915);
 
 	return 0;
 }
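
The new per_file_stats() above resolves an object's vma for one specific address space by descending obj->vma.tree with i915_vma_compare() instead of scanning a linear list. A minimal generic sketch of that descend-by-comparator lookup, with a hypothetical node type (rb_entry() and struct rb_root are the real kernel APIs; note the diff walks right when its comparator is negative because of i915_vma_compare()'s sort order, while this sketch uses the conventional orientation):

#include <linux/rbtree.h>

struct node { struct rb_node rb; unsigned long key; };	/* hypothetical */

static struct node *tree_lookup(struct rb_root *root, unsigned long key)
{
	struct rb_node *p = root->rb_node;

	while (p) {
		struct node *n = rb_entry(p, struct node, rb);

		if (key == n->key)
			return n;	/* found the matching entry */
		else if (key < n->key)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return NULL;
}
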
@@ -652,14 +381,16 @@
 static void gen8_display_interrupt_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	int pipe;
+	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
 		enum intel_display_power_domain power_domain;
+		intel_wakeref_t wakeref;
 
 		power_domain = POWER_DOMAIN_PIPE(pipe);
-		if (!intel_display_power_get_if_enabled(dev_priv,
-							power_domain)) {
+		wakeref = intel_display_power_get_if_enabled(dev_priv,
+							     power_domain);
+		if (!wakeref) {
 			seq_printf(m, "Pipe %c power disabled\n",
 				   pipe_name(pipe));
 			continue;
@@ -674,7 +405,7 @@
 			   pipe_name(pipe),
 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 
-		intel_display_power_put(dev_priv, power_domain);
+		intel_display_power_put(dev_priv, power_domain, wakeref);
 	}
 
 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -703,12 +434,14 @@
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
 	int i, pipe;
 
-	intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
+		intel_wakeref_t pref;
+
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));
 
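
A recurring theme in this diff: runtime-PM and display-power references are now tracked cookies (intel_wakeref_t) that must be handed back on release, so a leaked reference can be attributed to its taker. A minimal sketch of the two usual forms, assuming only the upstream i915 runtime-PM API (the surrounding function is hypothetical):

static void wakeref_example(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* explicit form: the cookie returned by get must be passed to put */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	/* ... hardware access is valid here ... */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	/* scoped form used elsewhere in this file */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* ... hardware access valid inside the block ... */
	}
}
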
@@ -724,8 +457,9 @@
 			enum intel_display_power_domain power_domain;
 
 			power_domain = POWER_DOMAIN_PIPE(pipe);
-			if (!intel_display_power_get_if_enabled(dev_priv,
-								power_domain)) {
+			pref = intel_display_power_get_if_enabled(dev_priv,
+								  power_domain);
+			if (!pref) {
 				seq_printf(m, "Pipe %c power disabled\n",
 					   pipe_name(pipe));
 				continue;
@@ -735,17 +469,17 @@
 				   pipe_name(pipe),
 				   I915_READ(PIPESTAT(pipe)));
 
-			intel_display_power_put(dev_priv, power_domain);
+			intel_display_power_put(dev_priv, power_domain, pref);
 		}
 
-		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 		seq_printf(m, "Port hotplug:\t%08x\n",
 			   I915_READ(PORT_HOTPLUG_EN));
 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 			   I915_READ(VLV_DPFLIPSTAT));
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
-		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
 
 		for (i = 0; i < 4; i++) {
 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -763,6 +497,10 @@
 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 			   I915_READ(GEN8_PCU_IER));
 	} else if (INTEL_GEN(dev_priv) >= 11) {
+		if (HAS_MASTER_UNIT_IRQ(dev_priv))
+			seq_printf(m, "Master Unit Interrupt Control: %08x\n",
+				   I915_READ(DG1_MSTR_UNIT_INTR));
+
 		seq_printf(m, "Master Interrupt Control: %08x\n",
 			   I915_READ(GEN11_GFX_MSTR_IRQ));
 
@@ -798,6 +536,8 @@
 
 		gen8_display_interrupt_info(m);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
+		intel_wakeref_t pref;
+
 		seq_printf(m, "Display IER:\t%08x\n",
 			   I915_READ(VLV_IER));
 		seq_printf(m, "Display IIR:\t%08x\n",
@@ -810,8 +550,9 @@
 			enum intel_display_power_domain power_domain;
 
 			power_domain = POWER_DOMAIN_PIPE(pipe);
-			if (!intel_display_power_get_if_enabled(dev_priv,
-								power_domain)) {
+			pref = intel_display_power_get_if_enabled(dev_priv,
+								  power_domain);
+			if (!pref) {
 				seq_printf(m, "Pipe %c power disabled\n",
 					   pipe_name(pipe));
 				continue;
@@ -820,7 +561,7 @@
 			seq_printf(m, "Pipe %c stat:\t%08x\n",
 				   pipe_name(pipe),
 				   I915_READ(PIPESTAT(pipe)));
-			intel_display_power_put(dev_priv, power_domain);
+			intel_display_power_put(dev_priv, power_domain, pref);
 		}
 
 		seq_printf(m, "Master IER:\t%08x\n",
@@ -840,20 +581,22 @@
 		seq_printf(m, "PM IMR:\t\t%08x\n",
 			   I915_READ(GEN6_PMIMR));
 
+		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 		seq_printf(m, "Port hotplug:\t%08x\n",
 			   I915_READ(PORT_HOTPLUG_EN));
 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 			   I915_READ(VLV_DPFLIPSTAT));
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
 
 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
-			   I915_READ(IER));
+			   I915_READ(GEN2_IER));
 		seq_printf(m, "Interrupt identity: %08x\n",
-			   I915_READ(IIR));
+			   I915_READ(GEN2_IIR));
 		seq_printf(m, "Interrupt mask: %08x\n",
-			   I915_READ(IMR));
+			   I915_READ(GEN2_IMR));
 		for_each_pipe(dev_priv, pipe)
 			seq_printf(m, "Pipe %c stat: %08x\n",
 				   pipe_name(pipe),
@@ -900,42 +643,40 @@
 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
 
 	} else if (INTEL_GEN(dev_priv) >= 6) {
-		for_each_engine(engine, dev_priv, id) {
+		for_each_uabi_engine(engine, dev_priv) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s): %08x\n",
-				   engine->name, I915_READ_IMR(engine));
+				   engine->name, ENGINE_READ(engine, RING_IMR));
 		}
 	}
 
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
 	return 0;
 }
 
 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	int i, ret;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	unsigned int i;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
 
-	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
+	rcu_read_lock();
+	for (i = 0; i < i915->ggtt.num_fences; i++) {
+		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+		struct i915_vma *vma = reg->vma;
 
 		seq_printf(m, "Fence %d, pin count = %d, object = ",
-			   i, dev_priv->fence_regs[i].pin_count);
+			   i, atomic_read(&reg->pin_count));
 		if (!vma)
 			seq_puts(m, "unused");
 		else
-			describe_obj(m, vma->obj);
+			i915_debugfs_describe_obj(m, vma->obj);
 		seq_putc(m, '\n');
 	}
+	rcu_read_unlock();
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -943,49 +684,50 @@
 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
 {
-	struct i915_gpu_state *error = file->private_data;
-	struct drm_i915_error_state_buf str;
+	struct i915_gpu_coredump *error;
 	ssize_t ret;
-	loff_t tmp;
+	void *buf;
 
+	error = file->private_data;
 	if (!error)
 		return 0;
 
-	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
-	if (ret)
-		return ret;
+	/* Bounce buffer required because of kernfs __user API convenience. */
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	ret = i915_error_state_to_str(&str, error);
-	if (ret)
+	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
+	if (ret <= 0)
 		goto out;
 
-	tmp = 0;
-	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
-	if (ret < 0)
-		goto out;
+	if (!copy_to_user(ubuf, buf, ret))
+		*pos += ret;
+	else
+		ret = -EFAULT;
 
-	*pos = str.start + ret;
 out:
-	i915_error_state_buf_release(&str);
+	kfree(buf);
 	return ret;
 }
 
 static int gpu_state_release(struct inode *inode, struct file *file)
 {
-	i915_gpu_state_put(file->private_data);
+	i915_gpu_coredump_put(file->private_data);
 	return 0;
 }
 
 static int i915_gpu_info_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
-	struct i915_gpu_state *gpu;
+	struct i915_gpu_coredump *gpu;
+	intel_wakeref_t wakeref;
 
-	intel_runtime_pm_get(i915);
-	gpu = i915_capture_gpu_state(i915);
-	intel_runtime_pm_put(i915);
-	if (!gpu)
-		return -ENOMEM;
+	gpu = NULL;
+	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+		gpu = i915_gpu_coredump(i915);
+	if (IS_ERR(gpu))
		return PTR_ERR(gpu);
 
 	file->private_data = gpu;
 	return 0;
1005747 size_t cnt,
1006748 loff_t *ppos)
1007749 {
1008
- struct i915_gpu_state *error = filp->private_data;
750
+ struct i915_gpu_coredump *error = filp->private_data;
1009751
1010752 if (!error)
1011753 return 0;
1012754
1013
- DRM_DEBUG_DRIVER("Resetting error state\n");
755
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
1014756 i915_reset_error_state(error->i915);
1015757
1016758 return cnt;
....@@ -1018,7 +760,13 @@
1018760
1019761 static int i915_error_state_open(struct inode *inode, struct file *file)
1020762 {
1021
- file->private_data = i915_first_error_state(inode->i_private);
763
+ struct i915_gpu_coredump *error;
764
+
765
+ error = i915_first_error_state(inode->i_private);
766
+ if (IS_ERR(error))
767
+ return PTR_ERR(error);
768
+
769
+ file->private_data = error;
1022770 return 0;
1023771 }
1024772
....@@ -1032,41 +780,19 @@
1032780 };
1033781 #endif
1034782
1035
-static int
1036
-i915_next_seqno_set(void *data, u64 val)
1037
-{
1038
- struct drm_i915_private *dev_priv = data;
1039
- struct drm_device *dev = &dev_priv->drm;
1040
- int ret;
1041
-
1042
- ret = mutex_lock_interruptible(&dev->struct_mutex);
1043
- if (ret)
1044
- return ret;
1045
-
1046
- intel_runtime_pm_get(dev_priv);
1047
- ret = i915_gem_set_global_seqno(dev, val);
1048
- intel_runtime_pm_put(dev_priv);
1049
-
1050
- mutex_unlock(&dev->struct_mutex);
1051
-
1052
- return ret;
1053
-}
1054
-
1055
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1056
- NULL, i915_next_seqno_set,
1057
- "0x%llx\n");
1058
-
1059783 static int i915_frequency_info(struct seq_file *m, void *unused)
1060784 {
1061785 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1062
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
786
+ struct intel_uncore *uncore = &dev_priv->uncore;
787
+ struct intel_rps *rps = &dev_priv->gt.rps;
788
+ intel_wakeref_t wakeref;
1063789 int ret = 0;
1064790
1065
- intel_runtime_pm_get(dev_priv);
791
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1066792
1067
- if (IS_GEN5(dev_priv)) {
1068
- u16 rgvswctl = I915_READ16(MEMSWCTL);
1069
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
793
+ if (IS_GEN(dev_priv, 5)) {
794
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
795
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
1070796
1071797 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072798 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
@@ -1077,8 +803,6 @@
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 rpmodectl, freq_sts;
 
-		mutex_lock(&dev_priv->pcu_lock);
-
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 		seq_printf(m, "Video Turbo Mode: %s\n",
 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
@@ -1088,29 +812,31 @@
 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));
 
+		vlv_punit_get(dev_priv);
 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+		vlv_punit_put(dev_priv);
+
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
 		seq_printf(m, "actual GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(rps, rps->cur_freq));
 
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(rps, rps->min_freq));
 
 		seq_printf(m, "idle GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(rps, rps->idle_freq));
 
 		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
-		mutex_unlock(&dev_priv->pcu_lock);
+			   intel_gpu_freq(rps, rps->efficient_freq));
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
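
The vlv_punit_get()/vlv_punit_put() bracket added above replaces the old pcu_lock: on Valleyview/Cherryview the Punit is reached over a sideband bus, and the get/put pair both serializes sideband access and holds the device awake across the transaction. A sketch of the bracketed read, assuming the upstream intel_sideband helpers (the surrounding function is hypothetical):

static u32 read_gpu_freq_sts(struct drm_i915_private *i915)
{
	u32 freq_sts;

	vlv_punit_get(i915);	/* lock sideband + power reference */
	freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_put(i915);

	return freq_sts;
}
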
@@ -1132,7 +858,7 @@
 		}
 
 		/* RPSTAT1 is in the GT power well */
-		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
 		if (INTEL_GEN(dev_priv) >= 9)
@@ -1144,7 +870,7 @@
			else
				reqf >>= 25;
 		}
-		reqf = intel_gpu_freq(dev_priv, reqf);
+		reqf = intel_gpu_freq(rps, reqf);
 
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -1157,10 +883,9 @@
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		cagf = intel_gpu_freq(dev_priv,
-				      intel_get_cagf(dev_priv, rpstat));
+		cagf = intel_rps_read_actual_frequency(rps);
 
-		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
@@ -1212,21 +937,30 @@
 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 		seq_printf(m, "CAGF: %dMHz\n", cagf);
-		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
-			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
-		seq_printf(m, "RP CUR UP: %d (%dus)\n",
-			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
-		seq_printf(m, "RP PREV UP: %d (%dus)\n",
-			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
+		seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+			   rpupei,
+			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
+		seq_printf(m, "RP CUR UP: %d (%dun)\n",
+			   rpcurup,
+			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
+		seq_printf(m, "RP PREV UP: %d (%dns)\n",
+			   rpprevup,
+			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
 		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);
 
-		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
-			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
-		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
-			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
-		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
-			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
+		seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+			   rpdownei,
+			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
+						      rpdownei));
+		seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+			   rpcurdown,
+			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
+						      rpcurdown));
+		seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+			   rpprevdown,
+			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
+						      rpprevdown));
 		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);
 
@@ -1235,37 +969,37 @@
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 
 		seq_printf(m, "Current freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(rps, rps->cur_freq));
 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 		seq_printf(m, "Idle freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(rps, rps->idle_freq));
 		seq_printf(m, "Min freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(rps, rps->min_freq));
 		seq_printf(m, "Boost freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->boost_freq));
+			   intel_gpu_freq(rps, rps->boost_freq));
 		seq_printf(m, "Max freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
+			   intel_gpu_freq(rps, rps->efficient_freq));
 	} else {
 		seq_puts(m, "no P-state info available\n");
 	}
....@@ -1274,557 +1008,20 @@
12741008 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
12751009 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
12761010
1277
- intel_runtime_pm_put(dev_priv);
1011
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
12781012 return ret;
1279
-}
1280
-
1281
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
1282
- struct seq_file *m,
1283
- struct intel_instdone *instdone)
1284
-{
1285
- int slice;
1286
- int subslice;
1287
-
1288
- seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1289
- instdone->instdone);
1290
-
1291
- if (INTEL_GEN(dev_priv) <= 3)
1292
- return;
1293
-
1294
- seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1295
- instdone->slice_common);
1296
-
1297
- if (INTEL_GEN(dev_priv) <= 6)
1298
- return;
1299
-
1300
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1301
- seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1302
- slice, subslice, instdone->sampler[slice][subslice]);
1303
-
1304
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1305
- seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1306
- slice, subslice, instdone->row[slice][subslice]);
1307
-}
1308
-
1309
-static int i915_hangcheck_info(struct seq_file *m, void *unused)
1310
-{
1311
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1312
- struct intel_engine_cs *engine;
1313
- u64 acthd[I915_NUM_ENGINES];
1314
- u32 seqno[I915_NUM_ENGINES];
1315
- struct intel_instdone instdone;
1316
- enum intel_engine_id id;
1317
-
1318
- if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1319
- seq_puts(m, "Wedged\n");
1320
- if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1321
- seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1322
- if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1323
- seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1324
- if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1325
- seq_puts(m, "Waiter holding struct mutex\n");
1326
- if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1327
- seq_puts(m, "struct_mutex blocked for reset\n");
1328
-
1329
- if (!i915_modparams.enable_hangcheck) {
1330
- seq_puts(m, "Hangcheck disabled\n");
1331
- return 0;
1332
- }
1333
-
1334
- intel_runtime_pm_get(dev_priv);
1335
-
1336
- for_each_engine(engine, dev_priv, id) {
1337
- acthd[id] = intel_engine_get_active_head(engine);
1338
- seqno[id] = intel_engine_get_seqno(engine);
1339
- }
1340
-
1341
- intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1342
-
1343
- intel_runtime_pm_put(dev_priv);
1344
-
1345
- if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1346
- seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1347
- jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1348
- jiffies));
1349
- else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1350
- seq_puts(m, "Hangcheck active, work pending\n");
1351
- else
1352
- seq_puts(m, "Hangcheck inactive\n");
1353
-
1354
- seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1355
-
1356
- for_each_engine(engine, dev_priv, id) {
1357
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
1358
- struct rb_node *rb;
1359
-
1360
- seq_printf(m, "%s:\n", engine->name);
1361
- seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1362
- engine->hangcheck.seqno, seqno[id],
1363
- intel_engine_last_submit(engine));
1364
- seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
1365
- yesno(intel_engine_has_waiter(engine)),
1366
- yesno(test_bit(engine->id,
1367
- &dev_priv->gpu_error.missed_irq_rings)),
1368
- yesno(engine->hangcheck.stalled),
1369
- yesno(engine->hangcheck.wedged));
1370
-
1371
- spin_lock_irq(&b->rb_lock);
1372
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1373
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1374
-
1375
- seq_printf(m, "\t%s [%d] waiting for %x\n",
1376
- w->tsk->comm, w->tsk->pid, w->seqno);
1377
- }
1378
- spin_unlock_irq(&b->rb_lock);
1379
-
1380
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1381
- (long long)engine->hangcheck.acthd,
1382
- (long long)acthd[id]);
1383
- seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1384
- hangcheck_action_to_str(engine->hangcheck.action),
1385
- engine->hangcheck.action,
1386
- jiffies_to_msecs(jiffies -
1387
- engine->hangcheck.action_timestamp));
1388
-
1389
- if (engine->id == RCS) {
1390
- seq_puts(m, "\tinstdone read =\n");
1391
-
1392
- i915_instdone_info(dev_priv, m, &instdone);
1393
-
1394
- seq_puts(m, "\tinstdone accu =\n");
1395
-
1396
- i915_instdone_info(dev_priv, m,
1397
- &engine->hangcheck.instdone);
1398
- }
1399
- }
1400
-
1401
- return 0;
1402
-}
1403
-
1404
-static int i915_reset_info(struct seq_file *m, void *unused)
1405
-{
1406
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407
- struct i915_gpu_error *error = &dev_priv->gpu_error;
1408
- struct intel_engine_cs *engine;
1409
- enum intel_engine_id id;
1410
-
1411
- seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
-
1413
- for_each_engine(engine, dev_priv, id) {
1414
- seq_printf(m, "%s = %u\n", engine->name,
1415
- i915_reset_engine_count(error, engine));
1416
- }
1417
-
1418
- return 0;
1419
-}
1420
-
1421
-static int ironlake_drpc_info(struct seq_file *m)
1422
-{
1423
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1424
- u32 rgvmodectl, rstdbyctl;
1425
- u16 crstandvid;
1426
-
1427
- rgvmodectl = I915_READ(MEMMODECTL);
1428
- rstdbyctl = I915_READ(RSTDBYCTL);
1429
- crstandvid = I915_READ16(CRSTANDVID);
1430
-
1431
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1432
- seq_printf(m, "Boost freq: %d\n",
1433
- (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1434
- MEMMODE_BOOST_FREQ_SHIFT);
1435
- seq_printf(m, "HW control enabled: %s\n",
1436
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1437
- seq_printf(m, "SW control enabled: %s\n",
1438
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1439
- seq_printf(m, "Gated voltage change: %s\n",
1440
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1441
- seq_printf(m, "Starting frequency: P%d\n",
1442
- (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1443
- seq_printf(m, "Max P-state: P%d\n",
1444
- (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1445
- seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1446
- seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1447
- seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1448
- seq_printf(m, "Render standby enabled: %s\n",
1449
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
1450
- seq_puts(m, "Current RS state: ");
1451
- switch (rstdbyctl & RSX_STATUS_MASK) {
1452
- case RSX_STATUS_ON:
1453
- seq_puts(m, "on\n");
1454
- break;
1455
- case RSX_STATUS_RC1:
1456
- seq_puts(m, "RC1\n");
1457
- break;
1458
- case RSX_STATUS_RC1E:
1459
- seq_puts(m, "RC1E\n");
1460
- break;
1461
- case RSX_STATUS_RS1:
1462
- seq_puts(m, "RS1\n");
1463
- break;
1464
- case RSX_STATUS_RS2:
1465
- seq_puts(m, "RS2 (RC6)\n");
1466
- break;
1467
- case RSX_STATUS_RS3:
1468
- seq_puts(m, "RC3 (RC6+)\n");
1469
- break;
1470
- default:
1471
- seq_puts(m, "unknown\n");
1472
- break;
1473
- }
1474
-
1475
- return 0;
1476
-}
1477
-
1478
-static int i915_forcewake_domains(struct seq_file *m, void *data)
1479
-{
1480
- struct drm_i915_private *i915 = node_to_i915(m->private);
1481
- struct intel_uncore_forcewake_domain *fw_domain;
1482
- unsigned int tmp;
1483
-
1484
- seq_printf(m, "user.bypass_count = %u\n",
1485
- i915->uncore.user_forcewake.count);
1486
-
1487
- for_each_fw_domain(fw_domain, i915, tmp)
1488
- seq_printf(m, "%s.wake_count = %u\n",
1489
- intel_uncore_forcewake_domain_to_str(fw_domain->id),
1490
- READ_ONCE(fw_domain->wake_count));
1491
-
1492
- return 0;
1493
-}
1494
-
1495
-static void print_rc6_res(struct seq_file *m,
1496
- const char *title,
1497
- const i915_reg_t reg)
1498
-{
1499
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
-
1501
- seq_printf(m, "%s %u (%llu us)\n",
1502
- title, I915_READ(reg),
1503
- intel_rc6_residency_us(dev_priv, reg));
1504
-}
1505
-
1506
-static int vlv_drpc_info(struct seq_file *m)
1507
-{
1508
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1509
- u32 rcctl1, pw_status;
1510
-
1511
- pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1512
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513
-
1514
- seq_printf(m, "RC6 Enabled: %s\n",
1515
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516
- GEN6_RC_CTL_EI_MODE(1))));
1517
- seq_printf(m, "Render Power Well: %s\n",
1518
- (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1519
- seq_printf(m, "Media Power Well: %s\n",
1520
- (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1521
-
1522
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523
- print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1524
-
1525
- return i915_forcewake_domains(m, NULL);
1526
-}
1527
-
1528
-static int gen6_drpc_info(struct seq_file *m)
1529
-{
1530
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1531
- u32 gt_core_status, rcctl1, rc6vids = 0;
1532
- u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1533
-
1534
- gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1535
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1536
-
1537
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
1538
- if (INTEL_GEN(dev_priv) >= 9) {
1539
- gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1540
- gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1541
- }
1542
-
1543
- if (INTEL_GEN(dev_priv) <= 7) {
1544
- mutex_lock(&dev_priv->pcu_lock);
1545
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1546
- &rc6vids);
1547
- mutex_unlock(&dev_priv->pcu_lock);
1548
- }
1549
-
1550
- seq_printf(m, "RC1e Enabled: %s\n",
1551
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1552
- seq_printf(m, "RC6 Enabled: %s\n",
1553
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1554
- if (INTEL_GEN(dev_priv) >= 9) {
1555
- seq_printf(m, "Render Well Gating Enabled: %s\n",
1556
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1557
- seq_printf(m, "Media Well Gating Enabled: %s\n",
1558
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1559
- }
1560
- seq_printf(m, "Deep RC6 Enabled: %s\n",
1561
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1562
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
1563
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1564
- seq_puts(m, "Current RC state: ");
1565
- switch (gt_core_status & GEN6_RCn_MASK) {
1566
- case GEN6_RC0:
1567
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1568
- seq_puts(m, "Core Power Down\n");
1569
- else
1570
- seq_puts(m, "on\n");
1571
- break;
1572
- case GEN6_RC3:
1573
- seq_puts(m, "RC3\n");
1574
- break;
1575
- case GEN6_RC6:
1576
- seq_puts(m, "RC6\n");
1577
- break;
1578
- case GEN6_RC7:
1579
- seq_puts(m, "RC7\n");
1580
- break;
1581
- default:
1582
- seq_puts(m, "Unknown\n");
1583
- break;
1584
- }
1585
-
1586
- seq_printf(m, "Core Power Down: %s\n",
1587
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1588
- if (INTEL_GEN(dev_priv) >= 9) {
1589
- seq_printf(m, "Render Power Well: %s\n",
1590
- (gen9_powergate_status &
1591
- GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1592
- seq_printf(m, "Media Power Well: %s\n",
1593
- (gen9_powergate_status &
1594
- GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1595
- }
1596
-
1597
- /* Not exactly sure what this is */
1598
- print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1599
- GEN6_GT_GFX_RC6_LOCKED);
1600
- print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1601
- print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1602
- print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1603
-
1604
- if (INTEL_GEN(dev_priv) <= 7) {
1605
- seq_printf(m, "RC6 voltage: %dmV\n",
1606
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1607
- seq_printf(m, "RC6+ voltage: %dmV\n",
1608
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1609
- seq_printf(m, "RC6++ voltage: %dmV\n",
1610
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1611
- }
1612
-
1613
- return i915_forcewake_domains(m, NULL);
1614
-}
1615
-
1616
-static int i915_drpc_info(struct seq_file *m, void *unused)
1617
-{
1618
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1619
- int err;
1620
-
1621
- intel_runtime_pm_get(dev_priv);
1622
-
1623
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1624
- err = vlv_drpc_info(m);
1625
- else if (INTEL_GEN(dev_priv) >= 6)
1626
- err = gen6_drpc_info(m);
1627
- else
1628
- err = ironlake_drpc_info(m);
1629
-
1630
- intel_runtime_pm_put(dev_priv);
1631
-
1632
- return err;
1633
-}
1634
-
1635
-static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636
-{
1637
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638
-
1639
- seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640
- dev_priv->fb_tracking.busy_bits);
1641
-
1642
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643
- dev_priv->fb_tracking.flip_bits);
1644
-
1645
- return 0;
1646
-}
1647
-
1648
-static int i915_fbc_status(struct seq_file *m, void *unused)
1649
-{
1650
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651
- struct intel_fbc *fbc = &dev_priv->fbc;
1652
-
1653
- if (!HAS_FBC(dev_priv))
1654
- return -ENODEV;
1655
-
1656
- intel_runtime_pm_get(dev_priv);
1657
- mutex_lock(&fbc->lock);
1658
-
1659
- if (intel_fbc_is_active(dev_priv))
1660
- seq_puts(m, "FBC enabled\n");
1661
- else
1662
- seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
-
1664
- if (intel_fbc_is_active(dev_priv)) {
1665
- u32 mask;
1666
-
1667
- if (INTEL_GEN(dev_priv) >= 8)
1668
- mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669
- else if (INTEL_GEN(dev_priv) >= 7)
1670
- mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671
- else if (INTEL_GEN(dev_priv) >= 5)
1672
- mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673
- else if (IS_G4X(dev_priv))
1674
- mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675
- else
1676
- mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677
- FBC_STAT_COMPRESSED);
1678
-
1679
- seq_printf(m, "Compressing: %s\n", yesno(mask));
1680
- }
1681
-
1682
- mutex_unlock(&fbc->lock);
1683
- intel_runtime_pm_put(dev_priv);
1684
-
1685
- return 0;
1686
-}
1687
-
1688
-static int i915_fbc_false_color_get(void *data, u64 *val)
1689
-{
1690
- struct drm_i915_private *dev_priv = data;
1691
-
1692
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693
- return -ENODEV;
1694
-
1695
- *val = dev_priv->fbc.false_color;
1696
-
1697
- return 0;
1698
-}
1699
-
1700
-static int i915_fbc_false_color_set(void *data, u64 val)
1701
-{
1702
- struct drm_i915_private *dev_priv = data;
1703
- u32 reg;
1704
-
1705
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706
- return -ENODEV;
1707
-
1708
- mutex_lock(&dev_priv->fbc.lock);
1709
-
1710
- reg = I915_READ(ILK_DPFC_CONTROL);
1711
- dev_priv->fbc.false_color = val;
1712
-
1713
- I915_WRITE(ILK_DPFC_CONTROL, val ?
1714
- (reg | FBC_CTL_FALSE_COLOR) :
1715
- (reg & ~FBC_CTL_FALSE_COLOR));
1716
-
1717
- mutex_unlock(&dev_priv->fbc.lock);
1718
- return 0;
1719
-}
1720
-
1721
-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1722
- i915_fbc_false_color_get, i915_fbc_false_color_set,
1723
- "%llu\n");
1724
-
1725
-static int i915_ips_status(struct seq_file *m, void *unused)
1726
-{
1727
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728
-
1729
- if (!HAS_IPS(dev_priv))
1730
- return -ENODEV;
1731
-
1732
- intel_runtime_pm_get(dev_priv);
1733
-
1734
- seq_printf(m, "Enabled by kernel parameter: %s\n",
1735
- yesno(i915_modparams.enable_ips));
1736
-
1737
- if (INTEL_GEN(dev_priv) >= 8) {
1738
- seq_puts(m, "Currently: unknown\n");
1739
- } else {
1740
- if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741
- seq_puts(m, "Currently: enabled\n");
1742
- else
1743
- seq_puts(m, "Currently: disabled\n");
1744
- }
1745
-
1746
- intel_runtime_pm_put(dev_priv);
1747
-
1748
- return 0;
1749
-}
1750
-
1751
-static int i915_sr_status(struct seq_file *m, void *unused)
1752
-{
1753
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1754
- bool sr_enabled = false;
1755
-
1756
- intel_runtime_pm_get(dev_priv);
1757
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1758
-
1759
- if (INTEL_GEN(dev_priv) >= 9)
1760
- /* no global SR status; inspect per-plane WM */;
1761
- else if (HAS_PCH_SPLIT(dev_priv))
1762
- sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1763
- else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1764
- IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1765
- sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1766
- else if (IS_I915GM(dev_priv))
1767
- sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1768
- else if (IS_PINEVIEW(dev_priv))
1769
- sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1770
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1771
- sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1772
-
1773
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1774
- intel_runtime_pm_put(dev_priv);
1775
-
1776
- seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1777
-
1778
- return 0;
1779
-}
1780
-
1781
-static int i915_emon_status(struct seq_file *m, void *unused)
1782
-{
1783
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784
- struct drm_device *dev = &dev_priv->drm;
1785
- unsigned long temp, chipset, gfx;
1786
- int ret;
1787
-
1788
- if (!IS_GEN5(dev_priv))
1789
- return -ENODEV;
1790
-
1791
- intel_runtime_pm_get(dev_priv);
1792
-
1793
- ret = mutex_lock_interruptible(&dev->struct_mutex);
1794
-	if (ret) {
1795
-		intel_runtime_pm_put(dev_priv);
-		return ret;
-	}
1796
-
1797
- temp = i915_mch_val(dev_priv);
1798
- chipset = i915_chipset_val(dev_priv);
1799
- gfx = i915_gfx_val(dev_priv);
1800
- mutex_unlock(&dev->struct_mutex);
1801
-
1802
- seq_printf(m, "GMCH temp: %ld\n", temp);
1803
- seq_printf(m, "Chipset power: %ld\n", chipset);
1804
- seq_printf(m, "GFX power: %ld\n", gfx);
1805
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
1806
-
1807
- intel_runtime_pm_put(dev_priv);
1808
-
1809
- return 0;
18101013 }
18111014
18121015 static int i915_ring_freq_table(struct seq_file *m, void *unused)
18131016 {
18141017 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1815
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
1018
+ struct intel_rps *rps = &dev_priv->gt.rps;
18161019 unsigned int max_gpu_freq, min_gpu_freq;
1020
+ intel_wakeref_t wakeref;
18171021 int gpu_freq, ia_freq;
1818
- int ret;
18191022
18201023 if (!HAS_LLC(dev_priv))
18211024 return -ENODEV;
1822
-
1823
- intel_runtime_pm_get(dev_priv);
1824
-
1825
- ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1826
- if (ret)
1827
- goto out;
18281025
18291026 min_gpu_freq = rps->min_freq;
18301027 max_gpu_freq = rps->max_freq;
....@@ -1836,103 +1033,22 @@
18361033
18371034 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
18381035
1036
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
18391037 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
18401038 ia_freq = gpu_freq;
18411039 sandybridge_pcode_read(dev_priv,
18421040 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1843
- &ia_freq);
1041
+ &ia_freq, NULL);
18441042 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1845
- intel_gpu_freq(dev_priv, (gpu_freq *
1846
- (IS_GEN9_BC(dev_priv) ||
1847
- INTEL_GEN(dev_priv) >= 10 ?
1848
- GEN9_FREQ_SCALER : 1))),
1043
+ intel_gpu_freq(rps,
1044
+ (gpu_freq *
1045
+ (IS_GEN9_BC(dev_priv) ||
1046
+ INTEL_GEN(dev_priv) >= 10 ?
1047
+ GEN9_FREQ_SCALER : 1))),
18491048 ((ia_freq >> 0) & 0xff) * 100,
18501049 ((ia_freq >> 8) & 0xff) * 100);
18511050 }
1852
-
1853
- mutex_unlock(&dev_priv->pcu_lock);
1854
-
1855
-out:
1856
- intel_runtime_pm_put(dev_priv);
1857
- return ret;
1858
-}
1859
-
1860
-static int i915_opregion(struct seq_file *m, void *unused)
1861
-{
1862
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1863
- struct drm_device *dev = &dev_priv->drm;
1864
- struct intel_opregion *opregion = &dev_priv->opregion;
1865
- int ret;
1866
-
1867
- ret = mutex_lock_interruptible(&dev->struct_mutex);
1868
- if (ret)
1869
- goto out;
1870
-
1871
- if (opregion->header)
1872
- seq_write(m, opregion->header, OPREGION_SIZE);
1873
-
1874
- mutex_unlock(&dev->struct_mutex);
1875
-
1876
-out:
1877
- return 0;
1878
-}
1879
-
1880
-static int i915_vbt(struct seq_file *m, void *unused)
1881
-{
1882
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1883
-
1884
- if (opregion->vbt)
1885
- seq_write(m, opregion->vbt, opregion->vbt_size);
1886
-
1887
- return 0;
1888
-}
1889
-
1890
-static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1891
-{
1892
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1893
- struct drm_device *dev = &dev_priv->drm;
1894
- struct intel_framebuffer *fbdev_fb = NULL;
1895
- struct drm_framebuffer *drm_fb;
1896
- int ret;
1897
-
1898
- ret = mutex_lock_interruptible(&dev->struct_mutex);
1899
- if (ret)
1900
- return ret;
1901
-
1902
-#ifdef CONFIG_DRM_FBDEV_EMULATION
1903
- if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1904
- fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1905
-
1906
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1907
- fbdev_fb->base.width,
1908
- fbdev_fb->base.height,
1909
- fbdev_fb->base.format->depth,
1910
- fbdev_fb->base.format->cpp[0] * 8,
1911
- fbdev_fb->base.modifier,
1912
- drm_framebuffer_read_refcount(&fbdev_fb->base));
1913
- describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1914
- seq_putc(m, '\n');
1915
- }
1916
-#endif
1917
-
1918
- mutex_lock(&dev->mode_config.fb_lock);
1919
- drm_for_each_fb(drm_fb, dev) {
1920
- struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1921
- if (fb == fbdev_fb)
1922
- continue;
1923
-
1924
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1925
- fb->base.width,
1926
- fb->base.height,
1927
- fb->base.format->depth,
1928
- fb->base.format->cpp[0] * 8,
1929
- fb->base.modifier,
1930
- drm_framebuffer_read_refcount(&fb->base));
1931
- describe_obj(m, intel_fb_obj(&fb->base));
1932
- seq_putc(m, '\n');
1933
- }
1934
- mutex_unlock(&dev->mode_config.fb_lock);
1935
- mutex_unlock(&dev->struct_mutex);
1051
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
19361052
19371053 return 0;
19381054 }
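As converted above, i915_ring_freq_table now holds a runtime-PM wakeref only around the pcode reads and emits one row per GPU frequency step in the tab-separated three-column MHz format. A minimal reader sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is card 0:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_ring_freq_table";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		int gpu, cpu, ring;

		/* data rows follow the "%d\t\t%d\t\t\t\t%d\n" format above */
		if (sscanf(line, "%d %d %d", &gpu, &cpu, &ring) == 3)
			printf("gpu=%d MHz cpu=%d MHz ring=%d MHz\n",
			       gpu, cpu, ring);
	}
	fclose(f);
	return 0;
}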
....@@ -1945,19 +1061,20 @@
19451061
19461062 static int i915_context_status(struct seq_file *m, void *unused)
19471063 {
1948
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
1949
- struct drm_device *dev = &dev_priv->drm;
1950
- struct intel_engine_cs *engine;
1951
- struct i915_gem_context *ctx;
1952
- enum intel_engine_id id;
1953
- int ret;
1064
+ struct drm_i915_private *i915 = node_to_i915(m->private);
1065
+ struct i915_gem_context *ctx, *cn;
19541066
1955
- ret = mutex_lock_interruptible(&dev->struct_mutex);
1956
- if (ret)
1957
- return ret;
1067
+ spin_lock(&i915->gem.contexts.lock);
1068
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
1069
+ struct i915_gem_engines_iter it;
1070
+ struct intel_context *ce;
19581071
1959
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1960
- seq_printf(m, "HW context %u ", ctx->hw_id);
1072
+ if (!kref_get_unless_zero(&ctx->ref))
1073
+ continue;
1074
+
1075
+ spin_unlock(&i915->gem.contexts.lock);
1076
+
1077
+ seq_puts(m, "HW context ");
19611078 if (ctx->pid) {
19621079 struct task_struct *task;
19631080
....@@ -1976,22 +1093,26 @@
19761093 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
19771094 seq_putc(m, '\n');
19781095
1979
- for_each_engine(engine, dev_priv, id) {
1980
- struct intel_context *ce =
1981
- to_intel_context(ctx, engine);
1982
-
1983
- seq_printf(m, "%s: ", engine->name);
1984
- if (ce->state)
1985
- describe_obj(m, ce->state->obj);
1986
- if (ce->ring)
1096
+ for_each_gem_engine(ce,
1097
+ i915_gem_context_lock_engines(ctx), it) {
1098
+ if (intel_context_pin_if_active(ce)) {
1099
+ seq_printf(m, "%s: ", ce->engine->name);
1100
+ if (ce->state)
1101
+ i915_debugfs_describe_obj(m, ce->state->obj);
19871102 describe_ctx_ring(m, ce->ring);
1988
- seq_putc(m, '\n');
1103
+ seq_putc(m, '\n');
1104
+ intel_context_unpin(ce);
1105
+ }
19891106 }
1107
+ i915_gem_context_unlock_engines(ctx);
19901108
19911109 seq_putc(m, '\n');
1992
- }
19931110
1994
- mutex_unlock(&dev->struct_mutex);
1111
+ spin_lock(&i915->gem.contexts.lock);
1112
+ list_safe_reset_next(ctx, cn, link);
1113
+ i915_gem_context_put(ctx);
1114
+ }
1115
+ spin_unlock(&i915->gem.contexts.lock);
19951116
19961117 return 0;
19971118 }
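The rewritten walk above drops struct_mutex in favour of the gem.contexts spinlock: each context is pinned with kref_get_unless_zero() before the lock is released for the (sleeping) describe work, and list_safe_reset_next() revalidates the cursor once the lock is retaken. A freestanding toy of the get-unless-zero rule (a reference count that has reached zero must never be resurrected), using hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;
};

/* pin the object only while it is still alive; never do a 0 -> 1 bump */
static bool get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->ref, &old, old + 1))
			return true;	/* pinned: old -> old + 1 */
	}
	return false;			/* already dying, skip it */
}

static void put_ref(struct obj *o)
{
	atomic_fetch_sub(&o->ref, 1);
}

int main(void)
{
	struct obj live = { .ref = 1 }, dead = { .ref = 0 };

	printf("live pinned: %d\n", get_unless_zero(&live));	/* 1 */
	printf("dead pinned: %d\n", get_unless_zero(&dead));	/* 0 */
	put_ref(&live);
	return 0;
}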
....@@ -2023,178 +1144,54 @@
20231144 static int i915_swizzle_info(struct seq_file *m, void *data)
20241145 {
20251146 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2026
-
2027
- intel_runtime_pm_get(dev_priv);
1147
+ struct intel_uncore *uncore = &dev_priv->uncore;
1148
+ intel_wakeref_t wakeref;
20281149
20291150 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2030
- swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1151
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
20311152 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2032
- swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2033
-
2034
- if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2035
- seq_printf(m, "DDC = 0x%08x\n",
2036
- I915_READ(DCC));
2037
- seq_printf(m, "DDC2 = 0x%08x\n",
2038
- I915_READ(DCC2));
2039
- seq_printf(m, "C0DRB3 = 0x%04x\n",
2040
- I915_READ16(C0DRB3));
2041
- seq_printf(m, "C1DRB3 = 0x%04x\n",
2042
- I915_READ16(C1DRB3));
2043
- } else if (INTEL_GEN(dev_priv) >= 6) {
2044
- seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2045
- I915_READ(MAD_DIMM_C0));
2046
- seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2047
- I915_READ(MAD_DIMM_C1));
2048
- seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2049
- I915_READ(MAD_DIMM_C2));
2050
- seq_printf(m, "TILECTL = 0x%08x\n",
2051
- I915_READ(TILECTL));
2052
- if (INTEL_GEN(dev_priv) >= 8)
2053
- seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2054
- I915_READ(GAMTARBMODE));
2055
- else
2056
- seq_printf(m, "ARB_MODE = 0x%08x\n",
2057
- I915_READ(ARB_MODE));
2058
- seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2059
- I915_READ(DISP_ARB_CTL));
2060
- }
1153
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
20611154
20621155 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
20631156 seq_puts(m, "L-shaped memory detected\n");
20641157
2065
- intel_runtime_pm_put(dev_priv);
2066
-
2067
- return 0;
2068
-}
2069
-
2070
-static int per_file_ctx(int id, void *ptr, void *data)
2071
-{
2072
- struct i915_gem_context *ctx = ptr;
2073
- struct seq_file *m = data;
2074
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2075
-
2076
- if (!ppgtt) {
2077
- seq_printf(m, " no ppgtt for context %d\n",
2078
- ctx->user_handle);
1158
+ /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
1159
+ if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
20791160 return 0;
1161
+
1162
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1163
+
1164
+ if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1165
+ seq_printf(m, "DDC = 0x%08x\n",
1166
+ intel_uncore_read(uncore, DCC));
1167
+ seq_printf(m, "DDC2 = 0x%08x\n",
1168
+ intel_uncore_read(uncore, DCC2));
1169
+ seq_printf(m, "C0DRB3 = 0x%04x\n",
1170
+ intel_uncore_read16(uncore, C0DRB3));
1171
+ seq_printf(m, "C1DRB3 = 0x%04x\n",
1172
+ intel_uncore_read16(uncore, C1DRB3));
1173
+ } else if (INTEL_GEN(dev_priv) >= 6) {
1174
+ seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1175
+ intel_uncore_read(uncore, MAD_DIMM_C0));
1176
+ seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1177
+ intel_uncore_read(uncore, MAD_DIMM_C1));
1178
+ seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1179
+ intel_uncore_read(uncore, MAD_DIMM_C2));
1180
+ seq_printf(m, "TILECTL = 0x%08x\n",
1181
+ intel_uncore_read(uncore, TILECTL));
1182
+ if (INTEL_GEN(dev_priv) >= 8)
1183
+ seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1184
+ intel_uncore_read(uncore, GAMTARBMODE));
1185
+ else
1186
+ seq_printf(m, "ARB_MODE = 0x%08x\n",
1187
+ intel_uncore_read(uncore, ARB_MODE));
1188
+ seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1189
+ intel_uncore_read(uncore, DISP_ARB_CTL));
20801190 }
20811191
2082
- if (i915_gem_context_is_default(ctx))
2083
- seq_puts(m, " default context:\n");
2084
- else
2085
- seq_printf(m, " context %d:\n", ctx->user_handle);
2086
- ppgtt->debug_dump(ppgtt, m);
1192
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
20871193
20881194 return 0;
2089
-}
2090
-
2091
-static void gen8_ppgtt_info(struct seq_file *m,
2092
- struct drm_i915_private *dev_priv)
2093
-{
2094
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2095
- struct intel_engine_cs *engine;
2096
- enum intel_engine_id id;
2097
- int i;
2098
-
2099
- if (!ppgtt)
2100
- return;
2101
-
2102
- for_each_engine(engine, dev_priv, id) {
2103
- seq_printf(m, "%s\n", engine->name);
2104
- for (i = 0; i < 4; i++) {
2105
- u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2106
- pdp <<= 32;
2107
- pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2108
- seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2109
- }
2110
- }
2111
-}
2112
-
2113
-static void gen6_ppgtt_info(struct seq_file *m,
2114
- struct drm_i915_private *dev_priv)
2115
-{
2116
- struct intel_engine_cs *engine;
2117
- enum intel_engine_id id;
2118
-
2119
- if (IS_GEN6(dev_priv))
2120
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2121
-
2122
- for_each_engine(engine, dev_priv, id) {
2123
- seq_printf(m, "%s\n", engine->name);
2124
- if (IS_GEN7(dev_priv))
2125
- seq_printf(m, "GFX_MODE: 0x%08x\n",
2126
- I915_READ(RING_MODE_GEN7(engine)));
2127
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2128
- I915_READ(RING_PP_DIR_BASE(engine)));
2129
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2130
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
2131
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2132
- I915_READ(RING_PP_DIR_DCLV(engine)));
2133
- }
2134
- if (dev_priv->mm.aliasing_ppgtt) {
2135
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2136
-
2137
- seq_puts(m, "aliasing PPGTT:\n");
2138
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2139
-
2140
- ppgtt->debug_dump(ppgtt, m);
2141
- }
2142
-
2143
- seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2144
-}
2145
-
2146
-static int i915_ppgtt_info(struct seq_file *m, void *data)
2147
-{
2148
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2149
- struct drm_device *dev = &dev_priv->drm;
2150
- struct drm_file *file;
2151
- int ret;
2152
-
2153
- mutex_lock(&dev->filelist_mutex);
2154
- ret = mutex_lock_interruptible(&dev->struct_mutex);
2155
- if (ret)
2156
- goto out_unlock;
2157
-
2158
- intel_runtime_pm_get(dev_priv);
2159
-
2160
- if (INTEL_GEN(dev_priv) >= 8)
2161
- gen8_ppgtt_info(m, dev_priv);
2162
- else if (INTEL_GEN(dev_priv) >= 6)
2163
- gen6_ppgtt_info(m, dev_priv);
2164
-
2165
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2166
- struct drm_i915_file_private *file_priv = file->driver_priv;
2167
- struct task_struct *task;
2168
-
2169
- task = get_pid_task(file->pid, PIDTYPE_PID);
2170
- if (!task) {
2171
- ret = -ESRCH;
2172
- goto out_rpm;
2173
- }
2174
- seq_printf(m, "\nproc: %s\n", task->comm);
2175
- put_task_struct(task);
2176
- idr_for_each(&file_priv->context_idr, per_file_ctx,
2177
- (void *)(unsigned long)m);
2178
- }
2179
-
2180
-out_rpm:
2181
- intel_runtime_pm_put(dev_priv);
2182
- mutex_unlock(&dev->struct_mutex);
2183
-out_unlock:
2184
- mutex_unlock(&dev->filelist_mutex);
2185
- return ret;
2186
-}
2187
-
2188
-static int count_irq_waiters(struct drm_i915_private *i915)
2189
-{
2190
- struct intel_engine_cs *engine;
2191
- enum intel_engine_id id;
2192
- int count = 0;
2193
-
2194
- for_each_engine(engine, i915, id)
2195
- count += intel_engine_has_waiter(engine);
2196
-
2197
- return count;
21981195 }
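Both conversions in this hunk replace the bare intel_runtime_pm_get()/intel_runtime_pm_put() pair with the cookie-based API: get returns an intel_wakeref_t that must be handed back to put, which lets debug builds pair every release with its acquisition. A freestanding toy of the cookie idea; pm_get/pm_put and the single-holder bookkeeping are illustrative assumptions, not the driver's code:

#include <assert.h>
#include <stdio.h>

typedef unsigned long wakeref_t;

static int wake_count;		/* device must stay awake while > 0 */
static wakeref_t last_cookie;

static wakeref_t pm_get(void)
{
	wake_count++;
	return ++last_cookie;	/* unique tag for this acquisition */
}

static void pm_put(wakeref_t wref)
{
	assert(wref != 0);	/* a put with no matching get */
	wake_count--;
}

int main(void)
{
	wakeref_t wref = pm_get();

	/* ... hardware registers may be touched while awake ... */
	pm_put(wref);
	printf("wake_count=%d\n", wake_count);	/* 0: balanced */
	return 0;
}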
21991196
22001197 static const char *rps_power_to_str(unsigned int power)
....@@ -2214,58 +1211,39 @@
22141211 static int i915_rps_boost_info(struct seq_file *m, void *data)
22151212 {
22161213 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2217
- struct drm_device *dev = &dev_priv->drm;
2218
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
2219
- struct drm_file *file;
1214
+ struct intel_rps *rps = &dev_priv->gt.rps;
22201215
2221
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2222
- seq_printf(m, "GPU busy? %s [%d requests]\n",
2223
- yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2224
- seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
1216
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
1217
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
1218
+ seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
22251219 seq_printf(m, "Boosts outstanding? %d\n",
22261220 atomic_read(&rps->num_waiters));
22271221 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2228
- seq_printf(m, "Frequency requested %d\n",
2229
- intel_gpu_freq(dev_priv, rps->cur_freq));
1222
+ seq_printf(m, "Frequency requested %d, actual %d\n",
1223
+ intel_gpu_freq(rps, rps->cur_freq),
1224
+ intel_rps_read_actual_frequency(rps));
22301225 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2231
- intel_gpu_freq(dev_priv, rps->min_freq),
2232
- intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2233
- intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2234
- intel_gpu_freq(dev_priv, rps->max_freq));
1226
+ intel_gpu_freq(rps, rps->min_freq),
1227
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
1228
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
1229
+ intel_gpu_freq(rps, rps->max_freq));
22351230 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
2236
- intel_gpu_freq(dev_priv, rps->idle_freq),
2237
- intel_gpu_freq(dev_priv, rps->efficient_freq),
2238
- intel_gpu_freq(dev_priv, rps->boost_freq));
1231
+ intel_gpu_freq(rps, rps->idle_freq),
1232
+ intel_gpu_freq(rps, rps->efficient_freq),
1233
+ intel_gpu_freq(rps, rps->boost_freq));
22391234
2240
- mutex_lock(&dev->filelist_mutex);
2241
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2242
- struct drm_i915_file_private *file_priv = file->driver_priv;
2243
- struct task_struct *task;
1235
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
22441236
2245
- rcu_read_lock();
2246
- task = pid_task(file->pid, PIDTYPE_PID);
2247
- seq_printf(m, "%s [%d]: %d boosts\n",
2248
- task ? task->comm : "<unknown>",
2249
- task ? task->pid : -1,
2250
- atomic_read(&file_priv->rps_client.boosts));
2251
- rcu_read_unlock();
2252
- }
2253
- seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2254
- atomic_read(&rps->boosts));
2255
- mutex_unlock(&dev->filelist_mutex);
2256
-
2257
- if (INTEL_GEN(dev_priv) >= 6 &&
2258
- rps->enabled &&
2259
- dev_priv->gt.active_requests) {
1237
+ if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
22601238 u32 rpup, rpupei;
22611239 u32 rpdown, rpdownei;
22621240
2263
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1241
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
22641242 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
22651243 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
22661244 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
22671245 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2268
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1246
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
22691247
22701248 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
22711249 rps_power_to_str(rps->power.mode));
....@@ -2288,524 +1266,8 @@
22881266 const bool edram = INTEL_GEN(dev_priv) > 8;
22891267
22901268 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2291
- seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2292
- intel_uncore_edram_size(dev_priv)/1024/1024);
2293
-
2294
- return 0;
2295
-}
2296
-
2297
-static int i915_huc_load_status_info(struct seq_file *m, void *data)
2298
-{
2299
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2300
- struct drm_printer p;
2301
-
2302
- if (!HAS_HUC(dev_priv))
2303
- return -ENODEV;
2304
-
2305
- p = drm_seq_file_printer(m);
2306
- intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2307
-
2308
- intel_runtime_pm_get(dev_priv);
2309
- seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2310
- intel_runtime_pm_put(dev_priv);
2311
-
2312
- return 0;
2313
-}
2314
-
2315
-static int i915_guc_load_status_info(struct seq_file *m, void *data)
2316
-{
2317
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2318
- struct drm_printer p;
2319
- u32 tmp, i;
2320
-
2321
- if (!HAS_GUC(dev_priv))
2322
- return -ENODEV;
2323
-
2324
- p = drm_seq_file_printer(m);
2325
- intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2326
-
2327
- intel_runtime_pm_get(dev_priv);
2328
-
2329
- tmp = I915_READ(GUC_STATUS);
2330
-
2331
- seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2332
- seq_printf(m, "\tBootrom status = 0x%x\n",
2333
- (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2334
- seq_printf(m, "\tuKernel status = 0x%x\n",
2335
- (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2336
- seq_printf(m, "\tMIA Core status = 0x%x\n",
2337
- (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2338
- seq_puts(m, "\nScratch registers:\n");
2339
- for (i = 0; i < 16; i++)
2340
- seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2341
-
2342
- intel_runtime_pm_put(dev_priv);
2343
-
2344
- return 0;
2345
-}
2346
-
2347
-static const char *
2348
-stringify_guc_log_type(enum guc_log_buffer_type type)
2349
-{
2350
- switch (type) {
2351
- case GUC_ISR_LOG_BUFFER:
2352
- return "ISR";
2353
- case GUC_DPC_LOG_BUFFER:
2354
- return "DPC";
2355
- case GUC_CRASH_DUMP_LOG_BUFFER:
2356
- return "CRASH";
2357
- default:
2358
- MISSING_CASE(type);
2359
- }
2360
-
2361
- return "";
2362
-}
2363
-
2364
-static void i915_guc_log_info(struct seq_file *m,
2365
- struct drm_i915_private *dev_priv)
2366
-{
2367
- struct intel_guc_log *log = &dev_priv->guc.log;
2368
- enum guc_log_buffer_type type;
2369
-
2370
- if (!intel_guc_log_relay_enabled(log)) {
2371
- seq_puts(m, "GuC log relay disabled\n");
2372
- return;
2373
- }
2374
-
2375
- seq_puts(m, "GuC logging stats:\n");
2376
-
2377
- seq_printf(m, "\tRelay full count: %u\n",
2378
- log->relay.full_count);
2379
-
2380
- for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2381
- seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2382
- stringify_guc_log_type(type),
2383
- log->stats[type].flush,
2384
- log->stats[type].sampled_overflow);
2385
- }
2386
-}
2387
-
2388
-static void i915_guc_client_info(struct seq_file *m,
2389
- struct drm_i915_private *dev_priv,
2390
- struct intel_guc_client *client)
2391
-{
2392
- struct intel_engine_cs *engine;
2393
- enum intel_engine_id id;
2394
- uint64_t tot = 0;
2395
-
2396
- seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2397
- client->priority, client->stage_id, client->proc_desc_offset);
2398
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2399
- client->doorbell_id, client->doorbell_offset);
2400
-
2401
- for_each_engine(engine, dev_priv, id) {
2402
- u64 submissions = client->submissions[id];
2403
- tot += submissions;
2404
- seq_printf(m, "\tSubmissions: %llu %s\n",
2405
- submissions, engine->name);
2406
- }
2407
- seq_printf(m, "\tTotal: %llu\n", tot);
2408
-}
2409
-
2410
-static int i915_guc_info(struct seq_file *m, void *data)
2411
-{
2412
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2413
- const struct intel_guc *guc = &dev_priv->guc;
2414
-
2415
- if (!USES_GUC(dev_priv))
2416
- return -ENODEV;
2417
-
2418
- i915_guc_log_info(m, dev_priv);
2419
-
2420
- if (!USES_GUC_SUBMISSION(dev_priv))
2421
- return 0;
2422
-
2423
- GEM_BUG_ON(!guc->execbuf_client);
2424
-
2425
- seq_printf(m, "\nDoorbell map:\n");
2426
- seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2427
- seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2428
-
2429
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2430
- i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2431
- if (guc->preempt_client) {
2432
- seq_printf(m, "\nGuC preempt client @ %p:\n",
2433
- guc->preempt_client);
2434
- i915_guc_client_info(m, dev_priv, guc->preempt_client);
2435
- }
2436
-
2437
- /* Add more as required ... */
2438
-
2439
- return 0;
2440
-}
2441
-
2442
-static int i915_guc_stage_pool(struct seq_file *m, void *data)
2443
-{
2444
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2445
- const struct intel_guc *guc = &dev_priv->guc;
2446
- struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2447
- struct intel_guc_client *client = guc->execbuf_client;
2448
- unsigned int tmp;
2449
- int index;
2450
-
2451
- if (!USES_GUC_SUBMISSION(dev_priv))
2452
- return -ENODEV;
2453
-
2454
- for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2455
- struct intel_engine_cs *engine;
2456
-
2457
- if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2458
- continue;
2459
-
2460
- seq_printf(m, "GuC stage descriptor %u:\n", index);
2461
- seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2462
- seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2463
- seq_printf(m, "\tPriority: %d\n", desc->priority);
2464
- seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2465
- seq_printf(m, "\tEngines used: 0x%x\n",
2466
- desc->engines_used);
2467
- seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2468
- desc->db_trigger_phy,
2469
- desc->db_trigger_cpu,
2470
- desc->db_trigger_uk);
2471
- seq_printf(m, "\tProcess descriptor: 0x%x\n",
2472
- desc->process_desc);
2473
- seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2474
- desc->wq_addr, desc->wq_size);
2475
- seq_putc(m, '\n');
2476
-
2477
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2478
- u32 guc_engine_id = engine->guc_id;
2479
- struct guc_execlist_context *lrc =
2480
- &desc->lrc[guc_engine_id];
2481
-
2482
- seq_printf(m, "\t%s LRC:\n", engine->name);
2483
- seq_printf(m, "\t\tContext desc: 0x%x\n",
2484
- lrc->context_desc);
2485
- seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2486
- seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2487
- seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2488
- seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2489
- seq_putc(m, '\n');
2490
- }
2491
- }
2492
-
2493
- return 0;
2494
-}
2495
-
2496
-static int i915_guc_log_dump(struct seq_file *m, void *data)
2497
-{
2498
- struct drm_info_node *node = m->private;
2499
- struct drm_i915_private *dev_priv = node_to_i915(node);
2500
- bool dump_load_err = !!node->info_ent->data;
2501
- struct drm_i915_gem_object *obj = NULL;
2502
- u32 *log;
2503
- int i = 0;
2504
-
2505
- if (!HAS_GUC(dev_priv))
2506
- return -ENODEV;
2507
-
2508
- if (dump_load_err)
2509
- obj = dev_priv->guc.load_err_log;
2510
- else if (dev_priv->guc.log.vma)
2511
- obj = dev_priv->guc.log.vma->obj;
2512
-
2513
- if (!obj)
2514
- return 0;
2515
-
2516
- log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2517
- if (IS_ERR(log)) {
2518
- DRM_DEBUG("Failed to pin object\n");
2519
- seq_puts(m, "(log data unaccessible)\n");
2520
- return PTR_ERR(log);
2521
- }
2522
-
2523
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2524
- seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2525
- *(log + i), *(log + i + 1),
2526
- *(log + i + 2), *(log + i + 3));
2527
-
2528
- seq_putc(m, '\n');
2529
-
2530
- i915_gem_object_unpin_map(obj);
2531
-
2532
- return 0;
2533
-}
2534
-
2535
-static int i915_guc_log_level_get(void *data, u64 *val)
2536
-{
2537
- struct drm_i915_private *dev_priv = data;
2538
-
2539
- if (!USES_GUC(dev_priv))
2540
- return -ENODEV;
2541
-
2542
- *val = intel_guc_log_get_level(&dev_priv->guc.log);
2543
-
2544
- return 0;
2545
-}
2546
-
2547
-static int i915_guc_log_level_set(void *data, u64 val)
2548
-{
2549
- struct drm_i915_private *dev_priv = data;
2550
-
2551
- if (!USES_GUC(dev_priv))
2552
- return -ENODEV;
2553
-
2554
- return intel_guc_log_set_level(&dev_priv->guc.log, val);
2555
-}
2556
-
2557
-DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2558
- i915_guc_log_level_get, i915_guc_log_level_set,
2559
- "%lld\n");
2560
-
2561
-static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2562
-{
2563
- struct drm_i915_private *dev_priv = inode->i_private;
2564
-
2565
- if (!USES_GUC(dev_priv))
2566
- return -ENODEV;
2567
-
2568
- file->private_data = &dev_priv->guc.log;
2569
-
2570
- return intel_guc_log_relay_open(&dev_priv->guc.log);
2571
-}
2572
-
2573
-static ssize_t
2574
-i915_guc_log_relay_write(struct file *filp,
2575
- const char __user *ubuf,
2576
- size_t cnt,
2577
- loff_t *ppos)
2578
-{
2579
- struct intel_guc_log *log = filp->private_data;
2580
-
2581
- intel_guc_log_relay_flush(log);
2582
-
2583
- return cnt;
2584
-}
2585
-
2586
-static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2587
-{
2588
- struct drm_i915_private *dev_priv = inode->i_private;
2589
-
2590
- intel_guc_log_relay_close(&dev_priv->guc.log);
2591
-
2592
- return 0;
2593
-}
2594
-
2595
-static const struct file_operations i915_guc_log_relay_fops = {
2596
- .owner = THIS_MODULE,
2597
- .open = i915_guc_log_relay_open,
2598
- .write = i915_guc_log_relay_write,
2599
- .release = i915_guc_log_relay_release,
2600
-};
2601
-
2602
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2603
-{
2604
- u8 val;
2605
- static const char * const sink_status[] = {
2606
- "inactive",
2607
- "transition to active, capture and display",
2608
- "active, display from RFB",
2609
- "active, capture and display on sink device timings",
2610
- "transition to inactive, capture and display, timing re-sync",
2611
- "reserved",
2612
- "reserved",
2613
- "sink internal error",
2614
- };
2615
- struct drm_connector *connector = m->private;
2616
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
2617
- struct intel_dp *intel_dp =
2618
- enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2619
- int ret;
2620
-
2621
- if (!CAN_PSR(dev_priv)) {
2622
- seq_puts(m, "PSR Unsupported\n");
2623
- return -ENODEV;
2624
- }
2625
-
2626
- if (connector->status != connector_status_connected)
2627
- return -ENODEV;
2628
-
2629
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2630
-
2631
- if (ret == 1) {
2632
- const char *str = "unknown";
2633
-
2634
- val &= DP_PSR_SINK_STATE_MASK;
2635
- if (val < ARRAY_SIZE(sink_status))
2636
- str = sink_status[val];
2637
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2638
- } else {
2639
- return ret;
2640
- }
2641
-
2642
- return 0;
2643
-}
2644
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2645
-
2646
-static void
2647
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2648
-{
2649
- u32 val, psr_status;
2650
-
2651
- if (dev_priv->psr.psr2_enabled) {
2652
- static const char * const live_status[] = {
2653
- "IDLE",
2654
- "CAPTURE",
2655
- "CAPTURE_FS",
2656
- "SLEEP",
2657
- "BUFON_FW",
2658
- "ML_UP",
2659
- "SU_STANDBY",
2660
- "FAST_SLEEP",
2661
- "DEEP_SLEEP",
2662
- "BUF_ON",
2663
- "TG_ON"
2664
- };
2665
- psr_status = I915_READ(EDP_PSR2_STATUS);
2666
- val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2667
- EDP_PSR2_STATUS_STATE_SHIFT;
2668
- if (val < ARRAY_SIZE(live_status)) {
2669
- seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2670
- psr_status, live_status[val]);
2671
- return;
2672
- }
2673
- } else {
2674
- static const char * const live_status[] = {
2675
- "IDLE",
2676
- "SRDONACK",
2677
- "SRDENT",
2678
- "BUFOFF",
2679
- "BUFON",
2680
- "AUXACK",
2681
- "SRDOFFACK",
2682
- "SRDENT_ON",
2683
- };
2684
- psr_status = I915_READ(EDP_PSR_STATUS);
2685
- val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2686
- EDP_PSR_STATUS_STATE_SHIFT;
2687
- if (val < ARRAY_SIZE(live_status)) {
2688
- seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2689
- psr_status, live_status[val]);
2690
- return;
2691
- }
2692
- }
2693
-
2694
- seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2695
-}
2696
-
2697
-static int i915_edp_psr_status(struct seq_file *m, void *data)
2698
-{
2699
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2700
- u32 psrperf = 0;
2701
- bool enabled = false;
2702
- bool sink_support;
2703
-
2704
- if (!HAS_PSR(dev_priv))
2705
- return -ENODEV;
2706
-
2707
- sink_support = dev_priv->psr.sink_support;
2708
- seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2709
- if (!sink_support)
2710
- return 0;
2711
-
2712
- intel_runtime_pm_get(dev_priv);
2713
-
2714
- mutex_lock(&dev_priv->psr.lock);
2715
- seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2716
- seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2717
- dev_priv->psr.busy_frontbuffer_bits);
2718
-
2719
- if (dev_priv->psr.psr2_enabled)
2720
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2721
- else
2722
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2723
-
2724
- seq_printf(m, "Main link in standby mode: %s\n",
2725
- yesno(dev_priv->psr.link_standby));
2726
-
2727
- seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2728
-
2729
- /*
2730
-	 * SKL+ Perf counter is reset to 0 every time DC state is entered
2731
- */
2732
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2733
- psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2734
- EDP_PSR_PERF_CNT_MASK;
2735
-
2736
- seq_printf(m, "Performance_Counter: %u\n", psrperf);
2737
- }
2738
-
2739
- psr_source_status(dev_priv, m);
2740
- mutex_unlock(&dev_priv->psr.lock);
2741
-
2742
- if (READ_ONCE(dev_priv->psr.debug)) {
2743
- seq_printf(m, "Last attempted entry at: %lld\n",
2744
- dev_priv->psr.last_entry_attempt);
2745
- seq_printf(m, "Last exit at: %lld\n",
2746
- dev_priv->psr.last_exit);
2747
- }
2748
-
2749
- intel_runtime_pm_put(dev_priv);
2750
- return 0;
2751
-}
2752
-
2753
-static int
2754
-i915_edp_psr_debug_set(void *data, u64 val)
2755
-{
2756
- struct drm_i915_private *dev_priv = data;
2757
-
2758
- if (!CAN_PSR(dev_priv))
2759
- return -ENODEV;
2760
-
2761
- DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2762
-
2763
- intel_runtime_pm_get(dev_priv);
2764
- intel_psr_irq_control(dev_priv, !!val);
2765
- intel_runtime_pm_put(dev_priv);
2766
-
2767
- return 0;
2768
-}
2769
-
2770
-static int
2771
-i915_edp_psr_debug_get(void *data, u64 *val)
2772
-{
2773
- struct drm_i915_private *dev_priv = data;
2774
-
2775
- if (!CAN_PSR(dev_priv))
2776
- return -ENODEV;
2777
-
2778
- *val = READ_ONCE(dev_priv->psr.debug);
2779
- return 0;
2780
-}
2781
-
2782
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2783
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2784
- "%llu\n");
2785
-
2786
-static int i915_energy_uJ(struct seq_file *m, void *data)
2787
-{
2788
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2789
- unsigned long long power;
2790
- u32 units;
2791
-
2792
- if (INTEL_GEN(dev_priv) < 6)
2793
- return -ENODEV;
2794
-
2795
- intel_runtime_pm_get(dev_priv);
2796
-
2797
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2798
- intel_runtime_pm_put(dev_priv);
2799
- return -ENODEV;
2800
- }
2801
-
2802
- units = (power & 0x1f00) >> 8;
2803
- power = I915_READ(MCH_SECP_NRG_STTS);
2804
- power = (1000000 * power) >> units; /* convert to uJ */
2805
-
2806
- intel_runtime_pm_put(dev_priv);
2807
-
2808
- seq_printf(m, "%llu", power);
1269
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1270
+ dev_priv->edram_size_mb);
28091271
28101272 return 0;
28111273 }
....@@ -2818,8 +1280,10 @@
28181280 if (!HAS_RUNTIME_PM(dev_priv))
28191281 seq_puts(m, "Runtime power management not supported\n");
28201282
2821
- seq_printf(m, "GPU idle: %s (epoch %u)\n",
2822
- yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
1283
+ seq_printf(m, "Runtime power status: %s\n",
1284
+ enableddisabled(!dev_priv->power_domains.wakeref));
1285
+
1286
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
28231287 seq_printf(m, "IRQs disabled: %s\n",
28241288 yesno(!intel_irqs_enabled(dev_priv)));
28251289 #ifdef CONFIG_PM
....@@ -2832,413 +1296,11 @@
28321296 pci_power_name(pdev->current_state),
28331297 pdev->current_state);
28341298
2835
- return 0;
2836
-}
1299
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
1300
+ struct drm_printer p = drm_seq_file_printer(m);
28371301
2838
-static int i915_power_domain_info(struct seq_file *m, void *unused)
2839
-{
2840
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2841
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
2842
- int i;
2843
-
2844
- mutex_lock(&power_domains->lock);
2845
-
2846
- seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2847
- for (i = 0; i < power_domains->power_well_count; i++) {
2848
- struct i915_power_well *power_well;
2849
- enum intel_display_power_domain power_domain;
2850
-
2851
- power_well = &power_domains->power_wells[i];
2852
- seq_printf(m, "%-25s %d\n", power_well->name,
2853
- power_well->count);
2854
-
2855
- for_each_power_domain(power_domain, power_well->domains)
2856
- seq_printf(m, " %-23s %d\n",
2857
- intel_display_power_domain_str(power_domain),
2858
- power_domains->domain_use_count[power_domain]);
1302
+ print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
28591303 }
2860
-
2861
- mutex_unlock(&power_domains->lock);
2862
-
2863
- return 0;
2864
-}
2865
-
2866
-static int i915_dmc_info(struct seq_file *m, void *unused)
2867
-{
2868
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2869
- struct intel_csr *csr;
2870
-
2871
- if (!HAS_CSR(dev_priv))
2872
- return -ENODEV;
2873
-
2874
- csr = &dev_priv->csr;
2875
-
2876
- intel_runtime_pm_get(dev_priv);
2877
-
2878
- seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2879
- seq_printf(m, "path: %s\n", csr->fw_path);
2880
-
2881
- if (!csr->dmc_payload)
2882
- goto out;
2883
-
2884
- seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2885
- CSR_VERSION_MINOR(csr->version));
2886
-
2887
- if (IS_KABYLAKE(dev_priv) ||
2888
- (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2889
- seq_printf(m, "DC3 -> DC5 count: %d\n",
2890
- I915_READ(SKL_CSR_DC3_DC5_COUNT));
2891
- seq_printf(m, "DC5 -> DC6 count: %d\n",
2892
- I915_READ(SKL_CSR_DC5_DC6_COUNT));
2893
- } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2894
- seq_printf(m, "DC3 -> DC5 count: %d\n",
2895
- I915_READ(BXT_CSR_DC3_DC5_COUNT));
2896
- }
2897
-
2898
-out:
2899
- seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2900
- seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2901
- seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2902
-
2903
- intel_runtime_pm_put(dev_priv);
2904
-
2905
- return 0;
2906
-}
2907
-
2908
-static void intel_seq_print_mode(struct seq_file *m, int tabs,
2909
- struct drm_display_mode *mode)
2910
-{
2911
- int i;
2912
-
2913
- for (i = 0; i < tabs; i++)
2914
- seq_putc(m, '\t');
2915
-
2916
- seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2917
- mode->base.id, mode->name,
2918
- mode->vrefresh, mode->clock,
2919
- mode->hdisplay, mode->hsync_start,
2920
- mode->hsync_end, mode->htotal,
2921
- mode->vdisplay, mode->vsync_start,
2922
- mode->vsync_end, mode->vtotal,
2923
- mode->type, mode->flags);
2924
-}
2925
-
2926
-static void intel_encoder_info(struct seq_file *m,
2927
- struct intel_crtc *intel_crtc,
2928
- struct intel_encoder *intel_encoder)
2929
-{
2930
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2931
- struct drm_device *dev = &dev_priv->drm;
2932
- struct drm_crtc *crtc = &intel_crtc->base;
2933
- struct intel_connector *intel_connector;
2934
- struct drm_encoder *encoder;
2935
-
2936
- encoder = &intel_encoder->base;
2937
- seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2938
- encoder->base.id, encoder->name);
2939
- for_each_connector_on_encoder(dev, encoder, intel_connector) {
2940
- struct drm_connector *connector = &intel_connector->base;
2941
- seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2942
- connector->base.id,
2943
- connector->name,
2944
- drm_get_connector_status_name(connector->status));
2945
- if (connector->status == connector_status_connected) {
2946
- struct drm_display_mode *mode = &crtc->mode;
2947
- seq_printf(m, ", mode:\n");
2948
- intel_seq_print_mode(m, 2, mode);
2949
- } else {
2950
- seq_putc(m, '\n');
2951
- }
2952
- }
2953
-}
2954
-
2955
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2956
-{
2957
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
2958
- struct drm_device *dev = &dev_priv->drm;
2959
- struct drm_crtc *crtc = &intel_crtc->base;
2960
- struct intel_encoder *intel_encoder;
2961
- struct drm_plane_state *plane_state = crtc->primary->state;
2962
- struct drm_framebuffer *fb = plane_state->fb;
2963
-
2964
- if (fb)
2965
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2966
- fb->base.id, plane_state->src_x >> 16,
2967
- plane_state->src_y >> 16, fb->width, fb->height);
2968
- else
2969
- seq_puts(m, "\tprimary plane disabled\n");
2970
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2971
- intel_encoder_info(m, intel_crtc, intel_encoder);
2972
-}
2973
-
2974
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2975
-{
2976
- struct drm_display_mode *mode = panel->fixed_mode;
2977
-
2978
- seq_printf(m, "\tfixed mode:\n");
2979
- intel_seq_print_mode(m, 2, mode);
2980
-}
2981
-
2982
-static void intel_dp_info(struct seq_file *m,
2983
- struct intel_connector *intel_connector)
2984
-{
2985
- struct intel_encoder *intel_encoder = intel_connector->encoder;
2986
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2987
-
2988
- seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2989
- seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2990
- if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2991
- intel_panel_info(m, &intel_connector->panel);
2992
-
2993
- drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2994
- &intel_dp->aux);
2995
-}
2996
-
2997
-static void intel_dp_mst_info(struct seq_file *m,
2998
- struct intel_connector *intel_connector)
2999
-{
3000
- struct intel_encoder *intel_encoder = intel_connector->encoder;
3001
- struct intel_dp_mst_encoder *intel_mst =
3002
- enc_to_mst(&intel_encoder->base);
3003
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
3004
- struct intel_dp *intel_dp = &intel_dig_port->dp;
3005
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3006
- intel_connector->port);
3007
-
3008
- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3009
-}
3010
-
3011
-static void intel_hdmi_info(struct seq_file *m,
3012
- struct intel_connector *intel_connector)
3013
-{
3014
- struct intel_encoder *intel_encoder = intel_connector->encoder;
3015
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3016
-
3017
- seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3018
-}
3019
-
3020
-static void intel_lvds_info(struct seq_file *m,
3021
- struct intel_connector *intel_connector)
3022
-{
3023
- intel_panel_info(m, &intel_connector->panel);
3024
-}
3025
-
3026
-static void intel_connector_info(struct seq_file *m,
3027
- struct drm_connector *connector)
3028
-{
3029
- struct intel_connector *intel_connector = to_intel_connector(connector);
3030
- struct intel_encoder *intel_encoder = intel_connector->encoder;
3031
- struct drm_display_mode *mode;
3032
-
3033
- seq_printf(m, "connector %d: type %s, status: %s\n",
3034
- connector->base.id, connector->name,
3035
- drm_get_connector_status_name(connector->status));
3036
- if (connector->status == connector_status_connected) {
3037
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
3038
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3039
- connector->display_info.width_mm,
3040
- connector->display_info.height_mm);
3041
- seq_printf(m, "\tsubpixel order: %s\n",
3042
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3043
- seq_printf(m, "\tCEA rev: %d\n",
3044
- connector->display_info.cea_rev);
3045
- }
3046
-
3047
- if (!intel_encoder)
3048
- return;
3049
-
3050
- switch (connector->connector_type) {
3051
- case DRM_MODE_CONNECTOR_DisplayPort:
3052
- case DRM_MODE_CONNECTOR_eDP:
3053
- if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3054
- intel_dp_mst_info(m, intel_connector);
3055
- else
3056
- intel_dp_info(m, intel_connector);
3057
- break;
3058
- case DRM_MODE_CONNECTOR_LVDS:
3059
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3060
- intel_lvds_info(m, intel_connector);
3061
- break;
3062
- case DRM_MODE_CONNECTOR_HDMIA:
3063
- if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3064
- intel_encoder->type == INTEL_OUTPUT_DDI)
3065
- intel_hdmi_info(m, intel_connector);
3066
- break;
3067
- default:
3068
- break;
3069
- }
3070
-
3071
- seq_printf(m, "\tmodes:\n");
3072
- list_for_each_entry(mode, &connector->modes, head)
3073
- intel_seq_print_mode(m, 2, mode);
3074
-}
3075
-
3076
-static const char *plane_type(enum drm_plane_type type)
3077
-{
3078
- switch (type) {
3079
- case DRM_PLANE_TYPE_OVERLAY:
3080
- return "OVL";
3081
- case DRM_PLANE_TYPE_PRIMARY:
3082
- return "PRI";
3083
- case DRM_PLANE_TYPE_CURSOR:
3084
- return "CUR";
3085
- /*
3086
- * Deliberately omitting default: to generate compiler warnings
3087
- * when a new drm_plane_type gets added.
3088
- */
3089
- }
3090
-
3091
- return "unknown";
3092
-}
3093
-
3094
-static const char *plane_rotation(unsigned int rotation)
3095
-{
3096
- static char buf[48];
3097
- /*
3098
-	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
3099
-	 * but print them all so that misused values are easy to spot
3100
- */
3101
- snprintf(buf, sizeof(buf),
3102
- "%s%s%s%s%s%s(0x%08x)",
3103
- (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3104
- (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3105
- (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3106
- (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3107
- (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3108
- (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3109
- rotation);
3110
-
3111
- return buf;
3112
-}
3113
-
3114
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3115
-{
3116
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
3117
- struct drm_device *dev = &dev_priv->drm;
3118
- struct intel_plane *intel_plane;
3119
-
3120
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3121
- struct drm_plane_state *state;
3122
- struct drm_plane *plane = &intel_plane->base;
3123
- struct drm_format_name_buf format_name;
3124
-
3125
- if (!plane->state) {
3126
- seq_puts(m, "plane->state is NULL!\n");
3127
- continue;
3128
- }
3129
-
3130
- state = plane->state;
3131
-
3132
- if (state->fb) {
3133
- drm_get_format_name(state->fb->format->format,
3134
- &format_name);
3135
- } else {
3136
- sprintf(format_name.str, "N/A");
3137
- }
3138
-
3139
- seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3140
- plane->base.id,
3141
- plane_type(intel_plane->base.type),
3142
- state->crtc_x, state->crtc_y,
3143
- state->crtc_w, state->crtc_h,
3144
- (state->src_x >> 16),
3145
- ((state->src_x & 0xffff) * 15625) >> 10,
3146
- (state->src_y >> 16),
3147
- ((state->src_y & 0xffff) * 15625) >> 10,
3148
- (state->src_w >> 16),
3149
- ((state->src_w & 0xffff) * 15625) >> 10,
3150
- (state->src_h >> 16),
3151
- ((state->src_h & 0xffff) * 15625) >> 10,
3152
- format_name.str,
3153
- plane_rotation(state->rotation));
3154
- }
3155
-}
3156
-
3157
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3158
-{
3159
- struct intel_crtc_state *pipe_config;
3160
- int num_scalers = intel_crtc->num_scalers;
3161
- int i;
3162
-
3163
- pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3164
-
3165
-	/* Not all platforms have a scaler */
3166
- if (num_scalers) {
3167
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3168
- num_scalers,
3169
- pipe_config->scaler_state.scaler_users,
3170
- pipe_config->scaler_state.scaler_id);
3171
-
3172
- for (i = 0; i < num_scalers; i++) {
3173
- struct intel_scaler *sc =
3174
- &pipe_config->scaler_state.scalers[i];
3175
-
3176
- seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3177
- i, yesno(sc->in_use), sc->mode);
3178
- }
3179
- seq_puts(m, "\n");
3180
- } else {
3181
- seq_puts(m, "\tNo scalers available on this platform\n");
3182
- }
3183
-}
3184
-
3185
-static int i915_display_info(struct seq_file *m, void *unused)
3186
-{
3187
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
3188
- struct drm_device *dev = &dev_priv->drm;
3189
- struct intel_crtc *crtc;
3190
- struct drm_connector *connector;
3191
- struct drm_connector_list_iter conn_iter;
3192
-
3193
- intel_runtime_pm_get(dev_priv);
3194
- seq_printf(m, "CRTC info\n");
3195
- seq_printf(m, "---------\n");
3196
- for_each_intel_crtc(dev, crtc) {
3197
- struct intel_crtc_state *pipe_config;
3198
-
3199
- drm_modeset_lock(&crtc->base.mutex, NULL);
3200
- pipe_config = to_intel_crtc_state(crtc->base.state);
3201
-
3202
- seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3203
- crtc->base.base.id, pipe_name(crtc->pipe),
3204
- yesno(pipe_config->base.active),
3205
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3206
- yesno(pipe_config->dither), pipe_config->pipe_bpp);
3207
-
3208
- if (pipe_config->base.active) {
3209
- struct intel_plane *cursor =
3210
- to_intel_plane(crtc->base.cursor);
3211
-
3212
- intel_crtc_info(m, crtc);
3213
-
3214
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3215
- yesno(cursor->base.state->visible),
3216
- cursor->base.state->crtc_x,
3217
- cursor->base.state->crtc_y,
3218
- cursor->base.state->crtc_w,
3219
- cursor->base.state->crtc_h,
3220
- cursor->cursor.base);
3221
- intel_scaler_info(m, crtc);
3222
- intel_plane_info(m, crtc);
3223
- }
3224
-
3225
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3226
- yesno(!crtc->cpu_fifo_underrun_disabled),
3227
- yesno(!crtc->pch_fifo_underrun_disabled));
3228
- drm_modeset_unlock(&crtc->base.mutex);
3229
- }
3230
-
3231
- seq_printf(m, "\n");
3232
- seq_printf(m, "Connector info\n");
3233
- seq_printf(m, "--------------\n");
3234
- mutex_lock(&dev->mode_config.mutex);
3235
- drm_connector_list_iter_begin(dev, &conn_iter);
3236
- drm_for_each_connector_iter(connector, &conn_iter)
3237
- intel_connector_info(m, connector);
3238
- drm_connector_list_iter_end(&conn_iter);
3239
- mutex_unlock(&dev->mode_config.mutex);
3240
-
3241
- intel_runtime_pm_put(dev_priv);
32421304
32431305 return 0;
32441306 }
....@@ -3247,33 +1309,22 @@
32471309 {
32481310 struct drm_i915_private *dev_priv = node_to_i915(m->private);
32491311 struct intel_engine_cs *engine;
3250
- enum intel_engine_id id;
1312
+ intel_wakeref_t wakeref;
32511313 struct drm_printer p;
32521314
3253
- intel_runtime_pm_get(dev_priv);
1315
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
32541316
3255
- seq_printf(m, "GT awake? %s (epoch %u)\n",
3256
- yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3257
- seq_printf(m, "Global active requests: %d\n",
3258
- dev_priv->gt.active_requests);
3259
- seq_printf(m, "CS timestamp frequency: %u kHz\n",
3260
- dev_priv->info.cs_timestamp_frequency_khz);
1317
+ seq_printf(m, "GT awake? %s [%d]\n",
1318
+ yesno(dev_priv->gt.awake),
1319
+ atomic_read(&dev_priv->gt.wakeref.count));
1320
+ seq_printf(m, "CS timestamp frequency: %u Hz\n",
1321
+ RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
32611322
32621323 p = drm_seq_file_printer(m);
3263
- for_each_engine(engine, dev_priv, id)
1324
+ for_each_uabi_engine(engine, dev_priv)
32641325 intel_engine_dump(engine, &p, "%s\n", engine->name);
32651326
3266
- intel_runtime_pm_put(dev_priv);
3267
-
3268
- return 0;
3269
-}
3270
-
3271
-static int i915_rcs_topology(struct seq_file *m, void *unused)
3272
-{
3273
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
3274
- struct drm_printer p = drm_seq_file_printer(m);
3275
-
3276
- intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
1327
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
32771328
32781329 return 0;
32791330 }
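Note the units change above: the CS timestamp frequency is now reported in Hz rather than kHz. A small sketch of converting raw timestamp ticks to nanoseconds with the Hz value; cs_ticks_to_ns is a hypothetical helper, not a driver function:

#include <inttypes.h>
#include <stdio.h>

static uint64_t cs_ticks_to_ns(uint64_t ticks, uint32_t freq_hz)
{
	/* multiply first to keep integer precision: ns = ticks * 1e9 / f */
	return ticks * UINT64_C(1000000000) / freq_hz;
}

int main(void)
{
	/* e.g. a 19.2 MHz reference clock: 960 ticks come to 50000 ns */
	printf("%" PRIu64 " ns\n", cs_ticks_to_ns(960, 19200000));
	return 0;
}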
....@@ -3288,745 +1339,63 @@
32881339 return 0;
32891340 }
32901341
3291
-static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3292
-{
3293
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
3294
- struct drm_device *dev = &dev_priv->drm;
3295
- int i;
3296
-
3297
- drm_modeset_lock_all(dev);
3298
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3299
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3300
-
3301
- seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3302
- pll->info->id);
3303
- seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3304
- pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3305
- seq_printf(m, " tracked hardware state:\n");
3306
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
3307
- seq_printf(m, " dpll_md: 0x%08x\n",
3308
- pll->state.hw_state.dpll_md);
3309
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3310
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3311
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
3312
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3313
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3314
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3315
- pll->state.hw_state.mg_refclkin_ctl);
3316
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3317
- pll->state.hw_state.mg_clktop2_coreclkctl1);
3318
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3319
- pll->state.hw_state.mg_clktop2_hsclkctl);
3320
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
3321
- pll->state.hw_state.mg_pll_div0);
3322
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
3323
- pll->state.hw_state.mg_pll_div1);
3324
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
3325
- pll->state.hw_state.mg_pll_lf);
3326
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3327
- pll->state.hw_state.mg_pll_frac_lock);
3328
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3329
- pll->state.hw_state.mg_pll_ssc);
3330
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
3331
- pll->state.hw_state.mg_pll_bias);
3332
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3333
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
3334
- }
3335
- drm_modeset_unlock_all(dev);
3336
-
3337
- return 0;
3338
-}
3339
-
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
-	int i;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_engine_cs *engine;
 
-	seq_printf(m, "Workarounds applied: %d\n", wa->count);
-	for (i = 0; i < wa->count; ++i)
-		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
-			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+	for_each_uabi_engine(engine, i915) {
+		const struct i915_wa_list *wal = &engine->ctx_wa_list;
+		const struct i915_wa *wa;
+		unsigned int count;
 
-	return 0;
-}
+		count = wal->count;
+		if (!count)
+			continue;
 
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
+		seq_printf(m, "%s: Workarounds applied: %u\n",
+			   engine->name, count);
 
-	seq_printf(m, "Isochronous Priority Control: %s\n",
-		   yesno(dev_priv->ipc_enabled));
-	return 0;
-}
+		for (wa = wal->list; count--; wa++)
+			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+				   i915_mmio_reg_offset(wa->reg),
+				   wa->set, wa->clr);
 
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (!HAS_IPC(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
-				     size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	int ret;
-	bool enable;
-
-	ret = kstrtobool_from_user(ubuf, len, &enable);
-	if (ret < 0)
-		return ret;
-
-	intel_runtime_pm_get(dev_priv);
-	if (!dev_priv->ipc_enabled && enable)
-		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
-	dev_priv->wm.distrust_bios_wm = true;
-	dev_priv->ipc_enabled = enable;
-	intel_enable_ipc(dev_priv);
-	intel_runtime_pm_put(dev_priv);
-
-	return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_ipc_status_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_ipc_status_write
-};
-
-static int i915_ddb_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_allocation *ddb;
-	struct skl_ddb_entry *entry;
-	enum pipe pipe;
-	int plane;
-
-	if (INTEL_GEN(dev_priv) < 9)
-		return -ENODEV;
-
-	drm_modeset_lock_all(dev);
-
-	ddb = &dev_priv->wm.skl_hw.ddb;
-
-	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
-
-	for_each_pipe(dev_priv, pipe) {
-		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
-
-		for_each_universal_plane(dev_priv, pipe, plane) {
-			entry = &ddb->plane[pipe][plane];
-			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
-				   entry->start, entry->end,
-				   skl_ddb_entry_size(entry));
-		}
-
-		entry = &ddb->plane[pipe][PLANE_CURSOR];
-		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
-			   entry->end, skl_ddb_entry_size(entry));
+		seq_printf(m, "\n");
 	}
 
-	drm_modeset_unlock_all(dev);
-
 	return 0;
 }
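/*
 * Editorial note (not part of the diff): i915_wa_registers now walks the
 * per-engine ctx_wa_list instead of a single device-wide workaround table.
 * For each entry it prints the MMIO offset, the value bits applied (wa->set)
 * and, still under the legacy "mask" label, the bits cleared (wa->clr).
 */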
-
-static void drrs_status_per_crtc(struct seq_file *m,
-				 struct drm_device *dev,
-				 struct intel_crtc *intel_crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_drrs *drrs = &dev_priv->drrs;
-	int vrefresh = 0;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		if (connector->state->crtc != &intel_crtc->base)
-			continue;
-
-		seq_printf(m, "%s:\n", connector->name);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
-		seq_puts(m, "\tVBT: DRRS_type: Static");
-	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
-		seq_puts(m, "\tVBT: DRRS_type: Seamless");
-	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
-		seq_puts(m, "\tVBT: DRRS_type: None");
-	else
-		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
-
-	seq_puts(m, "\n\n");
-
-	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
-		struct intel_panel *panel;
-
-		mutex_lock(&drrs->mutex);
-		/* DRRS Supported */
-		seq_puts(m, "\tDRRS Supported: Yes\n");
-
-		/* disable_drrs() will make drrs->dp NULL */
-		if (!drrs->dp) {
-			seq_puts(m, "Idleness DRRS: Disabled\n");
-			if (dev_priv->psr.enabled)
-				seq_puts(m,
-					 "\tAs PSR is enabled, DRRS is not enabled\n");
-			mutex_unlock(&drrs->mutex);
-			return;
-		}
-
-		panel = &drrs->dp->attached_connector->panel;
-		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
-			   drrs->busy_frontbuffer_bits);
-
-		seq_puts(m, "\n\t\t");
-		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
-			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
-			vrefresh = panel->fixed_mode->vrefresh;
-		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
-			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
-			vrefresh = panel->downclock_mode->vrefresh;
-		} else {
-			seq_printf(m, "DRRS_State: Unknown(%d)\n",
-				   drrs->refresh_rate_type);
-			mutex_unlock(&drrs->mutex);
-			return;
-		}
-		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
-
-		seq_puts(m, "\n\t\t");
-		mutex_unlock(&drrs->mutex);
-	} else {
-		/* DRRS not supported. Print the VBT parameter*/
-		seq_puts(m, "\tDRRS Supported : No");
-	}
-	seq_puts(m, "\n");
-}
-
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *intel_crtc;
-	int active_crtc_cnt = 0;
-
-	drm_modeset_lock_all(dev);
-	for_each_intel_crtc(dev, intel_crtc) {
-		if (intel_crtc->base.state->active) {
-			active_crtc_cnt++;
-			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
-
-			drrs_status_per_crtc(m, dev, intel_crtc);
-		}
-	}
-	drm_modeset_unlock_all(dev);
-
-	if (!active_crtc_cnt)
-		seq_puts(m, "No active crtc found\n");
-
-	return 0;
-}
-
-static int i915_dp_mst_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_encoder *intel_encoder;
-	struct intel_digital_port *intel_dig_port;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		intel_encoder = intel_attached_encoder(connector);
-		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-		if (!intel_dig_port->dp.can_mst)
-			continue;
-
-		seq_printf(m, "MST Source Port %c\n",
-			   port_name(intel_dig_port->base.port));
-		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-
-static ssize_t i915_displayport_test_active_write(struct file *file,
-						  const char __user *ubuf,
-						  size_t len, loff_t *offp)
-{
-	char *input_buffer;
-	int status = 0;
-	struct drm_device *dev;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-	int val = 0;
-
-	dev = ((struct seq_file *)file->private_data)->private;
-
-	if (len == 0)
-		return 0;
-
-	input_buffer = memdup_user_nul(ubuf, len);
-	if (IS_ERR(input_buffer))
-		return PTR_ERR(input_buffer);
-
-	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
-			status = kstrtoint(input_buffer, 10, &val);
-			if (status < 0)
-				break;
-			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
-			/* To prevent erroneous activation of the compliance
-			 * testing code, only accept an actual value of 1 here
-			 */
-			if (val == 1)
-				intel_dp->compliance.test_active = 1;
-			else
-				intel_dp->compliance.test_active = 0;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-	kfree(input_buffer);
-	if (status < 0)
-		return status;
-
-	*offp += len;
-	return len;
-}
-
-static int i915_displayport_test_active_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
-			if (intel_dp->compliance.test_active)
-				seq_puts(m, "1");
-			else
-				seq_puts(m, "0");
-		} else
-			seq_puts(m, "0");
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-
-static int i915_displayport_test_active_open(struct inode *inode,
-					     struct file *file)
-{
-	return single_open(file, i915_displayport_test_active_show,
-			   inode->i_private);
-}
-
-static const struct file_operations i915_displayport_test_active_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_displayport_test_active_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_displayport_test_active_write
-};
-
-static int i915_displayport_test_data_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
-			if (intel_dp->compliance.test_type ==
-			    DP_TEST_LINK_EDID_READ)
-				seq_printf(m, "%lx",
-					   intel_dp->compliance.test_data.edid);
-			else if (intel_dp->compliance.test_type ==
-				 DP_TEST_LINK_VIDEO_PATTERN) {
-				seq_printf(m, "hdisplay: %d\n",
-					   intel_dp->compliance.test_data.hdisplay);
-				seq_printf(m, "vdisplay: %d\n",
-					   intel_dp->compliance.test_data.vdisplay);
-				seq_printf(m, "bpc: %u\n",
-					   intel_dp->compliance.test_data.bpc);
-			}
-		} else
-			seq_puts(m, "0");
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
-
-static int i915_displayport_test_type_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(&encoder->base);
-			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
-		} else
-			seq_puts(m, "0");
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	int level;
-	int num_levels;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
-
-	drm_modeset_lock_all(dev);
-
-	for (level = 0; level < num_levels; level++) {
-		unsigned int latency = wm[level];
-
-		/*
-		 * - WM1+ latency values in 0.5us units
-		 * - latencies are in us on gen9/vlv/chv
-		 */
-		if (INTEL_GEN(dev_priv) >= 9 ||
-		    IS_VALLEYVIEW(dev_priv) ||
-		    IS_CHERRYVIEW(dev_priv) ||
-		    IS_G4X(dev_priv))
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
-		seq_printf(m, "WM%d %u (%u.%u usec)\n",
-			   level, wm[level], latency / 10, latency % 10);
-	}
-
-	drm_modeset_unlock_all(dev);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	const uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.pri_latency;
-
-	wm_latency_show(m, latencies);
-
-	return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	const uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.spr_latency;
-
-	wm_latency_show(m, latencies);
-
-	return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	const uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.cur_latency;
-
-	wm_latency_show(m, latencies);
-
-	return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (HAS_GMCH_DISPLAY(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (HAS_GMCH_DISPLAY(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
-				size_t len, loff_t *offp, uint16_t wm[8])
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	uint16_t new[8] = { 0 };
-	int num_levels;
-	int level;
-	int ret;
-	char tmp[32];
-
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
-
-	if (len >= sizeof(tmp))
-		return -EINVAL;
-
-	if (copy_from_user(tmp, ubuf, len))
-		return -EFAULT;
-
-	tmp[len] = '\0';
-
-	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
-		     &new[0], &new[1], &new[2], &new[3],
-		     &new[4], &new[5], &new[6], &new[7]);
-	if (ret != num_levels)
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	for (level = 0; level < num_levels; level++)
-		wm[level] = new[level];
-
-	drm_modeset_unlock_all(dev);
-
-	return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
-				    size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.pri_latency;
-
-	return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
-				    size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.spr_latency;
-
-	return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
-				    size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.cur_latency;
-
-	return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
-	.owner = THIS_MODULE,
-	.open = pri_wm_latency_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
-	.owner = THIS_MODULE,
-	.open = spr_wm_latency_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
-	.owner = THIS_MODULE,
-	.open = cur_wm_latency_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = cur_wm_latency_write
-};
 
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
+	int ret = intel_gt_terminally_wedged(&i915->gt);
 
-	*val = i915_terminally_wedged(&dev_priv->gpu_error);
-
-	return 0;
+	switch (ret) {
+	case -EIO:
+		*val = 1;
+		return 0;
+	case 0:
+		*val = 0;
+		return 0;
+	default:
+		return ret;
+	}
 }
 
 static int
 i915_wedged_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
-	struct intel_engine_cs *engine;
-	unsigned int tmp;
 
-	/*
-	 * There is no safeguard against this debugfs entry colliding
-	 * with the hangcheck calling same i915_handle_error() in
-	 * parallel, causing an explosion. For now we assume that the
-	 * test harness is responsible enough not to inject gpu hangs
-	 * while it is writing to 'i915_wedged'
-	 */
+	/* Flush any previous reset before applying for a new one */
+	wait_event(i915->gt.reset.queue,
+		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
 
-	if (i915_reset_backoff(&i915->gpu_error))
-		return -EAGAIN;
-
-	for_each_engine_masked(engine, i915, val, tmp) {
-		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
-		engine->hangcheck.stalled = true;
-	}
-
-	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
-			  "Manually set wedged engine mask = %llx", val);
-
-	wait_on_bit(&i915->gpu_error.flags,
-		    I915_RESET_HANDOFF,
-		    TASK_UNINTERRUPTIBLE);
-
+	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
+			      "Manually set wedged engine mask = %llx", val);
 	return 0;
 }
 
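/*
 * Usage sketch (assumption, not guaranteed by this diff): i915_wedged is a
 * debugfs attribute, typically /sys/kernel/debug/dri/0/i915_wedged. Reading
 * it returns 1 once the GT is terminally wedged; writing an engine mask
 * injects a hang, e.g.
 *
 *   echo 0xffffffffffffffff > /sys/kernel/debug/dri/0/i915_wedged
 *
 * which hands the mask straight to intel_gt_handle_error() after any
 * in-flight reset has drained.
 */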
@@ -4035,81 +1404,34 @@
 			"%llu\n");
 
 static int
-fault_irq_set(struct drm_i915_private *i915,
-	      unsigned long *irq,
-	      unsigned long val)
-{
-	int err;
-
-	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
-	if (err)
-		return err;
-
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_LOCKED |
-				     I915_WAIT_INTERRUPTIBLE,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (err)
-		goto err_unlock;
-
-	*irq = val;
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	/* Flush idle worker to disarm irq */
-	drain_delayed_work(&i915->gt.idle_work);
-
-	return 0;
-
-err_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-	return err;
-}
-
-static int
-i915_ring_missed_irq_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-
-	*val = dev_priv->gpu_error.missed_irq_rings;
-	return 0;
-}
-
-static int
-i915_ring_missed_irq_set(void *data, u64 val)
+i915_perf_noa_delay_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
 
-	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
-}
+	/*
+	 * This would lead to infinite waits as we're doing timestamp
+	 * difference on the CS with only 32bits.
+	 */
+	if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
+		return -EINVAL;
 
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
-			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
-			"0x%08llx\n");
-
-static int
-i915_ring_test_irq_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-
-	*val = dev_priv->gpu_error.test_irq_rings;
-
+	atomic64_set(&i915->perf.noa_programming_delay, val);
 	return 0;
 }
 
 static int
-i915_ring_test_irq_set(void *data, u64 val)
+i915_perf_noa_delay_get(void *data, u64 *val)
 {
 	struct drm_i915_private *i915 = data;
 
-	val &= INTEL_INFO(i915)->ring_mask;
-	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
-	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
+	*val = atomic64_read(&i915->perf.noa_programming_delay);
+	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
-			i915_ring_test_irq_get, i915_ring_test_irq_set,
-			"0x%08llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
+			i915_perf_noa_delay_get,
+			i915_perf_noa_delay_set,
+			"%llu\n");
 
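/*
 * Editorial note: the value written to i915_perf_noa_delay is a delay in
 * nanoseconds. The setter converts it to command-streamer timestamp ticks
 * and rejects anything above U32_MAX, because the CS computes the delay as
 * a 32-bit timestamp difference and a larger value would never elapse.
 */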
 #define DROP_UNBOUND	BIT(0)
 #define DROP_BOUND	BIT(1)
@@ -4118,13 +1440,19 @@
 #define DROP_FREED	BIT(4)
 #define DROP_SHRINK_ALL	BIT(5)
 #define DROP_IDLE	BIT(6)
+#define DROP_RESET_ACTIVE	BIT(7)
+#define DROP_RESET_SEQNO	BIT(8)
+#define DROP_RCU	BIT(9)
 #define DROP_ALL (DROP_UNBOUND	| \
 		  DROP_BOUND	| \
 		  DROP_RETIRE	| \
 		  DROP_ACTIVE	| \
 		  DROP_FREED	| \
 		  DROP_SHRINK_ALL |\
-		  DROP_IDLE)
+		  DROP_IDLE	| \
+		  DROP_RESET_ACTIVE | \
+		  DROP_RESET_SEQNO | \
+		  DROP_RCU)
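/*
 * Usage sketch (assumption): the DROP_* flags are OR'd together and written
 * to the i915_gem_drop_caches debugfs file, e.g.
 *
 *   echo 0x3 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * combines DROP_UNBOUND | DROP_BOUND, while writing DROP_ALL exercises every
 * path, including the new DROP_RESET_ACTIVE and DROP_RCU handling added here.
 */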
 static int
 i915_drop_caches_get(void *data, u64 *val)
 {
@@ -4132,59 +1460,70 @@
 
 	return 0;
 }
+static int
+gt_drop_caches(struct intel_gt *gt, u64 val)
+{
+	int ret;
+
+	if (val & DROP_RESET_ACTIVE &&
+	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+		intel_gt_set_wedged(gt);
+
+	if (val & DROP_RETIRE)
+		intel_gt_retire_requests(gt);
+
+	if (val & (DROP_IDLE | DROP_ACTIVE)) {
+		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+		if (ret)
+			return ret;
+	}
+
+	if (val & DROP_IDLE) {
+		ret = intel_gt_pm_wait_for_idle(gt);
+		if (ret)
+			return ret;
+	}
+
+	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
+		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
+
+	if (val & DROP_FREED)
+		intel_gt_flush_buffer_pool(gt);
+
+	return 0;
+}
 
 static int
 i915_drop_caches_set(void *data, u64 val)
 {
-	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
-	int ret = 0;
+	struct drm_i915_private *i915 = data;
+	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
 		  val, val & DROP_ALL);
 
-	/* No need to check and wait for gpu resets, only libdrm auto-restarts
-	 * on ioctls on -EAGAIN. */
-	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
-		ret = mutex_lock_interruptible(&dev->struct_mutex);
-		if (ret)
-			return ret;
-
-		if (val & DROP_ACTIVE)
-			ret = i915_gem_wait_for_idle(dev_priv,
-						     I915_WAIT_INTERRUPTIBLE |
-						     I915_WAIT_LOCKED,
-						     MAX_SCHEDULE_TIMEOUT);
-
-		if (val & DROP_RETIRE)
-			i915_retire_requests(dev_priv);
-
-		mutex_unlock(&dev->struct_mutex);
-	}
+	ret = gt_drop_caches(&i915->gt, val);
+	if (ret)
+		return ret;
 
 	fs_reclaim_acquire(GFP_KERNEL);
 	if (val & DROP_BOUND)
-		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
+		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
 
 	if (val & DROP_UNBOUND)
-		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
+		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
 
 	if (val & DROP_SHRINK_ALL)
-		i915_gem_shrink_all(dev_priv);
+		i915_gem_shrink_all(i915);
 	fs_reclaim_release(GFP_KERNEL);
 
-	if (val & DROP_IDLE) {
-		do {
-			if (READ_ONCE(dev_priv->gt.active_requests))
-				flush_delayed_work(&dev_priv->gt.retire_work);
-			drain_delayed_work(&dev_priv->gt.idle_work);
-		} while (READ_ONCE(dev_priv->gt.awake));
-	}
+	if (val & DROP_RCU)
+		rcu_barrier();
 
 	if (val & DROP_FREED)
-		i915_gem_drain_freed_objects(dev_priv);
+		i915_gem_drain_freed_objects(i915);
 
-	return ret;
+	return 0;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
@@ -4195,16 +1534,14 @@
 i915_cache_sharing_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
-	u32 snpcr;
+	intel_wakeref_t wakeref;
+	u32 snpcr = 0;
 
-	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
 		return -ENODEV;
 
-	intel_runtime_pm_get(dev_priv);
-
-	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
-	intel_runtime_pm_put(dev_priv);
+	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -4215,24 +1552,26 @@
 i915_cache_sharing_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
-	u32 snpcr;
+	intel_wakeref_t wakeref;
 
-	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
 		return -ENODEV;
 
 	if (val > 3)
 		return -EINVAL;
 
-	intel_runtime_pm_get(dev_priv);
-	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+	drm_dbg(&dev_priv->drm,
+		"Manually setting uncore sharing to %llu\n", val);
+	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+		u32 snpcr;
 
-	/* Update the cache sharing policy here as well */
-	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-	snpcr &= ~GEN6_MBC_SNPCR_MASK;
-	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
-	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+		/* Update the cache sharing policy here as well */
+		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+		snpcr &= ~GEN6_MBC_SNPCR_MASK;
+		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
+		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+	}
 
-	intel_runtime_pm_put(dev_priv);
 	return 0;
 }
 
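/*
 * Editorial note: i915_cache_sharing exposes the snoop-control field of
 * GEN6_MBCUNIT_SNPCR on gen6/gen7 only; writes accept policies 0..3, and
 * the register access is now wrapped in with_intel_runtime_pm() so the
 * wakeref cannot leak on any path.
 */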
@@ -4240,262 +1579,23 @@
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
-					  struct sseu_dev_info *sseu)
-{
-#define SS_MAX 2
-	const int ss_max = SS_MAX;
-	u32 sig1[SS_MAX], sig2[SS_MAX];
-	int ss;
-
-	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
-	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
-	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
-	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
-
-	for (ss = 0; ss < ss_max; ss++) {
-		unsigned int eu_cnt;
-
-		if (sig1[ss] & CHV_SS_PG_ENABLE)
-			/* skip disabled subslice */
-			continue;
-
-		sseu->slice_mask = BIT(0);
-		sseu->subslice_mask[0] |= BIT(ss);
-		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
-			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
-			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
-			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-		sseu->eu_total += eu_cnt;
-		sseu->eu_per_subslice = max_t(unsigned int,
-					      sseu->eu_per_subslice, eu_cnt);
-	}
-#undef SS_MAX
-}
-
-static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
-				     struct sseu_dev_info *sseu)
-{
-#define SS_MAX 6
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
-	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
-	int s, ss;
-
-	for (s = 0; s < info->sseu.max_slices; s++) {
-		/*
-		 * FIXME: Valid SS Mask respects the spec and read
-		 * only valid bits for those registers, excluding reserverd
-		 * although this seems wrong because it would leave many
-		 * subslices without ACK.
-		 */
-		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
-			GEN10_PGCTL_VALID_SS_MASK(s);
-		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
-		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
-	}
-
-	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
-		     GEN9_PGCTL_SSA_EU19_ACK |
-		     GEN9_PGCTL_SSA_EU210_ACK |
-		     GEN9_PGCTL_SSA_EU311_ACK;
-	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
-		     GEN9_PGCTL_SSB_EU19_ACK |
-		     GEN9_PGCTL_SSB_EU210_ACK |
-		     GEN9_PGCTL_SSB_EU311_ACK;
-
-	for (s = 0; s < info->sseu.max_slices; s++) {
-		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
-			/* skip disabled slice */
-			continue;
-
-		sseu->slice_mask |= BIT(s);
-		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
-
-		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
-			unsigned int eu_cnt;
-
-			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-				/* skip disabled subslice */
-				continue;
-
-			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
-					       eu_mask[ss % 2]);
-			sseu->eu_total += eu_cnt;
-			sseu->eu_per_subslice = max_t(unsigned int,
-						      sseu->eu_per_subslice,
-						      eu_cnt);
-		}
-	}
-#undef SS_MAX
-}
-
-static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
-				    struct sseu_dev_info *sseu)
-{
-#define SS_MAX 3
-	const struct intel_device_info *info = INTEL_INFO(dev_priv);
-	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
-	int s, ss;
-
-	for (s = 0; s < info->sseu.max_slices; s++) {
-		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
-		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
-		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
-	}
-
-	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
-		     GEN9_PGCTL_SSA_EU19_ACK |
-		     GEN9_PGCTL_SSA_EU210_ACK |
-		     GEN9_PGCTL_SSA_EU311_ACK;
-	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
-		     GEN9_PGCTL_SSB_EU19_ACK |
-		     GEN9_PGCTL_SSB_EU210_ACK |
-		     GEN9_PGCTL_SSB_EU311_ACK;
-
-	for (s = 0; s < info->sseu.max_slices; s++) {
-		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
-			/* skip disabled slice */
-			continue;
-
-		sseu->slice_mask |= BIT(s);
-
-		if (IS_GEN9_BC(dev_priv))
-			sseu->subslice_mask[s] =
-				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
-
-		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
-			unsigned int eu_cnt;
-
-			if (IS_GEN9_LP(dev_priv)) {
-				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-					/* skip disabled subslice */
-					continue;
-
-				sseu->subslice_mask[s] |= BIT(ss);
-			}
-
-			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
-					       eu_mask[ss%2]);
-			sseu->eu_total += eu_cnt;
-			sseu->eu_per_subslice = max_t(unsigned int,
-						      sseu->eu_per_subslice,
-						      eu_cnt);
-		}
-	}
-#undef SS_MAX
-}
-
-static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
-					 struct sseu_dev_info *sseu)
-{
-	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
-	int s;
-
-	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
-
-	if (sseu->slice_mask) {
-		sseu->eu_per_subslice =
-			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
-		for (s = 0; s < fls(sseu->slice_mask); s++) {
-			sseu->subslice_mask[s] =
-				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
-		}
-		sseu->eu_total = sseu->eu_per_subslice *
-				 sseu_subslice_total(sseu);
-
-		/* subtract fused off EU(s) from enabled slice(s) */
-		for (s = 0; s < fls(sseu->slice_mask); s++) {
-			u8 subslice_7eu =
-				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
-
-			sseu->eu_total -= hweight8(subslice_7eu);
-		}
-	}
-}
-
-static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
-				 const struct sseu_dev_info *sseu)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const char *type = is_available_info ? "Available" : "Enabled";
-	int s;
-
-	seq_printf(m, " %s Slice Mask: %04x\n", type,
-		   sseu->slice_mask);
-	seq_printf(m, " %s Slice Total: %u\n", type,
-		   hweight8(sseu->slice_mask));
-	seq_printf(m, " %s Subslice Total: %u\n", type,
-		   sseu_subslice_total(sseu));
-	for (s = 0; s < fls(sseu->slice_mask); s++) {
-		seq_printf(m, " %s Slice%i subslices: %u\n", type,
-			   s, hweight8(sseu->subslice_mask[s]));
-	}
-	seq_printf(m, " %s EU Total: %u\n", type,
-		   sseu->eu_total);
-	seq_printf(m, " %s EU Per Subslice: %u\n", type,
-		   sseu->eu_per_subslice);
-
-	if (!is_available_info)
-		return;
-
-	seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
-	if (HAS_POOLED_EU(dev_priv))
-		seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
-
-	seq_printf(m, " Has Slice Power Gating: %s\n",
-		   yesno(sseu->has_slice_pg));
-	seq_printf(m, " Has Subslice Power Gating: %s\n",
-		   yesno(sseu->has_subslice_pg));
-	seq_printf(m, " Has EU Power Gating: %s\n",
-		   yesno(sseu->has_eu_pg));
-}
-
 static int i915_sseu_status(struct seq_file *m, void *unused)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct sseu_dev_info sseu;
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+	struct intel_gt *gt = &i915->gt;
 
-	if (INTEL_GEN(dev_priv) < 8)
-		return -ENODEV;
-
-	seq_puts(m, "SSEU Device Info\n");
-	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
-
-	seq_puts(m, "SSEU Device Status\n");
-	memset(&sseu, 0, sizeof(sseu));
-	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
-	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
-	sseu.max_eus_per_subslice =
-		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
-
-	intel_runtime_pm_get(dev_priv);
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		cherryview_sseu_device_status(dev_priv, &sseu);
-	} else if (IS_BROADWELL(dev_priv)) {
-		broadwell_sseu_device_status(dev_priv, &sseu);
-	} else if (IS_GEN9(dev_priv)) {
-		gen9_sseu_device_status(dev_priv, &sseu);
-	} else if (INTEL_GEN(dev_priv) >= 10) {
-		gen10_sseu_device_status(dev_priv, &sseu);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-
-	i915_print_sseu_info(m, false, &sseu);
-
-	return 0;
+	return intel_sseu_status(m, gt);
 }
 
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
+	struct intel_gt *gt = &i915->gt;
 
-	if (INTEL_GEN(i915) < 6)
-		return 0;
-
-	intel_runtime_pm_get(i915);
-	intel_uncore_forcewake_user_get(i915);
+	atomic_inc(&gt->user_wakeref);
+	intel_gt_pm_get(gt);
+	if (INTEL_GEN(i915) >= 6)
+		intel_uncore_forcewake_user_get(gt->uncore);
 
 	return 0;
 }
@@ -4503,12 +1603,12 @@
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
+	struct intel_gt *gt = &i915->gt;
 
-	if (INTEL_GEN(i915) < 6)
-		return 0;
-
-	intel_uncore_forcewake_user_put(i915);
-	intel_runtime_pm_put(i915);
+	if (INTEL_GEN(i915) >= 6)
+		intel_uncore_forcewake_user_put(&i915->uncore);
+	intel_gt_pm_put(gt);
+	atomic_dec(&gt->user_wakeref);
 
 	return 0;
 }
@@ -4519,229 +1619,21 @@
 	.release = i915_forcewake_release,
 };
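/*
 * Usage sketch (assumption): holding i915_forcewake_user open keeps the GT
 * awake for manual register inspection, e.g. from a shell:
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # pin forcewake
 *   ...peek/poke registers...
 *   exec 3<&-                                             # release it
 *
 * open() now also takes an intel_gt_pm reference and bumps gt->user_wakeref,
 * so runtime suspend is blocked for as long as the file stays open.
 */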
45211621
4522
-static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4523
-{
4524
- struct drm_i915_private *dev_priv = m->private;
4525
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
4526
-
4527
- seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4528
- seq_printf(m, "Detected: %s\n",
4529
- yesno(delayed_work_pending(&hotplug->reenable_work)));
4530
-
4531
- return 0;
4532
-}
4533
-
4534
-static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4535
- const char __user *ubuf, size_t len,
4536
- loff_t *offp)
4537
-{
4538
- struct seq_file *m = file->private_data;
4539
- struct drm_i915_private *dev_priv = m->private;
4540
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
4541
- unsigned int new_threshold;
4542
- int i;
4543
- char *newline;
4544
- char tmp[16];
4545
-
4546
- if (len >= sizeof(tmp))
4547
- return -EINVAL;
4548
-
4549
- if (copy_from_user(tmp, ubuf, len))
4550
- return -EFAULT;
4551
-
4552
- tmp[len] = '\0';
4553
-
4554
- /* Strip newline, if any */
4555
- newline = strchr(tmp, '\n');
4556
- if (newline)
4557
- *newline = '\0';
4558
-
4559
- if (strcmp(tmp, "reset") == 0)
4560
- new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4561
- else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4562
- return -EINVAL;
4563
-
4564
- if (new_threshold > 0)
4565
- DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4566
- new_threshold);
4567
- else
4568
- DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4569
-
4570
- spin_lock_irq(&dev_priv->irq_lock);
4571
- hotplug->hpd_storm_threshold = new_threshold;
4572
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4573
- for_each_hpd_pin(i)
4574
- hotplug->stats[i].count = 0;
4575
- spin_unlock_irq(&dev_priv->irq_lock);
4576
-
4577
- /* Re-enable hpd immediately if we were in an irq storm */
4578
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
4579
-
4580
- return len;
4581
-}
4582
-
4583
-static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4584
-{
4585
- return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4586
-}
4587
-
4588
-static const struct file_operations i915_hpd_storm_ctl_fops = {
4589
- .owner = THIS_MODULE,
4590
- .open = i915_hpd_storm_ctl_open,
4591
- .read = seq_read,
4592
- .llseek = seq_lseek,
4593
- .release = single_release,
4594
- .write = i915_hpd_storm_ctl_write
4595
-};
4596
-
4597
-static int i915_drrs_ctl_set(void *data, u64 val)
4598
-{
4599
- struct drm_i915_private *dev_priv = data;
4600
- struct drm_device *dev = &dev_priv->drm;
4601
- struct intel_crtc *intel_crtc;
4602
- struct intel_encoder *encoder;
4603
- struct intel_dp *intel_dp;
4604
-
4605
- if (INTEL_GEN(dev_priv) < 7)
4606
- return -ENODEV;
4607
-
4608
- drm_modeset_lock_all(dev);
4609
- for_each_intel_crtc(dev, intel_crtc) {
4610
- if (!intel_crtc->base.state->active ||
4611
- !intel_crtc->config->has_drrs)
4612
- continue;
4613
-
4614
- for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4615
- if (encoder->type != INTEL_OUTPUT_EDP)
4616
- continue;
4617
-
4618
- DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4619
- val ? "en" : "dis", val);
4620
-
4621
- intel_dp = enc_to_intel_dp(&encoder->base);
4622
- if (val)
4623
- intel_edp_drrs_enable(intel_dp,
4624
- intel_crtc->config);
4625
- else
4626
- intel_edp_drrs_disable(intel_dp,
4627
- intel_crtc->config);
4628
- }
4629
- }
4630
- drm_modeset_unlock_all(dev);
4631
-
4632
- return 0;
4633
-}
4634
-
4635
-DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4636
-
4637
-static ssize_t
4638
-i915_fifo_underrun_reset_write(struct file *filp,
4639
- const char __user *ubuf,
4640
- size_t cnt, loff_t *ppos)
4641
-{
4642
- struct drm_i915_private *dev_priv = filp->private_data;
4643
- struct intel_crtc *intel_crtc;
4644
- struct drm_device *dev = &dev_priv->drm;
4645
- int ret;
4646
- bool reset;
4647
-
4648
- ret = kstrtobool_from_user(ubuf, cnt, &reset);
4649
- if (ret)
4650
- return ret;
4651
-
4652
- if (!reset)
4653
- return cnt;
4654
-
4655
- for_each_intel_crtc(dev, intel_crtc) {
4656
- struct drm_crtc_commit *commit;
4657
- struct intel_crtc_state *crtc_state;
4658
-
4659
- ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4660
- if (ret)
4661
- return ret;
4662
-
4663
- crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4664
- commit = crtc_state->base.commit;
4665
- if (commit) {
4666
- ret = wait_for_completion_interruptible(&commit->hw_done);
4667
- if (!ret)
4668
- ret = wait_for_completion_interruptible(&commit->flip_done);
4669
- }
4670
-
4671
- if (!ret && crtc_state->base.active) {
4672
- DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4673
- pipe_name(intel_crtc->pipe));
4674
-
4675
- intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4676
- }
4677
-
4678
- drm_modeset_unlock(&intel_crtc->base.mutex);
4679
-
4680
- if (ret)
4681
- return ret;
4682
- }
4683
-
4684
- ret = intel_fbc_reset_underrun(dev_priv);
4685
- if (ret)
4686
- return ret;
4687
-
4688
- return cnt;
4689
-}
4690
-
4691
-static const struct file_operations i915_fifo_underrun_reset_ops = {
4692
- .owner = THIS_MODULE,
4693
- .open = simple_open,
4694
- .write = i915_fifo_underrun_reset_write,
4695
- .llseek = default_llseek,
4696
-};
4697
-
46981622 static const struct drm_info_list i915_debugfs_list[] = {
46991623 {"i915_capabilities", i915_capabilities, 0},
47001624 {"i915_gem_objects", i915_gem_object_info, 0},
4701
- {"i915_gem_gtt", i915_gem_gtt_info, 0},
4702
- {"i915_gem_stolen", i915_gem_stolen_list_info },
47031625 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
47041626 {"i915_gem_interrupt", i915_interrupt_info, 0},
4705
- {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4706
- {"i915_guc_info", i915_guc_info, 0},
4707
- {"i915_guc_load_status", i915_guc_load_status_info, 0},
4708
- {"i915_guc_log_dump", i915_guc_log_dump, 0},
4709
- {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4710
- {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4711
- {"i915_huc_load_status", i915_huc_load_status_info, 0},
47121627 {"i915_frequency_info", i915_frequency_info, 0},
4713
- {"i915_hangcheck_info", i915_hangcheck_info, 0},
4714
- {"i915_reset_info", i915_reset_info, 0},
4715
- {"i915_drpc_info", i915_drpc_info, 0},
4716
- {"i915_emon_status", i915_emon_status, 0},
47171628 {"i915_ring_freq_table", i915_ring_freq_table, 0},
4718
- {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4719
- {"i915_fbc_status", i915_fbc_status, 0},
4720
- {"i915_ips_status", i915_ips_status, 0},
4721
- {"i915_sr_status", i915_sr_status, 0},
4722
- {"i915_opregion", i915_opregion, 0},
4723
- {"i915_vbt", i915_vbt, 0},
4724
- {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
47251629 {"i915_context_status", i915_context_status, 0},
4726
- {"i915_forcewake_domains", i915_forcewake_domains, 0},
47271630 {"i915_swizzle_info", i915_swizzle_info, 0},
4728
- {"i915_ppgtt_info", i915_ppgtt_info, 0},
47291631 {"i915_llc", i915_llc, 0},
4730
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
4731
- {"i915_energy_uJ", i915_energy_uJ, 0},
47321632 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4733
- {"i915_power_domain_info", i915_power_domain_info, 0},
4734
- {"i915_dmc_info", i915_dmc_info, 0},
4735
- {"i915_display_info", i915_display_info, 0},
47361633 {"i915_engine_info", i915_engine_info, 0},
4737
- {"i915_rcs_topology", i915_rcs_topology, 0},
47381634 {"i915_shrinker_info", i915_shrinker_info, 0},
4739
- {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4740
- {"i915_dp_mst_info", i915_dp_mst_info, 0},
47411635 {"i915_wa_registers", i915_wa_registers, 0},
4742
- {"i915_ddb_info", i915_ddb_info, 0},
47431636 {"i915_sseu_status", i915_sseu_status, 0},
4744
- {"i915_drrs_status", i915_drrs_status, 0},
47451637 {"i915_rps_boost_info", i915_rps_boost_info, 0},
47461638 };
47471639 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
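/*
 * Editorial note: each i915_debugfs_list entry becomes a read-only file
 * under the DRM minor's debugfs directory (usually /sys/kernel/debug/dri/0).
 * The entries dropped above are mostly display- and GuC-related; in upstream
 * i915 they were moved to dedicated debugfs code rather than deleted outright.
 */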
@@ -4750,171 +1642,34 @@
 	const char *name;
 	const struct file_operations *fops;
 } i915_debugfs_files[] = {
+	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
 	{"i915_wedged", &i915_wedged_fops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
-	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
-	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_gpu_info", &i915_gpu_info_fops},
 #endif
-	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
-	{"i915_next_seqno", &i915_next_seqno_fops},
-	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
-	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
-	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
-	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
-	{"i915_dp_test_data", &i915_displayport_test_data_fops},
-	{"i915_dp_test_type", &i915_displayport_test_type_fops},
-	{"i915_dp_test_active", &i915_displayport_test_active_fops},
-	{"i915_guc_log_level", &i915_guc_log_level_fops},
-	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
-	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
-	{"i915_ipc_status", &i915_ipc_status_fops},
-	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
-	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
 };
 
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
 	struct drm_minor *minor = dev_priv->drm.primary;
-	struct dentry *ent;
 	int i;
 
-	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
-				  minor->debugfs_root, to_i915(minor->dev),
-				  &i915_forcewake_fops);
-	if (!ent)
-		return -ENOMEM;
+	i915_debugfs_params(dev_priv);
 
+	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
+			    to_i915(minor->dev), &i915_forcewake_fops);
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
-		ent = debugfs_create_file(i915_debugfs_files[i].name,
-					  S_IRUGO | S_IWUSR,
-					  minor->debugfs_root,
-					  to_i915(minor->dev),
-					  i915_debugfs_files[i].fops);
-		if (!ent)
-			return -ENOMEM;
+		debugfs_create_file(i915_debugfs_files[i].name,
+				    S_IRUGO | S_IWUSR,
+				    minor->debugfs_root,
+				    to_i915(minor->dev),
+				    i915_debugfs_files[i].fops);
 	}
 
-	return drm_debugfs_create_files(i915_debugfs_list,
-					I915_DEBUGFS_ENTRIES,
-					minor->debugfs_root, minor);
-}
-
-struct dpcd_block {
-	/* DPCD dump start address. */
-	unsigned int offset;
-	/* DPCD dump end address, inclusive. If unset, .size will be used. */
-	unsigned int end;
-	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
-	size_t size;
-	/* Only valid for eDP. */
-	bool edp;
-};
-
-static const struct dpcd_block i915_dpcd_debug[] = {
-	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
-	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
-	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
-	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
-	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
-	{ .offset = DP_SET_POWER },
-	{ .offset = DP_EDP_DPCD_REV },
-	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
-	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
-	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
-};
-
-static int i915_dpcd_show(struct seq_file *m, void *data)
-{
-	struct drm_connector *connector = m->private;
-	struct intel_dp *intel_dp =
-		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
-	uint8_t buf[16];
-	ssize_t err;
-	int i;
-
-	if (connector->status != connector_status_connected)
-		return -ENODEV;
-
-	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
-		const struct dpcd_block *b = &i915_dpcd_debug[i];
-		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
-
-		if (b->edp &&
-		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
-			continue;
-
-		/* low tech for now */
-		if (WARN_ON(size > sizeof(buf)))
-			continue;
-
-		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
-		if (err <= 0) {
-			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
-				  size, b->offset, err);
-			continue;
-		}
-
-		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
-	}
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
-
-static int i915_panel_show(struct seq_file *m, void *data)
-{
-	struct drm_connector *connector = m->private;
-	struct intel_dp *intel_dp =
-		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
-
-	if (connector->status != connector_status_connected)
-		return -ENODEV;
-
-	seq_printf(m, "Panel power up delay: %d\n",
-		   intel_dp->panel_power_up_delay);
-	seq_printf(m, "Panel power down delay: %d\n",
-		   intel_dp->panel_power_down_delay);
-	seq_printf(m, "Backlight on delay: %d\n",
-		   intel_dp->backlight_on_delay);
-	seq_printf(m, "Backlight off delay: %d\n",
-		   intel_dp->backlight_off_delay);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
-/**
- * i915_debugfs_connector_add - add i915 specific connector debugfs files
- * @connector: pointer to a registered drm_connector
- *
- * Cleanup will be done by drm_connector_unregister() through a call to
- * drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
- */
-int i915_debugfs_connector_add(struct drm_connector *connector)
-{
-	struct dentry *root = connector->debugfs_entry;
-
-	/* The connector must have been registered beforehands. */
-	if (!root)
-		return -ENODEV;
-
-	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		debugfs_create_file("i915_dpcd", S_IRUGO, root,
-				    connector, &i915_dpcd_fops);
-
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
-		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
-				    connector, &i915_panel_fops);
-		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
-				    connector, &i915_psr_sink_status_fops);
-	}
-
-	return 0;
+	drm_debugfs_create_files(i915_debugfs_list,
+				 I915_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
 }