/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

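/*
 * Decode a mask of GTT page sizes into a human-readable string. A single
 * size maps directly to a constant string; a mix of sizes is formatted
 * into the caller-supplied buffer (or "M" if no buffer was provided).
 */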
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0';

		return buf;
	}
}

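/*
 * Print a one-line summary of a GEM object: status flags (active, globally
 * pinned, tiling, userfault, pin-mapped), size, read/write domains and
 * cache level, followed by details of every VMA bound into a GTT.
 */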
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

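/*
 * List every object backed by stolen memory: snapshot pointers from the
 * bound and unbound lists under the obj_lock spinlock, then sort them by
 * stolen offset before describing each object.
 */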
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

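/*
 * idr callback: accumulate per-client memory usage statistics. Each GGTT
 * binding counts towards "global"; ppGTT VMAs belonging to a different
 * client are skipped, and bound VMAs are split into active vs. inactive.
 */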
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}

static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;

		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret) {
		kvfree(objects);
		return ret;
	}

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	kvfree(objects);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

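/*
 * Dump the gen8+ display engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down), plus the port, misc and PCU
 * interrupt registers.
 */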
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;

	intel_runtime_pm_get(i915);
	gpu = i915_capture_gpu_state(i915);
	intel_runtime_pm_put(i915);
	if (!gpu)
		return -ENOMEM;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_gem_set_global_seqno(dev, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");

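/*
 * Report the current/min/max/idle/efficient GPU frequencies. Three paths:
 * ILK (MEMSWCTL/MEMSTAT), VLV/CHV (punit GPU_FREQ_STS), and gen6+ (the
 * RPS registers, read under forcewake).
 */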
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

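/*
 * Summarise hangcheck state: wedged/reset flags, when the hangcheck timer
 * next fires, and per-engine seqno/ACTHD progress, waiters and the last
 * hangcheck action. RCS additionally gets an INSTDONE dump.
 */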
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

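/* Print the raw value and microsecond residency for one RC6 counter. */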
print_rc6_res(struct seq_file * m,const char * title,const i915_reg_t reg)1495 static void print_rc6_res(struct seq_file *m,
1496 const char *title,
1497 const i915_reg_t reg)
1498 {
1499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
1501 seq_printf(m, "%s %u (%llu us)\n",
1502 title, I915_READ(reg),
1503 intel_rc6_residency_us(dev_priv, reg));
1504 }
1505
vlv_drpc_info(struct seq_file * m)1506 static int vlv_drpc_info(struct seq_file *m)
1507 {
1508 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1509 u32 rcctl1, pw_status;
1510
1511 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1512 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513
1514 seq_printf(m, "RC6 Enabled: %s\n",
1515 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516 GEN6_RC_CTL_EI_MODE(1))));
1517 seq_printf(m, "Render Power Well: %s\n",
1518 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1519 seq_printf(m, "Media Power Well: %s\n",
1520 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1521
1522 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1524
1525 return i915_forcewake_domains(m, NULL);
1526 }
1527
gen6_drpc_info(struct seq_file * m)1528 static int gen6_drpc_info(struct seq_file *m)
1529 {
1530 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1531 u32 gt_core_status, rcctl1, rc6vids = 0;
1532 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1533
1534 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1535 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1536
1537 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1538 if (INTEL_GEN(dev_priv) >= 9) {
1539 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1540 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1541 }
1542
1543 if (INTEL_GEN(dev_priv) <= 7) {
1544 mutex_lock(&dev_priv->pcu_lock);
1545 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1546 &rc6vids);
1547 mutex_unlock(&dev_priv->pcu_lock);
1548 }
1549
1550 seq_printf(m, "RC1e Enabled: %s\n",
1551 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1552 seq_printf(m, "RC6 Enabled: %s\n",
1553 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1554 if (INTEL_GEN(dev_priv) >= 9) {
1555 seq_printf(m, "Render Well Gating Enabled: %s\n",
1556 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1557 seq_printf(m, "Media Well Gating Enabled: %s\n",
1558 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1559 }
1560 seq_printf(m, "Deep RC6 Enabled: %s\n",
1561 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1562 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1563 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1564 seq_puts(m, "Current RC state: ");
1565 switch (gt_core_status & GEN6_RCn_MASK) {
1566 case GEN6_RC0:
1567 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1568 seq_puts(m, "Core Power Down\n");
1569 else
1570 seq_puts(m, "on\n");
1571 break;
1572 case GEN6_RC3:
1573 seq_puts(m, "RC3\n");
1574 break;
1575 case GEN6_RC6:
1576 seq_puts(m, "RC6\n");
1577 break;
1578 case GEN6_RC7:
1579 seq_puts(m, "RC7\n");
1580 break;
1581 default:
1582 seq_puts(m, "Unknown\n");
1583 break;
1584 }
1585
1586 seq_printf(m, "Core Power Down: %s\n",
1587 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1588 if (INTEL_GEN(dev_priv) >= 9) {
1589 seq_printf(m, "Render Power Well: %s\n",
1590 (gen9_powergate_status &
1591 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1592 seq_printf(m, "Media Power Well: %s\n",
1593 (gen9_powergate_status &
1594 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1595 }
1596
1597 /* Not exactly sure what this is */
1598 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1599 GEN6_GT_GFX_RC6_LOCKED);
1600 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1601 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1602 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1603
1604 if (INTEL_GEN(dev_priv) <= 7) {
1605 seq_printf(m, "RC6 voltage: %dmV\n",
1606 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1607 seq_printf(m, "RC6+ voltage: %dmV\n",
1608 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1609 seq_printf(m, "RC6++ voltage: %dmV\n",
1610 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1611 }
1612
1613 return i915_forcewake_domains(m, NULL);
1614 }
1615
i915_drpc_info(struct seq_file * m,void * unused)1616 static int i915_drpc_info(struct seq_file *m, void *unused)
1617 {
1618 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1619 int err;
1620
1621 intel_runtime_pm_get(dev_priv);
1622
1623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1624 err = vlv_drpc_info(m);
1625 else if (INTEL_GEN(dev_priv) >= 6)
1626 err = gen6_drpc_info(m);
1627 else
1628 err = ironlake_drpc_info(m);
1629
1630 intel_runtime_pm_put(dev_priv);
1631
1632 return err;
1633 }
1634
i915_frontbuffer_tracking(struct seq_file * m,void * unused)1635 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636 {
1637 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638
1639 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640 dev_priv->fb_tracking.busy_bits);
1641
1642 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643 dev_priv->fb_tracking.flip_bits);
1644
1645 return 0;
1646 }
1647
i915_fbc_status(struct seq_file * m,void * unused)1648 static int i915_fbc_status(struct seq_file *m, void *unused)
1649 {
1650 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651 struct intel_fbc *fbc = &dev_priv->fbc;
1652
1653 if (!HAS_FBC(dev_priv))
1654 return -ENODEV;
1655
1656 intel_runtime_pm_get(dev_priv);
1657 mutex_lock(&fbc->lock);
1658
1659 if (intel_fbc_is_active(dev_priv))
1660 seq_puts(m, "FBC enabled\n");
1661 else
1662 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
1664 if (intel_fbc_is_active(dev_priv)) {
1665 u32 mask;
1666
1667 if (INTEL_GEN(dev_priv) >= 8)
1668 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669 else if (INTEL_GEN(dev_priv) >= 7)
1670 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671 else if (INTEL_GEN(dev_priv) >= 5)
1672 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673 else if (IS_G4X(dev_priv))
1674 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675 else
1676 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677 FBC_STAT_COMPRESSED);
1678
1679 seq_printf(m, "Compressing: %s\n", yesno(mask));
1680 }
1681
1682 mutex_unlock(&fbc->lock);
1683 intel_runtime_pm_put(dev_priv);
1684
1685 return 0;
1686 }
1687
i915_fbc_false_color_get(void * data,u64 * val)1688 static int i915_fbc_false_color_get(void *data, u64 *val)
1689 {
1690 struct drm_i915_private *dev_priv = data;
1691
1692 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693 return -ENODEV;
1694
1695 *val = dev_priv->fbc.false_color;
1696
1697 return 0;
1698 }
1699
i915_fbc_false_color_set(void * data,u64 val)1700 static int i915_fbc_false_color_set(void *data, u64 val)
1701 {
1702 struct drm_i915_private *dev_priv = data;
1703 u32 reg;
1704
1705 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706 return -ENODEV;
1707
1708 mutex_lock(&dev_priv->fbc.lock);
1709
1710 reg = I915_READ(ILK_DPFC_CONTROL);
1711 dev_priv->fbc.false_color = val;
1712
1713 I915_WRITE(ILK_DPFC_CONTROL, val ?
1714 (reg | FBC_CTL_FALSE_COLOR) :
1715 (reg & ~FBC_CTL_FALSE_COLOR));
1716
1717 mutex_unlock(&dev_priv->fbc.lock);
1718 return 0;
1719 }
1720
1721 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1722 i915_fbc_false_color_get, i915_fbc_false_color_set,
1723 "%llu\n");
1724
i915_ips_status(struct seq_file * m,void * unused)1725 static int i915_ips_status(struct seq_file *m, void *unused)
1726 {
1727 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728
1729 if (!HAS_IPS(dev_priv))
1730 return -ENODEV;
1731
1732 intel_runtime_pm_get(dev_priv);
1733
1734 seq_printf(m, "Enabled by kernel parameter: %s\n",
1735 yesno(i915_modparams.enable_ips));
1736
1737 if (INTEL_GEN(dev_priv) >= 8) {
1738 seq_puts(m, "Currently: unknown\n");
1739 } else {
1740 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741 seq_puts(m, "Currently: enabled\n");
1742 else
1743 seq_puts(m, "Currently: disabled\n");
1744 }
1745
1746 intel_runtime_pm_put(dev_priv);
1747
1748 return 0;
1749 }
1750
1751 static int i915_sr_status(struct seq_file *m, void *unused)
1752 {
1753 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1754 bool sr_enabled = false;
1755
1756 intel_runtime_pm_get(dev_priv);
1757 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1758
1759 if (INTEL_GEN(dev_priv) >= 9)
1760 /* no global SR status; inspect per-plane WM */;
1761 else if (HAS_PCH_SPLIT(dev_priv))
1762 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1763 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1764 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1765 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1766 else if (IS_I915GM(dev_priv))
1767 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1768 else if (IS_PINEVIEW(dev_priv))
1769 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1770 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1771 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1772
1773 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1774 intel_runtime_pm_put(dev_priv);
1775
1776 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1777
1778 return 0;
1779 }
1780
1781 static int i915_emon_status(struct seq_file *m, void *unused)
1782 {
1783 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 struct drm_device *dev = &dev_priv->drm;
1785 unsigned long temp, chipset, gfx;
1786 int ret;
1787
1788 if (!IS_GEN5(dev_priv))
1789 return -ENODEV;
1790
1791 ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 if (ret)
1793 return ret;
1794
1795 temp = i915_mch_val(dev_priv);
1796 chipset = i915_chipset_val(dev_priv);
1797 gfx = i915_gfx_val(dev_priv);
1798 mutex_unlock(&dev->struct_mutex);
1799
1800 seq_printf(m, "GMCH temp: %ld\n", temp);
1801 seq_printf(m, "Chipset power: %ld\n", chipset);
1802 seq_printf(m, "GFX power: %ld\n", gfx);
1803 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1804
1805 return 0;
1806 }
1807
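/*
 * The GEN6_PCODE_READ_MIN_FREQ_TABLE mailbox request used below takes a
 * GPU frequency in 50 MHz units (hence the GEN9_FREQ_SCALER division on
 * gen9+) and returns the matched frequencies packed into one word:
 * bits 7:0 hold the effective CPU frequency and bits 15:8 the effective
 * ring frequency, both in 100 MHz units. For example, a reply of 0x0c12
 * would decode as CPU 0x12 * 100 = 1800 MHz and ring 0x0c * 100 =
 * 1200 MHz (illustrative value, not taken from real hardware).
 */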
1808 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1809 {
1810 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1811 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1812 unsigned int max_gpu_freq, min_gpu_freq;
1813 int gpu_freq, ia_freq;
1814 int ret;
1815
1816 if (!HAS_LLC(dev_priv))
1817 return -ENODEV;
1818
1819 intel_runtime_pm_get(dev_priv);
1820
1821 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1822 if (ret)
1823 goto out;
1824
1825 min_gpu_freq = rps->min_freq;
1826 max_gpu_freq = rps->max_freq;
1827 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1828 /* Convert GT frequency to 50 MHz units */
1829 min_gpu_freq /= GEN9_FREQ_SCALER;
1830 max_gpu_freq /= GEN9_FREQ_SCALER;
1831 }
1832
1833 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1834
1835 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1836 ia_freq = gpu_freq;
1837 sandybridge_pcode_read(dev_priv,
1838 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1839 &ia_freq);
1840 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1841 intel_gpu_freq(dev_priv, (gpu_freq *
1842 (IS_GEN9_BC(dev_priv) ||
1843 INTEL_GEN(dev_priv) >= 10 ?
1844 GEN9_FREQ_SCALER : 1))),
1845 ((ia_freq >> 0) & 0xff) * 100,
1846 ((ia_freq >> 8) & 0xff) * 100);
1847 }
1848
1849 mutex_unlock(&dev_priv->pcu_lock);
1850
1851 out:
1852 intel_runtime_pm_put(dev_priv);
1853 return ret;
1854 }
1855
1856 static int i915_opregion(struct seq_file *m, void *unused)
1857 {
1858 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859 struct drm_device *dev = &dev_priv->drm;
1860 struct intel_opregion *opregion = &dev_priv->opregion;
1861 int ret;
1862
1863 ret = mutex_lock_interruptible(&dev->struct_mutex);
1864 if (ret)
1865 goto out;
1866
1867 if (opregion->header)
1868 seq_write(m, opregion->header, OPREGION_SIZE);
1869
1870 mutex_unlock(&dev->struct_mutex);
1871
1872 out:
1873 return 0;
1874 }
1875
1876 static int i915_vbt(struct seq_file *m, void *unused)
1877 {
1878 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1879
1880 if (opregion->vbt)
1881 seq_write(m, opregion->vbt, opregion->vbt_size);
1882
1883 return 0;
1884 }
1885
1886 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1887 {
1888 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1889 struct drm_device *dev = &dev_priv->drm;
1890 struct intel_framebuffer *fbdev_fb = NULL;
1891 struct drm_framebuffer *drm_fb;
1892 int ret;
1893
1894 ret = mutex_lock_interruptible(&dev->struct_mutex);
1895 if (ret)
1896 return ret;
1897
1898 #ifdef CONFIG_DRM_FBDEV_EMULATION
1899 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1900 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1901
1902 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1903 fbdev_fb->base.width,
1904 fbdev_fb->base.height,
1905 fbdev_fb->base.format->depth,
1906 fbdev_fb->base.format->cpp[0] * 8,
1907 fbdev_fb->base.modifier,
1908 drm_framebuffer_read_refcount(&fbdev_fb->base));
1909 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1910 seq_putc(m, '\n');
1911 }
1912 #endif
1913
1914 mutex_lock(&dev->mode_config.fb_lock);
1915 drm_for_each_fb(drm_fb, dev) {
1916 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1917 if (fb == fbdev_fb)
1918 continue;
1919
1920 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1921 fb->base.width,
1922 fb->base.height,
1923 fb->base.format->depth,
1924 fb->base.format->cpp[0] * 8,
1925 fb->base.modifier,
1926 drm_framebuffer_read_refcount(&fb->base));
1927 describe_obj(m, intel_fb_obj(&fb->base));
1928 seq_putc(m, '\n');
1929 }
1930 mutex_unlock(&dev->mode_config.fb_lock);
1931 mutex_unlock(&dev->struct_mutex);
1932
1933 return 0;
1934 }
1935
1936 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1937 {
1938 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1939 ring->space, ring->head, ring->tail, ring->emit);
1940 }
1941
1942 static int i915_context_status(struct seq_file *m, void *unused)
1943 {
1944 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1945 struct drm_device *dev = &dev_priv->drm;
1946 struct intel_engine_cs *engine;
1947 struct i915_gem_context *ctx;
1948 enum intel_engine_id id;
1949 int ret;
1950
1951 ret = mutex_lock_interruptible(&dev->struct_mutex);
1952 if (ret)
1953 return ret;
1954
1955 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1956 seq_printf(m, "HW context %u ", ctx->hw_id);
1957 if (ctx->pid) {
1958 struct task_struct *task;
1959
1960 task = get_pid_task(ctx->pid, PIDTYPE_PID);
1961 if (task) {
1962 seq_printf(m, "(%s [%d]) ",
1963 task->comm, task->pid);
1964 put_task_struct(task);
1965 }
1966 } else if (IS_ERR(ctx->file_priv)) {
1967 seq_puts(m, "(deleted) ");
1968 } else {
1969 seq_puts(m, "(kernel) ");
1970 }
1971
1972 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1973 seq_putc(m, '\n');
1974
1975 for_each_engine(engine, dev_priv, id) {
1976 struct intel_context *ce =
1977 to_intel_context(ctx, engine);
1978
1979 seq_printf(m, "%s: ", engine->name);
1980 if (ce->state)
1981 describe_obj(m, ce->state->obj);
1982 if (ce->ring)
1983 describe_ctx_ring(m, ce->ring);
1984 seq_putc(m, '\n');
1985 }
1986
1987 seq_putc(m, '\n');
1988 }
1989
1990 mutex_unlock(&dev->struct_mutex);
1991
1992 return 0;
1993 }
1994
1995 static const char *swizzle_string(unsigned swizzle)
1996 {
1997 switch (swizzle) {
1998 case I915_BIT_6_SWIZZLE_NONE:
1999 return "none";
2000 case I915_BIT_6_SWIZZLE_9:
2001 return "bit9";
2002 case I915_BIT_6_SWIZZLE_9_10:
2003 return "bit9/bit10";
2004 case I915_BIT_6_SWIZZLE_9_11:
2005 return "bit9/bit11";
2006 case I915_BIT_6_SWIZZLE_9_10_11:
2007 return "bit9/bit10/bit11";
2008 case I915_BIT_6_SWIZZLE_9_17:
2009 return "bit9/bit17";
2010 case I915_BIT_6_SWIZZLE_9_10_17:
2011 return "bit9/bit10/bit17";
2012 case I915_BIT_6_SWIZZLE_UNKNOWN:
2013 return "unknown";
2014 }
2015
2016 return "bug";
2017 }
2018
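/*
 * Bit-6 swizzling XORs one or more higher address bits (9/10/11/17, per
 * the enum decoded above) into bit 6 of an object's page addresses to
 * reflect how the memory controller interleaves DRAM channels. The
 * registers dumped below (DCC, MAD_DIMM_*, etc.) are the raw inputs
 * from which the swizzle_x/swizzle_y modes were detected at init time.
 */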
2019 static int i915_swizzle_info(struct seq_file *m, void *data)
2020 {
2021 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2022
2023 intel_runtime_pm_get(dev_priv);
2024
2025 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2026 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2027 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2028 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2029
2030 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2031 seq_printf(m, "DDC = 0x%08x\n",
2032 I915_READ(DCC));
2033 seq_printf(m, "DDC2 = 0x%08x\n",
2034 I915_READ(DCC2));
2035 seq_printf(m, "C0DRB3 = 0x%04x\n",
2036 I915_READ16(C0DRB3));
2037 seq_printf(m, "C1DRB3 = 0x%04x\n",
2038 I915_READ16(C1DRB3));
2039 } else if (INTEL_GEN(dev_priv) >= 6) {
2040 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2041 I915_READ(MAD_DIMM_C0));
2042 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2043 I915_READ(MAD_DIMM_C1));
2044 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2045 I915_READ(MAD_DIMM_C2));
2046 seq_printf(m, "TILECTL = 0x%08x\n",
2047 I915_READ(TILECTL));
2048 if (INTEL_GEN(dev_priv) >= 8)
2049 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2050 I915_READ(GAMTARBMODE));
2051 else
2052 seq_printf(m, "ARB_MODE = 0x%08x\n",
2053 I915_READ(ARB_MODE));
2054 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2055 I915_READ(DISP_ARB_CTL));
2056 }
2057
2058 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2059 seq_puts(m, "L-shaped memory detected\n");
2060
2061 intel_runtime_pm_put(dev_priv);
2062
2063 return 0;
2064 }
2065
2066 static int per_file_ctx(int id, void *ptr, void *data)
2067 {
2068 struct i915_gem_context *ctx = ptr;
2069 struct seq_file *m = data;
2070 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2071
2072 if (!ppgtt) {
2073 seq_printf(m, " no ppgtt for context %d\n",
2074 ctx->user_handle);
2075 return 0;
2076 }
2077
2078 if (i915_gem_context_is_default(ctx))
2079 seq_puts(m, " default context:\n");
2080 else
2081 seq_printf(m, " context %d:\n", ctx->user_handle);
2082 ppgtt->debug_dump(ppgtt, m);
2083
2084 return 0;
2085 }
2086
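/*
 * On gen8+ each engine exposes four page-directory pointers, each split
 * across an upper/lower dword register pair. The loop below stitches
 * them back into a 64-bit value, equivalent to:
 *
 *   u64 pdp = (u64)I915_READ(GEN8_RING_PDP_UDW(engine, i)) << 32 |
 *             I915_READ(GEN8_RING_PDP_LDW(engine, i));
 */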
2087 static void gen8_ppgtt_info(struct seq_file *m,
2088 struct drm_i915_private *dev_priv)
2089 {
2090 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2091 struct intel_engine_cs *engine;
2092 enum intel_engine_id id;
2093 int i;
2094
2095 if (!ppgtt)
2096 return;
2097
2098 for_each_engine(engine, dev_priv, id) {
2099 seq_printf(m, "%s\n", engine->name);
2100 for (i = 0; i < 4; i++) {
2101 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2102 pdp <<= 32;
2103 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2104 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2105 }
2106 }
2107 }
2108
2109 static void gen6_ppgtt_info(struct seq_file *m,
2110 struct drm_i915_private *dev_priv)
2111 {
2112 struct intel_engine_cs *engine;
2113 enum intel_engine_id id;
2114
2115 if (IS_GEN6(dev_priv))
2116 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2117
2118 for_each_engine(engine, dev_priv, id) {
2119 seq_printf(m, "%s\n", engine->name);
2120 if (IS_GEN7(dev_priv))
2121 seq_printf(m, "GFX_MODE: 0x%08x\n",
2122 I915_READ(RING_MODE_GEN7(engine)));
2123 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2124 I915_READ(RING_PP_DIR_BASE(engine)));
2125 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2126 I915_READ(RING_PP_DIR_BASE_READ(engine)));
2127 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2128 I915_READ(RING_PP_DIR_DCLV(engine)));
2129 }
2130 if (dev_priv->mm.aliasing_ppgtt) {
2131 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2132
2133 seq_puts(m, "aliasing PPGTT:\n");
2134 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2135
2136 ppgtt->debug_dump(ppgtt, m);
2137 }
2138
2139 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2140 }
2141
2142 static int i915_ppgtt_info(struct seq_file *m, void *data)
2143 {
2144 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2145 struct drm_device *dev = &dev_priv->drm;
2146 struct drm_file *file;
2147 int ret;
2148
2149 mutex_lock(&dev->filelist_mutex);
2150 ret = mutex_lock_interruptible(&dev->struct_mutex);
2151 if (ret)
2152 goto out_unlock;
2153
2154 intel_runtime_pm_get(dev_priv);
2155
2156 if (INTEL_GEN(dev_priv) >= 8)
2157 gen8_ppgtt_info(m, dev_priv);
2158 else if (INTEL_GEN(dev_priv) >= 6)
2159 gen6_ppgtt_info(m, dev_priv);
2160
2161 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2162 struct drm_i915_file_private *file_priv = file->driver_priv;
2163 struct task_struct *task;
2164
2165 task = get_pid_task(file->pid, PIDTYPE_PID);
2166 if (!task) {
2167 ret = -ESRCH;
2168 goto out_rpm;
2169 }
2170 seq_printf(m, "\nproc: %s\n", task->comm);
2171 put_task_struct(task);
2172 idr_for_each(&file_priv->context_idr, per_file_ctx,
2173 (void *)(unsigned long)m);
2174 }
2175
2176 out_rpm:
2177 intel_runtime_pm_put(dev_priv);
2178 mutex_unlock(&dev->struct_mutex);
2179 out_unlock:
2180 mutex_unlock(&dev->filelist_mutex);
2181 return ret;
2182 }
2183
2184 static int count_irq_waiters(struct drm_i915_private *i915)
2185 {
2186 struct intel_engine_cs *engine;
2187 enum intel_engine_id id;
2188 int count = 0;
2189
2190 for_each_engine(engine, i915, id)
2191 count += intel_engine_has_waiter(engine);
2192
2193 return count;
2194 }
2195
2196 static const char *rps_power_to_str(unsigned int power)
2197 {
2198 static const char * const strings[] = {
2199 [LOW_POWER] = "low power",
2200 [BETWEEN] = "mixed",
2201 [HIGH_POWER] = "high power",
2202 };
2203
2204 if (power >= ARRAY_SIZE(strings) || !strings[power])
2205 return "unknown";
2206
2207 return strings[power];
2208 }
2209
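/*
 * The autotuning block at the end of this function reports how busy the
 * GPU was within the current up/down evaluation intervals as a
 * percentage, i.e. 100 * busy_cycles / interval_cycles, which the RPS
 * worker compares against rps->power.{up,down}_threshold when deciding
 * whether to raise or lower the frequency.
 */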
2210 static int i915_rps_boost_info(struct seq_file *m, void *data)
2211 {
2212 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2213 struct drm_device *dev = &dev_priv->drm;
2214 struct intel_rps *rps = &dev_priv->gt_pm.rps;
2215 struct drm_file *file;
2216
2217 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2218 seq_printf(m, "GPU busy? %s [%d requests]\n",
2219 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2220 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2221 seq_printf(m, "Boosts outstanding? %d\n",
2222 atomic_read(&rps->num_waiters));
2223 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2224 seq_printf(m, "Frequency requested %d\n",
2225 intel_gpu_freq(dev_priv, rps->cur_freq));
2226 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2227 intel_gpu_freq(dev_priv, rps->min_freq),
2228 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2229 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2230 intel_gpu_freq(dev_priv, rps->max_freq));
2231 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
2232 intel_gpu_freq(dev_priv, rps->idle_freq),
2233 intel_gpu_freq(dev_priv, rps->efficient_freq),
2234 intel_gpu_freq(dev_priv, rps->boost_freq));
2235
2236 mutex_lock(&dev->filelist_mutex);
2237 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2238 struct drm_i915_file_private *file_priv = file->driver_priv;
2239 struct task_struct *task;
2240
2241 rcu_read_lock();
2242 task = pid_task(file->pid, PIDTYPE_PID);
2243 seq_printf(m, "%s [%d]: %d boosts\n",
2244 task ? task->comm : "<unknown>",
2245 task ? task->pid : -1,
2246 atomic_read(&file_priv->rps_client.boosts));
2247 rcu_read_unlock();
2248 }
2249 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2250 atomic_read(&rps->boosts));
2251 mutex_unlock(&dev->filelist_mutex);
2252
2253 if (INTEL_GEN(dev_priv) >= 6 &&
2254 rps->enabled &&
2255 dev_priv->gt.active_requests) {
2256 u32 rpup, rpupei;
2257 u32 rpdown, rpdownei;
2258
2259 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2260 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2261 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2262 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2263 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2264 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2265
2266 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2267 rps_power_to_str(rps->power.mode));
2268 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
2269 rpup && rpupei ? 100 * rpup / rpupei : 0,
2270 rps->power.up_threshold);
2271 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
2272 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2273 rps->power.down_threshold);
2274 } else {
2275 seq_puts(m, "\nRPS Autotuning inactive\n");
2276 }
2277
2278 return 0;
2279 }
2280
2281 static int i915_llc(struct seq_file *m, void *data)
2282 {
2283 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2284 const bool edram = INTEL_GEN(dev_priv) > 8;
2285
2286 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2287 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2288 intel_uncore_edram_size(dev_priv)/1024/1024);
2289
2290 return 0;
2291 }
2292
2293 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2294 {
2295 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2296 struct drm_printer p;
2297
2298 if (!HAS_HUC(dev_priv))
2299 return -ENODEV;
2300
2301 p = drm_seq_file_printer(m);
2302 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2303
2304 intel_runtime_pm_get(dev_priv);
2305 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2306 intel_runtime_pm_put(dev_priv);
2307
2308 return 0;
2309 }
2310
2311 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2312 {
2313 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2314 struct drm_printer p;
2315 u32 tmp, i;
2316
2317 if (!HAS_GUC(dev_priv))
2318 return -ENODEV;
2319
2320 p = drm_seq_file_printer(m);
2321 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2322
2323 intel_runtime_pm_get(dev_priv);
2324
2325 tmp = I915_READ(GUC_STATUS);
2326
2327 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2328 seq_printf(m, "\tBootrom status = 0x%x\n",
2329 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2330 seq_printf(m, "\tuKernel status = 0x%x\n",
2331 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2332 seq_printf(m, "\tMIA Core status = 0x%x\n",
2333 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2334 seq_puts(m, "\nScratch registers:\n");
2335 for (i = 0; i < 16; i++)
2336 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2337
2338 intel_runtime_pm_put(dev_priv);
2339
2340 return 0;
2341 }
2342
2343 static const char *
2344 stringify_guc_log_type(enum guc_log_buffer_type type)
2345 {
2346 switch (type) {
2347 case GUC_ISR_LOG_BUFFER:
2348 return "ISR";
2349 case GUC_DPC_LOG_BUFFER:
2350 return "DPC";
2351 case GUC_CRASH_DUMP_LOG_BUFFER:
2352 return "CRASH";
2353 default:
2354 MISSING_CASE(type);
2355 }
2356
2357 return "";
2358 }
2359
2360 static void i915_guc_log_info(struct seq_file *m,
2361 struct drm_i915_private *dev_priv)
2362 {
2363 struct intel_guc_log *log = &dev_priv->guc.log;
2364 enum guc_log_buffer_type type;
2365
2366 if (!intel_guc_log_relay_enabled(log)) {
2367 seq_puts(m, "GuC log relay disabled\n");
2368 return;
2369 }
2370
2371 seq_puts(m, "GuC logging stats:\n");
2372
2373 seq_printf(m, "\tRelay full count: %u\n",
2374 log->relay.full_count);
2375
2376 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2377 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2378 stringify_guc_log_type(type),
2379 log->stats[type].flush,
2380 log->stats[type].sampled_overflow);
2381 }
2382 }
2383
2384 static void i915_guc_client_info(struct seq_file *m,
2385 struct drm_i915_private *dev_priv,
2386 struct intel_guc_client *client)
2387 {
2388 struct intel_engine_cs *engine;
2389 enum intel_engine_id id;
2390 uint64_t tot = 0;
2391
2392 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2393 client->priority, client->stage_id, client->proc_desc_offset);
2394 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2395 client->doorbell_id, client->doorbell_offset);
2396
2397 for_each_engine(engine, dev_priv, id) {
2398 u64 submissions = client->submissions[id];
2399 tot += submissions;
2400 seq_printf(m, "\tSubmissions: %llu %s\n",
2401 submissions, engine->name);
2402 }
2403 seq_printf(m, "\tTotal: %llu\n", tot);
2404 }
2405
2406 static int i915_guc_info(struct seq_file *m, void *data)
2407 {
2408 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2409 const struct intel_guc *guc = &dev_priv->guc;
2410
2411 if (!USES_GUC(dev_priv))
2412 return -ENODEV;
2413
2414 i915_guc_log_info(m, dev_priv);
2415
2416 if (!USES_GUC_SUBMISSION(dev_priv))
2417 return 0;
2418
2419 GEM_BUG_ON(!guc->execbuf_client);
2420
2421 seq_printf(m, "\nDoorbell map:\n");
2422 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2423 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2424
2425 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2426 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2427 if (guc->preempt_client) {
2428 seq_printf(m, "\nGuC preempt client @ %p:\n",
2429 guc->preempt_client);
2430 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2431 }
2432
2433 /* Add more as required ... */
2434
2435 return 0;
2436 }
2437
2438 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2439 {
2440 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2441 const struct intel_guc *guc = &dev_priv->guc;
2442 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2443 struct intel_guc_client *client = guc->execbuf_client;
2444 unsigned int tmp;
2445 int index;
2446
2447 if (!USES_GUC_SUBMISSION(dev_priv))
2448 return -ENODEV;
2449
2450 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2451 struct intel_engine_cs *engine;
2452
2453 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2454 continue;
2455
2456 seq_printf(m, "GuC stage descriptor %u:\n", index);
2457 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2458 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2459 seq_printf(m, "\tPriority: %d\n", desc->priority);
2460 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2461 seq_printf(m, "\tEngines used: 0x%x\n",
2462 desc->engines_used);
2463 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2464 desc->db_trigger_phy,
2465 desc->db_trigger_cpu,
2466 desc->db_trigger_uk);
2467 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2468 desc->process_desc);
2469 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2470 desc->wq_addr, desc->wq_size);
2471 seq_putc(m, '\n');
2472
2473 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2474 u32 guc_engine_id = engine->guc_id;
2475 struct guc_execlist_context *lrc =
2476 &desc->lrc[guc_engine_id];
2477
2478 seq_printf(m, "\t%s LRC:\n", engine->name);
2479 seq_printf(m, "\t\tContext desc: 0x%x\n",
2480 lrc->context_desc);
2481 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2482 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2483 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2484 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2485 seq_putc(m, '\n');
2486 }
2487 }
2488
2489 return 0;
2490 }
2491
2492 static int i915_guc_log_dump(struct seq_file *m, void *data)
2493 {
2494 struct drm_info_node *node = m->private;
2495 struct drm_i915_private *dev_priv = node_to_i915(node);
2496 bool dump_load_err = !!node->info_ent->data;
2497 struct drm_i915_gem_object *obj = NULL;
2498 u32 *log;
2499 int i = 0;
2500
2501 if (!HAS_GUC(dev_priv))
2502 return -ENODEV;
2503
2504 if (dump_load_err)
2505 obj = dev_priv->guc.load_err_log;
2506 else if (dev_priv->guc.log.vma)
2507 obj = dev_priv->guc.log.vma->obj;
2508
2509 if (!obj)
2510 return 0;
2511
2512 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2513 if (IS_ERR(log)) {
2514 DRM_DEBUG("Failed to pin object\n");
2515 seq_puts(m, "(log data inaccessible)\n");
2516 return PTR_ERR(log);
2517 }
2518
2519 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2520 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2521 *(log + i), *(log + i + 1),
2522 *(log + i + 2), *(log + i + 3));
2523
2524 seq_putc(m, '\n');
2525
2526 i915_gem_object_unpin_map(obj);
2527
2528 return 0;
2529 }
2530
2531 static int i915_guc_log_level_get(void *data, u64 *val)
2532 {
2533 struct drm_i915_private *dev_priv = data;
2534
2535 if (!USES_GUC(dev_priv))
2536 return -ENODEV;
2537
2538 *val = intel_guc_log_get_level(&dev_priv->guc.log);
2539
2540 return 0;
2541 }
2542
2543 static int i915_guc_log_level_set(void *data, u64 val)
2544 {
2545 struct drm_i915_private *dev_priv = data;
2546
2547 if (!USES_GUC(dev_priv))
2548 return -ENODEV;
2549
2550 return intel_guc_log_set_level(&dev_priv->guc.log, val);
2551 }
2552
2553 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2554 i915_guc_log_level_get, i915_guc_log_level_set,
2555 "%lld\n");
2556
2557 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2558 {
2559 struct drm_i915_private *dev_priv = inode->i_private;
2560
2561 if (!USES_GUC(dev_priv))
2562 return -ENODEV;
2563
2564 file->private_data = &dev_priv->guc.log;
2565
2566 return intel_guc_log_relay_open(&dev_priv->guc.log);
2567 }
2568
2569 static ssize_t
2570 i915_guc_log_relay_write(struct file *filp,
2571 const char __user *ubuf,
2572 size_t cnt,
2573 loff_t *ppos)
2574 {
2575 struct intel_guc_log *log = filp->private_data;
2576
2577 intel_guc_log_relay_flush(log);
2578
2579 return cnt;
2580 }
2581
2582 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2583 {
2584 struct drm_i915_private *dev_priv = inode->i_private;
2585
2586 intel_guc_log_relay_close(&dev_priv->guc.log);
2587
2588 return 0;
2589 }
2590
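/*
 * Glue for the GuC log relay debugfs node: opening it starts relaying
 * the GuC log buffers, any write forces a flush of whatever has been
 * captured so far, and the final close tears the relay down again.
 * Note the absence of a .read hook — the captured data itself is
 * exposed through separate relayfs channel files, not this node.
 */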
2591 static const struct file_operations i915_guc_log_relay_fops = {
2592 .owner = THIS_MODULE,
2593 .open = i915_guc_log_relay_open,
2594 .write = i915_guc_log_relay_write,
2595 .release = i915_guc_log_relay_release,
2596 };
2597
2598 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2599 {
2600 u8 val;
2601 static const char * const sink_status[] = {
2602 "inactive",
2603 "transition to active, capture and display",
2604 "active, display from RFB",
2605 "active, capture and display on sink device timings",
2606 "transition to inactive, capture and display, timing re-sync",
2607 "reserved",
2608 "reserved",
2609 "sink internal error",
2610 };
2611 struct drm_connector *connector = m->private;
2612 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2613 struct intel_dp *intel_dp =
2614 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2615 int ret;
2616
2617 if (!CAN_PSR(dev_priv)) {
2618 seq_puts(m, "PSR Unsupported\n");
2619 return -ENODEV;
2620 }
2621
2622 if (connector->status != connector_status_connected)
2623 return -ENODEV;
2624
2625 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2626
2627 if (ret == 1) {
2628 const char *str = "unknown";
2629
2630 val &= DP_PSR_SINK_STATE_MASK;
2631 if (val < ARRAY_SIZE(sink_status))
2632 str = sink_status[val];
2633 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2634 } else {
2635 return ret;
2636 }
2637
2638 return 0;
2639 }
2640 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2641
2642 static void
2643 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2644 {
2645 u32 val, psr_status;
2646
2647 if (dev_priv->psr.psr2_enabled) {
2648 static const char * const live_status[] = {
2649 "IDLE",
2650 "CAPTURE",
2651 "CAPTURE_FS",
2652 "SLEEP",
2653 "BUFON_FW",
2654 "ML_UP",
2655 "SU_STANDBY",
2656 "FAST_SLEEP",
2657 "DEEP_SLEEP",
2658 "BUF_ON",
2659 "TG_ON"
2660 };
2661 psr_status = I915_READ(EDP_PSR2_STATUS);
2662 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2663 EDP_PSR2_STATUS_STATE_SHIFT;
2664 if (val < ARRAY_SIZE(live_status)) {
2665 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2666 psr_status, live_status[val]);
2667 return;
2668 }
2669 } else {
2670 static const char * const live_status[] = {
2671 "IDLE",
2672 "SRDONACK",
2673 "SRDENT",
2674 "BUFOFF",
2675 "BUFON",
2676 "AUXACK",
2677 "SRDOFFACK",
2678 "SRDENT_ON",
2679 };
2680 psr_status = I915_READ(EDP_PSR_STATUS);
2681 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2682 EDP_PSR_STATUS_STATE_SHIFT;
2683 if (val < ARRAY_SIZE(live_status)) {
2684 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2685 psr_status, live_status[val]);
2686 return;
2687 }
2688 }
2689
2690 seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2691 }
2692
2693 static int i915_edp_psr_status(struct seq_file *m, void *data)
2694 {
2695 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2696 u32 psrperf = 0;
2697 bool enabled = false;
2698 bool sink_support;
2699
2700 if (!HAS_PSR(dev_priv))
2701 return -ENODEV;
2702
2703 sink_support = dev_priv->psr.sink_support;
2704 seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2705 if (!sink_support)
2706 return 0;
2707
2708 intel_runtime_pm_get(dev_priv);
2709
2710 mutex_lock(&dev_priv->psr.lock);
2711 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2712 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2713 dev_priv->psr.busy_frontbuffer_bits);
2714
2715 if (dev_priv->psr.psr2_enabled)
2716 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2717 else
2718 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2719
2720 seq_printf(m, "Main link in standby mode: %s\n",
2721 yesno(dev_priv->psr.link_standby));
2722
2723 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2724
2725 /*
2726 * SKL+ Perf counter is reset to 0 every time a DC state is entered
2727 */
2728 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2729 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2730 EDP_PSR_PERF_CNT_MASK;
2731
2732 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2733 }
2734
2735 psr_source_status(dev_priv, m);
2736 mutex_unlock(&dev_priv->psr.lock);
2737
2738 if (READ_ONCE(dev_priv->psr.debug)) {
2739 seq_printf(m, "Last attempted entry at: %lld\n",
2740 dev_priv->psr.last_entry_attempt);
2741 seq_printf(m, "Last exit at: %lld\n",
2742 dev_priv->psr.last_exit);
2743 }
2744
2745 intel_runtime_pm_put(dev_priv);
2746 return 0;
2747 }
2748
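/*
 * PSR debug control: writing a nonzero value enables PSR interrupt
 * tracking via intel_psr_irq_control(), which is what feeds the
 * last_entry_attempt/last_exit timestamps printed by
 * i915_edp_psr_status() above; reading the node simply mirrors
 * dev_priv->psr.debug.
 */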
2749 static int
2750 i915_edp_psr_debug_set(void *data, u64 val)
2751 {
2752 struct drm_i915_private *dev_priv = data;
2753
2754 if (!CAN_PSR(dev_priv))
2755 return -ENODEV;
2756
2757 DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2758
2759 intel_runtime_pm_get(dev_priv);
2760 intel_psr_irq_control(dev_priv, !!val);
2761 intel_runtime_pm_put(dev_priv);
2762
2763 return 0;
2764 }
2765
2766 static int
2767 i915_edp_psr_debug_get(void *data, u64 *val)
2768 {
2769 struct drm_i915_private *dev_priv = data;
2770
2771 if (!CAN_PSR(dev_priv))
2772 return -ENODEV;
2773
2774 *val = READ_ONCE(dev_priv->psr.debug);
2775 return 0;
2776 }
2777
2778 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2779 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2780 "%llu\n");
2781
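/*
 * RAPL energy accounting: bits 12:8 of MSR_RAPL_POWER_UNIT (the
 * (power & 0x1f00) >> 8 below) give the energy status unit as a
 * negative power of two Joules, so the raw counter is scaled to
 * microjoules with (1000000 * raw) >> units. E.g. with units == 16
 * (illustrative), each count is 1000000 / 2^16 ~= 15 uJ.
 */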
2782 static int i915_energy_uJ(struct seq_file *m, void *data)
2783 {
2784 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2785 unsigned long long power;
2786 u32 units;
2787
2788 if (INTEL_GEN(dev_priv) < 6)
2789 return -ENODEV;
2790
2791 intel_runtime_pm_get(dev_priv);
2792
2793 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2794 intel_runtime_pm_put(dev_priv);
2795 return -ENODEV;
2796 }
2797
2798 units = (power & 0x1f00) >> 8;
2799 power = I915_READ(MCH_SECP_NRG_STTS);
2800 power = (1000000 * power) >> units; /* convert to uJ */
2801
2802 intel_runtime_pm_put(dev_priv);
2803
2804 seq_printf(m, "%llu", power);
2805
2806 return 0;
2807 }
2808
2809 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2810 {
2811 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2812 struct pci_dev *pdev = dev_priv->drm.pdev;
2813
2814 if (!HAS_RUNTIME_PM(dev_priv))
2815 seq_puts(m, "Runtime power management not supported\n");
2816
2817 seq_printf(m, "GPU idle: %s (epoch %u)\n",
2818 yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2819 seq_printf(m, "IRQs disabled: %s\n",
2820 yesno(!intel_irqs_enabled(dev_priv)));
2821 #ifdef CONFIG_PM
2822 seq_printf(m, "Usage count: %d\n",
2823 atomic_read(&dev_priv->drm.dev->power.usage_count));
2824 #else
2825 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2826 #endif
2827 seq_printf(m, "PCI device power state: %s [%d]\n",
2828 pci_power_name(pdev->current_state),
2829 pdev->current_state);
2830
2831 return 0;
2832 }
2833
2834 static int i915_power_domain_info(struct seq_file *m, void *unused)
2835 {
2836 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2837 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2838 int i;
2839
2840 mutex_lock(&power_domains->lock);
2841
2842 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2843 for (i = 0; i < power_domains->power_well_count; i++) {
2844 struct i915_power_well *power_well;
2845 enum intel_display_power_domain power_domain;
2846
2847 power_well = &power_domains->power_wells[i];
2848 seq_printf(m, "%-25s %d\n", power_well->name,
2849 power_well->count);
2850
2851 for_each_power_domain(power_domain, power_well->domains)
2852 seq_printf(m, " %-23s %d\n",
2853 intel_display_power_domain_str(power_domain),
2854 power_domains->domain_use_count[power_domain]);
2855 }
2856
2857 mutex_unlock(&power_domains->lock);
2858
2859 return 0;
2860 }
2861
2862 static int i915_dmc_info(struct seq_file *m, void *unused)
2863 {
2864 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2865 struct intel_csr *csr;
2866
2867 if (!HAS_CSR(dev_priv))
2868 return -ENODEV;
2869
2870 csr = &dev_priv->csr;
2871
2872 intel_runtime_pm_get(dev_priv);
2873
2874 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2875 seq_printf(m, "path: %s\n", csr->fw_path);
2876
2877 if (!csr->dmc_payload)
2878 goto out;
2879
2880 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2881 CSR_VERSION_MINOR(csr->version));
2882
2883 if (IS_KABYLAKE(dev_priv) ||
2884 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2885 seq_printf(m, "DC3 -> DC5 count: %d\n",
2886 I915_READ(SKL_CSR_DC3_DC5_COUNT));
2887 seq_printf(m, "DC5 -> DC6 count: %d\n",
2888 I915_READ(SKL_CSR_DC5_DC6_COUNT));
2889 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2890 seq_printf(m, "DC3 -> DC5 count: %d\n",
2891 I915_READ(BXT_CSR_DC3_DC5_COUNT));
2892 }
2893
2894 out:
2895 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2896 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2897 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2898
2899 intel_runtime_pm_put(dev_priv);
2900
2901 return 0;
2902 }
2903
2904 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2905 struct drm_display_mode *mode)
2906 {
2907 int i;
2908
2909 for (i = 0; i < tabs; i++)
2910 seq_putc(m, '\t');
2911
2912 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2913 mode->base.id, mode->name,
2914 mode->vrefresh, mode->clock,
2915 mode->hdisplay, mode->hsync_start,
2916 mode->hsync_end, mode->htotal,
2917 mode->vdisplay, mode->vsync_start,
2918 mode->vsync_end, mode->vtotal,
2919 mode->type, mode->flags);
2920 }
2921
2922 static void intel_encoder_info(struct seq_file *m,
2923 struct intel_crtc *intel_crtc,
2924 struct intel_encoder *intel_encoder)
2925 {
2926 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2927 struct drm_device *dev = &dev_priv->drm;
2928 struct drm_crtc *crtc = &intel_crtc->base;
2929 struct intel_connector *intel_connector;
2930 struct drm_encoder *encoder;
2931
2932 encoder = &intel_encoder->base;
2933 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2934 encoder->base.id, encoder->name);
2935 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2936 struct drm_connector *connector = &intel_connector->base;
2937 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2938 connector->base.id,
2939 connector->name,
2940 drm_get_connector_status_name(connector->status));
2941 if (connector->status == connector_status_connected) {
2942 struct drm_display_mode *mode = &crtc->mode;
2943 seq_printf(m, ", mode:\n");
2944 intel_seq_print_mode(m, 2, mode);
2945 } else {
2946 seq_putc(m, '\n');
2947 }
2948 }
2949 }
2950
2951 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2952 {
2953 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2954 struct drm_device *dev = &dev_priv->drm;
2955 struct drm_crtc *crtc = &intel_crtc->base;
2956 struct intel_encoder *intel_encoder;
2957 struct drm_plane_state *plane_state = crtc->primary->state;
2958 struct drm_framebuffer *fb = plane_state->fb;
2959
2960 if (fb)
2961 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2962 fb->base.id, plane_state->src_x >> 16,
2963 plane_state->src_y >> 16, fb->width, fb->height);
2964 else
2965 seq_puts(m, "\tprimary plane disabled\n");
2966 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2967 intel_encoder_info(m, intel_crtc, intel_encoder);
2968 }
2969
2970 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2971 {
2972 struct drm_display_mode *mode = panel->fixed_mode;
2973
2974 seq_printf(m, "\tfixed mode:\n");
2975 intel_seq_print_mode(m, 2, mode);
2976 }
2977
2978 static void intel_dp_info(struct seq_file *m,
2979 struct intel_connector *intel_connector)
2980 {
2981 struct intel_encoder *intel_encoder = intel_connector->encoder;
2982 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2983
2984 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2985 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2986 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2987 intel_panel_info(m, &intel_connector->panel);
2988
2989 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2990 &intel_dp->aux);
2991 }
2992
2993 static void intel_dp_mst_info(struct seq_file *m,
2994 struct intel_connector *intel_connector)
2995 {
2996 struct intel_encoder *intel_encoder = intel_connector->encoder;
2997 struct intel_dp_mst_encoder *intel_mst =
2998 enc_to_mst(&intel_encoder->base);
2999 struct intel_digital_port *intel_dig_port = intel_mst->primary;
3000 struct intel_dp *intel_dp = &intel_dig_port->dp;
3001 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3002 intel_connector->port);
3003
3004 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3005 }
3006
3007 static void intel_hdmi_info(struct seq_file *m,
3008 struct intel_connector *intel_connector)
3009 {
3010 struct intel_encoder *intel_encoder = intel_connector->encoder;
3011 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3012
3013 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3014 }
3015
3016 static void intel_lvds_info(struct seq_file *m,
3017 struct intel_connector *intel_connector)
3018 {
3019 intel_panel_info(m, &intel_connector->panel);
3020 }
3021
3022 static void intel_connector_info(struct seq_file *m,
3023 struct drm_connector *connector)
3024 {
3025 struct intel_connector *intel_connector = to_intel_connector(connector);
3026 struct intel_encoder *intel_encoder = intel_connector->encoder;
3027 struct drm_display_mode *mode;
3028
3029 seq_printf(m, "connector %d: type %s, status: %s\n",
3030 connector->base.id, connector->name,
3031 drm_get_connector_status_name(connector->status));
3032 if (connector->status == connector_status_connected) {
3033 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3034 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3035 connector->display_info.width_mm,
3036 connector->display_info.height_mm);
3037 seq_printf(m, "\tsubpixel order: %s\n",
3038 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3039 seq_printf(m, "\tCEA rev: %d\n",
3040 connector->display_info.cea_rev);
3041 }
3042
3043 if (!intel_encoder)
3044 return;
3045
3046 switch (connector->connector_type) {
3047 case DRM_MODE_CONNECTOR_DisplayPort:
3048 case DRM_MODE_CONNECTOR_eDP:
3049 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3050 intel_dp_mst_info(m, intel_connector);
3051 else
3052 intel_dp_info(m, intel_connector);
3053 break;
3054 case DRM_MODE_CONNECTOR_LVDS:
3055 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3056 intel_lvds_info(m, intel_connector);
3057 break;
3058 case DRM_MODE_CONNECTOR_HDMIA:
3059 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3060 intel_encoder->type == INTEL_OUTPUT_DDI)
3061 intel_hdmi_info(m, intel_connector);
3062 break;
3063 default:
3064 break;
3065 }
3066
3067 seq_printf(m, "\tmodes:\n");
3068 list_for_each_entry(mode, &connector->modes, head)
3069 intel_seq_print_mode(m, 2, mode);
3070 }
3071
3072 static const char *plane_type(enum drm_plane_type type)
3073 {
3074 switch (type) {
3075 case DRM_PLANE_TYPE_OVERLAY:
3076 return "OVL";
3077 case DRM_PLANE_TYPE_PRIMARY:
3078 return "PRI";
3079 case DRM_PLANE_TYPE_CURSOR:
3080 return "CUR";
3081 /*
3082 * Deliberately omitting default: to generate compiler warnings
3083 * when a new drm_plane_type gets added.
3084 */
3085 }
3086
3087 return "unknown";
3088 }
3089
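/*
 * Note: plane_rotation() formats into a static buffer, so the returned
 * string is only valid until the next call and the helper is not safe
 * for concurrent use. That is tolerated here because this is
 * best-effort debug output; concurrent readers of the debugfs file
 * could at worst observe a torn string.
 */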
3090 static const char *plane_rotation(unsigned int rotation)
3091 {
3092 static char buf[48];
3093 /*
3094 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
3095 * but print them all so that misused values are easy to spot
3096 */
3097 snprintf(buf, sizeof(buf),
3098 "%s%s%s%s%s%s(0x%08x)",
3099 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3100 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3101 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3102 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3103 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3104 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3105 rotation);
3106
3107 return buf;
3108 }
3109
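/*
 * Plane source coordinates are 16.16 fixed point. The fractional part
 * is printed below via (frac * 15625) >> 10, which is an overflow-safe
 * rewrite of frac * 1000000 / 65536, i.e. the fraction in millionths:
 * e.g. a fraction of 0x8000 (one half) prints as 500000.
 */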
3110 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3111 {
3112 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3113 struct drm_device *dev = &dev_priv->drm;
3114 struct intel_plane *intel_plane;
3115
3116 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3117 struct drm_plane_state *state;
3118 struct drm_plane *plane = &intel_plane->base;
3119 struct drm_format_name_buf format_name;
3120
3121 if (!plane->state) {
3122 seq_puts(m, "plane->state is NULL!\n");
3123 continue;
3124 }
3125
3126 state = plane->state;
3127
3128 if (state->fb) {
3129 drm_get_format_name(state->fb->format->format,
3130 &format_name);
3131 } else {
3132 sprintf(format_name.str, "N/A");
3133 }
3134
3135 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3136 plane->base.id,
3137 plane_type(intel_plane->base.type),
3138 state->crtc_x, state->crtc_y,
3139 state->crtc_w, state->crtc_h,
3140 (state->src_x >> 16),
3141 ((state->src_x & 0xffff) * 15625) >> 10,
3142 (state->src_y >> 16),
3143 ((state->src_y & 0xffff) * 15625) >> 10,
3144 (state->src_w >> 16),
3145 ((state->src_w & 0xffff) * 15625) >> 10,
3146 (state->src_h >> 16),
3147 ((state->src_h & 0xffff) * 15625) >> 10,
3148 format_name.str,
3149 plane_rotation(state->rotation));
3150 }
3151 }
3152
3153 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3154 {
3155 struct intel_crtc_state *pipe_config;
3156 int num_scalers = intel_crtc->num_scalers;
3157 int i;
3158
3159 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3160
3161 /* Not all platforms have a scaler */
3162 if (num_scalers) {
3163 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3164 num_scalers,
3165 pipe_config->scaler_state.scaler_users,
3166 pipe_config->scaler_state.scaler_id);
3167
3168 for (i = 0; i < num_scalers; i++) {
3169 struct intel_scaler *sc =
3170 &pipe_config->scaler_state.scalers[i];
3171
3172 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3173 i, yesno(sc->in_use), sc->mode);
3174 }
3175 seq_puts(m, "\n");
3176 } else {
3177 seq_puts(m, "\tNo scalers available on this platform\n");
3178 }
3179 }
3180
3181 static int i915_display_info(struct seq_file *m, void *unused)
3182 {
3183 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3184 struct drm_device *dev = &dev_priv->drm;
3185 struct intel_crtc *crtc;
3186 struct drm_connector *connector;
3187 struct drm_connector_list_iter conn_iter;
3188
3189 intel_runtime_pm_get(dev_priv);
3190 seq_printf(m, "CRTC info\n");
3191 seq_printf(m, "---------\n");
3192 for_each_intel_crtc(dev, crtc) {
3193 struct intel_crtc_state *pipe_config;
3194
3195 drm_modeset_lock(&crtc->base.mutex, NULL);
3196 pipe_config = to_intel_crtc_state(crtc->base.state);
3197
3198 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3199 crtc->base.base.id, pipe_name(crtc->pipe),
3200 yesno(pipe_config->base.active),
3201 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3202 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3203
3204 if (pipe_config->base.active) {
3205 struct intel_plane *cursor =
3206 to_intel_plane(crtc->base.cursor);
3207
3208 intel_crtc_info(m, crtc);
3209
3210 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3211 yesno(cursor->base.state->visible),
3212 cursor->base.state->crtc_x,
3213 cursor->base.state->crtc_y,
3214 cursor->base.state->crtc_w,
3215 cursor->base.state->crtc_h,
3216 cursor->cursor.base);
3217 intel_scaler_info(m, crtc);
3218 intel_plane_info(m, crtc);
3219 }
3220
3221 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3222 yesno(!crtc->cpu_fifo_underrun_disabled),
3223 yesno(!crtc->pch_fifo_underrun_disabled));
3224 drm_modeset_unlock(&crtc->base.mutex);
3225 }
3226
3227 seq_printf(m, "\n");
3228 seq_printf(m, "Connector info\n");
3229 seq_printf(m, "--------------\n");
3230 mutex_lock(&dev->mode_config.mutex);
3231 drm_connector_list_iter_begin(dev, &conn_iter);
3232 drm_for_each_connector_iter(connector, &conn_iter)
3233 intel_connector_info(m, connector);
3234 drm_connector_list_iter_end(&conn_iter);
3235 mutex_unlock(&dev->mode_config.mutex);
3236
3237 intel_runtime_pm_put(dev_priv);
3238
3239 return 0;
3240 }
3241
3242 static int i915_engine_info(struct seq_file *m, void *unused)
3243 {
3244 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3245 struct intel_engine_cs *engine;
3246 enum intel_engine_id id;
3247 struct drm_printer p;
3248
3249 intel_runtime_pm_get(dev_priv);
3250
3251 seq_printf(m, "GT awake? %s (epoch %u)\n",
3252 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3253 seq_printf(m, "Global active requests: %d\n",
3254 dev_priv->gt.active_requests);
3255 seq_printf(m, "CS timestamp frequency: %u kHz\n",
3256 dev_priv->info.cs_timestamp_frequency_khz);
3257
3258 p = drm_seq_file_printer(m);
3259 for_each_engine(engine, dev_priv, id)
3260 intel_engine_dump(engine, &p, "%s\n", engine->name);
3261
3262 intel_runtime_pm_put(dev_priv);
3263
3264 return 0;
3265 }
3266
3267 static int i915_rcs_topology(struct seq_file *m, void *unused)
3268 {
3269 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3270 struct drm_printer p = drm_seq_file_printer(m);
3271
3272 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3273
3274 return 0;
3275 }
3276
3277 static int i915_shrinker_info(struct seq_file *m, void *unused)
3278 {
3279 struct drm_i915_private *i915 = node_to_i915(m->private);
3280
3281 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3282 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3283
3284 return 0;
3285 }
3286
3287 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3288 {
3289 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3290 struct drm_device *dev = &dev_priv->drm;
3291 int i;
3292
3293 drm_modeset_lock_all(dev);
3294 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3295 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3296
3297 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3298 pll->info->id);
3299 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3300 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3301 seq_printf(m, " tracked hardware state:\n");
3302 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
3303 seq_printf(m, " dpll_md: 0x%08x\n",
3304 pll->state.hw_state.dpll_md);
3305 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3306 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3307 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
3308 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3309 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3310 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3311 pll->state.hw_state.mg_refclkin_ctl);
3312 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3313 pll->state.hw_state.mg_clktop2_coreclkctl1);
3314 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3315 pll->state.hw_state.mg_clktop2_hsclkctl);
3316 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3317 pll->state.hw_state.mg_pll_div0);
3318 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3319 pll->state.hw_state.mg_pll_div1);
3320 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3321 pll->state.hw_state.mg_pll_lf);
3322 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3323 pll->state.hw_state.mg_pll_frac_lock);
3324 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3325 pll->state.hw_state.mg_pll_ssc);
3326 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3327 pll->state.hw_state.mg_pll_bias);
3328 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3329 pll->state.hw_state.mg_pll_tdc_coldst_bias);
3330 }
3331 drm_modeset_unlock_all(dev);
3332
3333 return 0;
3334 }
3335
3336 static int i915_wa_registers(struct seq_file *m, void *unused)
3337 {
3338 struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
3339 int i;
3340
3341 seq_printf(m, "Workarounds applied: %d\n", wa->count);
3342 for (i = 0; i < wa->count; ++i)
3343 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3344 wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
3345
3346 return 0;
3347 }
3348
3349 static int i915_ipc_status_show(struct seq_file *m, void *data)
3350 {
3351 struct drm_i915_private *dev_priv = m->private;
3352
3353 seq_printf(m, "Isochronous Priority Control: %s\n",
3354 yesno(dev_priv->ipc_enabled));
3355 return 0;
3356 }
3357
3358 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3359 {
3360 struct drm_i915_private *dev_priv = inode->i_private;
3361
3362 if (!HAS_IPC(dev_priv))
3363 return -ENODEV;
3364
3365 return single_open(file, i915_ipc_status_show, dev_priv);
3366 }
3367
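/*
 * The write handler accepts anything kstrtobool_from_user() does
 * ("1"/"0", "y"/"n", "on"/"off"), so IPC can be toggled with e.g.
 * (the debugfs path is an assumption):
 *
 *   # echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
 */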
3368 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3369 size_t len, loff_t *offp)
3370 {
3371 struct seq_file *m = file->private_data;
3372 struct drm_i915_private *dev_priv = m->private;
3373 int ret;
3374 bool enable;
3375
3376 ret = kstrtobool_from_user(ubuf, len, &enable);
3377 if (ret < 0)
3378 return ret;
3379
3380 intel_runtime_pm_get(dev_priv);
3381 if (!dev_priv->ipc_enabled && enable)
3382 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3383 dev_priv->wm.distrust_bios_wm = true;
3384 dev_priv->ipc_enabled = enable;
3385 intel_enable_ipc(dev_priv);
3386 intel_runtime_pm_put(dev_priv);
3387
3388 return len;
3389 }
3390
3391 static const struct file_operations i915_ipc_status_fops = {
3392 .owner = THIS_MODULE,
3393 .open = i915_ipc_status_open,
3394 .read = seq_read,
3395 .llseek = seq_lseek,
3396 .release = single_release,
3397 .write = i915_ipc_status_write
3398 };
3399
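/*
 * The DDB (display data buffer) table below lists, per pipe and plane,
 * the block range handed out by the skl+ watermark code;
 * skl_ddb_entry_size() is simply end - start.
 */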
3400 static int i915_ddb_info(struct seq_file *m, void *unused)
3401 {
3402 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3403 struct drm_device *dev = &dev_priv->drm;
3404 struct skl_ddb_allocation *ddb;
3405 struct skl_ddb_entry *entry;
3406 enum pipe pipe;
3407 int plane;
3408
3409 if (INTEL_GEN(dev_priv) < 9)
3410 return -ENODEV;
3411
3412 drm_modeset_lock_all(dev);
3413
3414 ddb = &dev_priv->wm.skl_hw.ddb;
3415
3416 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3417
3418 for_each_pipe(dev_priv, pipe) {
3419 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3420
3421 for_each_universal_plane(dev_priv, pipe, plane) {
3422 entry = &ddb->plane[pipe][plane];
3423 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
3424 entry->start, entry->end,
3425 skl_ddb_entry_size(entry));
3426 }
3427
3428 entry = &ddb->plane[pipe][PLANE_CURSOR];
3429 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3430 entry->end, skl_ddb_entry_size(entry));
3431 }
3432
3433 drm_modeset_unlock_all(dev);
3434
3435 return 0;
3436 }
3437
3438 static void drrs_status_per_crtc(struct seq_file *m,
3439 struct drm_device *dev,
3440 struct intel_crtc *intel_crtc)
3441 {
3442 struct drm_i915_private *dev_priv = to_i915(dev);
3443 struct i915_drrs *drrs = &dev_priv->drrs;
3444 int vrefresh = 0;
3445 struct drm_connector *connector;
3446 struct drm_connector_list_iter conn_iter;
3447
3448 drm_connector_list_iter_begin(dev, &conn_iter);
3449 drm_for_each_connector_iter(connector, &conn_iter) {
3450 if (connector->state->crtc != &intel_crtc->base)
3451 continue;
3452
3453 seq_printf(m, "%s:\n", connector->name);
3454 }
3455 drm_connector_list_iter_end(&conn_iter);
3456
3457 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3458 seq_puts(m, "\tVBT: DRRS_type: Static");
3459 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3460 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3461 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3462 seq_puts(m, "\tVBT: DRRS_type: None");
3463 else
3464 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3465
3466 seq_puts(m, "\n\n");
3467
3468 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3469 struct intel_panel *panel;
3470
3471 mutex_lock(&drrs->mutex);
3472 /* DRRS Supported */
3473 seq_puts(m, "\tDRRS Supported: Yes\n");
3474
3475 /* disable_drrs() will make drrs->dp NULL */
3476 if (!drrs->dp) {
3477 seq_puts(m, "Idleness DRRS: Disabled\n");
3478 if (dev_priv->psr.enabled)
3479 seq_puts(m,
3480 "\tAs PSR is enabled, DRRS is not enabled\n");
3481 mutex_unlock(&drrs->mutex);
3482 return;
3483 }
3484
3485 panel = &drrs->dp->attached_connector->panel;
3486 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3487 drrs->busy_frontbuffer_bits);
3488
3489 seq_puts(m, "\n\t\t");
3490 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3491 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3492 vrefresh = panel->fixed_mode->vrefresh;
3493 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3494 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3495 vrefresh = panel->downclock_mode->vrefresh;
3496 } else {
3497 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3498 drrs->refresh_rate_type);
3499 mutex_unlock(&drrs->mutex);
3500 return;
3501 }
3502 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3503
3504 seq_puts(m, "\n\t\t");
3505 mutex_unlock(&drrs->mutex);
3506 } else {
3507 /* DRRS not supported. Print the VBT parameter. */
3508 seq_puts(m, "\tDRRS Supported : No");
3509 }
3510 seq_puts(m, "\n");
3511 }
3512
3513 static int i915_drrs_status(struct seq_file *m, void *unused)
3514 {
3515 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3516 struct drm_device *dev = &dev_priv->drm;
3517 struct intel_crtc *intel_crtc;
3518 int active_crtc_cnt = 0;
3519
3520 drm_modeset_lock_all(dev);
3521 for_each_intel_crtc(dev, intel_crtc) {
3522 if (intel_crtc->base.state->active) {
3523 active_crtc_cnt++;
3524 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3525
3526 drrs_status_per_crtc(m, dev, intel_crtc);
3527 }
3528 }
3529 drm_modeset_unlock_all(dev);
3530
3531 if (!active_crtc_cnt)
3532 seq_puts(m, "No active crtc found\n");
3533
3534 return 0;
3535 }
3536
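/*
 * i915_dp_mst_info: walks the connector list and, for every DP source
 * port whose encoder is MST-capable, dumps the current MST topology via
 * drm_dp_mst_dump_topology().
 */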
3537 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3538 {
3539 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3540 struct drm_device *dev = &dev_priv->drm;
3541 struct intel_encoder *intel_encoder;
3542 struct intel_digital_port *intel_dig_port;
3543 struct drm_connector *connector;
3544 struct drm_connector_list_iter conn_iter;
3545
3546 drm_connector_list_iter_begin(dev, &conn_iter);
3547 drm_for_each_connector_iter(connector, &conn_iter) {
3548 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3549 continue;
3550
3551 intel_encoder = intel_attached_encoder(connector);
3552 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3553 continue;
3554
3555 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3556 if (!intel_dig_port->dp.can_mst)
3557 continue;
3558
3559 seq_printf(m, "MST Source Port %c\n",
3560 port_name(intel_dig_port->base.port));
3561 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3562 }
3563 drm_connector_list_iter_end(&conn_iter);
3564
3565 return 0;
3566 }
3567
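/*
 * i915_dp_test_active: DP compliance-test hook. Writing "1" marks the
 * compliance test as active on every connected non-MST DP connector;
 * any other value clears the flag. Reading returns the flag (1/0) per
 * connector. Example (path assumes the usual debugfs mount point):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 */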
3568 static ssize_t i915_displayport_test_active_write(struct file *file,
3569 const char __user *ubuf,
3570 size_t len, loff_t *offp)
3571 {
3572 char *input_buffer;
3573 int status = 0;
3574 struct drm_device *dev;
3575 struct drm_connector *connector;
3576 struct drm_connector_list_iter conn_iter;
3577 struct intel_dp *intel_dp;
3578 int val = 0;
3579
3580 dev = ((struct seq_file *)file->private_data)->private;
3581
3582 if (len == 0)
3583 return 0;
3584
3585 input_buffer = memdup_user_nul(ubuf, len);
3586 if (IS_ERR(input_buffer))
3587 return PTR_ERR(input_buffer);
3588
3589 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3590
3591 drm_connector_list_iter_begin(dev, &conn_iter);
3592 drm_for_each_connector_iter(connector, &conn_iter) {
3593 struct intel_encoder *encoder;
3594
3595 if (connector->connector_type !=
3596 DRM_MODE_CONNECTOR_DisplayPort)
3597 continue;
3598
3599 encoder = to_intel_encoder(connector->encoder);
3600 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3601 continue;
3602
3603 if (encoder && connector->status == connector_status_connected) {
3604 intel_dp = enc_to_intel_dp(&encoder->base);
3605 status = kstrtoint(input_buffer, 10, &val);
3606 if (status < 0)
3607 break;
3608 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3609 /* To prevent erroneous activation of the compliance
3610 * testing code, only accept an actual value of 1 here
3611 */
3612 if (val == 1)
3613 intel_dp->compliance.test_active = 1;
3614 else
3615 intel_dp->compliance.test_active = 0;
3616 }
3617 }
3618 drm_connector_list_iter_end(&conn_iter);
3619 kfree(input_buffer);
3620 if (status < 0)
3621 return status;
3622
3623 *offp += len;
3624 return len;
3625 }
3626
3627 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3628 {
3629 struct drm_i915_private *dev_priv = m->private;
3630 struct drm_device *dev = &dev_priv->drm;
3631 struct drm_connector *connector;
3632 struct drm_connector_list_iter conn_iter;
3633 struct intel_dp *intel_dp;
3634
3635 drm_connector_list_iter_begin(dev, &conn_iter);
3636 drm_for_each_connector_iter(connector, &conn_iter) {
3637 struct intel_encoder *encoder;
3638
3639 if (connector->connector_type !=
3640 DRM_MODE_CONNECTOR_DisplayPort)
3641 continue;
3642
3643 encoder = to_intel_encoder(connector->encoder);
3644 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3645 continue;
3646
3647 if (encoder && connector->status == connector_status_connected) {
3648 intel_dp = enc_to_intel_dp(&encoder->base);
3649 if (intel_dp->compliance.test_active)
3650 seq_puts(m, "1");
3651 else
3652 seq_puts(m, "0");
3653 } else
3654 seq_puts(m, "0");
3655 }
3656 drm_connector_list_iter_end(&conn_iter);
3657
3658 return 0;
3659 }
3660
3661 static int i915_displayport_test_active_open(struct inode *inode,
3662 struct file *file)
3663 {
3664 return single_open(file, i915_displayport_test_active_show,
3665 inode->i_private);
3666 }
3667
3668 static const struct file_operations i915_displayport_test_active_fops = {
3669 .owner = THIS_MODULE,
3670 .open = i915_displayport_test_active_open,
3671 .read = seq_read,
3672 .llseek = seq_lseek,
3673 .release = single_release,
3674 .write = i915_displayport_test_active_write
3675 };
3676
3677 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3678 {
3679 struct drm_i915_private *dev_priv = m->private;
3680 struct drm_device *dev = &dev_priv->drm;
3681 struct drm_connector *connector;
3682 struct drm_connector_list_iter conn_iter;
3683 struct intel_dp *intel_dp;
3684
3685 drm_connector_list_iter_begin(dev, &conn_iter);
3686 drm_for_each_connector_iter(connector, &conn_iter) {
3687 struct intel_encoder *encoder;
3688
3689 if (connector->connector_type !=
3690 DRM_MODE_CONNECTOR_DisplayPort)
3691 continue;
3692
3693 encoder = to_intel_encoder(connector->encoder);
3694 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3695 continue;
3696
3697 if (encoder && connector->status == connector_status_connected) {
3698 intel_dp = enc_to_intel_dp(&encoder->base);
3699 if (intel_dp->compliance.test_type ==
3700 DP_TEST_LINK_EDID_READ)
3701 seq_printf(m, "%lx",
3702 intel_dp->compliance.test_data.edid);
3703 else if (intel_dp->compliance.test_type ==
3704 DP_TEST_LINK_VIDEO_PATTERN) {
3705 seq_printf(m, "hdisplay: %d\n",
3706 intel_dp->compliance.test_data.hdisplay);
3707 seq_printf(m, "vdisplay: %d\n",
3708 intel_dp->compliance.test_data.vdisplay);
3709 seq_printf(m, "bpc: %u\n",
3710 intel_dp->compliance.test_data.bpc);
3711 }
3712 } else
3713 seq_puts(m, "0");
3714 }
3715 drm_connector_list_iter_end(&conn_iter);
3716
3717 return 0;
3718 }
3719 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3720
3721 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3722 {
3723 struct drm_i915_private *dev_priv = m->private;
3724 struct drm_device *dev = &dev_priv->drm;
3725 struct drm_connector *connector;
3726 struct drm_connector_list_iter conn_iter;
3727 struct intel_dp *intel_dp;
3728
3729 drm_connector_list_iter_begin(dev, &conn_iter);
3730 drm_for_each_connector_iter(connector, &conn_iter) {
3731 struct intel_encoder *encoder;
3732
3733 if (connector->connector_type !=
3734 DRM_MODE_CONNECTOR_DisplayPort)
3735 continue;
3736
3737 encoder = to_intel_encoder(connector->encoder);
3738 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3739 continue;
3740
3741 if (encoder && connector->status == connector_status_connected) {
3742 intel_dp = enc_to_intel_dp(&encoder->base);
3743 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3744 } else
3745 seq_puts(m, "0");
3746 }
3747 drm_connector_list_iter_end(&conn_iter);
3748
3749 return 0;
3750 }
3751 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3752
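/*
 * Watermark latency files (i915_{pri,spr,cur}_wm_latency): reading
 * prints one "WMn <raw> (<usec>)" line per level, scaling the raw value
 * for display (WM1+ is in 0.5us units on older platforms; gen9+, vlv,
 * chv and g4x report in us). Writing expects exactly one raw value per
 * level supported by the platform, otherwise -EINVAL. For example, on
 * an 8-level part (values purely illustrative):
 *
 *   echo "2 4 8 16 32 48 64 80" > i915_pri_wm_latency
 */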
3753 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3754 {
3755 struct drm_i915_private *dev_priv = m->private;
3756 struct drm_device *dev = &dev_priv->drm;
3757 int level;
3758 int num_levels;
3759
3760 if (IS_CHERRYVIEW(dev_priv))
3761 num_levels = 3;
3762 else if (IS_VALLEYVIEW(dev_priv))
3763 num_levels = 1;
3764 else if (IS_G4X(dev_priv))
3765 num_levels = 3;
3766 else
3767 num_levels = ilk_wm_max_level(dev_priv) + 1;
3768
3769 drm_modeset_lock_all(dev);
3770
3771 for (level = 0; level < num_levels; level++) {
3772 unsigned int latency = wm[level];
3773
3774 /*
3775 * - WM1+ latency values in 0.5us units
3776 * - latencies are in us on gen9/vlv/chv
3777 */
3778 if (INTEL_GEN(dev_priv) >= 9 ||
3779 IS_VALLEYVIEW(dev_priv) ||
3780 IS_CHERRYVIEW(dev_priv) ||
3781 IS_G4X(dev_priv))
3782 latency *= 10;
3783 else if (level > 0)
3784 latency *= 5;
3785
3786 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3787 level, wm[level], latency / 10, latency % 10);
3788 }
3789
3790 drm_modeset_unlock_all(dev);
3791 }
3792
3793 static int pri_wm_latency_show(struct seq_file *m, void *data)
3794 {
3795 struct drm_i915_private *dev_priv = m->private;
3796 const uint16_t *latencies;
3797
3798 if (INTEL_GEN(dev_priv) >= 9)
3799 latencies = dev_priv->wm.skl_latency;
3800 else
3801 latencies = dev_priv->wm.pri_latency;
3802
3803 wm_latency_show(m, latencies);
3804
3805 return 0;
3806 }
3807
3808 static int spr_wm_latency_show(struct seq_file *m, void *data)
3809 {
3810 struct drm_i915_private *dev_priv = m->private;
3811 const uint16_t *latencies;
3812
3813 if (INTEL_GEN(dev_priv) >= 9)
3814 latencies = dev_priv->wm.skl_latency;
3815 else
3816 latencies = dev_priv->wm.spr_latency;
3817
3818 wm_latency_show(m, latencies);
3819
3820 return 0;
3821 }
3822
3823 static int cur_wm_latency_show(struct seq_file *m, void *data)
3824 {
3825 struct drm_i915_private *dev_priv = m->private;
3826 const uint16_t *latencies;
3827
3828 if (INTEL_GEN(dev_priv) >= 9)
3829 latencies = dev_priv->wm.skl_latency;
3830 else
3831 latencies = dev_priv->wm.cur_latency;
3832
3833 wm_latency_show(m, latencies);
3834
3835 return 0;
3836 }
3837
3838 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3839 {
3840 struct drm_i915_private *dev_priv = inode->i_private;
3841
3842 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3843 return -ENODEV;
3844
3845 return single_open(file, pri_wm_latency_show, dev_priv);
3846 }
3847
3848 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3849 {
3850 struct drm_i915_private *dev_priv = inode->i_private;
3851
3852 if (HAS_GMCH_DISPLAY(dev_priv))
3853 return -ENODEV;
3854
3855 return single_open(file, spr_wm_latency_show, dev_priv);
3856 }
3857
3858 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3859 {
3860 struct drm_i915_private *dev_priv = inode->i_private;
3861
3862 if (HAS_GMCH_DISPLAY(dev_priv))
3863 return -ENODEV;
3864
3865 return single_open(file, cur_wm_latency_show, dev_priv);
3866 }
3867
3868 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3869 size_t len, loff_t *offp, uint16_t wm[8])
3870 {
3871 struct seq_file *m = file->private_data;
3872 struct drm_i915_private *dev_priv = m->private;
3873 struct drm_device *dev = &dev_priv->drm;
3874 uint16_t new[8] = { 0 };
3875 int num_levels;
3876 int level;
3877 int ret;
3878 char tmp[32];
3879
3880 if (IS_CHERRYVIEW(dev_priv))
3881 num_levels = 3;
3882 else if (IS_VALLEYVIEW(dev_priv))
3883 num_levels = 1;
3884 else if (IS_G4X(dev_priv))
3885 num_levels = 3;
3886 else
3887 num_levels = ilk_wm_max_level(dev_priv) + 1;
3888
3889 if (len >= sizeof(tmp))
3890 return -EINVAL;
3891
3892 if (copy_from_user(tmp, ubuf, len))
3893 return -EFAULT;
3894
3895 tmp[len] = '\0';
3896
3897 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3898 &new[0], &new[1], &new[2], &new[3],
3899 &new[4], &new[5], &new[6], &new[7]);
3900 if (ret != num_levels)
3901 return -EINVAL;
3902
3903 drm_modeset_lock_all(dev);
3904
3905 for (level = 0; level < num_levels; level++)
3906 wm[level] = new[level];
3907
3908 drm_modeset_unlock_all(dev);
3909
3910 return len;
3911 }
3912
3913
3914 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3915 size_t len, loff_t *offp)
3916 {
3917 struct seq_file *m = file->private_data;
3918 struct drm_i915_private *dev_priv = m->private;
3919 uint16_t *latencies;
3920
3921 if (INTEL_GEN(dev_priv) >= 9)
3922 latencies = dev_priv->wm.skl_latency;
3923 else
3924 latencies = dev_priv->wm.pri_latency;
3925
3926 return wm_latency_write(file, ubuf, len, offp, latencies);
3927 }
3928
3929 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3930 size_t len, loff_t *offp)
3931 {
3932 struct seq_file *m = file->private_data;
3933 struct drm_i915_private *dev_priv = m->private;
3934 uint16_t *latencies;
3935
3936 if (INTEL_GEN(dev_priv) >= 9)
3937 latencies = dev_priv->wm.skl_latency;
3938 else
3939 latencies = dev_priv->wm.spr_latency;
3940
3941 return wm_latency_write(file, ubuf, len, offp, latencies);
3942 }
3943
3944 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3945 size_t len, loff_t *offp)
3946 {
3947 struct seq_file *m = file->private_data;
3948 struct drm_i915_private *dev_priv = m->private;
3949 uint16_t *latencies;
3950
3951 if (INTEL_GEN(dev_priv) >= 9)
3952 latencies = dev_priv->wm.skl_latency;
3953 else
3954 latencies = dev_priv->wm.cur_latency;
3955
3956 return wm_latency_write(file, ubuf, len, offp, latencies);
3957 }
3958
3959 static const struct file_operations i915_pri_wm_latency_fops = {
3960 .owner = THIS_MODULE,
3961 .open = pri_wm_latency_open,
3962 .read = seq_read,
3963 .llseek = seq_lseek,
3964 .release = single_release,
3965 .write = pri_wm_latency_write
3966 };
3967
3968 static const struct file_operations i915_spr_wm_latency_fops = {
3969 .owner = THIS_MODULE,
3970 .open = spr_wm_latency_open,
3971 .read = seq_read,
3972 .llseek = seq_lseek,
3973 .release = single_release,
3974 .write = spr_wm_latency_write
3975 };
3976
3977 static const struct file_operations i915_cur_wm_latency_fops = {
3978 .owner = THIS_MODULE,
3979 .open = cur_wm_latency_open,
3980 .read = seq_read,
3981 .llseek = seq_lseek,
3982 .release = single_release,
3983 .write = cur_wm_latency_write
3984 };
3985
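/*
 * i915_wedged: reading reports whether the GPU is terminally wedged.
 * Writing an engine mask marks those engines as stalled and kicks off
 * error handling/reset, then waits for the reset handoff. E.g. (mask
 * illustrative; bit 0 is the render engine on this vintage):
 *
 *   echo 0x1 > i915_wedged
 */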
3986 static int
3987 i915_wedged_get(void *data, u64 *val)
3988 {
3989 struct drm_i915_private *dev_priv = data;
3990
3991 *val = i915_terminally_wedged(&dev_priv->gpu_error);
3992
3993 return 0;
3994 }
3995
3996 static int
3997 i915_wedged_set(void *data, u64 val)
3998 {
3999 struct drm_i915_private *i915 = data;
4000 struct intel_engine_cs *engine;
4001 unsigned int tmp;
4002
4003 /*
4004 * There is no safeguard against this debugfs entry colliding
4005 * with hangcheck calling the same i915_handle_error() in
4006 * parallel, causing an explosion. For now we assume that the
4007 * test harness is responsible enough not to inject gpu hangs
4008 * while it is writing to 'i915_wedged'.
4009 */
4010
4011 if (i915_reset_backoff(&i915->gpu_error))
4012 return -EAGAIN;
4013
4014 for_each_engine_masked(engine, i915, val, tmp) {
4015 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4016 engine->hangcheck.stalled = true;
4017 }
4018
4019 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4020 "Manually set wedged engine mask = %llx", val);
4021
4022 wait_on_bit(&i915->gpu_error.flags,
4023 I915_RESET_HANDOFF,
4024 TASK_UNINTERRUPTIBLE);
4025
4026 return 0;
4027 }
4028
4029 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4030 i915_wedged_get, i915_wedged_set,
4031 "%llu\n");
4032
4033 static int
4034 fault_irq_set(struct drm_i915_private *i915,
4035 unsigned long *irq,
4036 unsigned long val)
4037 {
4038 int err;
4039
4040 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4041 if (err)
4042 return err;
4043
4044 err = i915_gem_wait_for_idle(i915,
4045 I915_WAIT_LOCKED |
4046 I915_WAIT_INTERRUPTIBLE,
4047 MAX_SCHEDULE_TIMEOUT);
4048 if (err)
4049 goto err_unlock;
4050
4051 *irq = val;
4052 mutex_unlock(&i915->drm.struct_mutex);
4053
4054 /* Flush idle worker to disarm irq */
4055 drain_delayed_work(&i915->gt.idle_work);
4056
4057 return 0;
4058
4059 err_unlock:
4060 mutex_unlock(&i915->drm.struct_mutex);
4061 return err;
4062 }
4063
4064 static int
4065 i915_ring_missed_irq_get(void *data, u64 *val)
4066 {
4067 struct drm_i915_private *dev_priv = data;
4068
4069 *val = dev_priv->gpu_error.missed_irq_rings;
4070 return 0;
4071 }
4072
4073 static int
4074 i915_ring_missed_irq_set(void *data, u64 val)
4075 {
4076 struct drm_i915_private *i915 = data;
4077
4078 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4079 }
4080
4081 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4082 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4083 "0x%08llx\n");
4084
4085 static int
4086 i915_ring_test_irq_get(void *data, u64 *val)
4087 {
4088 struct drm_i915_private *dev_priv = data;
4089
4090 *val = dev_priv->gpu_error.test_irq_rings;
4091
4092 return 0;
4093 }
4094
4095 static int
4096 i915_ring_test_irq_set(void *data, u64 val)
4097 {
4098 struct drm_i915_private *i915 = data;
4099
4100 val &= INTEL_INFO(i915)->ring_mask;
4101 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4102
4103 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4104 }
4105
4106 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4107 i915_ring_test_irq_get, i915_ring_test_irq_set,
4108 "0x%08llx\n");
4109
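/*
 * i915_gem_drop_caches: write a mask of the DROP_* bits below to shrink
 * or flush the corresponding GEM state; reading returns DROP_ALL. For
 * example, dropping everything (DROP_ALL is 0x7f here):
 *
 *   echo 0x7f > i915_gem_drop_caches
 */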
4110 #define DROP_UNBOUND BIT(0)
4111 #define DROP_BOUND BIT(1)
4112 #define DROP_RETIRE BIT(2)
4113 #define DROP_ACTIVE BIT(3)
4114 #define DROP_FREED BIT(4)
4115 #define DROP_SHRINK_ALL BIT(5)
4116 #define DROP_IDLE BIT(6)
4117 #define DROP_ALL (DROP_UNBOUND | \
4118 DROP_BOUND | \
4119 DROP_RETIRE | \
4120 DROP_ACTIVE | \
4121 DROP_FREED | \
4122 DROP_SHRINK_ALL |\
4123 DROP_IDLE)
4124 static int
4125 i915_drop_caches_get(void *data, u64 *val)
4126 {
4127 *val = DROP_ALL;
4128
4129 return 0;
4130 }
4131
4132 static int
4133 i915_drop_caches_set(void *data, u64 val)
4134 {
4135 struct drm_i915_private *dev_priv = data;
4136 struct drm_device *dev = &dev_priv->drm;
4137 int ret = 0;
4138
4139 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4140 val, val & DROP_ALL);
4141
4142 /* No need to check and wait for gpu resets; only libdrm auto-restarts
4143 * ioctls on -EAGAIN. */
4144 if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4145 ret = mutex_lock_interruptible(&dev->struct_mutex);
4146 if (ret)
4147 return ret;
4148
4149 if (val & DROP_ACTIVE)
4150 ret = i915_gem_wait_for_idle(dev_priv,
4151 I915_WAIT_INTERRUPTIBLE |
4152 I915_WAIT_LOCKED,
4153 MAX_SCHEDULE_TIMEOUT);
4154
4155 if (val & DROP_RETIRE)
4156 i915_retire_requests(dev_priv);
4157
4158 mutex_unlock(&dev->struct_mutex);
4159 }
4160
4161 fs_reclaim_acquire(GFP_KERNEL);
4162 if (val & DROP_BOUND)
4163 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4164
4165 if (val & DROP_UNBOUND)
4166 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4167
4168 if (val & DROP_SHRINK_ALL)
4169 i915_gem_shrink_all(dev_priv);
4170 fs_reclaim_release(GFP_KERNEL);
4171
4172 if (val & DROP_IDLE) {
4173 do {
4174 if (READ_ONCE(dev_priv->gt.active_requests))
4175 flush_delayed_work(&dev_priv->gt.retire_work);
4176 drain_delayed_work(&dev_priv->gt.idle_work);
4177 } while (READ_ONCE(dev_priv->gt.awake));
4178 }
4179
4180 if (val & DROP_FREED)
4181 i915_gem_drain_freed_objects(dev_priv);
4182
4183 return ret;
4184 }
4185
4186 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4187 i915_drop_caches_get, i915_drop_caches_set,
4188 "0x%08llx\n");
4189
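/*
 * i915_cache_sharing: exposes the uncore cache-sharing policy field of
 * GEN6_MBCUNIT_SNPCR on gen6/gen7 only (-ENODEV elsewhere). Valid
 * values are 0-3; anything larger is rejected with -EINVAL, e.g.:
 *
 *   echo 1 > i915_cache_sharing
 */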
4190 static int
4191 i915_cache_sharing_get(void *data, u64 *val)
4192 {
4193 struct drm_i915_private *dev_priv = data;
4194 u32 snpcr;
4195
4196 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4197 return -ENODEV;
4198
4199 intel_runtime_pm_get(dev_priv);
4200
4201 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4202
4203 intel_runtime_pm_put(dev_priv);
4204
4205 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4206
4207 return 0;
4208 }
4209
4210 static int
4211 i915_cache_sharing_set(void *data, u64 val)
4212 {
4213 struct drm_i915_private *dev_priv = data;
4214 u32 snpcr;
4215
4216 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4217 return -ENODEV;
4218
4219 if (val > 3)
4220 return -EINVAL;
4221
4222 intel_runtime_pm_get(dev_priv);
4223 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4224
4225 /* Update the cache sharing policy here as well */
4226 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4227 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4228 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4229 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4230
4231 intel_runtime_pm_put(dev_priv);
4232 return 0;
4233 }
4234
4235 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4236 i915_cache_sharing_get, i915_cache_sharing_set,
4237 "%llu\n");
4238
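/*
 * The *_sseu_device_status() helpers below derive the runtime SSEU
 * (slice/subslice/EU) state from the hardware power-gating signal/ACK
 * registers: slices and subslices reported as power-gated are skipped,
 * and EU counts are accumulated from the remaining ones. They feed
 * i915_sseu_status() further down.
 */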
4239 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4240 struct sseu_dev_info *sseu)
4241 {
4242 #define SS_MAX 2
4243 const int ss_max = SS_MAX;
4244 u32 sig1[SS_MAX], sig2[SS_MAX];
4245 int ss;
4246
4247 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4248 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4249 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4250 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4251
4252 for (ss = 0; ss < ss_max; ss++) {
4253 unsigned int eu_cnt;
4254
4255 if (sig1[ss] & CHV_SS_PG_ENABLE)
4256 /* skip disabled subslice */
4257 continue;
4258
4259 sseu->slice_mask = BIT(0);
4260 sseu->subslice_mask[0] |= BIT(ss);
4261 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4262 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4263 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4264 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4265 sseu->eu_total += eu_cnt;
4266 sseu->eu_per_subslice = max_t(unsigned int,
4267 sseu->eu_per_subslice, eu_cnt);
4268 }
4269 #undef SS_MAX
4270 }
4271
4272 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4273 struct sseu_dev_info *sseu)
4274 {
4275 #define SS_MAX 6
4276 const struct intel_device_info *info = INTEL_INFO(dev_priv);
4277 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4278 int s, ss;
4279
4280 for (s = 0; s < info->sseu.max_slices; s++) {
4281 /*
4282 * FIXME: Valid SS Mask respects the spec and reads
4283 * only valid bits for those registers, excluding reserved
4284 * ones, although this seems wrong because it would leave many
4285 * subslices without an ACK.
4286 */
4287 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4288 GEN10_PGCTL_VALID_SS_MASK(s);
4289 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4290 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4291 }
4292
4293 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4294 GEN9_PGCTL_SSA_EU19_ACK |
4295 GEN9_PGCTL_SSA_EU210_ACK |
4296 GEN9_PGCTL_SSA_EU311_ACK;
4297 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4298 GEN9_PGCTL_SSB_EU19_ACK |
4299 GEN9_PGCTL_SSB_EU210_ACK |
4300 GEN9_PGCTL_SSB_EU311_ACK;
4301
4302 for (s = 0; s < info->sseu.max_slices; s++) {
4303 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4304 /* skip disabled slice */
4305 continue;
4306
4307 sseu->slice_mask |= BIT(s);
4308 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4309
4310 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4311 unsigned int eu_cnt;
4312
4313 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4314 /* skip disabled subslice */
4315 continue;
4316
4317 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4318 eu_mask[ss % 2]);
4319 sseu->eu_total += eu_cnt;
4320 sseu->eu_per_subslice = max_t(unsigned int,
4321 sseu->eu_per_subslice,
4322 eu_cnt);
4323 }
4324 }
4325 #undef SS_MAX
4326 }
4327
4328 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4329 struct sseu_dev_info *sseu)
4330 {
4331 #define SS_MAX 3
4332 const struct intel_device_info *info = INTEL_INFO(dev_priv);
4333 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4334 int s, ss;
4335
4336 for (s = 0; s < info->sseu.max_slices; s++) {
4337 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4338 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4339 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4340 }
4341
4342 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4343 GEN9_PGCTL_SSA_EU19_ACK |
4344 GEN9_PGCTL_SSA_EU210_ACK |
4345 GEN9_PGCTL_SSA_EU311_ACK;
4346 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4347 GEN9_PGCTL_SSB_EU19_ACK |
4348 GEN9_PGCTL_SSB_EU210_ACK |
4349 GEN9_PGCTL_SSB_EU311_ACK;
4350
4351 for (s = 0; s < info->sseu.max_slices; s++) {
4352 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4353 /* skip disabled slice */
4354 continue;
4355
4356 sseu->slice_mask |= BIT(s);
4357
4358 if (IS_GEN9_BC(dev_priv))
4359 sseu->subslice_mask[s] =
4360 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4361
4362 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4363 unsigned int eu_cnt;
4364
4365 if (IS_GEN9_LP(dev_priv)) {
4366 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4367 /* skip disabled subslice */
4368 continue;
4369
4370 sseu->subslice_mask[s] |= BIT(ss);
4371 }
4372
4373 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4374 eu_mask[ss%2]);
4375 sseu->eu_total += eu_cnt;
4376 sseu->eu_per_subslice = max_t(unsigned int,
4377 sseu->eu_per_subslice,
4378 eu_cnt);
4379 }
4380 }
4381 #undef SS_MAX
4382 }
4383
4384 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4385 struct sseu_dev_info *sseu)
4386 {
4387 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4388 int s;
4389
4390 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4391
4392 if (sseu->slice_mask) {
4393 sseu->eu_per_subslice =
4394 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4395 for (s = 0; s < fls(sseu->slice_mask); s++) {
4396 sseu->subslice_mask[s] =
4397 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4398 }
4399 sseu->eu_total = sseu->eu_per_subslice *
4400 sseu_subslice_total(sseu);
4401
4402 /* subtract fused off EU(s) from enabled slice(s) */
4403 for (s = 0; s < fls(sseu->slice_mask); s++) {
4404 u8 subslice_7eu =
4405 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4406
4407 sseu->eu_total -= hweight8(subslice_7eu);
4408 }
4409 }
4410 }
4411
4412 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4413 const struct sseu_dev_info *sseu)
4414 {
4415 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4416 const char *type = is_available_info ? "Available" : "Enabled";
4417 int s;
4418
4419 seq_printf(m, " %s Slice Mask: %04x\n", type,
4420 sseu->slice_mask);
4421 seq_printf(m, " %s Slice Total: %u\n", type,
4422 hweight8(sseu->slice_mask));
4423 seq_printf(m, " %s Subslice Total: %u\n", type,
4424 sseu_subslice_total(sseu));
4425 for (s = 0; s < fls(sseu->slice_mask); s++) {
4426 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4427 s, hweight8(sseu->subslice_mask[s]));
4428 }
4429 seq_printf(m, " %s EU Total: %u\n", type,
4430 sseu->eu_total);
4431 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4432 sseu->eu_per_subslice);
4433
4434 if (!is_available_info)
4435 return;
4436
4437 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4438 if (HAS_POOLED_EU(dev_priv))
4439 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4440
4441 seq_printf(m, " Has Slice Power Gating: %s\n",
4442 yesno(sseu->has_slice_pg));
4443 seq_printf(m, " Has Subslice Power Gating: %s\n",
4444 yesno(sseu->has_subslice_pg));
4445 seq_printf(m, " Has EU Power Gating: %s\n",
4446 yesno(sseu->has_eu_pg));
4447 }
4448
4449 static int i915_sseu_status(struct seq_file *m, void *unused)
4450 {
4451 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4452 struct sseu_dev_info sseu;
4453
4454 if (INTEL_GEN(dev_priv) < 8)
4455 return -ENODEV;
4456
4457 seq_puts(m, "SSEU Device Info\n");
4458 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4459
4460 seq_puts(m, "SSEU Device Status\n");
4461 memset(&sseu, 0, sizeof(sseu));
4462 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4463 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4464 sseu.max_eus_per_subslice =
4465 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4466
4467 intel_runtime_pm_get(dev_priv);
4468
4469 if (IS_CHERRYVIEW(dev_priv)) {
4470 cherryview_sseu_device_status(dev_priv, &sseu);
4471 } else if (IS_BROADWELL(dev_priv)) {
4472 broadwell_sseu_device_status(dev_priv, &sseu);
4473 } else if (IS_GEN9(dev_priv)) {
4474 gen9_sseu_device_status(dev_priv, &sseu);
4475 } else if (INTEL_GEN(dev_priv) >= 10) {
4476 gen10_sseu_device_status(dev_priv, &sseu);
4477 }
4478
4479 intel_runtime_pm_put(dev_priv);
4480
4481 i915_print_sseu_info(m, false, &sseu);
4482
4483 return 0;
4484 }
4485
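/*
 * i915_forcewake_user: holding this file open grabs a runtime-PM and a
 * user forcewake reference, keeping the GT awake so registers remain
 * readable; both are dropped on release. On gen < 6 the hooks are
 * no-ops. A shell can pin it open, e.g. (path assumes the usual
 * debugfs mount point):
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 */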
4486 static int i915_forcewake_open(struct inode *inode, struct file *file)
4487 {
4488 struct drm_i915_private *i915 = inode->i_private;
4489
4490 if (INTEL_GEN(i915) < 6)
4491 return 0;
4492
4493 intel_runtime_pm_get(i915);
4494 intel_uncore_forcewake_user_get(i915);
4495
4496 return 0;
4497 }
4498
4499 static int i915_forcewake_release(struct inode *inode, struct file *file)
4500 {
4501 struct drm_i915_private *i915 = inode->i_private;
4502
4503 if (INTEL_GEN(i915) < 6)
4504 return 0;
4505
4506 intel_uncore_forcewake_user_put(i915);
4507 intel_runtime_pm_put(i915);
4508
4509 return 0;
4510 }
4511
4512 static const struct file_operations i915_forcewake_fops = {
4513 .owner = THIS_MODULE,
4514 .open = i915_forcewake_open,
4515 .release = i915_forcewake_release,
4516 };
4517
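/*
 * i915_hpd_storm_ctl: reading shows the HPD storm threshold and whether
 * a storm was detected (re-enable work still pending). Writing a
 * decimal count sets a new threshold (0 disables detection), while
 * "reset" restores the default, e.g.:
 *
 *   echo 5 > i915_hpd_storm_ctl
 *   echo reset > i915_hpd_storm_ctl
 */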
4518 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4519 {
4520 struct drm_i915_private *dev_priv = m->private;
4521 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4522
4523 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4524 seq_printf(m, "Detected: %s\n",
4525 yesno(delayed_work_pending(&hotplug->reenable_work)));
4526
4527 return 0;
4528 }
4529
4530 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4531 const char __user *ubuf, size_t len,
4532 loff_t *offp)
4533 {
4534 struct seq_file *m = file->private_data;
4535 struct drm_i915_private *dev_priv = m->private;
4536 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4537 unsigned int new_threshold;
4538 int i;
4539 char *newline;
4540 char tmp[16];
4541
4542 if (len >= sizeof(tmp))
4543 return -EINVAL;
4544
4545 if (copy_from_user(tmp, ubuf, len))
4546 return -EFAULT;
4547
4548 tmp[len] = '\0';
4549
4550 /* Strip newline, if any */
4551 newline = strchr(tmp, '\n');
4552 if (newline)
4553 *newline = '\0';
4554
4555 if (strcmp(tmp, "reset") == 0)
4556 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4557 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4558 return -EINVAL;
4559
4560 if (new_threshold > 0)
4561 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4562 new_threshold);
4563 else
4564 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4565
4566 spin_lock_irq(&dev_priv->irq_lock);
4567 hotplug->hpd_storm_threshold = new_threshold;
4568 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4569 for_each_hpd_pin(i)
4570 hotplug->stats[i].count = 0;
4571 spin_unlock_irq(&dev_priv->irq_lock);
4572
4573 /* Re-enable hpd immediately if we were in an irq storm */
4574 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4575
4576 return len;
4577 }
4578
4579 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4580 {
4581 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4582 }
4583
4584 static const struct file_operations i915_hpd_storm_ctl_fops = {
4585 .owner = THIS_MODULE,
4586 .open = i915_hpd_storm_ctl_open,
4587 .read = seq_read,
4588 .llseek = seq_lseek,
4589 .release = single_release,
4590 .write = i915_hpd_storm_ctl_write
4591 };
4592
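/*
 * i915_drrs_ctl: writing a non-zero value manually enables DRRS (zero
 * disables it) on every active eDP CRTC whose current state supports
 * DRRS; gen < 7 returns -ENODEV. For example:
 *
 *   echo 0 > i915_drrs_ctl
 */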
4593 static int i915_drrs_ctl_set(void *data, u64 val)
4594 {
4595 struct drm_i915_private *dev_priv = data;
4596 struct drm_device *dev = &dev_priv->drm;
4597 struct intel_crtc *intel_crtc;
4598 struct intel_encoder *encoder;
4599 struct intel_dp *intel_dp;
4600
4601 if (INTEL_GEN(dev_priv) < 7)
4602 return -ENODEV;
4603
4604 drm_modeset_lock_all(dev);
4605 for_each_intel_crtc(dev, intel_crtc) {
4606 if (!intel_crtc->base.state->active ||
4607 !intel_crtc->config->has_drrs)
4608 continue;
4609
4610 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4611 if (encoder->type != INTEL_OUTPUT_EDP)
4612 continue;
4613
4614 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4615 val ? "en" : "dis", val);
4616
4617 intel_dp = enc_to_intel_dp(&encoder->base);
4618 if (val)
4619 intel_edp_drrs_enable(intel_dp,
4620 intel_crtc->config);
4621 else
4622 intel_edp_drrs_disable(intel_dp,
4623 intel_crtc->config);
4624 }
4625 }
4626 drm_modeset_unlock_all(dev);
4627
4628 return 0;
4629 }
4630
4631 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4632
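/*
 * i915_fifo_underrun_reset: writing a truthy string ("1", "y", ...)
 * waits for any pending commit on each CRTC, re-arms FIFO underrun
 * reporting on every active pipe and resets the FBC underrun tracking,
 * so the one-shot underrun interrupts can fire again:
 *
 *   echo 1 > i915_fifo_underrun_reset
 */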
4633 static ssize_t
4634 i915_fifo_underrun_reset_write(struct file *filp,
4635 const char __user *ubuf,
4636 size_t cnt, loff_t *ppos)
4637 {
4638 struct drm_i915_private *dev_priv = filp->private_data;
4639 struct intel_crtc *intel_crtc;
4640 struct drm_device *dev = &dev_priv->drm;
4641 int ret;
4642 bool reset;
4643
4644 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4645 if (ret)
4646 return ret;
4647
4648 if (!reset)
4649 return cnt;
4650
4651 for_each_intel_crtc(dev, intel_crtc) {
4652 struct drm_crtc_commit *commit;
4653 struct intel_crtc_state *crtc_state;
4654
4655 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4656 if (ret)
4657 return ret;
4658
4659 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4660 commit = crtc_state->base.commit;
4661 if (commit) {
4662 ret = wait_for_completion_interruptible(&commit->hw_done);
4663 if (!ret)
4664 ret = wait_for_completion_interruptible(&commit->flip_done);
4665 }
4666
4667 if (!ret && crtc_state->base.active) {
4668 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4669 pipe_name(intel_crtc->pipe));
4670
4671 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4672 }
4673
4674 drm_modeset_unlock(&intel_crtc->base.mutex);
4675
4676 if (ret)
4677 return ret;
4678 }
4679
4680 ret = intel_fbc_reset_underrun(dev_priv);
4681 if (ret)
4682 return ret;
4683
4684 return cnt;
4685 }
4686
4687 static const struct file_operations i915_fifo_underrun_reset_ops = {
4688 .owner = THIS_MODULE,
4689 .open = simple_open,
4690 .write = i915_fifo_underrun_reset_write,
4691 .llseek = default_llseek,
4692 };
4693
4694 static const struct drm_info_list i915_debugfs_list[] = {
4695 {"i915_capabilities", i915_capabilities, 0},
4696 {"i915_gem_objects", i915_gem_object_info, 0},
4697 {"i915_gem_gtt", i915_gem_gtt_info, 0},
4698 {"i915_gem_stolen", i915_gem_stolen_list_info },
4699 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4700 {"i915_gem_interrupt", i915_interrupt_info, 0},
4701 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4702 {"i915_guc_info", i915_guc_info, 0},
4703 {"i915_guc_load_status", i915_guc_load_status_info, 0},
4704 {"i915_guc_log_dump", i915_guc_log_dump, 0},
4705 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4706 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4707 {"i915_huc_load_status", i915_huc_load_status_info, 0},
4708 {"i915_frequency_info", i915_frequency_info, 0},
4709 {"i915_hangcheck_info", i915_hangcheck_info, 0},
4710 {"i915_reset_info", i915_reset_info, 0},
4711 {"i915_drpc_info", i915_drpc_info, 0},
4712 {"i915_emon_status", i915_emon_status, 0},
4713 {"i915_ring_freq_table", i915_ring_freq_table, 0},
4714 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4715 {"i915_fbc_status", i915_fbc_status, 0},
4716 {"i915_ips_status", i915_ips_status, 0},
4717 {"i915_sr_status", i915_sr_status, 0},
4718 {"i915_opregion", i915_opregion, 0},
4719 {"i915_vbt", i915_vbt, 0},
4720 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4721 {"i915_context_status", i915_context_status, 0},
4722 {"i915_forcewake_domains", i915_forcewake_domains, 0},
4723 {"i915_swizzle_info", i915_swizzle_info, 0},
4724 {"i915_ppgtt_info", i915_ppgtt_info, 0},
4725 {"i915_llc", i915_llc, 0},
4726 {"i915_edp_psr_status", i915_edp_psr_status, 0},
4727 {"i915_energy_uJ", i915_energy_uJ, 0},
4728 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4729 {"i915_power_domain_info", i915_power_domain_info, 0},
4730 {"i915_dmc_info", i915_dmc_info, 0},
4731 {"i915_display_info", i915_display_info, 0},
4732 {"i915_engine_info", i915_engine_info, 0},
4733 {"i915_rcs_topology", i915_rcs_topology, 0},
4734 {"i915_shrinker_info", i915_shrinker_info, 0},
4735 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4736 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4737 {"i915_wa_registers", i915_wa_registers, 0},
4738 {"i915_ddb_info", i915_ddb_info, 0},
4739 {"i915_sseu_status", i915_sseu_status, 0},
4740 {"i915_drrs_status", i915_drrs_status, 0},
4741 {"i915_rps_boost_info", i915_rps_boost_info, 0},
4742 };
4743 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4744
4745 static const struct i915_debugfs_files {
4746 const char *name;
4747 const struct file_operations *fops;
4748 } i915_debugfs_files[] = {
4749 {"i915_wedged", &i915_wedged_fops},
4750 {"i915_cache_sharing", &i915_cache_sharing_fops},
4751 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4752 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
4753 {"i915_gem_drop_caches", &i915_drop_caches_fops},
4754 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4755 {"i915_error_state", &i915_error_state_fops},
4756 {"i915_gpu_info", &i915_gpu_info_fops},
4757 #endif
4758 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4759 {"i915_next_seqno", &i915_next_seqno_fops},
4760 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4761 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4762 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4763 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
4764 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4765 {"i915_dp_test_type", &i915_displayport_test_type_fops},
4766 {"i915_dp_test_active", &i915_displayport_test_active_fops},
4767 {"i915_guc_log_level", &i915_guc_log_level_fops},
4768 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
4769 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4770 {"i915_ipc_status", &i915_ipc_status_fops},
4771 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4772 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4773 };
4774
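/*
 * i915_debugfs_register: called once per device. Creates the
 * i915_forcewake_user file, every read/write entry in
 * i915_debugfs_files[] and finally the read-only i915_debugfs_list[]
 * entries under the primary minor's debugfs root.
 */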
4775 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4776 {
4777 struct drm_minor *minor = dev_priv->drm.primary;
4778 struct dentry *ent;
4779 int i;
4780
4781 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4782 minor->debugfs_root, to_i915(minor->dev),
4783 &i915_forcewake_fops);
4784 if (!ent)
4785 return -ENOMEM;
4786
4787 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4788 ent = debugfs_create_file(i915_debugfs_files[i].name,
4789 S_IRUGO | S_IWUSR,
4790 minor->debugfs_root,
4791 to_i915(minor->dev),
4792 i915_debugfs_files[i].fops);
4793 if (!ent)
4794 return -ENOMEM;
4795 }
4796
4797 return drm_debugfs_create_files(i915_debugfs_list,
4798 I915_DEBUGFS_ENTRIES,
4799 minor->debugfs_root, minor);
4800 }
4801
4802 struct dpcd_block {
4803 /* DPCD dump start address. */
4804 unsigned int offset;
4805 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4806 unsigned int end;
4807 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4808 size_t size;
4809 /* Only valid for eDP. */
4810 bool edp;
4811 };
4812
4813 static const struct dpcd_block i915_dpcd_debug[] = {
4814 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4815 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4816 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4817 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4818 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4819 { .offset = DP_SET_POWER },
4820 { .offset = DP_EDP_DPCD_REV },
4821 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4822 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4823 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4824 };
4825
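/*
 * i915_dpcd (per-connector file): reads each block listed in
 * i915_dpcd_debug[] over the AUX channel and hex-dumps it as one
 * "offset: bytes" line per block. Returns -ENODEV unless the connector
 * is currently connected.
 */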
4826 static int i915_dpcd_show(struct seq_file *m, void *data)
4827 {
4828 struct drm_connector *connector = m->private;
4829 struct intel_dp *intel_dp =
4830 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4831 uint8_t buf[16];
4832 ssize_t err;
4833 int i;
4834
4835 if (connector->status != connector_status_connected)
4836 return -ENODEV;
4837
4838 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4839 const struct dpcd_block *b = &i915_dpcd_debug[i];
4840 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4841
4842 if (b->edp &&
4843 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4844 continue;
4845
4846 /* low tech for now */
4847 if (WARN_ON(size > sizeof(buf)))
4848 continue;
4849
4850 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4851 if (err <= 0) {
4852 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4853 size, b->offset, err);
4854 continue;
4855 }
4856
4857 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4858 }
4859
4860 return 0;
4861 }
4862 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4863
4864 static int i915_panel_show(struct seq_file *m, void *data)
4865 {
4866 struct drm_connector *connector = m->private;
4867 struct intel_dp *intel_dp =
4868 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4869
4870 if (connector->status != connector_status_connected)
4871 return -ENODEV;
4872
4873 seq_printf(m, "Panel power up delay: %d\n",
4874 intel_dp->panel_power_up_delay);
4875 seq_printf(m, "Panel power down delay: %d\n",
4876 intel_dp->panel_power_down_delay);
4877 seq_printf(m, "Backlight on delay: %d\n",
4878 intel_dp->backlight_on_delay);
4879 seq_printf(m, "Backlight off delay: %d\n",
4880 intel_dp->backlight_off_delay);
4881
4882 return 0;
4883 }
4884 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4885
4886 /**
4887 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4888 * @connector: pointer to a registered drm_connector
4889 *
4890 * Cleanup will be done by drm_connector_unregister() through a call to
4891 * drm_debugfs_connector_remove().
4892 *
4893 * Returns 0 on success, negative error codes on error.
4894 */
4895 int i915_debugfs_connector_add(struct drm_connector *connector)
4896 {
4897 struct dentry *root = connector->debugfs_entry;
4898
4899 /* The connector must have been registered beforehand. */
4900 if (!root)
4901 return -ENODEV;
4902
4903 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4904 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4905 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4906 connector, &i915_dpcd_fops);
4907
4908 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4909 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4910 connector, &i915_panel_fops);
4911 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4912 connector, &i915_psr_sink_status_fops);
4913 }
4914
4915 return 0;
4916 }
4917