drivers/gpu/drm/i915/i915_debugfs.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

#include "i915_reset.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}

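/*
 * Dump the static device info, runtime capabilities, driver caps and the
 * current module parameters for this device.
 */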
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_device_info *info = INTEL_INFO(dev_priv);
        struct drm_printer p = drm_seq_file_printer(m);

        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

        intel_device_info_dump_flags(info, &p);
        intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        intel_driver_caps_print(&dev_priv->caps, &p);

        kernel_param_lock(THIS_MODULE);
        i915_params_dump(&i915_modparams, &p);
        kernel_param_unlock(THIS_MODULE);

        return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
        return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
        case I915_TILING_Y: return 'Y';
        }
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
        return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
        return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
        u64 size = 0;
        struct i915_vma *vma;

        for_each_ggtt_vma(vma, obj) {
                if (drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }

        return size;
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
        size_t x = 0;

        switch (page_sizes) {
        case 0:
                return "";
        case I915_GTT_PAGE_SIZE_4K:
                return "4K";
        case I915_GTT_PAGE_SIZE_64K:
                return "64K";
        case I915_GTT_PAGE_SIZE_2M:
                return "2M";
        default:
                if (!buf)
                        return "M";

                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
                        x += snprintf(buf + x, len - x, "2M, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
                        x += snprintf(buf + x, len - x, "64K, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
                        x += snprintf(buf + x, len - x, "4K, ");
                buf[x-2] = '\0';

                return buf;
        }
}

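/*
 * Print a one-line summary of a GEM object: status flags, size, domains,
 * cache level, each VMA binding (with GGTT view details), fence and
 * frontbuffer state.  Caller must hold struct_mutex.
 */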
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        unsigned int frontbuffer_bits;
        int pin_count = 0;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->read_domains,
                   obj->write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_global)
                seq_printf(m, " (global)");
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size,
                           stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
                                seq_puts(m, ", normal");
                                break;

                        case I915_GGTT_VIEW_PARTIAL:
                                seq_printf(m, ", partial [%08llx+%x]",
                                           vma->ggtt_view.partial.offset << PAGE_SHIFT,
                                           vma->ggtt_view.partial.size << PAGE_SHIFT);
                                break;

                        case I915_GGTT_VIEW_ROTATED:
                                seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.rotated.plane[0].width,
                                           vma->ggtt_view.rotated.plane[0].height,
                                           vma->ggtt_view.rotated.plane[0].stride,
                                           vma->ggtt_view.rotated.plane[0].offset,
                                           vma->ggtt_view.rotated.plane[1].width,
                                           vma->ggtt_view.rotated.plane[1].height,
                                           vma->ggtt_view.rotated.plane[1].stride,
                                           vma->ggtt_view.rotated.plane[1].offset);
                                break;

                        default:
                                MISSING_CASE(vma->ggtt_view.type);
                                break;
                        }
                }
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
                                   i915_active_request_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);

        frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
        if (frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(const void *A, const void *B)
{
        const struct drm_i915_gem_object *a =
                *(const struct drm_i915_gem_object **)A;
        const struct drm_i915_gem_object *b =
                *(const struct drm_i915_gem_object **)B;

        if (a->stolen->start < b->stolen->start)
                return -1;
        if (a->stolen->start > b->stolen->start)
                return 1;
        return 0;
}

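/*
 * List all objects backed by stolen memory, sorted by their offset within
 * the stolen region, along with the total object and GTT sizes.
 */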
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long total, count, n;
        int ret;

        total = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        total_obj_size = total_gtt_size = count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

        seq_puts(m, "Stolen:\n");
        for (n = 0; n < count; n++) {
                seq_puts(m, "   ");
                describe_obj(m, objects[n]);
                seq_putc(m, '\n');
        }
        seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        mutex_unlock(&dev->struct_mutex);
out:
        kvfree(objects);
        return ret;
}

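/*
 * Per-client accounting: per_file_stats() is walked over an object idr or
 * list and accumulates object counts and bound/unbound/active/inactive
 * sizes into a struct file_stats.
 */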
struct file_stats {
        struct i915_address_space *vm;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
        u64 closed;
};

static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
                stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;

        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
                        if (vma->vm != stats->vm)
                                continue;
                }

                if (i915_vma_is_active(vma))
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;

                if (i915_vma_is_closed(vma))
                        stats->closed += vma->node.size;
        }

        return 0;
}

#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
                           stats.unbound, \
                           stats.closed); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        struct file_stats stats = {};
        enum intel_engine_id id;
        int j;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                per_file_stats(0, obj, &stats);
                }
        }

        print_file_stats(m, "[k]batch pool", stats);
}

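/*
 * Summarise memory usage per GEM context: kernel-owned context state and
 * ring objects, plus the objects of the owning client looked up through
 * its drm_file.
 */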
static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *i915)
{
        struct file_stats kstats = {};
        struct i915_gem_context *ctx;

        list_for_each_entry(ctx, &i915->contexts.list, link) {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;

                for_each_engine(engine, i915, id) {
                        struct intel_context *ce = to_intel_context(ctx, engine);

                        if (ce->state)
                                per_file_stats(0, ce->state->obj, &kstats);
                        if (ce->ring)
                                per_file_stats(0, ce->ring->vma->obj, &kstats);
                }

                if (!IS_ERR_OR_NULL(ctx->file_priv)) {
                        struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
                        struct drm_file *file = ctx->file_priv->file;
                        struct task_struct *task;
                        char name[80];

                        spin_lock(&file->table_lock);
                        idr_for_each(&file->object_idr, per_file_stats, &stats);
                        spin_unlock(&file->table_lock);

                        rcu_read_lock();
                        task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
                        snprintf(name, sizeof(name), "%s/%d",
                                 task ? task->comm : "<unknown>",
                                 ctx->user_handle);
                        rcu_read_unlock();

                        print_file_stats(m, name, stats);
                }
        }

        print_file_stats(m, "[k]contexts", kstats);
}

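/*
 * Global object statistics: totals for bound/unbound, purgeable, mapped,
 * huge-page and display-pinned objects, followed by the per-client
 * breakdown produced by the helpers above.
 */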
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
        char buf[80];
        int ret;

        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
        huge_size = huge_count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

        size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->pin_global) {
                        dpy_size += obj->base.size;
                        ++dpy_count;
                }

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
        seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
                   huge_count,
                   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
                   huge_size);
        seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
                   dpy_count, dpy_size);

        seq_printf(m, "%llu [%pa] gtt total\n",
                   ggtt->vm.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));

        seq_putc(m, '\n');

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        print_batch_pool_stats(m, dev_priv);
        print_context_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

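/*
 * Describe every object currently on the bound list, up to the snapshot
 * of the object count taken at entry.
 */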
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_i915_private *dev_priv = node_to_i915(node);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long nobject, n;
        int count, ret;

        nobject = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                objects[count++] = obj;
                if (count == nobject)
                        break;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        total_obj_size = total_gtt_size = 0;
        for (n = 0;  n < count; n++) {
                obj = objects[n];

                seq_puts(m, "   ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        kvfree(objects);

        return 0;
}

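/*
 * Show the per-engine batch-buffer pools: the number of cached objects in
 * each size bucket and a description of every object.
 */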
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int total = 0;
        int ret, j;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        int count;

                        count = 0;
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                count++;
                        seq_printf(m, "%s cache[%d]: %d objects\n",
                                   engine->name, j, count);

                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link) {
                                seq_puts(m, "   ");
                                describe_obj(m, obj);
                                seq_putc(m, '\n');
                        }

                        total += count;
                }
        }

        seq_printf(m, "total: %d\n", total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

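/*
 * Interrupt state dumps.  gen8_display_interrupt_info() prints the gen8+
 * display engine IMR/IIR/IER registers, taking the per-pipe power domain
 * references needed to read them safely.
 */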
static void gen8_display_interrupt_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int pipe;

        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t wakeref;

                power_domain = POWER_DOMAIN_PIPE(pipe);
                wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                             power_domain);
                if (!wakeref) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
                }
                seq_printf(m, "Pipe %c IMR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                seq_printf(m, "Pipe %c IIR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                seq_printf(m, "Pipe %c IER:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));

                intel_display_power_put(dev_priv, power_domain, wakeref);
        }

        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IMR));
        seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IIR));
        seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IER));

        seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IMR));
        seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IIR));
        seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IER));

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
}

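/*
 * Top-level interrupt register dump, with per-platform layouts for
 * Cherryview, gen11+, gen8+, Valleyview and the older PCH/non-PCH
 * platforms.
 */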
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int i, pipe;

        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_CHERRYVIEW(dev_priv)) {
                intel_wakeref_t pref;

                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "Master Interrupt Control:  %08x\n",
                           I915_READ(GEN11_GFX_MSTR_IRQ));

                seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
                           I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
                seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
                           I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
                seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_ENABLE));
                seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
                seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
                seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

                seq_printf(m, "Display Interrupt Control:\t%08x\n",
                           I915_READ(GEN11_DISPLAY_INT_CTL));

                gen8_display_interrupt_info(m);
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                gen8_display_interrupt_info(m);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;
                        intel_wakeref_t pref;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));

        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat:         %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }

        if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "RCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
                seq_printf(m, "BCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_BCS_RSVD_INTR_MASK));
                seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
                seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
                seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
                seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_MASK));
                seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
                seq_printf(m, "Crypto Intr Mask:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
                seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

        } else if (INTEL_GEN(dev_priv) >= 6) {
                for_each_engine(engine, dev_priv, id) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s):       %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
        }

        intel_runtime_pm_put(dev_priv, wakeref);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct i915_vma *vma = dev_priv->fence_regs[i].vma;

                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (!vma)
                        seq_puts(m, "unused");
                else
                        describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

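/*
 * GPU error state interfaces: i915_gpu_info captures a fresh snapshot on
 * open, while i915_error_state exposes the last recorded hang and can be
 * written to clear it.
 */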
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
                              size_t count, loff_t *pos)
{
        struct i915_gpu_state *error;
        ssize_t ret;
        void *buf;

        error = file->private_data;
        if (!error)
                return 0;

        /* Bounce buffer required because of kernfs __user API convenience. */
        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
        if (ret <= 0)
                goto out;

        if (!copy_to_user(ubuf, buf, ret))
                *pos += ret;
        else
                ret = -EFAULT;

out:
        kfree(buf);
        return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_state_put(file->private_data);
        return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_state *gpu;
        intel_wakeref_t wakeref;

        gpu = NULL;
        with_intel_runtime_pm(i915, wakeref)
                gpu = i915_capture_gpu_state(i915);
        if (IS_ERR(gpu))
                return PTR_ERR(gpu);

        file->private_data = gpu;
        return 0;
}

static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
        struct i915_gpu_state *error;

        error = i915_first_error_state(inode->i_private);
        if (IS_ERR(error))
                return PTR_ERR(error);

        file->private_data = error;
        return 0;
}

static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
#endif

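/*
 * RPS/frequency reporting: P-state and GPU frequency information with
 * per-platform paths for Ironlake, Valleyview/Cherryview and gen6+ RPS,
 * plus the current CD clock limits.
 */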
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        intel_wakeref_t wakeref;
        int ret = 0;

        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_GEN(dev_priv, 5)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv, wakeref);
        return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
                               struct seq_file *m,
                               struct intel_instdone *instdone)
{
        int slice;
        int subslice;

        seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
                   instdone->instdone);

        if (INTEL_GEN(dev_priv) <= 3)
                return;

        seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
                   instdone->slice_common);

        if (INTEL_GEN(dev_priv) <= 6)
                return;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->sampler[slice][subslice]);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->row[slice][subslice]);
}

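/*
 * Hangcheck status: per-engine seqno/ACTHD snapshots versus the values
 * recorded by the hangcheck worker, plus the accumulated INSTDONE state
 * for the render engine.
 */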
1274 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1275 {
1276         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1277         struct intel_engine_cs *engine;
1278         u64 acthd[I915_NUM_ENGINES];
1279         u32 seqno[I915_NUM_ENGINES];
1280         struct intel_instdone instdone;
1281         intel_wakeref_t wakeref;
1282         enum intel_engine_id id;
1283
1284         if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1285                 seq_puts(m, "Wedged\n");
1286         if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1287                 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1288         if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1289                 seq_puts(m, "Waiter holding struct mutex\n");
1290         if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1291                 seq_puts(m, "struct_mutex blocked for reset\n");
1292
1293         if (!i915_modparams.enable_hangcheck) {
1294                 seq_puts(m, "Hangcheck disabled\n");
1295                 return 0;
1296         }
1297
1298         with_intel_runtime_pm(dev_priv, wakeref) {
1299                 for_each_engine(engine, dev_priv, id) {
1300                         acthd[id] = intel_engine_get_active_head(engine);
1301                         seqno[id] = intel_engine_get_seqno(engine);
1302                 }
1303
1304                 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1305         }
1306
1307         if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1308                 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1309                            jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1310                                             jiffies));
1311         else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1312                 seq_puts(m, "Hangcheck active, work pending\n");
1313         else
1314                 seq_puts(m, "Hangcheck inactive\n");
1315
1316         seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1317
1318         for_each_engine(engine, dev_priv, id) {
1319                 seq_printf(m, "%s:\n", engine->name);
1320                 seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
1321                            engine->hangcheck.seqno, seqno[id],
1322                            intel_engine_last_submit(engine),
1323                            jiffies_to_msecs(jiffies -
1324                                             engine->hangcheck.action_timestamp));
1325
1326                 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1327                            (long long)engine->hangcheck.acthd,
1328                            (long long)acthd[id]);
1329
1330                 if (engine->id == RCS) {
1331                         seq_puts(m, "\tinstdone read =\n");
1332
1333                         i915_instdone_info(dev_priv, m, &instdone);
1334
1335                         seq_puts(m, "\tinstdone accu =\n");
1336
1337                         i915_instdone_info(dev_priv, m,
1338                                            &engine->hangcheck.instdone);
1339                 }
1340         }
1341
1342         return 0;
1343 }
1344
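/* Reset statistics: the full-GPU reset count plus per-engine reset counts. */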
1345 static int i915_reset_info(struct seq_file *m, void *unused)
1346 {
1347         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1348         struct i915_gpu_error *error = &dev_priv->gpu_error;
1349         struct intel_engine_cs *engine;
1350         enum intel_engine_id id;
1351
1352         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1353
1354         for_each_engine(engine, dev_priv, id) {
1355                 seq_printf(m, "%s = %u\n", engine->name,
1356                            i915_reset_engine_count(error, engine));
1357         }
1358
1359         return 0;
1360 }
1361
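/*
 * DRPC (render power state) report for Ironlake: decodes MEMMODECTL,
 * RSTDBYCTL and CRSTANDVID into boost/P-state limits, VID values and the
 * current render standby (RSx) state.
 */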
1362 static int ironlake_drpc_info(struct seq_file *m)
1363 {
1364         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1365         u32 rgvmodectl, rstdbyctl;
1366         u16 crstandvid;
1367
1368         rgvmodectl = I915_READ(MEMMODECTL);
1369         rstdbyctl = I915_READ(RSTDBYCTL);
1370         crstandvid = I915_READ16(CRSTANDVID);
1371
1372         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1373         seq_printf(m, "Boost freq: %d\n",
1374                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1375                    MEMMODE_BOOST_FREQ_SHIFT);
1376         seq_printf(m, "HW control enabled: %s\n",
1377                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1378         seq_printf(m, "SW control enabled: %s\n",
1379                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1380         seq_printf(m, "Gated voltage change: %s\n",
1381                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1382         seq_printf(m, "Starting frequency: P%d\n",
1383                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1384         seq_printf(m, "Max P-state: P%d\n",
1385                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1386         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1387         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1388         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1389         seq_printf(m, "Render standby enabled: %s\n",
1390                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1391         seq_puts(m, "Current RS state: ");
1392         switch (rstdbyctl & RSX_STATUS_MASK) {
1393         case RSX_STATUS_ON:
1394                 seq_puts(m, "on\n");
1395                 break;
1396         case RSX_STATUS_RC1:
1397                 seq_puts(m, "RC1\n");
1398                 break;
1399         case RSX_STATUS_RC1E:
1400                 seq_puts(m, "RC1E\n");
1401                 break;
1402         case RSX_STATUS_RS1:
1403                 seq_puts(m, "RS1\n");
1404                 break;
1405         case RSX_STATUS_RS2:
1406                 seq_puts(m, "RS2 (RC6)\n");
1407                 break;
1408         case RSX_STATUS_RS3:
1409                 seq_puts(m, "RS3 (RC6+)\n");
1410                 break;
1411         default:
1412                 seq_puts(m, "unknown\n");
1413                 break;
1414         }
1415
1416         return 0;
1417 }
1418
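/*
 * Forcewake bookkeeping: the user forcewake (bypass) count plus the
 * wake_count of every forcewake domain known to the uncore.
 */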
1419 static int i915_forcewake_domains(struct seq_file *m, void *data)
1420 {
1421         struct drm_i915_private *i915 = node_to_i915(m->private);
1422         struct intel_uncore_forcewake_domain *fw_domain;
1423         unsigned int tmp;
1424
1425         seq_printf(m, "user.bypass_count = %u\n",
1426                    i915->uncore.user_forcewake.count);
1427
1428         for_each_fw_domain(fw_domain, i915, tmp)
1429                 seq_printf(m, "%s.wake_count = %u\n",
1430                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1431                            READ_ONCE(fw_domain->wake_count));
1432
1433         return 0;
1434 }
1435
1436 static void print_rc6_res(struct seq_file *m,
1437                           const char *title,
1438                           const i915_reg_t reg)
1439 {
1440         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1441
1442         seq_printf(m, "%s %u (%llu us)\n",
1443                    title, I915_READ(reg),
1444                    intel_rc6_residency_us(dev_priv, reg));
1445 }
1446
1447 static int vlv_drpc_info(struct seq_file *m)
1448 {
1449         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1450         u32 rcctl1, pw_status;
1451
1452         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1453         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1454
1455         seq_printf(m, "RC6 Enabled: %s\n",
1456                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1457                                         GEN6_RC_CTL_EI_MODE(1))));
1458         seq_printf(m, "Render Power Well: %s\n",
1459                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1460         seq_printf(m, "Media Power Well: %s\n",
1461                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1462
1463         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1464         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1465
1466         return i915_forcewake_domains(m, NULL);
1467 }
1468
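/*
 * DRPC report for gen6+: RC1e/RC6/RC6p/RC6pp enablement, the current RC
 * state from GEN6_GT_CORE_STATUS, gen9 power-well gating, the RC6 residency
 * counters and, on gen6/7, the RC6 VID voltages read via the PCU.
 */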
1469 static int gen6_drpc_info(struct seq_file *m)
1470 {
1471         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1472         u32 gt_core_status, rcctl1, rc6vids = 0;
1473         u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1474
1475         gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1476         trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1477
1478         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1479         if (INTEL_GEN(dev_priv) >= 9) {
1480                 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1481                 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1482         }
1483
1484         if (INTEL_GEN(dev_priv) <= 7) {
1485                 mutex_lock(&dev_priv->pcu_lock);
1486                 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1487                                        &rc6vids);
1488                 mutex_unlock(&dev_priv->pcu_lock);
1489         }
1490
1491         seq_printf(m, "RC1e Enabled: %s\n",
1492                    yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1493         seq_printf(m, "RC6 Enabled: %s\n",
1494                    yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1495         if (INTEL_GEN(dev_priv) >= 9) {
1496                 seq_printf(m, "Render Well Gating Enabled: %s\n",
1497                         yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1498                 seq_printf(m, "Media Well Gating Enabled: %s\n",
1499                         yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1500         }
1501         seq_printf(m, "Deep RC6 Enabled: %s\n",
1502                    yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1503         seq_printf(m, "Deepest RC6 Enabled: %s\n",
1504                    yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1505         seq_puts(m, "Current RC state: ");
1506         switch (gt_core_status & GEN6_RCn_MASK) {
1507         case GEN6_RC0:
1508                 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1509                         seq_puts(m, "Core Power Down\n");
1510                 else
1511                         seq_puts(m, "on\n");
1512                 break;
1513         case GEN6_RC3:
1514                 seq_puts(m, "RC3\n");
1515                 break;
1516         case GEN6_RC6:
1517                 seq_puts(m, "RC6\n");
1518                 break;
1519         case GEN6_RC7:
1520                 seq_puts(m, "RC7\n");
1521                 break;
1522         default:
1523                 seq_puts(m, "Unknown\n");
1524                 break;
1525         }
1526
1527         seq_printf(m, "Core Power Down: %s\n",
1528                    yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1529         if (INTEL_GEN(dev_priv) >= 9) {
1530                 seq_printf(m, "Render Power Well: %s\n",
1531                         (gen9_powergate_status &
1532                          GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1533                 seq_printf(m, "Media Power Well: %s\n",
1534                         (gen9_powergate_status &
1535                          GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1536         }
1537
1538         /* Not exactly sure what this is */
1539         print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1540                       GEN6_GT_GFX_RC6_LOCKED);
1541         print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1542         print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1543         print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1544
1545         if (INTEL_GEN(dev_priv) <= 7) {
1546                 seq_printf(m, "RC6   voltage: %dmV\n",
1547                            GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1548                 seq_printf(m, "RC6+  voltage: %dmV\n",
1549                            GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1550                 seq_printf(m, "RC6++ voltage: %dmV\n",
1551                            GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1552         }
1553
1554         return i915_forcewake_domains(m, NULL);
1555 }
1556
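/*
 * Top-level DRPC entry point: takes a runtime-PM wakeref and dispatches to
 * the VLV/CHV, gen6+ or Ironlake variant above. Exposed through debugfs and
 * typically read with something like
 *   cat /sys/kernel/debug/dri/0/i915_drpc_info
 * (the exact node name is registered further down in this file).
 */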
1557 static int i915_drpc_info(struct seq_file *m, void *unused)
1558 {
1559         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1560         intel_wakeref_t wakeref;
1561         int err = -ENODEV;
1562
1563         with_intel_runtime_pm(dev_priv, wakeref) {
1564                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1565                         err = vlv_drpc_info(m);
1566                 else if (INTEL_GEN(dev_priv) >= 6)
1567                         err = gen6_drpc_info(m);
1568                 else
1569                         err = ironlake_drpc_info(m);
1570         }
1571
1572         return err;
1573 }
1574
1575 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1576 {
1577         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1578
1579         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1580                    dev_priv->fb_tracking.busy_bits);
1581
1582         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1583                    dev_priv->fb_tracking.flip_bits);
1584
1585         return 0;
1586 }
1587
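/*
 * FBC status: whether framebuffer compression is currently active (or the
 * reason it is disabled), plus whether the hardware reports outstanding
 * compressed/compressing segments for the running generation.
 */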
1588 static int i915_fbc_status(struct seq_file *m, void *unused)
1589 {
1590         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1591         struct intel_fbc *fbc = &dev_priv->fbc;
1592         intel_wakeref_t wakeref;
1593
1594         if (!HAS_FBC(dev_priv))
1595                 return -ENODEV;
1596
1597         wakeref = intel_runtime_pm_get(dev_priv);
1598         mutex_lock(&fbc->lock);
1599
1600         if (intel_fbc_is_active(dev_priv))
1601                 seq_puts(m, "FBC enabled\n");
1602         else
1603                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1604
1605         if (intel_fbc_is_active(dev_priv)) {
1606                 u32 mask;
1607
1608                 if (INTEL_GEN(dev_priv) >= 8)
1609                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1610                 else if (INTEL_GEN(dev_priv) >= 7)
1611                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1612                 else if (INTEL_GEN(dev_priv) >= 5)
1613                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1614                 else if (IS_G4X(dev_priv))
1615                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1616                 else
1617                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1618                                                         FBC_STAT_COMPRESSED);
1619
1620                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1621         }
1622
1623         mutex_unlock(&fbc->lock);
1624         intel_runtime_pm_put(dev_priv, wakeref);
1625
1626         return 0;
1627 }
1628
1629 static int i915_fbc_false_color_get(void *data, u64 *val)
1630 {
1631         struct drm_i915_private *dev_priv = data;
1632
1633         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1634                 return -ENODEV;
1635
1636         *val = dev_priv->fbc.false_color;
1637
1638         return 0;
1639 }
1640
1641 static int i915_fbc_false_color_set(void *data, u64 val)
1642 {
1643         struct drm_i915_private *dev_priv = data;
1644         u32 reg;
1645
1646         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1647                 return -ENODEV;
1648
1649         mutex_lock(&dev_priv->fbc.lock);
1650
1651         reg = I915_READ(ILK_DPFC_CONTROL);
1652         dev_priv->fbc.false_color = val;
1653
1654         I915_WRITE(ILK_DPFC_CONTROL, val ?
1655                    (reg | FBC_CTL_FALSE_COLOR) :
1656                    (reg & ~FBC_CTL_FALSE_COLOR));
1657
1658         mutex_unlock(&dev_priv->fbc.lock);
1659         return 0;
1660 }
1661
1662 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1663                         i915_fbc_false_color_get, i915_fbc_false_color_set,
1664                         "%llu\n");
1665
1666 static int i915_ips_status(struct seq_file *m, void *unused)
1667 {
1668         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1669         intel_wakeref_t wakeref;
1670
1671         if (!HAS_IPS(dev_priv))
1672                 return -ENODEV;
1673
1674         wakeref = intel_runtime_pm_get(dev_priv);
1675
1676         seq_printf(m, "Enabled by kernel parameter: %s\n",
1677                    yesno(i915_modparams.enable_ips));
1678
1679         if (INTEL_GEN(dev_priv) >= 8) {
1680                 seq_puts(m, "Currently: unknown\n");
1681         } else {
1682                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1683                         seq_puts(m, "Currently: enabled\n");
1684                 else
1685                         seq_puts(m, "Currently: disabled\n");
1686         }
1687
1688         intel_runtime_pm_put(dev_priv, wakeref);
1689
1690         return 0;
1691 }
1692
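/*
 * Self-refresh status: probes the platform-specific SR enable bit (there is
 * no global SR status on gen9+) and reports whether panel self-refresh is
 * enabled.
 */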
1693 static int i915_sr_status(struct seq_file *m, void *unused)
1694 {
1695         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1696         intel_wakeref_t wakeref;
1697         bool sr_enabled = false;
1698
1699         wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1700
1701         if (INTEL_GEN(dev_priv) >= 9)
1702                 /* no global SR status; inspect per-plane WM */;
1703         else if (HAS_PCH_SPLIT(dev_priv))
1704                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1705         else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1706                  IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1707                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1708         else if (IS_I915GM(dev_priv))
1709                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1710         else if (IS_PINEVIEW(dev_priv))
1711                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1712         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1713                 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1714
1715         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1716
1717         seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1718
1719         return 0;
1720 }
1721
1722 static int i915_emon_status(struct seq_file *m, void *unused)
1723 {
1724         struct drm_i915_private *i915 = node_to_i915(m->private);
1725         intel_wakeref_t wakeref;
1726
1727         if (!IS_GEN(i915, 5))
1728                 return -ENODEV;
1729
1730         with_intel_runtime_pm(i915, wakeref) {
1731                 unsigned long temp, chipset, gfx;
1732
1733                 temp = i915_mch_val(i915);
1734                 chipset = i915_chipset_val(i915);
1735                 gfx = i915_gfx_val(i915);
1736
1737                 seq_printf(m, "GMCH temp: %ld\n", temp);
1738                 seq_printf(m, "Chipset power: %ld\n", chipset);
1739                 seq_printf(m, "GFX power: %ld\n", gfx);
1740                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1741         }
1742
1743         return 0;
1744 }
1745
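/*
 * Ring/IA frequency table (LLC platforms only): for each GPU frequency step
 * ask the PCU for the matching effective CPU and ring frequencies and print
 * all three in MHz.
 */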
1746 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1747 {
1748         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1749         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1750         unsigned int max_gpu_freq, min_gpu_freq;
1751         intel_wakeref_t wakeref;
1752         int gpu_freq, ia_freq;
1753         int ret;
1754
1755         if (!HAS_LLC(dev_priv))
1756                 return -ENODEV;
1757
1758         wakeref = intel_runtime_pm_get(dev_priv);
1759
1760         ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1761         if (ret)
1762                 goto out;
1763
1764         min_gpu_freq = rps->min_freq;
1765         max_gpu_freq = rps->max_freq;
1766         if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1767                 /* Convert GT frequency to units of 50 MHz */
1768                 min_gpu_freq /= GEN9_FREQ_SCALER;
1769                 max_gpu_freq /= GEN9_FREQ_SCALER;
1770         }
1771
1772         seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1773
1774         for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1775                 ia_freq = gpu_freq;
1776                 sandybridge_pcode_read(dev_priv,
1777                                        GEN6_PCODE_READ_MIN_FREQ_TABLE,
1778                                        &ia_freq);
1779                 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1780                            intel_gpu_freq(dev_priv, (gpu_freq *
1781                                                      (IS_GEN9_BC(dev_priv) ||
1782                                                       INTEL_GEN(dev_priv) >= 10 ?
1783                                                       GEN9_FREQ_SCALER : 1))),
1784                            ((ia_freq >> 0) & 0xff) * 100,
1785                            ((ia_freq >> 8) & 0xff) * 100);
1786         }
1787
1788         mutex_unlock(&dev_priv->pcu_lock);
1789
1790 out:
1791         intel_runtime_pm_put(dev_priv, wakeref);
1792         return ret;
1793 }
1794
1795 static int i915_opregion(struct seq_file *m, void *unused)
1796 {
1797         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1798         struct drm_device *dev = &dev_priv->drm;
1799         struct intel_opregion *opregion = &dev_priv->opregion;
1800         int ret;
1801
1802         ret = mutex_lock_interruptible(&dev->struct_mutex);
1803         if (ret)
1804                 goto out;
1805
1806         if (opregion->header)
1807                 seq_write(m, opregion->header, OPREGION_SIZE);
1808
1809         mutex_unlock(&dev->struct_mutex);
1810
1811 out:
1812         return 0;
1813 }
1814
1815 static int i915_vbt(struct seq_file *m, void *unused)
1816 {
1817         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1818
1819         if (opregion->vbt)
1820                 seq_write(m, opregion->vbt, opregion->vbt_size);
1821
1822         return 0;
1823 }
1824
1825 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1826 {
1827         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1828         struct drm_device *dev = &dev_priv->drm;
1829         struct intel_framebuffer *fbdev_fb = NULL;
1830         struct drm_framebuffer *drm_fb;
1831         int ret;
1832
1833         ret = mutex_lock_interruptible(&dev->struct_mutex);
1834         if (ret)
1835                 return ret;
1836
1837 #ifdef CONFIG_DRM_FBDEV_EMULATION
1838         if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1839                 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1840
1841                 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1842                            fbdev_fb->base.width,
1843                            fbdev_fb->base.height,
1844                            fbdev_fb->base.format->depth,
1845                            fbdev_fb->base.format->cpp[0] * 8,
1846                            fbdev_fb->base.modifier,
1847                            drm_framebuffer_read_refcount(&fbdev_fb->base));
1848                 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1849                 seq_putc(m, '\n');
1850         }
1851 #endif
1852
1853         mutex_lock(&dev->mode_config.fb_lock);
1854         drm_for_each_fb(drm_fb, dev) {
1855                 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1856                 if (fb == fbdev_fb)
1857                         continue;
1858
1859                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1860                            fb->base.width,
1861                            fb->base.height,
1862                            fb->base.format->depth,
1863                            fb->base.format->cpp[0] * 8,
1864                            fb->base.modifier,
1865                            drm_framebuffer_read_refcount(&fb->base));
1866                 describe_obj(m, intel_fb_obj(&fb->base));
1867                 seq_putc(m, '\n');
1868         }
1869         mutex_unlock(&dev->mode_config.fb_lock);
1870         mutex_unlock(&dev->struct_mutex);
1871
1872         return 0;
1873 }
1874
1875 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1876 {
1877         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1878                    ring->space, ring->head, ring->tail, ring->emit);
1879 }
1880
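/*
 * Context status: walks the context list and, for each context, prints its
 * HW id and pin count, owning process (or kernel/deleted), the remap-slice
 * flag and, per engine, the context state object and ringbuffer.
 */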
1881 static int i915_context_status(struct seq_file *m, void *unused)
1882 {
1883         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1884         struct drm_device *dev = &dev_priv->drm;
1885         struct intel_engine_cs *engine;
1886         struct i915_gem_context *ctx;
1887         enum intel_engine_id id;
1888         int ret;
1889
1890         ret = mutex_lock_interruptible(&dev->struct_mutex);
1891         if (ret)
1892                 return ret;
1893
1894         list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1895                 seq_puts(m, "HW context ");
1896                 if (!list_empty(&ctx->hw_id_link))
1897                         seq_printf(m, "%x [pin %u]", ctx->hw_id,
1898                                    atomic_read(&ctx->hw_id_pin_count));
1899                 if (ctx->pid) {
1900                         struct task_struct *task;
1901
1902                         task = get_pid_task(ctx->pid, PIDTYPE_PID);
1903                         if (task) {
1904                                 seq_printf(m, "(%s [%d]) ",
1905                                            task->comm, task->pid);
1906                                 put_task_struct(task);
1907                         }
1908                 } else if (IS_ERR(ctx->file_priv)) {
1909                         seq_puts(m, "(deleted) ");
1910                 } else {
1911                         seq_puts(m, "(kernel) ");
1912                 }
1913
1914                 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1915                 seq_putc(m, '\n');
1916
1917                 for_each_engine(engine, dev_priv, id) {
1918                         struct intel_context *ce =
1919                                 to_intel_context(ctx, engine);
1920
1921                         seq_printf(m, "%s: ", engine->name);
1922                         if (ce->state)
1923                                 describe_obj(m, ce->state->obj);
1924                         if (ce->ring)
1925                                 describe_ctx_ring(m, ce->ring);
1926                         seq_putc(m, '\n');
1927                 }
1928
1929                 seq_putc(m, '\n');
1930         }
1931
1932         mutex_unlock(&dev->struct_mutex);
1933
1934         return 0;
1935 }
1936
1937 static const char *swizzle_string(unsigned swizzle)
1938 {
1939         switch (swizzle) {
1940         case I915_BIT_6_SWIZZLE_NONE:
1941                 return "none";
1942         case I915_BIT_6_SWIZZLE_9:
1943                 return "bit9";
1944         case I915_BIT_6_SWIZZLE_9_10:
1945                 return "bit9/bit10";
1946         case I915_BIT_6_SWIZZLE_9_11:
1947                 return "bit9/bit11";
1948         case I915_BIT_6_SWIZZLE_9_10_11:
1949                 return "bit9/bit10/bit11";
1950         case I915_BIT_6_SWIZZLE_9_17:
1951                 return "bit9/bit17";
1952         case I915_BIT_6_SWIZZLE_9_10_17:
1953                 return "bit9/bit10/bit17";
1954         case I915_BIT_6_SWIZZLE_UNKNOWN:
1955                 return "unknown";
1956         }
1957
1958         return "bug";
1959 }
1960
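/*
 * Bit-6 swizzle report: the detected X/Y tiling swizzle modes, the raw
 * DRAM/tiling/arbiter configuration registers for the current generation,
 * and a note when the L-shaped memory quirk is active.
 */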
1961 static int i915_swizzle_info(struct seq_file *m, void *data)
1962 {
1963         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1964         intel_wakeref_t wakeref;
1965
1966         wakeref = intel_runtime_pm_get(dev_priv);
1967
1968         seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1969                    swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1970         seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1971                    swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1972
1973         if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1974                 seq_printf(m, "DCC = 0x%08x\n",
1975                            I915_READ(DCC));
1976                 seq_printf(m, "DCC2 = 0x%08x\n",
1977                            I915_READ(DCC2));
1978                 seq_printf(m, "C0DRB3 = 0x%04x\n",
1979                            I915_READ16(C0DRB3));
1980                 seq_printf(m, "C1DRB3 = 0x%04x\n",
1981                            I915_READ16(C1DRB3));
1982         } else if (INTEL_GEN(dev_priv) >= 6) {
1983                 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1984                            I915_READ(MAD_DIMM_C0));
1985                 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1986                            I915_READ(MAD_DIMM_C1));
1987                 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1988                            I915_READ(MAD_DIMM_C2));
1989                 seq_printf(m, "TILECTL = 0x%08x\n",
1990                            I915_READ(TILECTL));
1991                 if (INTEL_GEN(dev_priv) >= 8)
1992                         seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1993                                    I915_READ(GAMTARBMODE));
1994                 else
1995                         seq_printf(m, "ARB_MODE = 0x%08x\n",
1996                                    I915_READ(ARB_MODE));
1997                 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1998                            I915_READ(DISP_ARB_CTL));
1999         }
2000
2001         if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2002                 seq_puts(m, "L-shaped memory detected\n");
2003
2004         intel_runtime_pm_put(dev_priv, wakeref);
2005
2006         return 0;
2007 }
2008
2009 static const char *rps_power_to_str(unsigned int power)
2010 {
2011         static const char * const strings[] = {
2012                 [LOW_POWER] = "low power",
2013                 [BETWEEN] = "mixed",
2014                 [HIGH_POWER] = "high power",
2015         };
2016
2017         if (power >= ARRAY_SIZE(strings) || !strings[power])
2018                 return "unknown";
2019
2020         return strings[power];
2021 }
2022
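/*
 * RPS boost info: requested vs. actual frequency, the soft/hard RPS limits,
 * per-client boost counts from the DRM filelist, and the RPS autotuning
 * up/down thresholds while the GPU is busy.
 */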
2023 static int i915_rps_boost_info(struct seq_file *m, void *data)
2024 {
2025         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2026         struct drm_device *dev = &dev_priv->drm;
2027         struct intel_rps *rps = &dev_priv->gt_pm.rps;
2028         u32 act_freq = rps->cur_freq;
2029         intel_wakeref_t wakeref;
2030         struct drm_file *file;
2031
2032         with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
2033                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2034                         mutex_lock(&dev_priv->pcu_lock);
2035                         act_freq = vlv_punit_read(dev_priv,
2036                                                   PUNIT_REG_GPU_FREQ_STS);
2037                         act_freq = (act_freq >> 8) & 0xff;
2038                         mutex_unlock(&dev_priv->pcu_lock);
2039                 } else {
2040                         act_freq = intel_get_cagf(dev_priv,
2041                                                   I915_READ(GEN6_RPSTAT1));
2042                 }
2043         }
2044
2045         seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2046         seq_printf(m, "GPU busy? %s [%d requests]\n",
2047                    yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2048         seq_printf(m, "Boosts outstanding? %d\n",
2049                    atomic_read(&rps->num_waiters));
2050         seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2051         seq_printf(m, "Frequency requested %d, actual %d\n",
2052                    intel_gpu_freq(dev_priv, rps->cur_freq),
2053                    intel_gpu_freq(dev_priv, act_freq));
2054         seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2055                    intel_gpu_freq(dev_priv, rps->min_freq),
2056                    intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2057                    intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2058                    intel_gpu_freq(dev_priv, rps->max_freq));
2059         seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2060                    intel_gpu_freq(dev_priv, rps->idle_freq),
2061                    intel_gpu_freq(dev_priv, rps->efficient_freq),
2062                    intel_gpu_freq(dev_priv, rps->boost_freq));
2063
2064         mutex_lock(&dev->filelist_mutex);
2065         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2066                 struct drm_i915_file_private *file_priv = file->driver_priv;
2067                 struct task_struct *task;
2068
2069                 rcu_read_lock();
2070                 task = pid_task(file->pid, PIDTYPE_PID);
2071                 seq_printf(m, "%s [%d]: %d boosts\n",
2072                            task ? task->comm : "<unknown>",
2073                            task ? task->pid : -1,
2074                            atomic_read(&file_priv->rps_client.boosts));
2075                 rcu_read_unlock();
2076         }
2077         seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2078                    atomic_read(&rps->boosts));
2079         mutex_unlock(&dev->filelist_mutex);
2080
2081         if (INTEL_GEN(dev_priv) >= 6 &&
2082             rps->enabled &&
2083             dev_priv->gt.active_requests) {
2084                 u32 rpup, rpupei;
2085                 u32 rpdown, rpdownei;
2086
2087                 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2088                 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2089                 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2090                 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2091                 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2092                 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2093
2094                 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2095                            rps_power_to_str(rps->power.mode));
2096                 seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2097                            rpup && rpupei ? 100 * rpup / rpupei : 0,
2098                            rps->power.up_threshold);
2099                 seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2100                            rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2101                            rps->power.down_threshold);
2102         } else {
2103                 seq_puts(m, "\nRPS Autotuning inactive\n");
2104         }
2105
2106         return 0;
2107 }
2108
2109 static int i915_llc(struct seq_file *m, void *data)
2110 {
2111         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2112         const bool edram = INTEL_GEN(dev_priv) > 8;
2113
2114         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2115         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2116                    intel_uncore_edram_size(dev_priv)/1024/1024);
2117
2118         return 0;
2119 }
2120
2121 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2122 {
2123         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2124         intel_wakeref_t wakeref;
2125         struct drm_printer p;
2126
2127         if (!HAS_HUC(dev_priv))
2128                 return -ENODEV;
2129
2130         p = drm_seq_file_printer(m);
2131         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2132
2133         with_intel_runtime_pm(dev_priv, wakeref)
2134                 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2135
2136         return 0;
2137 }
2138
2139 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2140 {
2141         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2142         intel_wakeref_t wakeref;
2143         struct drm_printer p;
2144
2145         if (!HAS_GUC(dev_priv))
2146                 return -ENODEV;
2147
2148         p = drm_seq_file_printer(m);
2149         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2150
2151         with_intel_runtime_pm(dev_priv, wakeref) {
2152                 u32 tmp = I915_READ(GUC_STATUS);
2153                 u32 i;
2154
2155                 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2156                 seq_printf(m, "\tBootrom status = 0x%x\n",
2157                            (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2158                 seq_printf(m, "\tuKernel status = 0x%x\n",
2159                            (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2160                 seq_printf(m, "\tMIA Core status = 0x%x\n",
2161                            (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2162                 seq_puts(m, "\nScratch registers:\n");
2163                 for (i = 0; i < 16; i++) {
2164                         seq_printf(m, "\t%2d: \t0x%x\n",
2165                                    i, I915_READ(SOFT_SCRATCH(i)));
2166                 }
2167         }
2168
2169         return 0;
2170 }
2171
2172 static const char *
2173 stringify_guc_log_type(enum guc_log_buffer_type type)
2174 {
2175         switch (type) {
2176         case GUC_ISR_LOG_BUFFER:
2177                 return "ISR";
2178         case GUC_DPC_LOG_BUFFER:
2179                 return "DPC";
2180         case GUC_CRASH_DUMP_LOG_BUFFER:
2181                 return "CRASH";
2182         default:
2183                 MISSING_CASE(type);
2184         }
2185
2186         return "";
2187 }
2188
2189 static void i915_guc_log_info(struct seq_file *m,
2190                               struct drm_i915_private *dev_priv)
2191 {
2192         struct intel_guc_log *log = &dev_priv->guc.log;
2193         enum guc_log_buffer_type type;
2194
2195         if (!intel_guc_log_relay_enabled(log)) {
2196                 seq_puts(m, "GuC log relay disabled\n");
2197                 return;
2198         }
2199
2200         seq_puts(m, "GuC logging stats:\n");
2201
2202         seq_printf(m, "\tRelay full count: %u\n",
2203                    log->relay.full_count);
2204
2205         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2206                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2207                            stringify_guc_log_type(type),
2208                            log->stats[type].flush,
2209                            log->stats[type].sampled_overflow);
2210         }
2211 }
2212
2213 static void i915_guc_client_info(struct seq_file *m,
2214                                  struct drm_i915_private *dev_priv,
2215                                  struct intel_guc_client *client)
2216 {
2217         struct intel_engine_cs *engine;
2218         enum intel_engine_id id;
2219         u64 tot = 0;
2220
2221         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2222                 client->priority, client->stage_id, client->proc_desc_offset);
2223         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2224                 client->doorbell_id, client->doorbell_offset);
2225
2226         for_each_engine(engine, dev_priv, id) {
2227                 u64 submissions = client->submissions[id];
2228                 tot += submissions;
2229                 seq_printf(m, "\tSubmissions: %llu %s\n",
2230                                 submissions, engine->name);
2231         }
2232         seq_printf(m, "\tTotal: %llu\n", tot);
2233 }
2234
2235 static int i915_guc_info(struct seq_file *m, void *data)
2236 {
2237         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2238         const struct intel_guc *guc = &dev_priv->guc;
2239
2240         if (!USES_GUC(dev_priv))
2241                 return -ENODEV;
2242
2243         i915_guc_log_info(m, dev_priv);
2244
2245         if (!USES_GUC_SUBMISSION(dev_priv))
2246                 return 0;
2247
2248         GEM_BUG_ON(!guc->execbuf_client);
2249
2250         seq_printf(m, "\nDoorbell map:\n");
2251         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2252         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2253
2254         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2255         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2256         if (guc->preempt_client) {
2257                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2258                            guc->preempt_client);
2259                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2260         }
2261
2262         /* Add more as required ... */
2263
2264         return 0;
2265 }
2266
2267 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2268 {
2269         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2270         const struct intel_guc *guc = &dev_priv->guc;
2271         struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2272         struct intel_guc_client *client = guc->execbuf_client;
2273         unsigned int tmp;
2274         int index;
2275
2276         if (!USES_GUC_SUBMISSION(dev_priv))
2277                 return -ENODEV;
2278
2279         for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2280                 struct intel_engine_cs *engine;
2281
2282                 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2283                         continue;
2284
2285                 seq_printf(m, "GuC stage descriptor %u:\n", index);
2286                 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2287                 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2288                 seq_printf(m, "\tPriority: %d\n", desc->priority);
2289                 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2290                 seq_printf(m, "\tEngines used: 0x%x\n",
2291                            desc->engines_used);
2292                 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2293                            desc->db_trigger_phy,
2294                            desc->db_trigger_cpu,
2295                            desc->db_trigger_uk);
2296                 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2297                            desc->process_desc);
2298                 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2299                            desc->wq_addr, desc->wq_size);
2300                 seq_putc(m, '\n');
2301
2302                 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2303                         u32 guc_engine_id = engine->guc_id;
2304                         struct guc_execlist_context *lrc =
2305                                                 &desc->lrc[guc_engine_id];
2306
2307                         seq_printf(m, "\t%s LRC:\n", engine->name);
2308                         seq_printf(m, "\t\tContext desc: 0x%x\n",
2309                                    lrc->context_desc);
2310                         seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2311                         seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2312                         seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2313                         seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2314                         seq_putc(m, '\n');
2315                 }
2316         }
2317
2318         return 0;
2319 }
2320
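/*
 * Raw GuC log dump: pins either the load-error log or the regular GuC log
 * buffer (selected via the debugfs node's data pointer) and prints it as
 * rows of four 32-bit words.
 */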
2321 static int i915_guc_log_dump(struct seq_file *m, void *data)
2322 {
2323         struct drm_info_node *node = m->private;
2324         struct drm_i915_private *dev_priv = node_to_i915(node);
2325         bool dump_load_err = !!node->info_ent->data;
2326         struct drm_i915_gem_object *obj = NULL;
2327         u32 *log;
2328         int i = 0;
2329
2330         if (!HAS_GUC(dev_priv))
2331                 return -ENODEV;
2332
2333         if (dump_load_err)
2334                 obj = dev_priv->guc.load_err_log;
2335         else if (dev_priv->guc.log.vma)
2336                 obj = dev_priv->guc.log.vma->obj;
2337
2338         if (!obj)
2339                 return 0;
2340
2341         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2342         if (IS_ERR(log)) {
2343                 DRM_DEBUG("Failed to pin object\n");
2344                 seq_puts(m, "(log data inaccessible)\n");
2345                 return PTR_ERR(log);
2346         }
2347
2348         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2349                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2350                            *(log + i), *(log + i + 1),
2351                            *(log + i + 2), *(log + i + 3));
2352
2353         seq_putc(m, '\n');
2354
2355         i915_gem_object_unpin_map(obj);
2356
2357         return 0;
2358 }
2359
2360 static int i915_guc_log_level_get(void *data, u64 *val)
2361 {
2362         struct drm_i915_private *dev_priv = data;
2363
2364         if (!USES_GUC(dev_priv))
2365                 return -ENODEV;
2366
2367         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2368
2369         return 0;
2370 }
2371
2372 static int i915_guc_log_level_set(void *data, u64 val)
2373 {
2374         struct drm_i915_private *dev_priv = data;
2375
2376         if (!USES_GUC(dev_priv))
2377                 return -ENODEV;
2378
2379         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2380 }
2381
2382 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2383                         i915_guc_log_level_get, i915_guc_log_level_set,
2384                         "%lld\n");
2385
2386 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2387 {
2388         struct drm_i915_private *dev_priv = inode->i_private;
2389
2390         if (!USES_GUC(dev_priv))
2391                 return -ENODEV;
2392
2393         file->private_data = &dev_priv->guc.log;
2394
2395         return intel_guc_log_relay_open(&dev_priv->guc.log);
2396 }
2397
2398 static ssize_t
2399 i915_guc_log_relay_write(struct file *filp,
2400                          const char __user *ubuf,
2401                          size_t cnt,
2402                          loff_t *ppos)
2403 {
2404         struct intel_guc_log *log = filp->private_data;
2405
2406         intel_guc_log_relay_flush(log);
2407
2408         return cnt;
2409 }
2410
2411 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2412 {
2413         struct drm_i915_private *dev_priv = inode->i_private;
2414
2415         intel_guc_log_relay_close(&dev_priv->guc.log);
2416
2417         return 0;
2418 }
2419
2420 static const struct file_operations i915_guc_log_relay_fops = {
2421         .owner = THIS_MODULE,
2422         .open = i915_guc_log_relay_open,
2423         .write = i915_guc_log_relay_write,
2424         .release = i915_guc_log_relay_release,
2425 };
2426
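/*
 * PSR sink status: reads DP_PSR_STATUS over DPCD from the attached eDP sink
 * and decodes the sink state field into a human-readable string.
 */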
2427 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2428 {
2429         u8 val;
2430         static const char * const sink_status[] = {
2431                 "inactive",
2432                 "transition to active, capture and display",
2433                 "active, display from RFB",
2434                 "active, capture and display on sink device timings",
2435                 "transition to inactive, capture and display, timing re-sync",
2436                 "reserved",
2437                 "reserved",
2438                 "sink internal error",
2439         };
2440         struct drm_connector *connector = m->private;
2441         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2442         struct intel_dp *intel_dp =
2443                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2444         int ret;
2445
2446         if (!CAN_PSR(dev_priv)) {
2447                 seq_puts(m, "PSR Unsupported\n");
2448                 return -ENODEV;
2449         }
2450
2451         if (connector->status != connector_status_connected)
2452                 return -ENODEV;
2453
2454         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2455
2456         if (ret == 1) {
2457                 const char *str = "unknown";
2458
2459                 val &= DP_PSR_SINK_STATE_MASK;
2460                 if (val < ARRAY_SIZE(sink_status))
2461                         str = sink_status[val];
2462                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2463         } else {
2464                 return ret;
2465         }
2466
2467         return 0;
2468 }
2469 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2470
2471 static void
2472 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2473 {
2474         u32 val, status_val;
2475         const char *status = "unknown";
2476
2477         if (dev_priv->psr.psr2_enabled) {
2478                 static const char * const live_status[] = {
2479                         "IDLE",
2480                         "CAPTURE",
2481                         "CAPTURE_FS",
2482                         "SLEEP",
2483                         "BUFON_FW",
2484                         "ML_UP",
2485                         "SU_STANDBY",
2486                         "FAST_SLEEP",
2487                         "DEEP_SLEEP",
2488                         "BUF_ON",
2489                         "TG_ON"
2490                 };
2491                 val = I915_READ(EDP_PSR2_STATUS);
2492                 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2493                               EDP_PSR2_STATUS_STATE_SHIFT;
2494                 if (status_val < ARRAY_SIZE(live_status))
2495                         status = live_status[status_val];
2496         } else {
2497                 static const char * const live_status[] = {
2498                         "IDLE",
2499                         "SRDONACK",
2500                         "SRDENT",
2501                         "BUFOFF",
2502                         "BUFON",
2503                         "AUXACK",
2504                         "SRDOFFACK",
2505                         "SRDENT_ON",
2506                 };
2507                 val = I915_READ(EDP_PSR_STATUS);
2508                 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2509                               EDP_PSR_STATUS_STATE_SHIFT;
2510                 if (status_val < ARRAY_SIZE(live_status))
2511                         status = live_status[status_val];
2512         }
2513
2514         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2515 }
2516
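/*
 * Source-side PSR status: sink support, whether PSR1 or PSR2 is enabled, the
 * live source state, busy frontbuffer bits, the HSW/BDW performance counter
 * and, for PSR2, the per-frame selective-update block counts.
 */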
2517 static int i915_edp_psr_status(struct seq_file *m, void *data)
2518 {
2519         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2520         struct i915_psr *psr = &dev_priv->psr;
2521         intel_wakeref_t wakeref;
2522         const char *status;
2523         bool enabled;
2524         u32 val;
2525
2526         if (!HAS_PSR(dev_priv))
2527                 return -ENODEV;
2528
2529         seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2530         if (psr->dp)
2531                 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2532         seq_puts(m, "\n");
2533
2534         if (!psr->sink_support)
2535                 return 0;
2536
2537         wakeref = intel_runtime_pm_get(dev_priv);
2538         mutex_lock(&psr->lock);
2539
2540         if (psr->enabled)
2541                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2542         else
2543                 status = "disabled";
2544         seq_printf(m, "PSR mode: %s\n", status);
2545
2546         if (!psr->enabled)
2547                 goto unlock;
2548
2549         if (psr->psr2_enabled) {
2550                 val = I915_READ(EDP_PSR2_CTL);
2551                 enabled = val & EDP_PSR2_ENABLE;
2552         } else {
2553                 val = I915_READ(EDP_PSR_CTL);
2554                 enabled = val & EDP_PSR_ENABLE;
2555         }
2556         seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2557                    enableddisabled(enabled), val);
2558         psr_source_status(dev_priv, m);
2559         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2560                    psr->busy_frontbuffer_bits);
2561
2562         /*
2563          * SKL+ Perf counter is reset to 0 every time a DC state is entered
2564          */
2565         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2566                 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2567                 seq_printf(m, "Performance counter: %u\n", val);
2568         }
2569
2570         if (psr->debug & I915_PSR_DEBUG_IRQ) {
2571                 seq_printf(m, "Last attempted entry at: %lld\n",
2572                            psr->last_entry_attempt);
2573                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2574         }
2575
2576         if (psr->psr2_enabled) {
2577                 u32 su_frames_val[3];
2578                 int frame;
2579
2580                 /*
2581                  * Read all three registers beforehand to minimize the chance
2582                  * of crossing a frame boundary between the register reads
2583                  */
2584                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2585                         su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2586
2587                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2588
2589                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2590                         u32 su_blocks;
2591
2592                         su_blocks = su_frames_val[frame / 3] &
2593                                     PSR2_SU_STATUS_MASK(frame);
2594                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2595                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
2596                 }
2597         }
2598
2599 unlock:
2600         mutex_unlock(&psr->lock);
2601         intel_runtime_pm_put(dev_priv, wakeref);
2602
2603         return 0;
2604 }
2605
2606 static int
2607 i915_edp_psr_debug_set(void *data, u64 val)
2608 {
2609         struct drm_i915_private *dev_priv = data;
2610         struct drm_modeset_acquire_ctx ctx;
2611         intel_wakeref_t wakeref;
2612         int ret;
2613
2614         if (!CAN_PSR(dev_priv))
2615                 return -ENODEV;
2616
2617         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2618
2619         wakeref = intel_runtime_pm_get(dev_priv);
2620
2621         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2622
2623 retry:
2624         ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2625         if (ret == -EDEADLK) {
2626                 ret = drm_modeset_backoff(&ctx);
2627                 if (!ret)
2628                         goto retry;
2629         }
2630
2631         drm_modeset_drop_locks(&ctx);
2632         drm_modeset_acquire_fini(&ctx);
2633
2634         intel_runtime_pm_put(dev_priv, wakeref);
2635
2636         return ret;
2637 }
2638
2639 static int
2640 i915_edp_psr_debug_get(void *data, u64 *val)
2641 {
2642         struct drm_i915_private *dev_priv = data;
2643
2644         if (!CAN_PSR(dev_priv))
2645                 return -ENODEV;
2646
2647         *val = READ_ONCE(dev_priv->psr.debug);
2648         return 0;
2649 }
2650
2651 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2652                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2653                         "%llu\n");
2654
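/*
 * Energy counter: converts MCH_SECP_NRG_STTS to microjoules using the RAPL
 * energy units read from MSR_RAPL_POWER_UNIT.
 */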
2655 static int i915_energy_uJ(struct seq_file *m, void *data)
2656 {
2657         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2658         unsigned long long power;
2659         intel_wakeref_t wakeref;
2660         u32 units;
2661
2662         if (INTEL_GEN(dev_priv) < 6)
2663                 return -ENODEV;
2664
2665         if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2666                 return -ENODEV;
2667
2668         units = (power & 0x1f00) >> 8;
2669         with_intel_runtime_pm(dev_priv, wakeref)
2670                 power = I915_READ(MCH_SECP_NRG_STTS);
2671
2672         power = (1000000 * power) >> units; /* convert to uJ */
2673         seq_printf(m, "%llu", power);
2674
2675         return 0;
2676 }
2677
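/*
 * Runtime PM status: device wakeref state, GT idleness, IRQ enablement, the
 * PM usage count (when CONFIG_PM is set), the PCI power state and, with
 * DEBUG_RUNTIME_PM enabled, the full wakeref tracking dump.
 */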
2678 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2679 {
2680         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2681         struct pci_dev *pdev = dev_priv->drm.pdev;
2682
2683         if (!HAS_RUNTIME_PM(dev_priv))
2684                 seq_puts(m, "Runtime power management not supported\n");
2685
2686         seq_printf(m, "Runtime power status: %s\n",
2687                    enableddisabled(!dev_priv->power_domains.wakeref));
2688
2689         seq_printf(m, "GPU idle: %s (epoch %u)\n",
2690                    yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2691         seq_printf(m, "IRQs disabled: %s\n",
2692                    yesno(!intel_irqs_enabled(dev_priv)));
2693 #ifdef CONFIG_PM
2694         seq_printf(m, "Usage count: %d\n",
2695                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2696 #else
2697         seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2698 #endif
2699         seq_printf(m, "PCI device power state: %s [%d]\n",
2700                    pci_power_name(pdev->current_state),
2701                    pdev->current_state);
2702
2703         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2704                 struct drm_printer p = drm_seq_file_printer(m);
2705
2706                 print_intel_runtime_pm_wakeref(dev_priv, &p);
2707         }
2708
2709         return 0;
2710 }
2711
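/*
 * Dump the reference count of every power well and, indented beneath it,
 * the use count of each display power domain served by that well.  The
 * power_domains lock is held so the counts form a consistent snapshot.
 */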
2712 static int i915_power_domain_info(struct seq_file *m, void *unused)
2713 {
2714         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2715         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2716         int i;
2717
2718         mutex_lock(&power_domains->lock);
2719
2720         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2721         for (i = 0; i < power_domains->power_well_count; i++) {
2722                 struct i915_power_well *power_well;
2723                 enum intel_display_power_domain power_domain;
2724
2725                 power_well = &power_domains->power_wells[i];
2726                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2727                            power_well->count);
2728
2729                 for_each_power_domain(power_domain, power_well->desc->domains)
2730                         seq_printf(m, "  %-23s %d\n",
2731                                  intel_display_power_domain_str(power_domain),
2732                                  power_domains->domain_use_count[power_domain]);
2733         }
2734
2735         mutex_unlock(&power_domains->lock);
2736
2737         return 0;
2738 }
2739
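/*
 * Report DMC/CSR firmware state: whether a payload is loaded, its path and
 * version, the DC3->DC5 (and, outside gen9 LP, DC5->DC6) entry counters,
 * and the raw program/SSP/HTP registers.
 */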
2740 static int i915_dmc_info(struct seq_file *m, void *unused)
2741 {
2742         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2743         intel_wakeref_t wakeref;
2744         struct intel_csr *csr;
2745
2746         if (!HAS_CSR(dev_priv))
2747                 return -ENODEV;
2748
2749         csr = &dev_priv->csr;
2750
2751         wakeref = intel_runtime_pm_get(dev_priv);
2752
2753         seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2754         seq_printf(m, "path: %s\n", csr->fw_path);
2755
2756         if (!csr->dmc_payload)
2757                 goto out;
2758
2759         seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2760                    CSR_VERSION_MINOR(csr->version));
2761
2762         if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2763                 goto out;
2764
2765         seq_printf(m, "DC3 -> DC5 count: %d\n",
2766                    I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2767                                                     SKL_CSR_DC3_DC5_COUNT));
2768         if (!IS_GEN9_LP(dev_priv))
2769                 seq_printf(m, "DC5 -> DC6 count: %d\n",
2770                            I915_READ(SKL_CSR_DC5_DC6_COUNT));
2771
2772 out:
2773         seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2774         seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2775         seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2776
2777         intel_runtime_pm_put(dev_priv, wakeref);
2778
2779         return 0;
2780 }
2781
2782 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2783                                  struct drm_display_mode *mode)
2784 {
2785         int i;
2786
2787         for (i = 0; i < tabs; i++)
2788                 seq_putc(m, '\t');
2789
2790         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2791 }
2792
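/*
 * Print one encoder attached to the given CRTC together with every
 * connector currently routed to it; connected connectors also get the
 * CRTC's current mode printed.
 */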
2793 static void intel_encoder_info(struct seq_file *m,
2794                                struct intel_crtc *intel_crtc,
2795                                struct intel_encoder *intel_encoder)
2796 {
2797         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2798         struct drm_device *dev = &dev_priv->drm;
2799         struct drm_crtc *crtc = &intel_crtc->base;
2800         struct intel_connector *intel_connector;
2801         struct drm_encoder *encoder;
2802
2803         encoder = &intel_encoder->base;
2804         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2805                    encoder->base.id, encoder->name);
2806         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2807                 struct drm_connector *connector = &intel_connector->base;
2808                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2809                            connector->base.id,
2810                            connector->name,
2811                            drm_get_connector_status_name(connector->status));
2812                 if (connector->status == connector_status_connected) {
2813                         struct drm_display_mode *mode = &crtc->mode;
2814                         seq_puts(m, ", mode:\n");
2815                         intel_seq_print_mode(m, 2, mode);
2816                 } else {
2817                         seq_putc(m, '\n');
2818                 }
2819         }
2820 }
2821
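/*
 * Describe a CRTC: the primary plane's framebuffer id, its source position
 * and the framebuffer size (or a note that the primary plane is disabled),
 * followed by each encoder driven by this CRTC.
 */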
2822 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2823 {
2824         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2825         struct drm_device *dev = &dev_priv->drm;
2826         struct drm_crtc *crtc = &intel_crtc->base;
2827         struct intel_encoder *intel_encoder;
2828         struct drm_plane_state *plane_state = crtc->primary->state;
2829         struct drm_framebuffer *fb = plane_state->fb;
2830
2831         if (fb)
2832                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2833                            fb->base.id, plane_state->src_x >> 16,
2834                            plane_state->src_y >> 16, fb->width, fb->height);
2835         else
2836                 seq_puts(m, "\tprimary plane disabled\n");
2837         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2838                 intel_encoder_info(m, intel_crtc, intel_encoder);
2839 }
2840
2841 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2842 {
2843         struct drm_display_mode *mode = panel->fixed_mode;
2844
2845         seq_puts(m, "\tfixed mode:\n");
2846         intel_seq_print_mode(m, 2, mode);
2847 }
2848
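/*
 * DisplayPort connector details: DPCD revision, audio capability, the fixed
 * panel mode for eDP, plus the downstream port dump provided by the DP
 * helpers.
 */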
2849 static void intel_dp_info(struct seq_file *m,
2850                           struct intel_connector *intel_connector)
2851 {
2852         struct intel_encoder *intel_encoder = intel_connector->encoder;
2853         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2854
2855         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2856         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2857         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2858                 intel_panel_info(m, &intel_connector->panel);
2859
2860         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2861                                 &intel_dp->aux);
2862 }
2863
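/* For an MST connector only the audio capability of the port is reported. */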
2864 static void intel_dp_mst_info(struct seq_file *m,
2865                               struct intel_connector *intel_connector)
2866 {
2867         struct intel_encoder *intel_encoder = intel_connector->encoder;
2868         struct intel_dp_mst_encoder *intel_mst =
2869                 enc_to_mst(&intel_encoder->base);
2870         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2871         struct intel_dp *intel_dp = &intel_dig_port->dp;
2872         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2873                                         intel_connector->port);
2874
2875         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2876 }
2877
2878 static void intel_hdmi_info(struct seq_file *m,
2879                             struct intel_connector *intel_connector)
2880 {
2881         struct intel_encoder *intel_encoder = intel_connector->encoder;
2882         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2883
2884         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2885 }
2886
2887 static void intel_lvds_info(struct seq_file *m,
2888                             struct intel_connector *intel_connector)
2889 {
2890         intel_panel_info(m, &intel_connector->panel);
2891 }
2892
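/*
 * Top-level connector dump: id, type and status, then (for connected
 * connectors) display name, physical size, subpixel order, CEA revision,
 * encoder-type specific details and the full probed mode list.
 */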
2893 static void intel_connector_info(struct seq_file *m,
2894                                  struct drm_connector *connector)
2895 {
2896         struct intel_connector *intel_connector = to_intel_connector(connector);
2897         struct intel_encoder *intel_encoder = intel_connector->encoder;
2898         struct drm_display_mode *mode;
2899
2900         seq_printf(m, "connector %d: type %s, status: %s\n",
2901                    connector->base.id, connector->name,
2902                    drm_get_connector_status_name(connector->status));
2903
2904         if (connector->status == connector_status_disconnected)
2905                 return;
2906
2907         seq_printf(m, "\tname: %s\n", connector->display_info.name);
2908         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2909                    connector->display_info.width_mm,
2910                    connector->display_info.height_mm);
2911         seq_printf(m, "\tsubpixel order: %s\n",
2912                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2913         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2914
2915         if (!intel_encoder)
2916                 return;
2917
2918         switch (connector->connector_type) {
2919         case DRM_MODE_CONNECTOR_DisplayPort:
2920         case DRM_MODE_CONNECTOR_eDP:
2921                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2922                         intel_dp_mst_info(m, intel_connector);
2923                 else
2924                         intel_dp_info(m, intel_connector);
2925                 break;
2926         case DRM_MODE_CONNECTOR_LVDS:
2927                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2928                         intel_lvds_info(m, intel_connector);
2929                 break;
2930         case DRM_MODE_CONNECTOR_HDMIA:
2931                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2932                     intel_encoder->type == INTEL_OUTPUT_DDI)
2933                         intel_hdmi_info(m, intel_connector);
2934                 break;
2935         default:
2936                 break;
2937         }
2938
2939         seq_puts(m, "\tmodes:\n");
2940         list_for_each_entry(mode, &connector->modes, head)
2941                 intel_seq_print_mode(m, 2, mode);
2942 }
2943
2944 static const char *plane_type(enum drm_plane_type type)
2945 {
2946         switch (type) {
2947         case DRM_PLANE_TYPE_OVERLAY:
2948                 return "OVL";
2949         case DRM_PLANE_TYPE_PRIMARY:
2950                 return "PRI";
2951         case DRM_PLANE_TYPE_CURSOR:
2952                 return "CUR";
2953         /*
2954          * Deliberately omitting default: to generate compiler warnings
2955          * when a new drm_plane_type gets added.
2956          */
2957         }
2958
2959         return "unknown";
2960 }
2961
2962 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2963 {
2964         /*
2965          * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
2966          * but print them all so that misused combinations are easy to spot.
2967          */
2968         snprintf(buf, bufsize,
2969                  "%s%s%s%s%s%s(0x%08x)",
2970                  (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2971                  (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2972                  (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2973                  (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2974                  (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2975                  (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2976                  rotation);
2977 }
2978
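/*
 * Walk every plane on the CRTC and print its type, CRTC position/size,
 * source position/size, pixel format and rotation.  Source coordinates are
 * 16.16 fixed point, hence the fractional formatting in the line below.
 */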
2979 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2980 {
2981         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2982         struct drm_device *dev = &dev_priv->drm;
2983         struct intel_plane *intel_plane;
2984
2985         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2986                 struct drm_plane_state *state;
2987                 struct drm_plane *plane = &intel_plane->base;
2988                 struct drm_format_name_buf format_name;
2989                 char rot_str[48];
2990
2991                 if (!plane->state) {
2992                         seq_puts(m, "plane->state is NULL!\n");
2993                         continue;
2994                 }
2995
2996                 state = plane->state;
2997
2998                 if (state->fb) {
2999                         drm_get_format_name(state->fb->format->format,
3000                                             &format_name);
3001                 } else {
3002                         snprintf(format_name.str, sizeof(format_name.str), "N/A");
3003                 }
3004
3005                 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3006
3007                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3008