/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

#include "i915_reset.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_device_info *info = INTEL_INFO(dev_priv);
        struct drm_printer p = drm_seq_file_printer(m);

        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

        intel_device_info_dump_flags(info, &p);
        intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        intel_driver_caps_print(&dev_priv->caps, &p);

        kernel_param_lock(THIS_MODULE);
        i915_params_dump(&i915_modparams, &p);
        kernel_param_unlock(THIS_MODULE);

        return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
        return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
        case I915_TILING_Y: return 'Y';
        }
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
        return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
        return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
        u64 size = 0;
        struct i915_vma *vma;

        for_each_ggtt_vma(vma, obj) {
                if (drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }

        return size;
}

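/*
 * Format a mask of GTT page sizes for display. Single sizes map to a
 * constant string; mixed masks are written as a comma-separated list into
 * @buf (callers passing a NULL buffer just get "M" back for mixed sizes).
 */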
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
        size_t x = 0;

        switch (page_sizes) {
        case 0:
                return "";
        case I915_GTT_PAGE_SIZE_4K:
                return "4K";
        case I915_GTT_PAGE_SIZE_64K:
                return "64K";
        case I915_GTT_PAGE_SIZE_2M:
                return "2M";
        default:
                if (!buf)
                        return "M";

                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
                        x += snprintf(buf + x, len - x, "2M, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
                        x += snprintf(buf + x, len - x, "64K, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
                        x += snprintf(buf + x, len - x, "4K, ");
                buf[x-2] = '\0';

                return buf;
        }
}

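/*
 * Print a one-line summary of a GEM object: status flag characters (active,
 * globally pinned, tiling, userfault, pin-mapped), size, read/write domains,
 * cache level, each allocated vma binding (with GGTT view details and fence),
 * stolen offset, last write engine and frontbuffer bits. The caller must
 * hold struct_mutex.
 */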
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        unsigned int frontbuffer_bits;
        int pin_count = 0;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->read_domains,
                   obj->write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_global)
                seq_printf(m, " (global)");
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size,
                           stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
                                seq_puts(m, ", normal");
                                break;

                        case I915_GGTT_VIEW_PARTIAL:
                                seq_printf(m, ", partial [%08llx+%x]",
                                           vma->ggtt_view.partial.offset << PAGE_SHIFT,
                                           vma->ggtt_view.partial.size << PAGE_SHIFT);
                                break;

                        case I915_GGTT_VIEW_ROTATED:
                                seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.rotated.plane[0].width,
                                           vma->ggtt_view.rotated.plane[0].height,
                                           vma->ggtt_view.rotated.plane[0].stride,
                                           vma->ggtt_view.rotated.plane[0].offset,
                                           vma->ggtt_view.rotated.plane[1].width,
                                           vma->ggtt_view.rotated.plane[1].height,
                                           vma->ggtt_view.rotated.plane[1].stride,
                                           vma->ggtt_view.rotated.plane[1].offset);
                                break;

                        default:
                                MISSING_CASE(vma->ggtt_view.type);
                                break;
                        }
                }
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
                                   i915_active_request_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);

        frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
        if (frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

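/* sort() comparator: order objects by the start of their stolen-memory node. */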
static int obj_rank_by_stolen(const void *A, const void *B)
{
        const struct drm_i915_gem_object *a =
                *(const struct drm_i915_gem_object **)A;
        const struct drm_i915_gem_object *b =
                *(const struct drm_i915_gem_object **)B;

        if (a->stolen->start < b->stolen->start)
                return -1;
        if (a->stolen->start > b->stolen->start)
                return 1;
        return 0;
}

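/*
 * debugfs: snapshot all objects backed by stolen memory under the obj_lock,
 * sort them by stolen offset and describe each one.
 */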
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long total, count, n;
        int ret;

        total = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        total_obj_size = total_gtt_size = count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

        seq_puts(m, "Stolen:\n");
        for (n = 0; n < count; n++) {
                seq_puts(m, "   ");
                describe_obj(m, objects[n]);
                seq_putc(m, '\n');
        }
        seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        mutex_unlock(&dev->struct_mutex);
out:
        kvfree(objects);
        return ret;
}

struct file_stats {
        struct i915_address_space *vm;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
        u64 closed;
};

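/*
 * Accumulate per-object memory statistics into a struct file_stats; only
 * vmas bound in the GGTT or in stats->vm are counted. Used both directly
 * and as an idr_for_each() callback.
 */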
static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
                stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;

        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
                        if (vma->vm != stats->vm)
                                continue;
                }

                if (i915_vma_is_active(vma))
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;

                if (i915_vma_is_closed(vma))
                        stats->closed += vma->node.size;
        }

        return 0;
}

#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
                           stats.unbound, \
                           stats.closed); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        struct file_stats stats = {};
        enum intel_engine_id id;
        int j;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                per_file_stats(0, obj, &stats);
                }
        }

        print_file_stats(m, "[k]batch pool", stats);
}

static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *i915)
{
        struct file_stats kstats = {};
        struct i915_gem_context *ctx;

        list_for_each_entry(ctx, &i915->contexts.list, link) {
                struct intel_context *ce;

                list_for_each_entry(ce, &ctx->active_engines, active_link) {
                        if (ce->state)
                                per_file_stats(0, ce->state->obj, &kstats);
                        if (ce->ring)
                                per_file_stats(0, ce->ring->vma->obj, &kstats);
                }

                if (!IS_ERR_OR_NULL(ctx->file_priv)) {
                        struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
                        struct drm_file *file = ctx->file_priv->file;
                        struct task_struct *task;
                        char name[80];

                        spin_lock(&file->table_lock);
                        idr_for_each(&file->object_idr, per_file_stats, &stats);
                        spin_unlock(&file->table_lock);

                        rcu_read_lock();
                        task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
                        snprintf(name, sizeof(name), "%s/%d",
                                 task ? task->comm : "<unknown>",
                                 ctx->user_handle);
                        rcu_read_unlock();

                        print_file_stats(m, name, stats);
                }
        }

        print_file_stats(m, "[k]contexts", kstats);
}

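/*
 * debugfs: summarise object counts and sizes (unbound, bound, purgeable,
 * mapped, huge-paged, display-pinned), GGTT totals and supported page
 * sizes, followed by batch-pool and per-context statistics.
 */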
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
        char buf[80];
        int ret;

        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
        huge_size = huge_count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

        size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->pin_global) {
                        dpy_size += obj->base.size;
                        ++dpy_count;
                }

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
        seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
                   huge_count,
                   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
                   huge_size);
        seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
                   dpy_count, dpy_size);

        seq_printf(m, "%llu [%pa] gtt total\n",
                   ggtt->vm.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));

        seq_putc(m, '\n');

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        print_batch_pool_stats(m, dev_priv);
        print_context_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_i915_private *dev_priv = node_to_i915(node);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long nobject, n;
        int count, ret;

        nobject = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                objects[count++] = obj;
                if (count == nobject)
                        break;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        total_obj_size = total_gtt_size = 0;
        for (n = 0;  n < count; n++) {
                obj = objects[n];

                seq_puts(m, "   ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        kvfree(objects);

        return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int total = 0;
        int ret, j;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        int count;

                        count = 0;
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                count++;
                        seq_printf(m, "%s cache[%d]: %d objects\n",
                                   engine->name, j, count);

                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link) {
                                seq_puts(m, "   ");
                                describe_obj(m, obj);
                                seq_putc(m, '\n');
                        }

                        total += count;
                }
        }

        seq_printf(m, "total: %d\n", total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

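/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is off), plus the port, misc and PCU
 * interrupt registers.
 */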
static void gen8_display_interrupt_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int pipe;

        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t wakeref;

                power_domain = POWER_DOMAIN_PIPE(pipe);
                wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                             power_domain);
                if (!wakeref) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
                }
                seq_printf(m, "Pipe %c IMR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                seq_printf(m, "Pipe %c IIR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                seq_printf(m, "Pipe %c IER:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));

                intel_display_power_put(dev_priv, power_domain, wakeref);
        }

        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IMR));
        seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IIR));
        seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IER));

        seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IMR));
        seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IIR));
        seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IER));

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int i, pipe;

        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_CHERRYVIEW(dev_priv)) {
                intel_wakeref_t pref;

                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "Master Interrupt Control:  %08x\n",
                           I915_READ(GEN11_GFX_MSTR_IRQ));

                seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
                           I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
                seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
                           I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
                seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_ENABLE));
                seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
                seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
                seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

                seq_printf(m, "Display Interrupt Control:\t%08x\n",
                           I915_READ(GEN11_DISPLAY_INT_CTL));

                gen8_display_interrupt_info(m);
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                gen8_display_interrupt_info(m);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;
                        intel_wakeref_t pref;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));

        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat:         %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }

        if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "RCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
                seq_printf(m, "BCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_BCS_RSVD_INTR_MASK));
                seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
                seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
                seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
                seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_MASK));
                seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
                seq_printf(m, "Crypto Intr Mask:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
                seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

        } else if (INTEL_GEN(dev_priv) >= 6) {
                for_each_engine(engine, dev_priv, id) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s):       %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
        }

        intel_runtime_pm_put(dev_priv, wakeref);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct i915_vma *vma = dev_priv->fence_regs[i].vma;

                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (!vma)
                        seq_puts(m, "unused");
                else
                        describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

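/* GPU error-state interfaces, only built with CONFIG_DRM_I915_CAPTURE_ERROR. */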
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
                              size_t count, loff_t *pos)
{
        struct i915_gpu_state *error;
        ssize_t ret;
        void *buf;

        error = file->private_data;
        if (!error)
                return 0;

        /* Bounce buffer required because of kernfs __user API convenience. */
        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
        if (ret <= 0)
                goto out;

        if (!copy_to_user(ubuf, buf, ret))
                *pos += ret;
        else
                ret = -EFAULT;

out:
        kfree(buf);
        return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_state_put(file->private_data);
        return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_state *gpu;
        intel_wakeref_t wakeref;

        gpu = NULL;
        with_intel_runtime_pm(i915, wakeref)
                gpu = i915_capture_gpu_state(i915);
        if (IS_ERR(gpu))
                return PTR_ERR(gpu);

        file->private_data = gpu;
        return 0;
}

static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
        struct i915_gpu_state *error;

        error = i915_first_error_state(inode->i_private);
        if (IS_ERR(error))
                return PTR_ERR(error);

        file->private_data = error;
        return 0;
}

static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
#endif

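/*
 * debugfs: report the current RPS/frequency state. The registers consulted
 * depend on the platform: Ironlake, Valleyview/Cherryview and gen6+ each
 * have their own reporting path.
 */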
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        intel_wakeref_t wakeref;
        int ret = 0;

        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_GEN(dev_priv, 5)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv, wakeref);
        return ret;
}

static void i915_instdone_info(struct drm_i915_private *dev_priv,
                               struct seq_file *m,
                               struct intel_instdone *instdone)
{
        int slice;
        int subslice;

        seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
                   instdone->instdone);

        if (INTEL_GEN(dev_priv) <= 3)
                return;

        seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
                   instdone->slice_common);

        if (INTEL_GEN(dev_priv) <= 6)
                return;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->sampler[slice][subslice]);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->row[slice][subslice]);
}

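/*
 * debugfs: report hangcheck status (reset flags, timer state) and, per
 * engine, the last/current seqno and ACTHD, plus the INSTDONE breakdown
 * for the render engine.
 */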
1271 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1272 {
1273         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1274         struct intel_engine_cs *engine;
1275         u64 acthd[I915_NUM_ENGINES];
1276         u32 seqno[I915_NUM_ENGINES];
1277         struct intel_instdone instdone;
1278         intel_wakeref_t wakeref;
1279         enum intel_engine_id id;
1280
1281         seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
1282         if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1283                 seq_puts(m, "\tWedged\n");
1284         if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1285                 seq_puts(m, "\tDevice (global) reset in progress\n");
1286
1287         if (!i915_modparams.enable_hangcheck) {
1288                 seq_puts(m, "Hangcheck disabled\n");
1289                 return 0;
1290         }
1291
1292         with_intel_runtime_pm(dev_priv, wakeref) {
1293                 for_each_engine(engine, dev_priv, id) {
1294                         acthd[id] = intel_engine_get_active_head(engine);
1295                         seqno[id] = intel_engine_get_hangcheck_seqno(engine);
1296                 }
1297
1298                 intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
1299         }
1300
1301         if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1302                 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1303                            jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1304                                             jiffies));
1305         else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1306                 seq_puts(m, "Hangcheck active, work pending\n");
1307         else
1308                 seq_puts(m, "Hangcheck inactive\n");
1309
1310         seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1311
1312         for_each_engine(engine, dev_priv, id) {
1313                 seq_printf(m, "%s:\n", engine->name);
1314                 seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
1315                            engine->hangcheck.last_seqno,
1316                            seqno[id],
1317                            engine->hangcheck.next_seqno,
1318                            jiffies_to_msecs(jiffies -
1319                                             engine->hangcheck.action_timestamp));
1320
1321                 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1322                            (long long)engine->hangcheck.acthd,
1323                            (long long)acthd[id]);
1324
1325                 if (engine->id == RCS0) {
1326                         seq_puts(m, "\tinstdone read =\n");
1327
1328                         i915_instdone_info(dev_priv, m, &instdone);
1329
1330                         seq_puts(m, "\tinstdone accu =\n");
1331
1332                         i915_instdone_info(dev_priv, m,
1333                                            &engine->hangcheck.instdone);
1334                 }
1335         }
1336
1337         return 0;
1338 }
1339
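/* Report the global GPU reset count followed by per-engine reset counts. */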
1340 static int i915_reset_info(struct seq_file *m, void *unused)
1341 {
1342         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1343         struct i915_gpu_error *error = &dev_priv->gpu_error;
1344         struct intel_engine_cs *engine;
1345         enum intel_engine_id id;
1346
1347         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1348
1349         for_each_engine(engine, dev_priv, id) {
1350                 seq_printf(m, "%s = %u\n", engine->name,
1351                            i915_reset_engine_count(error, engine));
1352         }
1353
1354         return 0;
1355 }
1356
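/*
 * Ironlake render power-state (DRPC) report: decode MEMMODECTL, RSTDBYCTL
 * and CRSTANDVID into the boost/P-state limits, RS1/RS2 VIDs and the
 * current render standby (RSx) state.
 */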
1357 static int ironlake_drpc_info(struct seq_file *m)
1358 {
1359         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1360         u32 rgvmodectl, rstdbyctl;
1361         u16 crstandvid;
1362
1363         rgvmodectl = I915_READ(MEMMODECTL);
1364         rstdbyctl = I915_READ(RSTDBYCTL);
1365         crstandvid = I915_READ16(CRSTANDVID);
1366
1367         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1368         seq_printf(m, "Boost freq: %d\n",
1369                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1370                    MEMMODE_BOOST_FREQ_SHIFT);
1371         seq_printf(m, "HW control enabled: %s\n",
1372                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1373         seq_printf(m, "SW control enabled: %s\n",
1374                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1375         seq_printf(m, "Gated voltage change: %s\n",
1376                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1377         seq_printf(m, "Starting frequency: P%d\n",
1378                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1379         seq_printf(m, "Max P-state: P%d\n",
1380                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1381         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1382         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1383         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1384         seq_printf(m, "Render standby enabled: %s\n",
1385                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1386         seq_puts(m, "Current RS state: ");
1387         switch (rstdbyctl & RSX_STATUS_MASK) {
1388         case RSX_STATUS_ON:
1389                 seq_puts(m, "on\n");
1390                 break;
1391         case RSX_STATUS_RC1:
1392                 seq_puts(m, "RC1\n");
1393                 break;
1394         case RSX_STATUS_RC1E:
1395                 seq_puts(m, "RC1E\n");
1396                 break;
1397         case RSX_STATUS_RS1:
1398                 seq_puts(m, "RS1\n");
1399                 break;
1400         case RSX_STATUS_RS2:
1401                 seq_puts(m, "RS2 (RC6)\n");
1402                 break;
1403         case RSX_STATUS_RS3:
1404                 seq_puts(m, "RC3 (RC6+)\n");
1405                 break;
1406         default:
1407                 seq_puts(m, "unknown\n");
1408                 break;
1409         }
1410
1411         return 0;
1412 }
1413
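/*
 * Print user.bypass_count and the wake_count of every forcewake domain;
 * also reused as the tail of the VLV/gen6 DRPC reports below.
 */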
1414 static int i915_forcewake_domains(struct seq_file *m, void *data)
1415 {
1416         struct drm_i915_private *i915 = node_to_i915(m->private);
1417         struct intel_uncore_forcewake_domain *fw_domain;
1418         unsigned int tmp;
1419
1420         seq_printf(m, "user.bypass_count = %u\n",
1421                    i915->uncore.user_forcewake.count);
1422
1423         for_each_fw_domain(fw_domain, i915, tmp)
1424                 seq_printf(m, "%s.wake_count = %u\n",
1425                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1426                            READ_ONCE(fw_domain->wake_count));
1427
1428         return 0;
1429 }
1430
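/*
 * Helper: print a raw RC6 residency register together with its value
 * converted to microseconds by intel_rc6_residency_us().
 */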
1431 static void print_rc6_res(struct seq_file *m,
1432                           const char *title,
1433                           const i915_reg_t reg)
1434 {
1435         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1436
1437         seq_printf(m, "%s %u (%llu us)\n",
1438                    title, I915_READ(reg),
1439                    intel_rc6_residency_us(dev_priv, reg));
1440 }
1441
1442 static int vlv_drpc_info(struct seq_file *m)
1443 {
1444         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1445         u32 rcctl1, pw_status;
1446
1447         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1448         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1449
1450         seq_printf(m, "RC6 Enabled: %s\n",
1451                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1452                                         GEN6_RC_CTL_EI_MODE(1))));
1453         seq_printf(m, "Render Power Well: %s\n",
1454                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1455         seq_printf(m, "Media Power Well: %s\n",
1456                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1457
1458         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1459         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1460
1461         return i915_forcewake_domains(m, NULL);
1462 }
1463
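/*
 * Gen6+ DRPC report: RC1e/RC6/RC6p/RC6pp enables from GEN6_RC_CONTROL,
 * the current RC state from GEN6_GT_CORE_STATUS, gen9 render/media power
 * gating, the RC6 residency counters, and (on gen6/7) the RC6 voltages
 * read back from pcode.
 */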
1464 static int gen6_drpc_info(struct seq_file *m)
1465 {
1466         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1467         u32 gt_core_status, rcctl1, rc6vids = 0;
1468         u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1469
1470         gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1471         trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1472
1473         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1474         if (INTEL_GEN(dev_priv) >= 9) {
1475                 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1476                 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1477         }
1478
1479         if (INTEL_GEN(dev_priv) <= 7) {
1480                 mutex_lock(&dev_priv->pcu_lock);
1481                 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1482                                        &rc6vids);
1483                 mutex_unlock(&dev_priv->pcu_lock);
1484         }
1485
1486         seq_printf(m, "RC1e Enabled: %s\n",
1487                    yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1488         seq_printf(m, "RC6 Enabled: %s\n",
1489                    yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1490         if (INTEL_GEN(dev_priv) >= 9) {
1491                 seq_printf(m, "Render Well Gating Enabled: %s\n",
1492                         yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1493                 seq_printf(m, "Media Well Gating Enabled: %s\n",
1494                         yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1495         }
1496         seq_printf(m, "Deep RC6 Enabled: %s\n",
1497                    yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1498         seq_printf(m, "Deepest RC6 Enabled: %s\n",
1499                    yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1500         seq_puts(m, "Current RC state: ");
1501         switch (gt_core_status & GEN6_RCn_MASK) {
1502         case GEN6_RC0:
1503                 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1504                         seq_puts(m, "Core Power Down\n");
1505                 else
1506                         seq_puts(m, "on\n");
1507                 break;
1508         case GEN6_RC3:
1509                 seq_puts(m, "RC3\n");
1510                 break;
1511         case GEN6_RC6:
1512                 seq_puts(m, "RC6\n");
1513                 break;
1514         case GEN6_RC7:
1515                 seq_puts(m, "RC7\n");
1516                 break;
1517         default:
1518                 seq_puts(m, "Unknown\n");
1519                 break;
1520         }
1521
1522         seq_printf(m, "Core Power Down: %s\n",
1523                    yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1524         if (INTEL_GEN(dev_priv) >= 9) {
1525                 seq_printf(m, "Render Power Well: %s\n",
1526                         (gen9_powergate_status &
1527                          GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1528                 seq_printf(m, "Media Power Well: %s\n",
1529                         (gen9_powergate_status &
1530                          GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1531         }
1532
1533         /* Not exactly sure what this is */
1534         print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1535                       GEN6_GT_GFX_RC6_LOCKED);
1536         print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1537         print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1538         print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1539
1540         if (INTEL_GEN(dev_priv) <= 7) {
1541                 seq_printf(m, "RC6   voltage: %dmV\n",
1542                            GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1543                 seq_printf(m, "RC6+  voltage: %dmV\n",
1544                            GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1545                 seq_printf(m, "RC6++ voltage: %dmV\n",
1546                            GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1547         }
1548
1549         return i915_forcewake_domains(m, NULL);
1550 }
1551
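/*
 * Top-level DRPC entry point: take a runtime PM wakeref and dispatch to
 * the VLV/CHV, gen6+ or Ironlake variant above.
 */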
1552 static int i915_drpc_info(struct seq_file *m, void *unused)
1553 {
1554         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1555         intel_wakeref_t wakeref;
1556         int err = -ENODEV;
1557
1558         with_intel_runtime_pm(dev_priv, wakeref) {
1559                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1560                         err = vlv_drpc_info(m);
1561                 else if (INTEL_GEN(dev_priv) >= 6)
1562                         err = gen6_drpc_info(m);
1563                 else
1564                         err = ironlake_drpc_info(m);
1565         }
1566
1567         return err;
1568 }
1569
1570 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1571 {
1572         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1573
1574         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1575                    dev_priv->fb_tracking.busy_bits);
1576
1577         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1578                    dev_priv->fb_tracking.flip_bits);
1579
1580         return 0;
1581 }
1582
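/*
 * FBC status: report whether FBC is active (or the reason it is not) and,
 * while active, whether the hardware is currently compressing, using the
 * generation-specific compression status register.
 */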
1583 static int i915_fbc_status(struct seq_file *m, void *unused)
1584 {
1585         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1586         struct intel_fbc *fbc = &dev_priv->fbc;
1587         intel_wakeref_t wakeref;
1588
1589         if (!HAS_FBC(dev_priv))
1590                 return -ENODEV;
1591
1592         wakeref = intel_runtime_pm_get(dev_priv);
1593         mutex_lock(&fbc->lock);
1594
1595         if (intel_fbc_is_active(dev_priv))
1596                 seq_puts(m, "FBC enabled\n");
1597         else
1598                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1599
1600         if (intel_fbc_is_active(dev_priv)) {
1601                 u32 mask;
1602
1603                 if (INTEL_GEN(dev_priv) >= 8)
1604                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1605                 else if (INTEL_GEN(dev_priv) >= 7)
1606                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1607                 else if (INTEL_GEN(dev_priv) >= 5)
1608                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1609                 else if (IS_G4X(dev_priv))
1610                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1611                 else
1612                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1613                                                         FBC_STAT_COMPRESSED);
1614
1615                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1616         }
1617
1618         mutex_unlock(&fbc->lock);
1619         intel_runtime_pm_put(dev_priv, wakeref);
1620
1621         return 0;
1622 }
1623
1624 static int i915_fbc_false_color_get(void *data, u64 *val)
1625 {
1626         struct drm_i915_private *dev_priv = data;
1627
1628         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1629                 return -ENODEV;
1630
1631         *val = dev_priv->fbc.false_color;
1632
1633         return 0;
1634 }
1635
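/*
 * Setter for the FBC false-color debug knob: toggle FBC_CTL_FALSE_COLOR
 * in ILK_DPFC_CONTROL under fbc->lock.
 */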
1636 static int i915_fbc_false_color_set(void *data, u64 val)
1637 {
1638         struct drm_i915_private *dev_priv = data;
1639         u32 reg;
1640
1641         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1642                 return -ENODEV;
1643
1644         mutex_lock(&dev_priv->fbc.lock);
1645
1646         reg = I915_READ(ILK_DPFC_CONTROL);
1647         dev_priv->fbc.false_color = val;
1648
1649         I915_WRITE(ILK_DPFC_CONTROL, val ?
1650                    (reg | FBC_CTL_FALSE_COLOR) :
1651                    (reg & ~FBC_CTL_FALSE_COLOR));
1652
1653         mutex_unlock(&dev_priv->fbc.lock);
1654         return 0;
1655 }
1656
1657 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1658                         i915_fbc_false_color_get, i915_fbc_false_color_set,
1659                         "%llu\n");
1660
1661 static int i915_ips_status(struct seq_file *m, void *unused)
1662 {
1663         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1664         intel_wakeref_t wakeref;
1665
1666         if (!HAS_IPS(dev_priv))
1667                 return -ENODEV;
1668
1669         wakeref = intel_runtime_pm_get(dev_priv);
1670
1671         seq_printf(m, "Enabled by kernel parameter: %s\n",
1672                    yesno(i915_modparams.enable_ips));
1673
1674         if (INTEL_GEN(dev_priv) >= 8) {
1675                 seq_puts(m, "Currently: unknown\n");
1676         } else {
1677                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1678                         seq_puts(m, "Currently: enabled\n");
1679                 else
1680                         seq_puts(m, "Currently: disabled\n");
1681         }
1682
1683         intel_runtime_pm_put(dev_priv, wakeref);
1684
1685         return 0;
1686 }
1687
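/*
 * Self-refresh status: sample the platform-specific self-refresh enable
 * bit (gen9+ has no global SR status; see the per-plane watermarks
 * instead) while holding a POWER_DOMAIN_INIT reference.
 */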
1688 static int i915_sr_status(struct seq_file *m, void *unused)
1689 {
1690         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1691         intel_wakeref_t wakeref;
1692         bool sr_enabled = false;
1693
1694         wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1695
1696         if (INTEL_GEN(dev_priv) >= 9)
1697                 /* no global SR status; inspect per-plane WM */;
1698         else if (HAS_PCH_SPLIT(dev_priv))
1699                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1700         else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1701                  IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1702                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1703         else if (IS_I915GM(dev_priv))
1704                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1705         else if (IS_PINEVIEW(dev_priv))
1706                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1707         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1708                 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1709
1710         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1711
1712         seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1713
1714         return 0;
1715 }
1716
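/*
 * Gen5 (Ironlake) EMON status: report the GMCH temperature and the
 * chipset/GFX power estimates, plus their sum as total power.
 */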
1717 static int i915_emon_status(struct seq_file *m, void *unused)
1718 {
1719         struct drm_i915_private *i915 = node_to_i915(m->private);
1720         intel_wakeref_t wakeref;
1721
1722         if (!IS_GEN(i915, 5))
1723                 return -ENODEV;
1724
1725         with_intel_runtime_pm(i915, wakeref) {
1726                 unsigned long temp, chipset, gfx;
1727
1728                 temp = i915_mch_val(i915);
1729                 chipset = i915_chipset_val(i915);
1730                 gfx = i915_gfx_val(i915);
1731
1732                 seq_printf(m, "GMCH temp: %ld\n", temp);
1733                 seq_printf(m, "Chipset power: %ld\n", chipset);
1734                 seq_printf(m, "GFX power: %ld\n", gfx);
1735                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1736         }
1737
1738         return 0;
1739 }
1740
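/*
 * Ring/IA frequency table (LLC platforms only): for every GPU frequency
 * between the RPS min and max, ask pcode for the matching effective CPU
 * and ring frequencies and print them as a three-column table.
 */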
1741 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1742 {
1743         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1744         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1745         unsigned int max_gpu_freq, min_gpu_freq;
1746         intel_wakeref_t wakeref;
1747         int gpu_freq, ia_freq;
1748         int ret;
1749
1750         if (!HAS_LLC(dev_priv))
1751                 return -ENODEV;
1752
1753         wakeref = intel_runtime_pm_get(dev_priv);
1754
1755         ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1756         if (ret)
1757                 goto out;
1758
1759         min_gpu_freq = rps->min_freq;
1760         max_gpu_freq = rps->max_freq;
1761         if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1762                 /* Convert GT frequency to 50 MHz units */
1763                 min_gpu_freq /= GEN9_FREQ_SCALER;
1764                 max_gpu_freq /= GEN9_FREQ_SCALER;
1765         }
1766
1767         seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1768
1769         for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1770                 ia_freq = gpu_freq;
1771                 sandybridge_pcode_read(dev_priv,
1772                                        GEN6_PCODE_READ_MIN_FREQ_TABLE,
1773                                        &ia_freq);
1774                 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1775                            intel_gpu_freq(dev_priv, (gpu_freq *
1776                                                      (IS_GEN9_BC(dev_priv) ||
1777                                                       INTEL_GEN(dev_priv) >= 10 ?
1778                                                       GEN9_FREQ_SCALER : 1))),
1779                            ((ia_freq >> 0) & 0xff) * 100,
1780                            ((ia_freq >> 8) & 0xff) * 100);
1781         }
1782
1783         mutex_unlock(&dev_priv->pcu_lock);
1784
1785 out:
1786         intel_runtime_pm_put(dev_priv, wakeref);
1787         return ret;
1788 }
1789
1790 static int i915_opregion(struct seq_file *m, void *unused)
1791 {
1792         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1793         struct drm_device *dev = &dev_priv->drm;
1794         struct intel_opregion *opregion = &dev_priv->opregion;
1795         int ret;
1796
1797         ret = mutex_lock_interruptible(&dev->struct_mutex);
1798         if (ret)
1799                 goto out;
1800
1801         if (opregion->header)
1802                 seq_write(m, opregion->header, OPREGION_SIZE);
1803
1804         mutex_unlock(&dev->struct_mutex);
1805
1806 out:
1807         return 0;
1808 }
1809
1810 static int i915_vbt(struct seq_file *m, void *unused)
1811 {
1812         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1813
1814         if (opregion->vbt)
1815                 seq_write(m, opregion->vbt, opregion->vbt_size);
1816
1817         return 0;
1818 }
1819
1820 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1821 {
1822         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1823         struct drm_device *dev = &dev_priv->drm;
1824         struct intel_framebuffer *fbdev_fb = NULL;
1825         struct drm_framebuffer *drm_fb;
1826         int ret;
1827
1828         ret = mutex_lock_interruptible(&dev->struct_mutex);
1829         if (ret)
1830                 return ret;
1831
1832 #ifdef CONFIG_DRM_FBDEV_EMULATION
1833         if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1834                 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1835
1836                 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1837                            fbdev_fb->base.width,
1838                            fbdev_fb->base.height,
1839                            fbdev_fb->base.format->depth,
1840                            fbdev_fb->base.format->cpp[0] * 8,
1841                            fbdev_fb->base.modifier,
1842                            drm_framebuffer_read_refcount(&fbdev_fb->base));
1843                 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1844                 seq_putc(m, '\n');
1845         }
1846 #endif
1847
1848         mutex_lock(&dev->mode_config.fb_lock);
1849         drm_for_each_fb(drm_fb, dev) {
1850                 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1851                 if (fb == fbdev_fb)
1852                         continue;
1853
1854                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1855                            fb->base.width,
1856                            fb->base.height,
1857                            fb->base.format->depth,
1858                            fb->base.format->cpp[0] * 8,
1859                            fb->base.modifier,
1860                            drm_framebuffer_read_refcount(&fb->base));
1861                 describe_obj(m, intel_fb_obj(&fb->base));
1862                 seq_putc(m, '\n');
1863         }
1864         mutex_unlock(&dev->mode_config.fb_lock);
1865         mutex_unlock(&dev->struct_mutex);
1866
1867         return 0;
1868 }
1869
1870 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1871 {
1872         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1873                    ring->space, ring->head, ring->tail, ring->emit);
1874 }
1875
1876 static int i915_context_status(struct seq_file *m, void *unused)
1877 {
1878         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1879         struct drm_device *dev = &dev_priv->drm;
1880         struct i915_gem_context *ctx;
1881         int ret;
1882
1883         ret = mutex_lock_interruptible(&dev->struct_mutex);
1884         if (ret)
1885                 return ret;
1886
1887         list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1888                 struct intel_context *ce;
1889
1890                 seq_puts(m, "HW context ");
1891                 if (!list_empty(&ctx->hw_id_link))
1892                         seq_printf(m, "%x [pin %u]", ctx->hw_id,
1893                                    atomic_read(&ctx->hw_id_pin_count));
1894                 if (ctx->pid) {
1895                         struct task_struct *task;
1896
1897                         task = get_pid_task(ctx->pid, PIDTYPE_PID);
1898                         if (task) {
1899                                 seq_printf(m, "(%s [%d]) ",
1900                                            task->comm, task->pid);
1901                                 put_task_struct(task);
1902                         }
1903                 } else if (IS_ERR(ctx->file_priv)) {
1904                         seq_puts(m, "(deleted) ");
1905                 } else {
1906                         seq_puts(m, "(kernel) ");
1907                 }
1908
1909                 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1910                 seq_putc(m, '\n');
1911
1912                 list_for_each_entry(ce, &ctx->active_engines, active_link) {
1913                         seq_printf(m, "%s: ", ce->engine->name);
1914                         if (ce->state)
1915                                 describe_obj(m, ce->state->obj);
1916                         if (ce->ring)
1917                                 describe_ctx_ring(m, ce->ring);
1918                         seq_putc(m, '\n');
1919                 }
1920
1921                 seq_putc(m, '\n');
1922         }
1923
1924         mutex_unlock(&dev->struct_mutex);
1925
1926         return 0;
1927 }
1928
1929 static const char *swizzle_string(unsigned swizzle)
1930 {
1931         switch (swizzle) {
1932         case I915_BIT_6_SWIZZLE_NONE:
1933                 return "none";
1934         case I915_BIT_6_SWIZZLE_9:
1935                 return "bit9";
1936         case I915_BIT_6_SWIZZLE_9_10:
1937                 return "bit9/bit10";
1938         case I915_BIT_6_SWIZZLE_9_11:
1939                 return "bit9/bit11";
1940         case I915_BIT_6_SWIZZLE_9_10_11:
1941                 return "bit9/bit10/bit11";
1942         case I915_BIT_6_SWIZZLE_9_17:
1943                 return "bit9/bit17";
1944         case I915_BIT_6_SWIZZLE_9_10_17:
1945                 return "bit9/bit10/bit17";
1946         case I915_BIT_6_SWIZZLE_UNKNOWN:
1947                 return "unknown";
1948         }
1949
1950         return "bug";
1951 }
1952
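/*
 * Report the bit-6 swizzle mode used for X and Y tiling alongside the
 * DRAM/arbiter configuration registers, and note when the L-shaped memory
 * quirk (QUIRK_PIN_SWIZZLED_PAGES) is active.
 */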
1953 static int i915_swizzle_info(struct seq_file *m, void *data)
1954 {
1955         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1956         intel_wakeref_t wakeref;
1957
1958         wakeref = intel_runtime_pm_get(dev_priv);
1959
1960         seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1961                    swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1962         seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1963                    swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1964
1965         if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1966                 seq_printf(m, "DCC = 0x%08x\n",
1967                            I915_READ(DCC));
1968                 seq_printf(m, "DCC2 = 0x%08x\n",
1969                            I915_READ(DCC2));
1970                 seq_printf(m, "C0DRB3 = 0x%04x\n",
1971                            I915_READ16(C0DRB3));
1972                 seq_printf(m, "C1DRB3 = 0x%04x\n",
1973                            I915_READ16(C1DRB3));
1974         } else if (INTEL_GEN(dev_priv) >= 6) {
1975                 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1976                            I915_READ(MAD_DIMM_C0));
1977                 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1978                            I915_READ(MAD_DIMM_C1));
1979                 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1980                            I915_READ(MAD_DIMM_C2));
1981                 seq_printf(m, "TILECTL = 0x%08x\n",
1982                            I915_READ(TILECTL));
1983                 if (INTEL_GEN(dev_priv) >= 8)
1984                         seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1985                                    I915_READ(GAMTARBMODE));
1986                 else
1987                         seq_printf(m, "ARB_MODE = 0x%08x\n",
1988                                    I915_READ(ARB_MODE));
1989                 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1990                            I915_READ(DISP_ARB_CTL));
1991         }
1992
1993         if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1994                 seq_puts(m, "L-shaped memory detected\n");
1995
1996         intel_runtime_pm_put(dev_priv, wakeref);
1997
1998         return 0;
1999 }
2000
2001 static const char *rps_power_to_str(unsigned int power)
2002 {
2003         static const char * const strings[] = {
2004                 [LOW_POWER] = "low power",
2005                 [BETWEEN] = "mixed",
2006                 [HIGH_POWER] = "high power",
2007         };
2008
2009         if (power >= ARRAY_SIZE(strings) || !strings[power])
2010                 return "unknown";
2011
2012         return strings[power];
2013 }
2014
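/*
 * RPS boost report: requested vs actual frequency (the actual value comes
 * from the punit on VLV/CHV and from GEN6_RPSTAT1 elsewhere), the
 * soft/hard limits, outstanding boosts and, while the GPU is busy, the
 * up/down averages driving the RPS autotuning window.
 */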
2015 static int i915_rps_boost_info(struct seq_file *m, void *data)
2016 {
2017         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2018         struct intel_rps *rps = &dev_priv->gt_pm.rps;
2019         u32 act_freq = rps->cur_freq;
2020         intel_wakeref_t wakeref;
2021
2022         with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
2023                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2024                         mutex_lock(&dev_priv->pcu_lock);
2025                         act_freq = vlv_punit_read(dev_priv,
2026                                                   PUNIT_REG_GPU_FREQ_STS);
2027                         act_freq = (act_freq >> 8) & 0xff;
2028                         mutex_unlock(&dev_priv->pcu_lock);
2029                 } else {
2030                         act_freq = intel_get_cagf(dev_priv,
2031                                                   I915_READ(GEN6_RPSTAT1));
2032                 }
2033         }
2034
2035         seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2036         seq_printf(m, "GPU busy? %s [%d requests]\n",
2037                    yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2038         seq_printf(m, "Boosts outstanding? %d\n",
2039                    atomic_read(&rps->num_waiters));
2040         seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2041         seq_printf(m, "Frequency requested %d, actual %d\n",
2042                    intel_gpu_freq(dev_priv, rps->cur_freq),
2043                    intel_gpu_freq(dev_priv, act_freq));
2044         seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2045                    intel_gpu_freq(dev_priv, rps->min_freq),
2046                    intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2047                    intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2048                    intel_gpu_freq(dev_priv, rps->max_freq));
2049         seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2050                    intel_gpu_freq(dev_priv, rps->idle_freq),
2051                    intel_gpu_freq(dev_priv, rps->efficient_freq),
2052                    intel_gpu_freq(dev_priv, rps->boost_freq));
2053
2054         seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
2055
2056         if (INTEL_GEN(dev_priv) >= 6 &&
2057             rps->enabled &&
2058             dev_priv->gt.active_requests) {
2059                 u32 rpup, rpupei;
2060                 u32 rpdown, rpdownei;
2061
2062                 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2063                 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2064                 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2065                 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2066                 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2067                 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2068
2069                 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2070                            rps_power_to_str(rps->power.mode));
2071                 seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2072                            rpup && rpupei ? 100 * rpup / rpupei : 0,
2073                            rps->power.up_threshold);
2074                 seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2075                            rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2076                            rps->power.down_threshold);
2077         } else {
2078                 seq_puts(m, "\nRPS Autotuning inactive\n");
2079         }
2080
2081         return 0;
2082 }
2083
2084 static int i915_llc(struct seq_file *m, void *data)
2085 {
2086         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2087         const bool edram = INTEL_GEN(dev_priv) > 8;
2088
2089         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2090         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2091                    intel_uncore_edram_size(dev_priv) / 1024 / 1024);
2092
2093         return 0;
2094 }
2095
2096 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2097 {
2098         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2099         intel_wakeref_t wakeref;
2100         struct drm_printer p;
2101
2102         if (!HAS_HUC(dev_priv))
2103                 return -ENODEV;
2104
2105         p = drm_seq_file_printer(m);
2106         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2107
2108         with_intel_runtime_pm(dev_priv, wakeref)
2109                 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2110
2111         return 0;
2112 }
2113
2114 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2115 {
2116         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2117         intel_wakeref_t wakeref;
2118         struct drm_printer p;
2119
2120         if (!HAS_GUC(dev_priv))
2121                 return -ENODEV;
2122
2123         p = drm_seq_file_printer(m);
2124         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2125
2126         with_intel_runtime_pm(dev_priv, wakeref) {
2127                 u32 tmp = I915_READ(GUC_STATUS);
2128                 u32 i;
2129
2130                 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2131                 seq_printf(m, "\tBootrom status = 0x%x\n",
2132                            (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2133                 seq_printf(m, "\tuKernel status = 0x%x\n",
2134                            (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2135                 seq_printf(m, "\tMIA Core status = 0x%x\n",
2136                            (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2137                 seq_puts(m, "\nScratch registers:\n");
2138                 for (i = 0; i < 16; i++) {
2139                         seq_printf(m, "\t%2d: \t0x%x\n",
2140                                    i, I915_READ(SOFT_SCRATCH(i)));
2141                 }
2142         }
2143
2144         return 0;
2145 }
2146
2147 static const char *
2148 stringify_guc_log_type(enum guc_log_buffer_type type)
2149 {
2150         switch (type) {
2151         case GUC_ISR_LOG_BUFFER:
2152                 return "ISR";
2153         case GUC_DPC_LOG_BUFFER:
2154                 return "DPC";
2155         case GUC_CRASH_DUMP_LOG_BUFFER:
2156                 return "CRASH";
2157         default:
2158                 MISSING_CASE(type);
2159         }
2160
2161         return "";
2162 }
2163
2164 static void i915_guc_log_info(struct seq_file *m,
2165                               struct drm_i915_private *dev_priv)
2166 {
2167         struct intel_guc_log *log = &dev_priv->guc.log;
2168         enum guc_log_buffer_type type;
2169
2170         if (!intel_guc_log_relay_enabled(log)) {
2171                 seq_puts(m, "GuC log relay disabled\n");
2172                 return;
2173         }
2174
2175         seq_puts(m, "GuC logging stats:\n");
2176
2177         seq_printf(m, "\tRelay full count: %u\n",
2178                    log->relay.full_count);
2179
2180         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2181                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2182                            stringify_guc_log_type(type),
2183                            log->stats[type].flush,
2184                            log->stats[type].sampled_overflow);
2185         }
2186 }
2187
2188 static void i915_guc_client_info(struct seq_file *m,
2189                                  struct drm_i915_private *dev_priv,
2190                                  struct intel_guc_client *client)
2191 {
2192         struct intel_engine_cs *engine;
2193         enum intel_engine_id id;
2194         u64 tot = 0;
2195
2196         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2197                 client->priority, client->stage_id, client->proc_desc_offset);
2198         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2199                 client->doorbell_id, client->doorbell_offset);
2200
2201         for_each_engine(engine, dev_priv, id) {
2202                 u64 submissions = client->submissions[id];
2203                 tot += submissions;
2204                 seq_printf(m, "\tSubmissions: %llu %s\n",
2205                                 submissions, engine->name);
2206         }
2207         seq_printf(m, "\tTotal: %llu\n", tot);
2208 }
2209
2210 static int i915_guc_info(struct seq_file *m, void *data)
2211 {
2212         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2213         const struct intel_guc *guc = &dev_priv->guc;
2214
2215         if (!USES_GUC(dev_priv))
2216                 return -ENODEV;
2217
2218         i915_guc_log_info(m, dev_priv);
2219
2220         if (!USES_GUC_SUBMISSION(dev_priv))
2221                 return 0;
2222
2223         GEM_BUG_ON(!guc->execbuf_client);
2224
2225         seq_puts(m, "\nDoorbell map:\n");
2226         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2227         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2228
2229         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2230         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2231         if (guc->preempt_client) {
2232                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2233                            guc->preempt_client);
2234                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2235         }
2236
2237         /* Add more as required ... */
2238
2239         return 0;
2240 }
2241
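/*
 * Dump every active GuC stage descriptor in the pool, followed by the
 * execlist context (LRC) details for each engine used by the execbuf
 * client.
 */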
2242 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2243 {
2244         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2245         const struct intel_guc *guc = &dev_priv->guc;
2246         struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2247         struct intel_guc_client *client = guc->execbuf_client;
2248         unsigned int tmp;
2249         int index;
2250
2251         if (!USES_GUC_SUBMISSION(dev_priv))
2252                 return -ENODEV;
2253
2254         for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2255                 struct intel_engine_cs *engine;
2256
2257                 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2258                         continue;
2259
2260                 seq_printf(m, "GuC stage descriptor %u:\n", index);
2261                 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2262                 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2263                 seq_printf(m, "\tPriority: %d\n", desc->priority);
2264                 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2265                 seq_printf(m, "\tEngines used: 0x%x\n",
2266                            desc->engines_used);
2267                 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2268                            desc->db_trigger_phy,
2269                            desc->db_trigger_cpu,
2270                            desc->db_trigger_uk);
2271                 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2272                            desc->process_desc);
2273                 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2274                            desc->wq_addr, desc->wq_size);
2275                 seq_putc(m, '\n');
2276
2277                 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2278                         u32 guc_engine_id = engine->guc_id;
2279                         struct guc_execlist_context *lrc =
2280                                                 &desc->lrc[guc_engine_id];
2281
2282                         seq_printf(m, "\t%s LRC:\n", engine->name);
2283                         seq_printf(m, "\t\tContext desc: 0x%x\n",
2284                                    lrc->context_desc);
2285                         seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2286                         seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2287                         seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2288                         seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2289                         seq_putc(m, '\n');
2290                 }
2291         }
2292
2293         return 0;
2294 }
2295
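/*
 * Raw hex dump of the GuC log buffer (or of the load-error log when the
 * node's data pointer is set), pinned with a WC mapping and printed four
 * words per line.
 */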
2296 static int i915_guc_log_dump(struct seq_file *m, void *data)
2297 {
2298         struct drm_info_node *node = m->private;
2299         struct drm_i915_private *dev_priv = node_to_i915(node);
2300         bool dump_load_err = !!node->info_ent->data;
2301         struct drm_i915_gem_object *obj = NULL;
2302         u32 *log;
2303         int i = 0;
2304
2305         if (!HAS_GUC(dev_priv))
2306                 return -ENODEV;
2307
2308         if (dump_load_err)
2309                 obj = dev_priv->guc.load_err_log;
2310         else if (dev_priv->guc.log.vma)
2311                 obj = dev_priv->guc.log.vma->obj;
2312
2313         if (!obj)
2314                 return 0;
2315
2316         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2317         if (IS_ERR(log)) {
2318                 DRM_DEBUG("Failed to pin object\n");
2319                 seq_puts(m, "(log data inaccessible)\n");
2320                 return PTR_ERR(log);
2321         }
2322
2323         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2324                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2325                            *(log + i), *(log + i + 1),
2326                            *(log + i + 2), *(log + i + 3));
2327
2328         seq_putc(m, '\n');
2329
2330         i915_gem_object_unpin_map(obj);
2331
2332         return 0;
2333 }
2334
2335 static int i915_guc_log_level_get(void *data, u64 *val)
2336 {
2337         struct drm_i915_private *dev_priv = data;
2338
2339         if (!USES_GUC(dev_priv))
2340                 return -ENODEV;
2341
2342         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2343
2344         return 0;
2345 }
2346
2347 static int i915_guc_log_level_set(void *data, u64 val)
2348 {
2349         struct drm_i915_private *dev_priv = data;
2350
2351         if (!USES_GUC(dev_priv))
2352                 return -ENODEV;
2353
2354         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2355 }
2356
2357 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2358                         i915_guc_log_level_get, i915_guc_log_level_set,
2359                         "%lld\n");
2360
2361 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2362 {
2363         struct drm_i915_private *dev_priv = inode->i_private;
2364
2365         if (!USES_GUC(dev_priv))
2366                 return -ENODEV;
2367
2368         file->private_data = &dev_priv->guc.log;
2369
2370         return intel_guc_log_relay_open(&dev_priv->guc.log);
2371 }
2372
2373 static ssize_t
2374 i915_guc_log_relay_write(struct file *filp,
2375                          const char __user *ubuf,
2376                          size_t cnt,
2377                          loff_t *ppos)
2378 {
2379         struct intel_guc_log *log = filp->private_data;
2380
2381         intel_guc_log_relay_flush(log);
2382
2383         return cnt;
2384 }
2385
2386 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2387 {
2388         struct drm_i915_private *dev_priv = inode->i_private;
2389
2390         intel_guc_log_relay_close(&dev_priv->guc.log);
2391
2392         return 0;
2393 }
2394
2395 static const struct file_operations i915_guc_log_relay_fops = {
2396         .owner = THIS_MODULE,
2397         .open = i915_guc_log_relay_open,
2398         .write = i915_guc_log_relay_write,
2399         .release = i915_guc_log_relay_release,
2400 };
2401
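/*
 * Per-connector PSR sink status: read DP_PSR_STATUS from the sink's DPCD
 * and translate the state field into a human-readable string.
 */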
2402 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2403 {
2404         u8 val;
2405         static const char * const sink_status[] = {
2406                 "inactive",
2407                 "transition to active, capture and display",
2408                 "active, display from RFB",
2409                 "active, capture and display on sink device timings",
2410                 "transition to inactive, capture and display, timing re-sync",
2411                 "reserved",
2412                 "reserved",
2413                 "sink internal error",
2414         };
2415         struct drm_connector *connector = m->private;
2416         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2417         struct intel_dp *intel_dp =
2418                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2419         int ret;
2420
2421         if (!CAN_PSR(dev_priv)) {
2422                 seq_puts(m, "PSR Unsupported\n");
2423                 return -ENODEV;
2424         }
2425
2426         if (connector->status != connector_status_connected)
2427                 return -ENODEV;
2428
2429         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2430
2431         if (ret == 1) {
2432                 const char *str = "unknown";
2433
2434                 val &= DP_PSR_SINK_STATE_MASK;
2435                 if (val < ARRAY_SIZE(sink_status))
2436                         str = sink_status[val];
2437                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2438         } else {
2439                 return ret;
2440         }
2441
2442         return 0;
2443 }
2444 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2445
2446 static void
2447 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2448 {
2449         u32 val, status_val;
2450         const char *status = "unknown";
2451
2452         if (dev_priv->psr.psr2_enabled) {
2453                 static const char * const live_status[] = {
2454                         "IDLE",
2455                         "CAPTURE",
2456                         "CAPTURE_FS",
2457                         "SLEEP",
2458                         "BUFON_FW",
2459                         "ML_UP",
2460                         "SU_STANDBY",
2461                         "FAST_SLEEP",
2462                         "DEEP_SLEEP",
2463                         "BUF_ON",
2464                         "TG_ON"
2465                 };
2466                 val = I915_READ(EDP_PSR2_STATUS);
2467                 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2468                               EDP_PSR2_STATUS_STATE_SHIFT;
2469                 if (status_val < ARRAY_SIZE(live_status))
2470                         status = live_status[status_val];
2471         } else {
2472                 static const char * const live_status[] = {
2473                         "IDLE",
2474                         "SRDONACK",
2475                         "SRDENT",
2476                         "BUFOFF",
2477                         "BUFON",
2478                         "AUXACK",
2479                         "SRDOFFACK",
2480                         "SRDENT_ON",
2481                 };
2482                 val = I915_READ(EDP_PSR_STATUS);
2483                 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2484                               EDP_PSR_STATUS_STATE_SHIFT;
2485                 if (status_val < ARRAY_SIZE(live_status))
2486                         status = live_status[status_val];
2487         }
2488
2489         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2490 }
2491
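/*
 * Source-side PSR status: sink support, PSR1 vs PSR2 mode, the live
 * hardware state, busy frontbuffer bits, the HSW/BDW performance counter
 * and, for PSR2, the per-frame selective-update block counts.
 */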
2492 static int i915_edp_psr_status(struct seq_file *m, void *data)
2493 {
2494         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2495         struct i915_psr *psr = &dev_priv->psr;
2496         intel_wakeref_t wakeref;
2497         const char *status;
2498         bool enabled;
2499         u32 val;
2500
2501         if (!HAS_PSR(dev_priv))
2502                 return -ENODEV;
2503
2504         seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2505         if (psr->dp)
2506                 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2507         seq_puts(m, "\n");
2508
2509         if (!psr->sink_support)
2510                 return 0;
2511
2512         wakeref = intel_runtime_pm_get(dev_priv);
2513         mutex_lock(&psr->lock);
2514
2515         if (psr->enabled)
2516                 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2517         else
2518                 status = "disabled";
2519         seq_printf(m, "PSR mode: %s\n", status);
2520
2521         if (!psr->enabled)
2522                 goto unlock;
2523
2524         if (psr->psr2_enabled) {
2525                 val = I915_READ(EDP_PSR2_CTL);
2526                 enabled = val & EDP_PSR2_ENABLE;
2527         } else {
2528                 val = I915_READ(EDP_PSR_CTL);
2529                 enabled = val & EDP_PSR_ENABLE;
2530         }
2531         seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2532                    enableddisabled(enabled), val);
2533         psr_source_status(dev_priv, m);
2534         seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2535                    psr->busy_frontbuffer_bits);
2536
2537         /*
2538          * SKL+ Perf counter is reset to 0 every time DC state is entered
2539          */
2540         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2541                 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2542                 seq_printf(m, "Performance counter: %u\n", val);
2543         }
2544
2545         if (psr->debug & I915_PSR_DEBUG_IRQ) {
2546                 seq_printf(m, "Last attempted entry at: %lld\n",
2547                            psr->last_entry_attempt);
2548                 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2549         }
2550
2551         if (psr->psr2_enabled) {
2552                 u32 su_frames_val[3];
2553                 int frame;
2554
2555                 /*
2556                  * Reading all 3 registers beforehand to minimize crossing a
2557                  * frame boundary between register reads
2558                  */
2559                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2560                         su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2561
2562                 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2563
2564                 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2565                         u32 su_blocks;
2566
2567                         su_blocks = su_frames_val[frame / 3] &
2568                                     PSR2_SU_STATUS_MASK(frame);
2569                         su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2570                         seq_printf(m, "%d\t%d\n", frame, su_blocks);
2571                 }
2572         }
2573
2574 unlock:
2575         mutex_unlock(&psr->lock);
2576         intel_runtime_pm_put(dev_priv, wakeref);
2577
2578         return 0;
2579 }
2580
2581 static int
2582 i915_edp_psr_debug_set(void *data, u64 val)
2583 {
2584         struct drm_i915_private *dev_priv = data;
2585         intel_wakeref_t wakeref;
2586         int ret;
2587
2588         if (!CAN_PSR(dev_priv))
2589                 return -ENODEV;
2590
2591         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2592
2593         wakeref = intel_runtime_pm_get(dev_priv);
2594
2595         ret = intel_psr_debug_set(dev_priv, val);
2596
2597         intel_runtime_pm_put(dev_priv, wakeref);
2598
2599         return ret;
2600 }
2601
2602 static int
2603 i915_edp_psr_debug_get(void *data, u64 *val)
2604 {
2605         struct drm_i915_private *dev_priv = data;
2606
2607         if (!CAN_PSR(dev_priv))
2608                 return -ENODEV;
2609
2610         *val = READ_ONCE(dev_priv->psr.debug);
2611         return 0;
2612 }
2613
2614 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2615                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2616                         "%llu\n");
2617
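/*
 * Report energy consumed in microjoules: MCH_SECP_NRG_STTS scaled by the
 * energy-status unit taken from MSR_RAPL_POWER_UNIT (bits 12:8).
 */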
2618 static int i915_energy_uJ(struct seq_file *m, void *data)
2619 {
2620         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2621         unsigned long long power;
2622         intel_wakeref_t wakeref;
2623         u32 units;
2624
2625         if (INTEL_GEN(dev_priv) < 6)
2626                 return -ENODEV;
2627
2628         if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2629                 return -ENODEV;
2630
2631         units = (power & 0x1f00) >> 8;
2632         with_intel_runtime_pm(dev_priv, wakeref)
2633                 power = I915_READ(MCH_SECP_NRG_STTS);
2634
2635         power = (1000000 * power) >> units; /* convert to uJ */
2636         seq_printf(m, "%llu", power);
2637
2638         return 0;
2639 }
2640
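/*
 * Runtime PM status: the current runtime power state, GPU idleness, IRQ
 * state, the device PM usage count (when CONFIG_PM is set) and the PCI
 * power state; the wakeref tracker is dumped when
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled.
 */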
2641 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2642 {
2643         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2644         struct pci_dev *pdev = dev_priv->drm.pdev;
2645
2646         if (!HAS_RUNTIME_PM(dev_priv))
2647                 seq_puts(m, "Runtime power management not supported\n");
2648
2649         seq_printf(m, "Runtime power status: %s\n",
2650                    enableddisabled(!dev_priv->power_domains.wakeref));
2651
2652         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2653         seq_printf(m, "IRQs disabled: %s\n",
2654                    yesno(!intel_irqs_enabled(dev_priv)));
2655 #ifdef CONFIG_PM
2656         seq_printf(m, "Usage count: %d\n",
2657                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2658 #else
2659         seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2660 #endif
2661         seq_printf(m, "PCI device power state: %s [%d]\n",
2662                    pci_power_name(pdev->current_state),
2663                    pdev->current_state);
2664
2665         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2666                 struct drm_printer p = drm_seq_file_printer(m);
2667
2668                 print_intel_runtime_pm_wakeref(dev_priv, &p);
2669         }
2670
2671         return 0;
2672 }
2673
2674 static int i915_power_domain_info(struct seq_file *m, void *unused)
2675 {
2676         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2677         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2678         int i;
2679
2680         mutex_lock(&power_domains->lock);
2681
2682         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2683         for (i = 0; i < power_domains->power_well_count; i++) {
2684                 struct i915_power_well *power_well;
2685                 enum intel_display_power_domain power_domain;
2686
2687                 power_well = &power_domains->power_wells[i];
2688                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2689                            power_well->count);
2690
2691                 for_each_power_domain(power_domain, power_well->desc->domains)
2692                         seq_printf(m, "  %-23s %d\n",
2693                                  intel_display_power_domain_str(power_domain),
2694                                  power_domains->domain_use_count[power_domain]);
2695         }
2696
2697         mutex_unlock(&power_domains->lock);
2698
2699         return 0;
2700 }
2701
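/*
 * Report DMC/CSR firmware status: whether the firmware is loaded, its
 * path and version, the DC state transition counters and a few of the
 * firmware related registers.
 */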
2702 static int i915_dmc_info(struct seq_file *m, void *unused)
2703 {
2704         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2705         intel_wakeref_t wakeref;
2706         struct intel_csr *csr;
2707
2708         if (!HAS_CSR(dev_priv))
2709                 return -ENODEV;
2710
2711         csr = &dev_priv->csr;
2712
2713         wakeref = intel_runtime_pm_get(dev_priv);
2714
2715         seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2716         seq_printf(m, "path: %s\n", csr->fw_path);
2717
2718         if (!csr->dmc_payload)
2719                 goto out;
2720
2721         seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2722                    CSR_VERSION_MINOR(csr->version));
2723
2724         if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2725                 goto out;
2726
2727         seq_printf(m, "DC3 -> DC5 count: %d\n",
2728                    I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2729                                                     SKL_CSR_DC3_DC5_COUNT));
2730         if (!IS_GEN9_LP(dev_priv))
2731                 seq_printf(m, "DC5 -> DC6 count: %d\n",
2732                            I915_READ(SKL_CSR_DC5_DC6_COUNT));
2733
2734 out:
2735         seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2736         seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2737         seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2738
2739         intel_runtime_pm_put(dev_priv, wakeref);
2740
2741         return 0;
2742 }
2743
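/* Print a display mode on a single line, indented by 'tabs' tab stops. */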
2744 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2745                                  struct drm_display_mode *mode)
2746 {
2747         int i;
2748
2749         for (i = 0; i < tabs; i++)
2750                 seq_putc(m, '\t');
2751
2752         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2753 }
2754
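/*
 * Print an encoder and every connector attached to it; for connected
 * connectors also dump the mode currently programmed on the crtc.
 */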
2755 static void intel_encoder_info(struct seq_file *m,
2756                                struct intel_crtc *intel_crtc,
2757                                struct intel_encoder *intel_encoder)
2758 {
2759         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2760         struct drm_device *dev = &dev_priv->drm;
2761         struct drm_crtc *crtc = &intel_crtc->base;
2762         struct intel_connector *intel_connector;
2763         struct drm_encoder *encoder;
2764
2765         encoder = &intel_encoder->base;
2766         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2767                    encoder->base.id, encoder->name);
2768         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2769                 struct drm_connector *connector = &intel_connector->base;
2770                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2771                            connector->base.id,
2772                            connector->name,
2773                            drm_get_connector_status_name(connector->status));
2774                 if (connector->status == connector_status_connected) {
2775                         struct drm_display_mode *mode = &crtc->mode;
2776                         seq_puts(m, ", mode:\n");
2777                         intel_seq_print_mode(m, 2, mode);
2778                 } else {
2779                         seq_putc(m, '\n');
2780                 }
2781         }
2782 }
2783
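/*
 * Print the primary plane's framebuffer (if any) for the crtc and then
 * describe every encoder driven by it.
 */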
2784 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2785 {
2786         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2787         struct drm_device *dev = &dev_priv->drm;
2788         struct drm_crtc *crtc = &intel_crtc->base;
2789         struct intel_encoder *intel_encoder;
2790         struct drm_plane_state *plane_state = crtc->primary->state;
2791         struct drm_framebuffer *fb = plane_state->fb;
2792
2793         if (fb)
2794                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2795                            fb->base.id, plane_state->src_x >> 16,
2796                            plane_state->src_y >> 16, fb->width, fb->height);
2797         else
2798                 seq_puts(m, "\tprimary plane disabled\n");
2799         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2800                 intel_encoder_info(m, intel_crtc, intel_encoder);
2801 }
2802
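/* Print the panel's fixed mode. */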
2803 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2804 {
2805         struct drm_display_mode *mode = panel->fixed_mode;
2806
2807         seq_puts(m, "\tfixed mode:\n");
2808         intel_seq_print_mode(m, 2, mode);
2809 }
2810
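/*
 * DP specific connector details: DPCD revision, audio support, the fixed
 * panel mode for eDP and a description of any downstream ports.
 */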
2811 static void intel_dp_info(struct seq_file *m,
2812                           struct intel_connector *intel_connector)
2813 {
2814         struct intel_encoder *intel_encoder = intel_connector->encoder;
2815         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2816
2817         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2818         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2819         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2820                 intel_panel_info(m, &intel_connector->panel);
2821
2822         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2823                                 &intel_dp->aux);
2824 }
2825
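/* MST connector details: currently only whether the port carries audio. */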
2826 static void intel_dp_mst_info(struct seq_file *m,
2827                               struct intel_connector *intel_connector)
2828 {
2829         struct intel_encoder *intel_encoder = intel_connector->encoder;
2830         struct intel_dp_mst_encoder *intel_mst =
2831                 enc_to_mst(&intel_encoder->base);
2832         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2833         struct intel_dp *intel_dp = &intel_dig_port->dp;
2834         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2835                                         intel_connector->port);
2836
2837         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2838 }
2839
2840 static void intel_hdmi_info(struct seq_file *m,
2841                             struct intel_connector *intel_connector)
2842 {
2843         struct intel_encoder *intel_encoder = intel_connector->encoder;
2844         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2845
2846         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2847 }
2848
2849 static void intel_lvds_info(struct seq_file *m,
2850                             struct intel_connector *intel_connector)
2851 {
2852         intel_panel_info(m, &intel_connector->panel);
2853 }
2854
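/*
 * Print the generic connector properties (status, physical size, subpixel
 * order, CEA revision) and then the output-type specific details before
 * listing the probed modes.
 */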
2855 static void intel_connector_info(struct seq_file *m,
2856                                  struct drm_connector *connector)
2857 {
2858         struct intel_connector *intel_connector = to_intel_connector(connector);
2859         struct intel_encoder *intel_encoder = intel_connector->encoder;
2860         struct drm_display_mode *mode;
2861
2862         seq_printf(m, "connector %d: type %s, status: %s\n",
2863                    connector->base.id, connector->name,
2864                    drm_get_connector_status_name(connector->status));
2865
2866         if (connector->status == connector_status_disconnected)
2867                 return;
2868
2869         seq_printf(m, "\tname: %s\n", connector->display_info.name);
2870         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2871                    connector->display_info.width_mm,
2872                    connector->display_info.height_mm);
2873         seq_printf(m, "\tsubpixel order: %s\n",
2874                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2875         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2876
2877         if (!intel_encoder)
2878                 return;
2879
2880         switch (connector->connector_type) {
2881         case DRM_MODE_CONNECTOR_DisplayPort:
2882         case DRM_MODE_CONNECTOR_eDP:
2883                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2884                         intel_dp_mst_info(m, intel_connector);
2885                 else
2886                         intel_dp_info(m, intel_connector);
2887                 break;
2888         case DRM_MODE_CONNECTOR_LVDS:
2889                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2890                         intel_lvds_info(m, intel_connector);
2891                 break;
2892         case DRM_MODE_CONNECTOR_HDMIA:
2893                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2894                     intel_encoder->type == INTEL_OUTPUT_DDI)
2895                         intel_hdmi_info(m, intel_connector);
2896                 break;
2897         default:
2898                 break;
2899         }
2900
2901         seq_puts(m, "\tmodes:\n");
2902         list_for_each_entry(mode, &connector->modes, head)
2903                 intel_seq_print_mode(m, 2, mode);
2904 }
2905
2906 static const char *plane_type(enum drm_plane_type type)
2907 {
2908         switch (type) {
2909         case DRM_PLANE_TYPE_OVERLAY:
2910                 return "OVL";
2911         case DRM_PLANE_TYPE_PRIMARY:
2912                 return "PRI";
2913         case DRM_PLANE_TYPE_CURSOR:
2914                 return "CUR";
2915         /*
2916          * Deliberately omitting default: to generate compiler warnings
2917          * when a new drm_plane_type gets added.
2918          */
2919         }
2920
2921         return "unknown";
2922 }
2923
2924 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2925 {
2926         /*
2927          * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
2928          * but print them all so that any misuse of the field is visible.
2929          */
2930         snprintf(buf, bufsize,
2931                  "%s%s%s%s%s%s(0x%08x)",
2932                  (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2933                  (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2934                  (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2935                  (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2936                  (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2937                  (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2938                  rotation);
2939 }
2940
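/*
 * Dump every plane attached to the crtc. The source coordinates are in
 * 16.16 fixed point; the fractional bits are scaled by 15625/1024
 * (i.e. 1000000/65536) before being printed as a decimal fraction.
 */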
2941 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2942 {
2943         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2944         struct drm_device *dev = &dev_priv->drm;
2945         struct intel_plane *intel_plane;
2946
2947         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2948                 struct drm_plane_state *state;
2949                 struct drm_plane *plane = &intel_plane->base;
2950                 struct drm_format_name_buf format_name;
2951                 char rot_str[48];
2952
2953                 if (!plane->state) {
2954                         seq_puts(m, "plane->state is NULL!\n");
2955                         continue;
2956                 }
2957
2958                 state = plane->state;
2959
2960                 if (state->fb) {
2961                         drm_get_format_name(state->fb->format->format,
2962                                             &format_name);
2963                 } else {
2964                         snprintf(format_name.str, sizeof(format_name.str), "N/A");
2965                 }
2966
2967                 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2968
2969                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2970                            plane->base.id,
2971                            plane_type(intel_plane->base.type),
2972                            state->crtc_x, state->crtc_y,
2973                            state->crtc_w, state->crtc_h,
2974                            (state->src_x >> 16),
2975                            ((state->src_x & 0xffff) * 15625) >> 10,
2976                            (state->src_y >> 16),
2977                            ((state->src_y & 0xffff) * 15625) >> 10,
2978                            (state->src_w >> 16),
2979                            ((state->src_w & 0xffff) * 15625) >> 10,
2980                            (state->src_h >> 16),
2981                            ((state->src_h & 0xffff) * 15625) >> 10,
2982                            format_name.str,
2983                            rot_str);
2984         }
2985 }
2986
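/*
 * Describe the pipe scaler state for the crtc: the number of scalers,
 * the scaler_users mask and scaler_id, followed by each scaler's state.
 */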
2987 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2988 {
2989         struct intel_crtc_state *pipe_config;
2990         int num_scalers = intel_crtc->num_scalers;
2991         int i;
2992
2993         pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2994
2995         /* Not all platforms have a scaler */
2996         if (num_scalers) {
2997                 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2998                            num_scalers,
2999                            pipe_config->scaler_state.scaler_users,
3000                            pipe_config->scaler_state.scaler_id);
3001
3002                 for (i = 0; i < num_scalers; i++) {
3003                         struct intel_scaler *sc =
3004                                         &pipe_config->scaler_state.scalers[i];