drm/i915: Fix the static code analysis warning in debugfs
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37         return to_i915(node->minor->dev);
38 }
39
/*
 * debugfs "i915_capabilities": dump the device generation, platform, PCH
 * type, device-info flags, runtime info, driver caps and the current module
 * parameters for this i915 instance.
 */
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42         struct drm_i915_private *dev_priv = node_to_i915(m->private);
43         const struct intel_device_info *info = INTEL_INFO(dev_priv);
44         struct drm_printer p = drm_seq_file_printer(m);
45
46         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49
50         intel_device_info_dump_flags(info, &p);
51         intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
52         intel_driver_caps_print(&dev_priv->caps, &p);
53
        /* Hold the param lock so the modparams snapshot is self-consistent. */
54         kernel_param_lock(THIS_MODULE);
55         i915_params_dump(&i915_modparams, &p);
56         kernel_param_unlock(THIS_MODULE);
57
58         return 0;
59 }
60
/* '*' while the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68         return obj->pin_global ? 'p' : ' ';
69 }
70
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73         switch (i915_gem_object_get_tiling(obj)) {
74         default:
75         case I915_TILING_NONE: return ' ';
76         case I915_TILING_X: return 'X';
77         case I915_TILING_Y: return 'Y';
78         }
79 }
80
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83         return obj->userfault_count ? 'g' : ' ';
84 }
85
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88         return obj->mm.mapping ? 'M' : ' ';
89 }
90
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93         u64 size = 0;
94         struct i915_vma *vma;
95
96         for_each_ggtt_vma(vma, obj) {
97                 if (drm_mm_node_allocated(&vma->node))
98                         size += vma->node.size;
99         }
100
101         return size;
102 }
103
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107         size_t x = 0;
108
109         switch (page_sizes) {
110         case 0:
111                 return "";
112         case I915_GTT_PAGE_SIZE_4K:
113                 return "4K";
114         case I915_GTT_PAGE_SIZE_64K:
115                 return "64K";
116         case I915_GTT_PAGE_SIZE_2M:
117                 return "2M";
118         default:
119                 if (!buf)
120                         return "M";
121
122                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123                         x += snprintf(buf + x, len - x, "2M, ");
124                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125                         x += snprintf(buf + x, len - x, "64K, ");
126                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127                         x += snprintf(buf + x, len - x, "4K, ");
128                 buf[x-2] = '\0';
129
130                 return buf;
131         }
132 }
133
/*
 * Print a one-line (plus parenthesised attributes) description of a GEM
 * object: status flags, size, cache domains, name, pin counts, every bound
 * VMA (with GGTT view details and fence), stolen offset, last-write engine
 * and frontbuffer bits. Caller must hold dev->struct_mutex.
 */
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138         struct intel_engine_cs *engine;
139         struct i915_vma *vma;
140         unsigned int frontbuffer_bits;
141         int pin_count = 0;
142
143         lockdep_assert_held(&obj->base.dev->struct_mutex);
144
        /* Flag characters come from the get_*_flag() helpers above. */
145         seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
146                    &obj->base,
147                    get_active_flag(obj),
148                    get_pin_flag(obj),
149                    get_tiling_flag(obj),
150                    get_global_flag(obj),
151                    get_pin_mapped_flag(obj),
152                    obj->base.size / 1024,
153                    obj->read_domains,
154                    obj->write_domain,
155                    i915_cache_level_str(dev_priv, obj->cache_level),
156                    obj->mm.dirty ? " dirty" : "",
157                    obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
158         if (obj->base.name)
159                 seq_printf(m, " (name: %d)", obj->base.name);
        /* First pass over the VMAs just to total up the pinned ones. */
160         list_for_each_entry(vma, &obj->vma_list, obj_link) {
161                 if (i915_vma_is_pinned(vma))
162                         pin_count++;
163         }
164         seq_printf(m, " (pinned x %d)", pin_count);
165         if (obj->pin_global)
166                 seq_printf(m, " (global)");
        /* Second pass: describe each VMA that has GTT space allocated. */
167         list_for_each_entry(vma, &obj->vma_list, obj_link) {
168                 if (!drm_mm_node_allocated(&vma->node))
169                         continue;
170
171                 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
172                            i915_vma_is_ggtt(vma) ? "g" : "pp",
173                            vma->node.start, vma->node.size,
174                            stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
175                 if (i915_vma_is_ggtt(vma)) {
176                         switch (vma->ggtt_view.type) {
177                         case I915_GGTT_VIEW_NORMAL:
178                                 seq_puts(m, ", normal");
179                                 break;
180
181                         case I915_GGTT_VIEW_PARTIAL:
182                                 seq_printf(m, ", partial [%08llx+%x]",
183                                            vma->ggtt_view.partial.offset << PAGE_SHIFT,
184                                            vma->ggtt_view.partial.size << PAGE_SHIFT);
185                                 break;
186
187                         case I915_GGTT_VIEW_ROTATED:
188                                 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
189                                            vma->ggtt_view.rotated.plane[0].width,
190                                            vma->ggtt_view.rotated.plane[0].height,
191                                            vma->ggtt_view.rotated.plane[0].stride,
192                                            vma->ggtt_view.rotated.plane[0].offset,
193                                            vma->ggtt_view.rotated.plane[1].width,
194                                            vma->ggtt_view.rotated.plane[1].height,
195                                            vma->ggtt_view.rotated.plane[1].stride,
196                                            vma->ggtt_view.rotated.plane[1].offset);
197                                 break;
198
199                         default:
200                                 MISSING_CASE(vma->ggtt_view.type);
201                                 break;
202                         }
203                 }
204                 if (vma->fence)
205                         seq_printf(m, " , fence: %d%s",
206                                    vma->fence->id,
207                                    i915_gem_active_isset(&vma->last_fence) ? "*" : "");
208                 seq_puts(m, ")");
209         }
210         if (obj->stolen)
211                 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
212
213         engine = i915_gem_object_last_write_engine(obj);
214         if (engine)
215                 seq_printf(m, " (%s)", engine->name);
216
217         frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218         if (frontbuffer_bits)
219                 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
220 }
221
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224         const struct drm_i915_gem_object *a =
225                 *(const struct drm_i915_gem_object **)A;
226         const struct drm_i915_gem_object *b =
227                 *(const struct drm_i915_gem_object **)B;
228
229         if (a->stolen->start < b->stolen->start)
230                 return -1;
231         if (a->stolen->start > b->stolen->start)
232                 return 1;
233         return 0;
234 }
235
/*
 * debugfs "i915_gem_stolen": list every GEM object backed by stolen memory,
 * sorted by stolen start offset, followed by the totals. Objects are copied
 * into a temporary array under mm.obj_lock (a spinlock, so no printing
 * there), then described under struct_mutex only.
 */
236 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237 {
238         struct drm_i915_private *dev_priv = node_to_i915(m->private);
239         struct drm_device *dev = &dev_priv->drm;
240         struct drm_i915_gem_object **objects;
241         struct drm_i915_gem_object *obj;
242         u64 total_obj_size, total_gtt_size;
243         unsigned long total, count, n;
244         int ret;
245
        /*
         * Snapshot of the object count; the lists can still grow afterwards,
         * so the loops below also stop once the array is full.
         */
246         total = READ_ONCE(dev_priv->mm.object_count);
247         objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
248         if (!objects)
249                 return -ENOMEM;
250
251         ret = mutex_lock_interruptible(&dev->struct_mutex);
252         if (ret)
253                 goto out;
254
255         total_obj_size = total_gtt_size = count = 0;
256
257         spin_lock(&dev_priv->mm.obj_lock);
258         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
259                 if (count == total)
260                         break;
261
262                 if (obj->stolen == NULL)
263                         continue;
264
265                 objects[count++] = obj;
266                 total_obj_size += obj->base.size;
267                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
268
269         }
        /* Unbound objects occupy no GTT space, so only object size is added. */
270         list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
271                 if (count == total)
272                         break;
273
274                 if (obj->stolen == NULL)
275                         continue;
276
277                 objects[count++] = obj;
278                 total_obj_size += obj->base.size;
279         }
280         spin_unlock(&dev_priv->mm.obj_lock);
281
282         sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283
284         seq_puts(m, "Stolen:\n");
285         for (n = 0; n < count; n++) {
286                 seq_puts(m, "   ");
287                 describe_obj(m, objects[n]);
288                 seq_putc(m, '\n');
289         }
290         seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
291                    count, total_obj_size, total_gtt_size);
292
293         mutex_unlock(&dev->struct_mutex);
294 out:
295         kvfree(objects);
296         return ret;
297 }
298
/*
 * Aggregated GEM object statistics for one owner (a client file, a context,
 * or the kernel). @vm selects which ppgtt address space per-process VMAs are
 * attributed to in per_file_stats(); all byte counters are object sizes.
 */
299 struct file_stats {
300         struct i915_address_space *vm;
301         unsigned long count;
302         u64 total, unbound;
303         u64 global, shared;
304         u64 active, inactive;
305         u64 closed;
306 };
307
/*
 * idr_for_each() callback: fold one GEM object (@ptr) into the file_stats
 * accumulator (@data). @id is the handle and is unused. Requires
 * struct_mutex as the VMA list is walked. Always returns 0 so iteration
 * continues over every object.
 */
308 static int per_file_stats(int id, void *ptr, void *data)
309 {
310         struct drm_i915_gem_object *obj = ptr;
311         struct file_stats *stats = data;
312         struct i915_vma *vma;
313
314         lockdep_assert_held(&obj->base.dev->struct_mutex);
315
316         stats->count++;
317         stats->total += obj->base.size;
318         if (!obj->bind_count)
319                 stats->unbound += obj->base.size;
320         if (obj->base.name || obj->base.dma_buf)
321                 stats->shared += obj->base.size;
322
323         list_for_each_entry(vma, &obj->vma_list, obj_link) {
324                 if (!drm_mm_node_allocated(&vma->node))
325                         continue;
326
                /*
                 * GGTT VMAs are always accounted; ppgtt VMAs only count
                 * when they belong to the address space this accumulator
                 * is filtering on (stats->vm).
                 */
327                 if (i915_vma_is_ggtt(vma)) {
328                         stats->global += vma->node.size;
329                 } else {
330                         if (vma->vm != stats->vm)
331                                 continue;
332                 }
333
334                 if (i915_vma_is_active(vma))
335                         stats->active += vma->node.size;
336                 else
337                         stats->inactive += vma->node.size;
338
339                 if (i915_vma_is_closed(vma))
340                         stats->closed += vma->node.size;
341         }
342
343         return 0;
344 }
345
/*
 * Emit one summary line for a file_stats accumulator, skipping owners with
 * no objects. A macro (not a function) so @stats can be passed by value as
 * a bare struct; callers only ever pass a plain local, so the repeated
 * "stats.field" evaluation is side-effect free.
 */
346 #define print_file_stats(m, name, stats) do { \
347         if (stats.count) \
348                 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
349                            name, \
350                            stats.count, \
351                            stats.total, \
352                            stats.active, \
353                            stats.inactive, \
354                            stats.global, \
355                            stats.shared, \
356                            stats.unbound, \
357                            stats.closed); \
358 } while (0)
359
/*
 * Accumulate and print statistics for every object sitting in the batch
 * pool caches of all engines, reported as a single "[k]batch pool" line.
 * Uses per_file_stats() with id 0 as a plain accumulator.
 */
360 static void print_batch_pool_stats(struct seq_file *m,
361                                    struct drm_i915_private *dev_priv)
362 {
363         struct drm_i915_gem_object *obj;
364         struct intel_engine_cs *engine;
365         struct file_stats stats = {};
366         enum intel_engine_id id;
367         int j;
368
369         for_each_engine(engine, dev_priv, id) {
370                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
371                         list_for_each_entry(obj,
372                                             &engine->batch_pool.cache_list[j],
373                                             batch_pool_link)
374                                 per_file_stats(0, obj, &stats);
375                 }
376         }
377
378         print_file_stats(m, "[k]batch pool", stats);
379 }
380
/*
 * Walk every GEM context: kernel-owned context state and rings are folded
 * into one "[k]contexts" line, while each userspace context additionally
 * gets a per-client line labelled "<task comm>/<user handle>" covering all
 * objects in that client's handle table.
 */
381 static void print_context_stats(struct seq_file *m,
382                                 struct drm_i915_private *i915)
383 {
384         struct file_stats kstats = {};
385         struct i915_gem_context *ctx;
386
387         list_for_each_entry(ctx, &i915->contexts.list, link) {
388                 struct intel_engine_cs *engine;
389                 enum intel_engine_id id;
390
391                 for_each_engine(engine, i915, id) {
392                         struct intel_context *ce = to_intel_context(ctx, engine);
393
394                         if (ce->state)
395                                 per_file_stats(0, ce->state->obj, &kstats);
396                         if (ce->ring)
397                                 per_file_stats(0, ce->ring->vma->obj, &kstats);
398                 }
399
400                 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
401                         struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
402                         struct drm_file *file = ctx->file_priv->file;
403                         struct task_struct *task;
404                         char name[80];
405
406                         spin_lock(&file->table_lock);
407                         idr_for_each(&file->object_idr, per_file_stats, &stats);
408                         spin_unlock(&file->table_lock);
409
                        /*
                         * pid_task() and task->comm are only stable under
                         * RCU; build the label before dropping the lock.
                         */
410                         rcu_read_lock();
411                         task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
412                         snprintf(name, sizeof(name), "%s/%d",
413                                  task ? task->comm : "<unknown>",
414                                  ctx->user_handle);
415                         rcu_read_unlock();
416
417                         print_file_stats(m, name, stats);
418                 }
419         }
420
421         print_file_stats(m, "[k]contexts", kstats);
422 }
423
/*
 * debugfs "i915_gem_objects": global object-memory accounting. Totals are
 * gathered for the unbound and bound lists under mm.obj_lock (purgeable,
 * kernel-mapped, huge-paged and display-pinned breakdowns), then batch-pool
 * and per-context stats are printed under struct_mutex.
 */
424 static int i915_gem_object_info(struct seq_file *m, void *data)
425 {
426         struct drm_i915_private *dev_priv = node_to_i915(m->private);
427         struct drm_device *dev = &dev_priv->drm;
428         struct i915_ggtt *ggtt = &dev_priv->ggtt;
429         u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
430         u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
431         struct drm_i915_gem_object *obj;
432         unsigned int page_sizes = 0;
433         char buf[80];
434         int ret;
435
436         seq_printf(m, "%u objects, %llu bytes\n",
437                    dev_priv->mm.object_count,
438                    dev_priv->mm.object_memory);
439
440         size = count = 0;
441         mapped_size = mapped_count = 0;
442         purgeable_size = purgeable_count = 0;
443         huge_size = huge_count = 0;
444
445         spin_lock(&dev_priv->mm.obj_lock);
446         list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
447                 size += obj->base.size;
448                 ++count;
449
450                 if (obj->mm.madv == I915_MADV_DONTNEED) {
451                         purgeable_size += obj->base.size;
452                         ++purgeable_count;
453                 }
454
455                 if (obj->mm.mapping) {
456                         mapped_count++;
457                         mapped_size += obj->base.size;
458                 }
459
                /* Backed by pages larger than the minimum GTT page size. */
460                 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
461                         huge_count++;
462                         huge_size += obj->base.size;
463                         page_sizes |= obj->mm.page_sizes.sg;
464                 }
465         }
466         seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
467
        /*
         * size/count restart for the bound list; purgeable/mapped/huge
         * keep accumulating across both lists.
         */
468         size = count = dpy_size = dpy_count = 0;
469         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
470                 size += obj->base.size;
471                 ++count;
472
473                 if (obj->pin_global) {
474                         dpy_size += obj->base.size;
475                         ++dpy_count;
476                 }
477
478                 if (obj->mm.madv == I915_MADV_DONTNEED) {
479                         purgeable_size += obj->base.size;
480                         ++purgeable_count;
481                 }
482
483                 if (obj->mm.mapping) {
484                         mapped_count++;
485                         mapped_size += obj->base.size;
486                 }
487
488                 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
489                         huge_count++;
490                         huge_size += obj->base.size;
491                         page_sizes |= obj->mm.page_sizes.sg;
492                 }
493         }
494         spin_unlock(&dev_priv->mm.obj_lock);
495
496         seq_printf(m, "%u bound objects, %llu bytes\n",
497                    count, size);
498         seq_printf(m, "%u purgeable objects, %llu bytes\n",
499                    purgeable_count, purgeable_size);
500         seq_printf(m, "%u mapped objects, %llu bytes\n",
501                    mapped_count, mapped_size);
502         seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
503                    huge_count,
504                    stringify_page_sizes(page_sizes, buf, sizeof(buf)),
505                    huge_size);
506         seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
507                    dpy_count, dpy_size);
508
509         seq_printf(m, "%llu [%pa] gtt total\n",
510                    ggtt->vm.total, &ggtt->mappable_end);
511         seq_printf(m, "Supported page sizes: %s\n",
512                    stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
513                                         buf, sizeof(buf)));
514
515         seq_putc(m, '\n');
516
517         ret = mutex_lock_interruptible(&dev->struct_mutex);
518         if (ret)
519                 return ret;
520
521         print_batch_pool_stats(m, dev_priv);
522         print_context_stats(m, dev_priv);
523         mutex_unlock(&dev->struct_mutex);
524
525         return 0;
526 }
527
528 static int i915_gem_gtt_info(struct seq_file *m, void *data)
529 {
530         struct drm_info_node *node = m->private;
531         struct drm_i915_private *dev_priv = node_to_i915(node);
532         struct drm_device *dev = &dev_priv->drm;
533         struct drm_i915_gem_object **objects;
534         struct drm_i915_gem_object *obj;
535         u64 total_obj_size, total_gtt_size;
536         unsigned long nobject, n;
537         int count, ret;
538
539         nobject = READ_ONCE(dev_priv->mm.object_count);
540         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
541         if (!objects)
542                 return -ENOMEM;
543
544         ret = mutex_lock_interruptible(&dev->struct_mutex);
545         if (ret)
546                 return ret;
547
548         count = 0;
549         spin_lock(&dev_priv->mm.obj_lock);
550         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
551                 objects[count++] = obj;
552                 if (count == nobject)
553                         break;
554         }
555         spin_unlock(&dev_priv->mm.obj_lock);
556
557         total_obj_size = total_gtt_size = 0;
558         for (n = 0;  n < count; n++) {
559                 obj = objects[n];
560
561                 seq_puts(m, "   ");
562                 describe_obj(m, obj);
563                 seq_putc(m, '\n');
564                 total_obj_size += obj->base.size;
565                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
566         }
567
568         mutex_unlock(&dev->struct_mutex);
569
570         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
571                    count, total_obj_size, total_gtt_size);
572         kvfree(objects);
573
574         return 0;
575 }
576
/*
 * debugfs "i915_gem_batch_pool": for each engine's batch pool cache lists,
 * print the object count followed by a description of every object, then a
 * grand total. struct_mutex protects the lists and describe_obj().
 */
577 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
578 {
579         struct drm_i915_private *dev_priv = node_to_i915(m->private);
580         struct drm_device *dev = &dev_priv->drm;
581         struct drm_i915_gem_object *obj;
582         struct intel_engine_cs *engine;
583         enum intel_engine_id id;
584         int total = 0;
585         int ret, j;
586
587         ret = mutex_lock_interruptible(&dev->struct_mutex);
588         if (ret)
589                 return ret;
590
591         for_each_engine(engine, dev_priv, id) {
592                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
593                         int count;
594
                        /* First pass: count, so the header precedes the list. */
595                         count = 0;
596                         list_for_each_entry(obj,
597                                             &engine->batch_pool.cache_list[j],
598                                             batch_pool_link)
599                                 count++;
600                         seq_printf(m, "%s cache[%d]: %d objects\n",
601                                    engine->name, j, count);
602
                        /* Second pass: describe each object. */
603                         list_for_each_entry(obj,
604                                             &engine->batch_pool.cache_list[j],
605                                             batch_pool_link) {
606                                 seq_puts(m, "   ");
607                                 describe_obj(m, obj);
608                                 seq_putc(m, '\n');
609                         }
610
611                         total += count;
612                 }
613         }
614
615         seq_printf(m, "total: %d\n", total);
616
617         mutex_unlock(&dev->struct_mutex);
618
619         return 0;
620 }
621
/*
 * Dump the gen8-style display interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down, since reading their registers
 * would fault), then the DE port, DE misc and PCU interrupt registers.
 * Shared by the gen8 and gen11 branches of i915_interrupt_info().
 */
622 static void gen8_display_interrupt_info(struct seq_file *m)
623 {
624         struct drm_i915_private *dev_priv = node_to_i915(m->private);
625         int pipe;
626
627         for_each_pipe(dev_priv, pipe) {
628                 enum intel_display_power_domain power_domain;
629                 intel_wakeref_t wakeref;
630
631                 power_domain = POWER_DOMAIN_PIPE(pipe);
632                 wakeref = intel_display_power_get_if_enabled(dev_priv,
633                                                              power_domain);
                /* Pipe powered down: note it and move on, don't touch MMIO. */
634                 if (!wakeref) {
635                         seq_printf(m, "Pipe %c power disabled\n",
636                                    pipe_name(pipe));
637                         continue;
638                 }
639                 seq_printf(m, "Pipe %c IMR:\t%08x\n",
640                            pipe_name(pipe),
641                            I915_READ(GEN8_DE_PIPE_IMR(pipe)));
642                 seq_printf(m, "Pipe %c IIR:\t%08x\n",
643                            pipe_name(pipe),
644                            I915_READ(GEN8_DE_PIPE_IIR(pipe)));
645                 seq_printf(m, "Pipe %c IER:\t%08x\n",
646                            pipe_name(pipe),
647                            I915_READ(GEN8_DE_PIPE_IER(pipe)));
648
649                 intel_display_power_put(dev_priv, power_domain, wakeref);
650         }
651
652         seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
653                    I915_READ(GEN8_DE_PORT_IMR));
654         seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
655                    I915_READ(GEN8_DE_PORT_IIR));
656         seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
657                    I915_READ(GEN8_DE_PORT_IER));
658
659         seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
660                    I915_READ(GEN8_DE_MISC_IMR));
661         seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
662                    I915_READ(GEN8_DE_MISC_IIR));
663         seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
664                    I915_READ(GEN8_DE_MISC_IER));
665
666         seq_printf(m, "PCU interrupt mask:\t%08x\n",
667                    I915_READ(GEN8_PCU_IMR));
668         seq_printf(m, "PCU interrupt identity:\t%08x\n",
669                    I915_READ(GEN8_PCU_IIR));
670         seq_printf(m, "PCU interrupt enable:\t%08x\n",
671                    I915_READ(GEN8_PCU_IER));
672 }
673
674 static int i915_interrupt_info(struct seq_file *m, void *data)
675 {
676         struct drm_i915_private *dev_priv = node_to_i915(m->private);
677         struct intel_engine_cs *engine;
678         enum intel_engine_id id;
679         intel_wakeref_t wakeref;
680         int i, pipe;
681
682         wakeref = intel_runtime_pm_get(dev_priv);
683
684         if (IS_CHERRYVIEW(dev_priv)) {
685                 intel_wakeref_t pref;
686
687                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
688                            I915_READ(GEN8_MASTER_IRQ));
689
690                 seq_printf(m, "Display IER:\t%08x\n",
691                            I915_READ(VLV_IER));
692                 seq_printf(m, "Display IIR:\t%08x\n",
693                            I915_READ(VLV_IIR));
694                 seq_printf(m, "Display IIR_RW:\t%08x\n",
695                            I915_READ(VLV_IIR_RW));
696                 seq_printf(m, "Display IMR:\t%08x\n",
697                            I915_READ(VLV_IMR));
698                 for_each_pipe(dev_priv, pipe) {
699                         enum intel_display_power_domain power_domain;
700
701                         power_domain = POWER_DOMAIN_PIPE(pipe);
702                         pref = intel_display_power_get_if_enabled(dev_priv,
703                                                                   power_domain);
704                         if (!pref) {
705                                 seq_printf(m, "Pipe %c power disabled\n",
706                                            pipe_name(pipe));
707                                 continue;
708                         }
709
710                         seq_printf(m, "Pipe %c stat:\t%08x\n",
711                                    pipe_name(pipe),
712                                    I915_READ(PIPESTAT(pipe)));
713
714                         intel_display_power_put(dev_priv, power_domain, pref);
715                 }
716
717                 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
718                 seq_printf(m, "Port hotplug:\t%08x\n",
719                            I915_READ(PORT_HOTPLUG_EN));
720                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
721                            I915_READ(VLV_DPFLIPSTAT));
722                 seq_printf(m, "DPINVGTT:\t%08x\n",
723                            I915_READ(DPINVGTT));
724                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
725
726                 for (i = 0; i < 4; i++) {
727                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
728                                    i, I915_READ(GEN8_GT_IMR(i)));
729                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
730                                    i, I915_READ(GEN8_GT_IIR(i)));
731                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
732                                    i, I915_READ(GEN8_GT_IER(i)));
733                 }
734
735                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
736                            I915_READ(GEN8_PCU_IMR));
737                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
738                            I915_READ(GEN8_PCU_IIR));
739                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
740                            I915_READ(GEN8_PCU_IER));
741         } else if (INTEL_GEN(dev_priv) >= 11) {
742                 seq_printf(m, "Master Interrupt Control:  %08x\n",
743                            I915_READ(GEN11_GFX_MSTR_IRQ));
744
745                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
746                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
747                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
748                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
749                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
750                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
751                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
752                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
753                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
754                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
755                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
756                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
757
758                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
759                            I915_READ(GEN11_DISPLAY_INT_CTL));
760
761                 gen8_display_interrupt_info(m);
762         } else if (INTEL_GEN(dev_priv) >= 8) {
763                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
764                            I915_READ(GEN8_MASTER_IRQ));
765
766                 for (i = 0; i < 4; i++) {
767                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
768                                    i, I915_READ(GEN8_GT_IMR(i)));
769                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
770                                    i, I915_READ(GEN8_GT_IIR(i)));
771                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
772                                    i, I915_READ(GEN8_GT_IER(i)));
773                 }
774
775                 gen8_display_interrupt_info(m);
776         } else if (IS_VALLEYVIEW(dev_priv)) {
777                 seq_printf(m, "Display IER:\t%08x\n",
778                            I915_READ(VLV_IER));
779                 seq_printf(m, "Display IIR:\t%08x\n",
780                            I915_READ(VLV_IIR));
781                 seq_printf(m, "Display IIR_RW:\t%08x\n",
782                            I915_READ(VLV_IIR_RW));
783                 seq_printf(m, "Display IMR:\t%08x\n",
784                            I915_READ(VLV_IMR));
785                 for_each_pipe(dev_priv, pipe) {
786                         enum intel_display_power_domain power_domain;
787                         intel_wakeref_t pref;
788
789                         power_domain = POWER_DOMAIN_PIPE(pipe);
790                         pref = intel_display_power_get_if_enabled(dev_priv,
791                                                                   power_domain);
792                         if (!pref) {
793                                 seq_printf(m, "Pipe %c power disabled\n",
794                                            pipe_name(pipe));
795                                 continue;
796                         }
797
798                         seq_printf(m, "Pipe %c stat:\t%08x\n",
799                                    pipe_name(pipe),
800                                    I915_READ(PIPESTAT(pipe)));
801                         intel_display_power_put(dev_priv, power_domain, pref);
802                 }
803
804                 seq_printf(m, "Master IER:\t%08x\n",
805                            I915_READ(VLV_MASTER_IER));
806
807                 seq_printf(m, "Render IER:\t%08x\n",
808                            I915_READ(GTIER));
809                 seq_printf(m, "Render IIR:\t%08x\n",
810                            I915_READ(GTIIR));
811                 seq_printf(m, "Render IMR:\t%08x\n",
812                            I915_READ(GTIMR));
813
814                 seq_printf(m, "PM IER:\t\t%08x\n",
815                            I915_READ(GEN6_PMIER));
816                 seq_printf(m, "PM IIR:\t\t%08x\n",
817                            I915_READ(GEN6_PMIIR));
818                 seq_printf(m, "PM IMR:\t\t%08x\n",
819                            I915_READ(GEN6_PMIMR));
820
821                 seq_printf(m, "Port hotplug:\t%08x\n",
822                            I915_READ(PORT_HOTPLUG_EN));
823                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
824                            I915_READ(VLV_DPFLIPSTAT));
825                 seq_printf(m, "DPINVGTT:\t%08x\n",
826                            I915_READ(DPINVGTT));
827
828         } else if (!HAS_PCH_SPLIT(dev_priv)) {
829                 seq_printf(m, "Interrupt enable:    %08x\n",
830                            I915_READ(IER));
831                 seq_printf(m, "Interrupt identity:  %08x\n",
832                            I915_READ(IIR));
833                 seq_printf(m, "Interrupt mask:      %08x\n",
834                            I915_READ(IMR));
835                 for_each_pipe(dev_priv, pipe)
836                         seq_printf(m, "Pipe %c stat:         %08x\n",
837                                    pipe_name(pipe),
838                                    I915_READ(PIPESTAT(pipe)));
839         } else {
840                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
841                            I915_READ(DEIER));
842                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
843                            I915_READ(DEIIR));
844                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
845                            I915_READ(DEIMR));
846                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
847                            I915_READ(SDEIER));
848                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
849                            I915_READ(SDEIIR));
850                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
851                            I915_READ(SDEIMR));
852                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
853                            I915_READ(GTIER));
854                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
855                            I915_READ(GTIIR));
856                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
857                            I915_READ(GTIMR));
858         }
859
860         if (INTEL_GEN(dev_priv) >= 11) {
861                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
862                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
863                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
864                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
865                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
866                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
867                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
868                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
869                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
870                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
871                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
872                            I915_READ(GEN11_GUC_SG_INTR_MASK));
873                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
874                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
875                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
876                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
877                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
878                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
879
880         } else if (INTEL_GEN(dev_priv) >= 6) {
881                 for_each_engine(engine, dev_priv, id) {
882                         seq_printf(m,
883                                    "Graphics Interrupt mask (%s):       %08x\n",
884                                    engine->name, I915_READ_IMR(engine));
885                 }
886         }
887
888         intel_runtime_pm_put(dev_priv, wakeref);
889
890         return 0;
891 }
892
893 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
894 {
895         struct drm_i915_private *dev_priv = node_to_i915(m->private);
896         struct drm_device *dev = &dev_priv->drm;
897         int i, ret;
898
899         ret = mutex_lock_interruptible(&dev->struct_mutex);
900         if (ret)
901                 return ret;
902
903         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
904         for (i = 0; i < dev_priv->num_fence_regs; i++) {
905                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
906
907                 seq_printf(m, "Fence %d, pin count = %d, object = ",
908                            i, dev_priv->fence_regs[i].pin_count);
909                 if (!vma)
910                         seq_puts(m, "unused");
911                 else
912                         describe_obj(m, vma->obj);
913                 seq_putc(m, '\n');
914         }
915
916         mutex_unlock(&dev->struct_mutex);
917         return 0;
918 }
919
920 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
921 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
922                               size_t count, loff_t *pos)
923 {
924         struct i915_gpu_state *error;
925         ssize_t ret;
926         void *buf;
927
928         error = file->private_data;
929         if (!error)
930                 return 0;
931
932         /* Bounce buffer required because of kernfs __user API convenience. */
933         buf = kmalloc(count, GFP_KERNEL);
934         if (!buf)
935                 return -ENOMEM;
936
937         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
938         if (ret <= 0)
939                 goto out;
940
941         if (!copy_to_user(ubuf, buf, ret))
942                 *pos += ret;
943         else
944                 ret = -EFAULT;
945
946 out:
947         kfree(buf);
948         return ret;
949 }
950
/* Drop the error-state reference taken when the debugfs file was opened. */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
956
/*
 * Capture a fresh snapshot of the current GPU state and stash it on the
 * file handle; gpu_state_read() serialises it to userspace on demand.
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	/*
	 * Initialise before the capture: with_intel_runtime_pm() is a
	 * macro loop, so static analysers cannot prove the body always
	 * executes; the explicit NULL keeps the IS_ERR() check below
	 * well-defined in either case.
	 */
	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	/* Reference is released by gpu_state_release(). */
	file->private_data = gpu;
	return 0;
}
972
/* debugfs "i915_gpu_info": read-only snapshot of the live GPU state. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
980
981 static ssize_t
982 i915_error_state_write(struct file *filp,
983                        const char __user *ubuf,
984                        size_t cnt,
985                        loff_t *ppos)
986 {
987         struct i915_gpu_state *error = filp->private_data;
988
989         if (!error)
990                 return 0;
991
992         DRM_DEBUG_DRIVER("Resetting error state\n");
993         i915_reset_error_state(error->i915);
994
995         return cnt;
996 }
997
998 static int i915_error_state_open(struct inode *inode, struct file *file)
999 {
1000         struct i915_gpu_state *error;
1001
1002         error = i915_first_error_state(inode->i_private);
1003         if (IS_ERR(error))
1004                 return PTR_ERR(error);
1005
1006         file->private_data  = error;
1007         return 0;
1008 }
1009
/* debugfs "i915_error_state": first captured GPU error; write to clear it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1018 #endif
1019
/*
 * i915_frequency_info - dump the GPU frequency (RPS/turbo) state.
 *
 * Reports the current, min/max/idle/boost/efficient frequencies together
 * with the raw turbo control/status registers, decoding the register
 * layout appropriate to the platform generation.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	/* Registers below require the device awake. */
	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		/* Ironlake: P-state info lives in MEMSWCTL / MEMSTAT_ILK. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* pcu_lock serialises the punit mailbox access below. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts keep the caps in the BXT_* registers. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* The requested-frequency field moved/shrank across gens. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPN/RP1/RP0 caps swap fields on gen9-LP, and gen9+ scale. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1242
1243 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1244                                struct seq_file *m,
1245                                struct intel_instdone *instdone)
1246 {
1247         int slice;
1248         int subslice;
1249
1250         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1251                    instdone->instdone);
1252
1253         if (INTEL_GEN(dev_priv) <= 3)
1254                 return;
1255
1256         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1257                    instdone->slice_common);
1258
1259         if (INTEL_GEN(dev_priv) <= 6)
1260                 return;
1261
1262         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1263                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1264                            slice, subslice, instdone->sampler[slice][subslice]);
1265
1266         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1267                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1268                            slice, subslice, instdone->row[slice][subslice]);
1269 }
1270
/*
 * i915_hangcheck_info - dump the state used by the GPU hang detector.
 *
 * Reports global wedged/reset flags, per-engine seqno/ACTHD progress as
 * sampled now vs. what hangcheck last recorded, outstanding waiters, and
 * the INSTDONE snapshots used to detect a stuck render engine.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	/* Global error/reset status flags first. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the live engine state while holding a runtime-pm wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		/* Walk the breadcrumb tree of tasks waiting on this engine. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			/*
			 * Show both the fresh INSTDONE sample and what
			 * hangcheck accumulated, so a reader can compare
			 * them for signs of (lack of) progress.
			 */
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1364
1365 static int i915_reset_info(struct seq_file *m, void *unused)
1366 {
1367         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1368         struct i915_gpu_error *error = &dev_priv->gpu_error;
1369         struct intel_engine_cs *engine;
1370         enum intel_engine_id id;
1371
1372         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1373
1374         for_each_engine(engine, dev_priv, id) {
1375                 seq_printf(m, "%s = %u\n", engine->name,
1376                            i915_reset_engine_count(error, engine));
1377         }
1378
1379         return 0;
1380 }
1381
/*
 * ironlake_drpc_info - dump Ironlake render-standby (DRPC) state.
 *
 * Decodes MEMMODECTL/RSTDBYCTL/CRSTANDVID into boost, P-state and
 * render-standby (RSx/RC6) status.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	/* Translate the RSX status field into a human-readable state name. */
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1438
1439 static int i915_forcewake_domains(struct seq_file *m, void *data)
1440 {
1441         struct drm_i915_private *i915 = node_to_i915(m->private);
1442         struct intel_uncore_forcewake_domain *fw_domain;
1443         unsigned int tmp;
1444
1445         seq_printf(m, "user.bypass_count = %u\n",
1446                    i915->uncore.user_forcewake.count);
1447
1448         for_each_fw_domain(fw_domain, i915, tmp)
1449                 seq_printf(m, "%s.wake_count = %u\n",
1450                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1451                            READ_ONCE(fw_domain->wake_count));
1452
1453         return 0;
1454 }
1455
1456 static void print_rc6_res(struct seq_file *m,
1457                           const char *title,
1458                           const i915_reg_t reg)
1459 {
1460         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1461
1462         seq_printf(m, "%s %u (%llu us)\n",
1463                    title, I915_READ(reg),
1464                    intel_rc6_residency_us(dev_priv, reg));
1465 }
1466
1467 static int vlv_drpc_info(struct seq_file *m)
1468 {
1469         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1470         u32 rcctl1, pw_status;
1471
1472         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1473         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1474
1475         seq_printf(m, "RC6 Enabled: %s\n",
1476                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1477                                         GEN6_RC_CTL_EI_MODE(1))));
1478         seq_printf(m, "Render Power Well: %s\n",
1479                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1480         seq_printf(m, "Media Power Well: %s\n",
1481                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1482
1483         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1484         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1485
1486         return i915_forcewake_domains(m, NULL);
1487 }
1488
/*
 * gen6_drpc_info - report gen6+ RC-state (RC6) configuration and status.
 *
 * Caller must hold a runtime-pm wakeref (see i915_drpc_info()).
 * Finishes by dumping the forcewake domains and returns that result.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/*
	 * Raw (_FW) read bypasses the forcewake/trace bookkeeping, so the
	 * register trace event is emitted by hand here.
	 */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+ adds per-well (render/media) powergating state. */
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 voltage IDs come via the PCU mailbox; pcu_lock serializes it. */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC-state field of GEN6_GT_CORE_STATUS. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* rc6vids packs one VID per byte: RC6, RC6+, RC6++. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1576
1577 static int i915_drpc_info(struct seq_file *m, void *unused)
1578 {
1579         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1580         intel_wakeref_t wakeref;
1581         int err = -ENODEV;
1582
1583         with_intel_runtime_pm(dev_priv, wakeref) {
1584                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1585                         err = vlv_drpc_info(m);
1586                 else if (INTEL_GEN(dev_priv) >= 6)
1587                         err = gen6_drpc_info(m);
1588                 else
1589                         err = ironlake_drpc_info(m);
1590         }
1591
1592         return err;
1593 }
1594
1595 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1596 {
1597         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1598
1599         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1600                    dev_priv->fb_tracking.busy_bits);
1601
1602         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1603                    dev_priv->fb_tracking.flip_bits);
1604
1605         return 0;
1606 }
1607
1608 static int i915_fbc_status(struct seq_file *m, void *unused)
1609 {
1610         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1611         struct intel_fbc *fbc = &dev_priv->fbc;
1612         intel_wakeref_t wakeref;
1613
1614         if (!HAS_FBC(dev_priv))
1615                 return -ENODEV;
1616
1617         wakeref = intel_runtime_pm_get(dev_priv);
1618         mutex_lock(&fbc->lock);
1619
1620         if (intel_fbc_is_active(dev_priv))
1621                 seq_puts(m, "FBC enabled\n");
1622         else
1623                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1624
1625         if (intel_fbc_is_active(dev_priv)) {
1626                 u32 mask;
1627
1628                 if (INTEL_GEN(dev_priv) >= 8)
1629                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1630                 else if (INTEL_GEN(dev_priv) >= 7)
1631                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1632                 else if (INTEL_GEN(dev_priv) >= 5)
1633                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1634                 else if (IS_G4X(dev_priv))
1635                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1636                 else
1637                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1638                                                         FBC_STAT_COMPRESSED);
1639
1640                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1641         }
1642
1643         mutex_unlock(&fbc->lock);
1644         intel_runtime_pm_put(dev_priv, wakeref);
1645
1646         return 0;
1647 }
1648
1649 static int i915_fbc_false_color_get(void *data, u64 *val)
1650 {
1651         struct drm_i915_private *dev_priv = data;
1652
1653         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1654                 return -ENODEV;
1655
1656         *val = dev_priv->fbc.false_color;
1657
1658         return 0;
1659 }
1660
1661 static int i915_fbc_false_color_set(void *data, u64 val)
1662 {
1663         struct drm_i915_private *dev_priv = data;
1664         u32 reg;
1665
1666         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1667                 return -ENODEV;
1668
1669         mutex_lock(&dev_priv->fbc.lock);
1670
1671         reg = I915_READ(ILK_DPFC_CONTROL);
1672         dev_priv->fbc.false_color = val;
1673
1674         I915_WRITE(ILK_DPFC_CONTROL, val ?
1675                    (reg | FBC_CTL_FALSE_COLOR) :
1676                    (reg & ~FBC_CTL_FALSE_COLOR));
1677
1678         mutex_unlock(&dev_priv->fbc.lock);
1679         return 0;
1680 }
1681
/* u64 debugfs attribute backed by i915_fbc_false_color_{get,set}. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1685
1686 static int i915_ips_status(struct seq_file *m, void *unused)
1687 {
1688         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1689         intel_wakeref_t wakeref;
1690
1691         if (!HAS_IPS(dev_priv))
1692                 return -ENODEV;
1693
1694         wakeref = intel_runtime_pm_get(dev_priv);
1695
1696         seq_printf(m, "Enabled by kernel parameter: %s\n",
1697                    yesno(i915_modparams.enable_ips));
1698
1699         if (INTEL_GEN(dev_priv) >= 8) {
1700                 seq_puts(m, "Currently: unknown\n");
1701         } else {
1702                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1703                         seq_puts(m, "Currently: enabled\n");
1704                 else
1705                         seq_puts(m, "Currently: disabled\n");
1706         }
1707
1708         intel_runtime_pm_put(dev_priv, wakeref);
1709
1710         return 0;
1711 }
1712
/*
 * i915_sr_status - report whether self-refresh is enabled.
 *
 * The self-refresh enable bit lives in a different register on each
 * platform generation, hence the ordered if-else chain below; gen9+
 * has no single global bit, so sr_enabled stays false and "disabled"
 * is reported there.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1741
1742 static int i915_emon_status(struct seq_file *m, void *unused)
1743 {
1744         struct drm_i915_private *i915 = node_to_i915(m->private);
1745         intel_wakeref_t wakeref;
1746
1747         if (!IS_GEN(i915, 5))
1748                 return -ENODEV;
1749
1750         with_intel_runtime_pm(i915, wakeref) {
1751                 unsigned long temp, chipset, gfx;
1752
1753                 temp = i915_mch_val(i915);
1754                 chipset = i915_chipset_val(i915);
1755                 gfx = i915_gfx_val(i915);
1756
1757                 seq_printf(m, "GMCH temp: %ld\n", temp);
1758                 seq_printf(m, "Chipset power: %ld\n", chipset);
1759                 seq_printf(m, "GFX power: %ld\n", gfx);
1760                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1761         }
1762
1763         return 0;
1764 }
1765
/*
 * i915_ring_freq_table - print the GPU-frequency to effective CPU/ring
 * frequency mapping kept by the PCU (LLC platforms only).
 *
 * Returns -ENODEV without LLC, or the error from taking pcu_lock.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	/* pcu_lock serializes access to the PCU mailbox used below. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/*
		 * The mailbox replies in ia_freq: byte 0 is the effective
		 * CPU frequency, byte 1 the effective ring frequency, both
		 * in 100 MHz units (hence the * 100 when printing).
		 */
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1814
1815 static int i915_opregion(struct seq_file *m, void *unused)
1816 {
1817         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1818         struct drm_device *dev = &dev_priv->drm;
1819         struct intel_opregion *opregion = &dev_priv->opregion;
1820         int ret;
1821
1822         ret = mutex_lock_interruptible(&dev->struct_mutex);
1823         if (ret)
1824                 goto out;
1825
1826         if (opregion->header)
1827                 seq_write(m, opregion->header, OPREGION_SIZE);
1828
1829         mutex_unlock(&dev->struct_mutex);
1830
1831 out:
1832         return 0;
1833 }
1834
1835 static int i915_vbt(struct seq_file *m, void *unused)
1836 {
1837         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1838
1839         if (opregion->vbt)
1840                 seq_write(m, opregion->vbt, opregion->vbt_size);
1841
1842         return 0;
1843 }
1844
1845 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1846 {
1847         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1848         struct drm_device *dev = &dev_priv->drm;
1849         struct intel_framebuffer *fbdev_fb = NULL;
1850         struct drm_framebuffer *drm_fb;
1851         int ret;
1852
1853         ret = mutex_lock_interruptible(&dev->struct_mutex);
1854         if (ret)
1855                 return ret;
1856
1857 #ifdef CONFIG_DRM_FBDEV_EMULATION
1858         if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1859                 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1860
1861                 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1862                            fbdev_fb->base.width,
1863                            fbdev_fb->base.height,
1864                            fbdev_fb->base.format->depth,
1865                            fbdev_fb->base.format->cpp[0] * 8,
1866                            fbdev_fb->base.modifier,
1867                            drm_framebuffer_read_refcount(&fbdev_fb->base));
1868                 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1869                 seq_putc(m, '\n');
1870         }
1871 #endif
1872
1873         mutex_lock(&dev->mode_config.fb_lock);
1874         drm_for_each_fb(drm_fb, dev) {
1875                 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1876                 if (fb == fbdev_fb)
1877                         continue;
1878
1879                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1880                            fb->base.width,
1881                            fb->base.height,
1882                            fb->base.format->depth,
1883                            fb->base.format->cpp[0] * 8,
1884                            fb->base.modifier,
1885                            drm_framebuffer_read_refcount(&fb->base));
1886                 describe_obj(m, intel_fb_obj(&fb->base));
1887                 seq_putc(m, '\n');
1888         }
1889         mutex_unlock(&dev->mode_config.fb_lock);
1890         mutex_unlock(&dev->struct_mutex);
1891
1892         return 0;
1893 }
1894
/* Print a ringbuffer's software bookkeeping: space, head, tail, emit. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1900
/*
 * i915_context_status - list every GEM context with its HW id, owning
 * task, and the per-engine state object and ringbuffer.
 *
 * The contexts list and per-context state are walked under
 * struct_mutex; taking it interruptibly may return -EINTR/-ERESTARTSYS.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* Only contexts on the hw_id list have a hw_id assigned. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file closed but context still alive */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when the context still has slices to remap. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1956
1957 static const char *swizzle_string(unsigned swizzle)
1958 {
1959         switch (swizzle) {
1960         case I915_BIT_6_SWIZZLE_NONE:
1961                 return "none";
1962         case I915_BIT_6_SWIZZLE_9:
1963                 return "bit9";
1964         case I915_BIT_6_SWIZZLE_9_10:
1965                 return "bit9/bit10";
1966         case I915_BIT_6_SWIZZLE_9_11:
1967                 return "bit9/bit11";
1968         case I915_BIT_6_SWIZZLE_9_10_11:
1969                 return "bit9/bit10/bit11";
1970         case I915_BIT_6_SWIZZLE_9_17:
1971                 return "bit9/bit17";
1972         case I915_BIT_6_SWIZZLE_9_10_17:
1973                 return "bit9/bit10/bit17";
1974         case I915_BIT_6_SWIZZLE_UNKNOWN:
1975                 return "unknown";
1976         }
1977
1978         return "bug";
1979 }
1980
/*
 * i915_swizzle_info - report the detected bit-6 swizzle modes for X/Y
 * tiling, plus the memory-configuration registers they were derived
 * from (which registers exist depends on the generation).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* gen3/4: DRAM channel config registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus tiling/arbiter control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2028
2029 static int count_irq_waiters(struct drm_i915_private *i915)
2030 {
2031         struct intel_engine_cs *engine;
2032         enum intel_engine_id id;
2033         int count = 0;
2034
2035         for_each_engine(engine, i915, id)
2036                 count += intel_engine_has_waiter(engine);
2037
2038         return count;
2039 }
2040
2041 static const char *rps_power_to_str(unsigned int power)
2042 {
2043         static const char * const strings[] = {
2044                 [LOW_POWER] = "low power",
2045                 [BETWEEN] = "mixed",
2046                 [HIGH_POWER] = "high power",
2047         };
2048
2049         if (power >= ARRAY_SIZE(strings) || !strings[power])
2050                 return "unknown";
2051
2052         return strings[power];
2053 }
2054
/*
 * i915_rps_boost_info - dump RPS (GPU frequency scaling) state: actual
 * and requested frequency, limits, per-client boost counts and the
 * autotuning up/down thresholds.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;
	struct drm_file *file;

	/*
	 * Only read the actual frequency if the device is already awake;
	 * otherwise fall back to the cached cur_freq initialized above.
	 */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* On VLV/CHV the actual frequency comes from the punit. */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-open-file boost counts; filelist_mutex guards dev->filelist. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw (_FW) reads, so hold forcewake explicitly. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2141
2142 static int i915_llc(struct seq_file *m, void *data)
2143 {
2144         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2145         const bool edram = INTEL_GEN(dev_priv) > 8;
2146
2147         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2148         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2149                    intel_uncore_edram_size(dev_priv)/1024/1024);
2150
2151         return 0;
2152 }
2153
2154 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2155 {
2156         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2157         intel_wakeref_t wakeref;
2158         struct drm_printer p;
2159
2160         if (!HAS_HUC(dev_priv))
2161                 return -ENODEV;
2162
2163         p = drm_seq_file_printer(m);
2164         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2165
2166         with_intel_runtime_pm(dev_priv, wakeref)
2167                 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2168
2169         return 0;
2170 }
2171
2172 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2173 {
2174         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2175         intel_wakeref_t wakeref;
2176         struct drm_printer p;
2177
2178         if (!HAS_GUC(dev_priv))
2179                 return -ENODEV;
2180
2181         p = drm_seq_file_printer(m);
2182         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2183
2184         with_intel_runtime_pm(dev_priv, wakeref) {
2185                 u32 tmp = I915_READ(GUC_STATUS);
2186                 u32 i;
2187
2188                 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2189                 seq_printf(m, "\tBootrom status = 0x%x\n",
2190                            (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2191                 seq_printf(m, "\tuKernel status = 0x%x\n",
2192                            (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2193                 seq_printf(m, "\tMIA Core status = 0x%x\n",
2194                            (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2195                 seq_puts(m, "\nScratch registers:\n");
2196                 for (i = 0; i < 16; i++) {
2197                         seq_printf(m, "\t%2d: \t0x%x\n",
2198                                    i, I915_READ(SOFT_SCRATCH(i)));
2199                 }
2200         }
2201
2202         return 0;
2203 }
2204
2205 static const char *
2206 stringify_guc_log_type(enum guc_log_buffer_type type)
2207 {
2208         switch (type) {
2209         case GUC_ISR_LOG_BUFFER:
2210                 return "ISR";
2211         case GUC_DPC_LOG_BUFFER:
2212                 return "DPC";
2213         case GUC_CRASH_DUMP_LOG_BUFFER:
2214                 return "CRASH";
2215         default:
2216                 MISSING_CASE(type);
2217         }
2218
2219         return "";
2220 }
2221
2222 static void i915_guc_log_info(struct seq_file *m,
2223                               struct drm_i915_private *dev_priv)
2224 {
2225         struct intel_guc_log *log = &dev_priv->guc.log;
2226         enum guc_log_buffer_type type;
2227
2228         if (!intel_guc_log_relay_enabled(log)) {
2229                 seq_puts(m, "GuC log relay disabled\n");
2230                 return;
2231         }
2232
2233         seq_puts(m, "GuC logging stats:\n");
2234
2235         seq_printf(m, "\tRelay full count: %u\n",
2236                    log->relay.full_count);
2237
2238         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2239                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2240                            stringify_guc_log_type(type),
2241                            log->stats[type].flush,
2242                            log->stats[type].sampled_overflow);
2243         }
2244 }
2245
2246 static void i915_guc_client_info(struct seq_file *m,
2247                                  struct drm_i915_private *dev_priv,
2248                                  struct intel_guc_client *client)
2249 {
2250         struct intel_engine_cs *engine;
2251         enum intel_engine_id id;
2252         uint64_t tot = 0;
2253
2254         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2255                 client->priority, client->stage_id, client->proc_desc_offset);
2256         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2257                 client->doorbell_id, client->doorbell_offset);
2258
2259         for_each_engine(engine, dev_priv, id) {
2260                 u64 submissions = client->submissions[id];
2261                 tot += submissions;
2262                 seq_printf(m, "\tSubmissions: %llu %s\n",
2263                                 submissions, engine->name);
2264         }
2265         seq_printf(m, "\tTotal: %llu\n", tot);
2266 }
2267
2268 static int i915_guc_info(struct seq_file *m, void *data)
2269 {
2270         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2271         const struct intel_guc *guc = &dev_priv->guc;
2272
2273         if (!USES_GUC(dev_priv))
2274                 return -ENODEV;
2275
2276         i915_guc_log_info(m, dev_priv);
2277
2278         if (!USES_GUC_SUBMISSION(dev_priv))
2279                 return 0;
2280
2281         GEM_BUG_ON(!guc->execbuf_client);
2282
2283         seq_printf(m, "\nDoorbell map:\n");
2284         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2285         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2286
2287         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2288         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2289         if (guc->preempt_client) {
2290                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2291                            guc->preempt_client);
2292                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2293         }
2294
2295         /* Add more as required ... */
2296
2297         return 0;
2298 }
2299
2300 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2301 {
2302         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2303         const struct intel_guc *guc = &dev_priv->guc;
2304         struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2305         struct intel_guc_client *client = guc->execbuf_client;
2306         unsigned int tmp;
2307         int index;
2308
2309         if (!USES_GUC_SUBMISSION(dev_priv))
2310                 return -ENODEV;
2311
2312         for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2313                 struct intel_engine_cs *engine;
2314
2315                 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2316                         continue;
2317
2318                 seq_printf(m, "GuC stage descriptor %u:\n", index);
2319                 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2320                 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2321                 seq_printf(m, "\tPriority: %d\n", desc->priority);
2322                 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2323                 seq_printf(m, "\tEngines used: 0x%x\n",
2324                            desc->engines_used);
2325                 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2326                            desc->db_trigger_phy,
2327                            desc->db_trigger_cpu,
2328                            desc->db_trigger_uk);
2329                 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2330                            desc->process_desc);
2331                 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2332                            desc->wq_addr, desc->wq_size);
2333                 seq_putc(m, '\n');
2334
2335                 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2336                         u32 guc_engine_id = engine->guc_id;
2337                         struct guc_execlist_context *lrc =
2338                                                 &desc->lrc[guc_engine_id];
2339
2340                         seq_printf(m, "\t%s LRC:\n", engine->name);
2341                         seq_printf(m, "\t\tContext desc: 0x%x\n",
2342                                    lrc->context_desc);
2343                         seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2344                         seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2345                         seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2346                         seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2347                         seq_putc(m, '\n');
2348                 }
2349         }
2350
2351         return 0;
2352 }
2353
/*
 * i915_guc_log_dump - debugfs: hexdump a GuC log buffer.
 *
 * Dumps either the captured load-error log (when the info_ent carries
 * non-NULL data) or the live GuC log buffer, four 32-bit words per line.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_i915_private *dev_priv = node_to_i915(node);
        bool dump_load_err = !!node->info_ent->data;
        struct drm_i915_gem_object *obj = NULL;
        u32 *log;
        int i = 0;

        if (!HAS_GUC(dev_priv))
                return -ENODEV;

        if (dump_load_err)
                obj = dev_priv->guc.load_err_log;
        else if (dev_priv->guc.log.vma)
                obj = dev_priv->guc.log.vma->obj;

        /* No log captured/allocated yet: empty dump, not an error. */
        if (!obj)
                return 0;

        log = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(log)) {
                DRM_DEBUG("Failed to pin object\n");
                seq_puts(m, "(log data unaccessible)\n");
                return PTR_ERR(log);
        }

        /*
         * Four words per line.  NOTE(review): assumes obj->base.size is a
         * multiple of 16 bytes so i + 3 stays in bounds -- confirm if the
         * log object can ever be oddly sized.
         */
        for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
                seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
                           *(log + i), *(log + i + 1),
                           *(log + i + 2), *(log + i + 3));

        seq_putc(m, '\n');

        i915_gem_object_unpin_map(obj);

        return 0;
}
2392
2393 static int i915_guc_log_level_get(void *data, u64 *val)
2394 {
2395         struct drm_i915_private *dev_priv = data;
2396
2397         if (!USES_GUC(dev_priv))
2398                 return -ENODEV;
2399
2400         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2401
2402         return 0;
2403 }
2404
2405 static int i915_guc_log_level_set(void *data, u64 val)
2406 {
2407         struct drm_i915_private *dev_priv = data;
2408
2409         if (!USES_GUC(dev_priv))
2410                 return -ENODEV;
2411
2412         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2413 }
2414
/* debugfs file: read/write the GuC log level as a decimal integer. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
                        i915_guc_log_level_get, i915_guc_log_level_set,
                        "%lld\n");
2418
2419 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2420 {
2421         struct drm_i915_private *dev_priv = inode->i_private;
2422
2423         if (!USES_GUC(dev_priv))
2424                 return -ENODEV;
2425
2426         file->private_data = &dev_priv->guc.log;
2427
2428         return intel_guc_log_relay_open(&dev_priv->guc.log);
2429 }
2430
2431 static ssize_t
2432 i915_guc_log_relay_write(struct file *filp,
2433                          const char __user *ubuf,
2434                          size_t cnt,
2435                          loff_t *ppos)
2436 {
2437         struct intel_guc_log *log = filp->private_data;
2438
2439         intel_guc_log_relay_flush(log);
2440
2441         return cnt;
2442 }
2443
2444 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2445 {
2446         struct drm_i915_private *dev_priv = inode->i_private;
2447
2448         intel_guc_log_relay_close(&dev_priv->guc.log);
2449
2450         return 0;
2451 }
2452
/*
 * guc_log_relay debugfs file: open starts the relay, any write flushes
 * the GuC log buffer into it, release tears it down.
 */
static const struct file_operations i915_guc_log_relay_fops = {
        .owner = THIS_MODULE,
        .open = i915_guc_log_relay_open,
        .write = i915_guc_log_relay_write,
        .release = i915_guc_log_relay_release,
};
2459
2460 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2461 {
2462         u8 val;
2463         static const char * const sink_status[] = {
2464                 "inactive",
2465                 "transition to active, capture and display",
2466                 "active, display from RFB",
2467                 "active, capture and display on sink device timings",
2468                 "transition to inactive, capture and display, timing re-sync",
2469                 "reserved",
2470                 "reserved",
2471                 "sink internal error",
2472         };
2473         struct drm_connector *connector = m->private;
2474         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2475         struct intel_dp *intel_dp =
2476                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2477         int ret;
2478
2479         if (!CAN_PSR(dev_priv)) {
2480                 seq_puts(m, "PSR Unsupported\n");
2481                 return -ENODEV;
2482         }
2483
2484         if (connector->status != connector_status_connected)
2485                 return -ENODEV;
2486
2487         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2488
2489         if (ret == 1) {
2490                 const char *str = "unknown";
2491
2492                 val &= DP_PSR_SINK_STATE_MASK;
2493                 if (val < ARRAY_SIZE(sink_status))
2494                         str = sink_status[val];
2495                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2496         } else {
2497                 return ret;
2498         }
2499
2500         return 0;
2501 }
/* Generates i915_psr_sink_status_fops wrapping the show routine above. */
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2503
2504 static void
2505 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2506 {
2507         u32 val, psr_status;
2508
2509         if (dev_priv->psr.psr2_enabled) {
2510                 static const char * const live_status[] = {
2511                         "IDLE",
2512                         "CAPTURE",
2513                         "CAPTURE_FS",
2514                         "SLEEP",
2515                         "BUFON_FW",
2516                         "ML_UP",
2517                         "SU_STANDBY",
2518                         "FAST_SLEEP",
2519                         "DEEP_SLEEP",
2520                         "BUF_ON",
2521                         "TG_ON"
2522                 };
2523                 psr_status = I915_READ(EDP_PSR2_STATUS);
2524                 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2525                         EDP_PSR2_STATUS_STATE_SHIFT;
2526                 if (val < ARRAY_SIZE(live_status)) {
2527                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2528                                    psr_status, live_status[val]);
2529                         return;
2530                 }
2531         } else {
2532                 static const char * const live_status[] = {
2533                         "IDLE",
2534                         "SRDONACK",
2535                         "SRDENT",
2536                         "BUFOFF",
2537                         "BUFON",
2538                         "AUXACK",
2539                         "SRDOFFACK",
2540                         "SRDENT_ON",
2541                 };
2542                 psr_status = I915_READ(EDP_PSR_STATUS);
2543                 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2544                         EDP_PSR_STATUS_STATE_SHIFT;
2545                 if (val < ARRAY_SIZE(live_status)) {
2546                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2547                                    psr_status, live_status[val]);
2548                         return;
2549                 }
2550         }
2551
2552         seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2553 }
2554
/*
 * i915_edp_psr_status - debugfs: dump eDP PSR software and hardware state.
 *
 * Reports sink support, the enabled PSR version, busy frontbuffer bits,
 * the live enable bit from the PSR control register, the HSW/BDW perf
 * counter, and (when IRQ debug is on) the last entry/exit timestamps.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        u32 psrperf = 0;
        bool enabled = false;
        bool sink_support;

        if (!HAS_PSR(dev_priv))
                return -ENODEV;

        sink_support = dev_priv->psr.sink_support;
        seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
        if (!sink_support)
                return 0;

        /* Hold a wakeref across all register reads below. */
        wakeref = intel_runtime_pm_get(dev_priv);

        /* psr.lock protects the psr software state we print. */
        mutex_lock(&dev_priv->psr.lock);
        seq_printf(m, "PSR mode: %s\n",
                   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
        seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
        seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
                   dev_priv->psr.busy_frontbuffer_bits);

        if (dev_priv->psr.psr2_enabled)
                enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
        else
                enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

        seq_printf(m, "Main link in standby mode: %s\n",
                   yesno(dev_priv->psr.link_standby));

        seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

        /*
         * SKL+ Perf counter is reset to 0 everytime DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                psrperf = I915_READ(EDP_PSR_PERF_CNT) &
                        EDP_PSR_PERF_CNT_MASK;

                seq_printf(m, "Performance_Counter: %u\n", psrperf);
        }

        psr_source_status(dev_priv, m);
        mutex_unlock(&dev_priv->psr.lock);

        /* Timestamps are updated from the PSR interrupt handler. */
        if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
                seq_printf(m, "Last attempted entry at: %lld\n",
                           dev_priv->psr.last_entry_attempt);
                seq_printf(m, "Last exit at: %lld\n",
                           dev_priv->psr.last_exit);
        }

        intel_runtime_pm_put(dev_priv, wakeref);
        return 0;
}
2613
/*
 * i915_edp_psr_debug_set - debugfs setter for the PSR debug mask.
 *
 * Applying the mask may require a modeset, so the update runs under a
 * modeset acquire context with the standard deadlock/backoff retry loop.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_modeset_acquire_ctx ctx;
        intel_wakeref_t wakeref;
        int ret;

        if (!CAN_PSR(dev_priv))
                return -ENODEV;

        DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

        wakeref = intel_runtime_pm_get(dev_priv);

        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
        ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
        if (ret == -EDEADLK) {
                /* Drop contended locks and retry from scratch. */
                ret = drm_modeset_backoff(&ctx);
                if (!ret)
                        goto retry;
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        intel_runtime_pm_put(dev_priv, wakeref);

        return ret;
}
2646
2647 static int
2648 i915_edp_psr_debug_get(void *data, u64 *val)
2649 {
2650         struct drm_i915_private *dev_priv = data;
2651
2652         if (!CAN_PSR(dev_priv))
2653                 return -ENODEV;
2654
2655         *val = READ_ONCE(dev_priv->psr.debug);
2656         return 0;
2657 }
2658
/* debugfs file: read/write the PSR debug mask as an unsigned decimal. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
                        i915_edp_psr_debug_get, i915_edp_psr_debug_set,
                        "%llu\n");
2662
/*
 * i915_energy_uJ - debugfs: report the GPU energy counter in microjoules.
 *
 * Reads the RAPL energy-unit exponent from MSR_RAPL_POWER_UNIT (bits
 * 12:8) and scales the MCH_SECP_NRG_STTS counter by it.  Gen6+ only.
 * NOTE(review): 'power' is reused for both the MSR value and the raw
 * counter; 1000000 * power could overflow u64 for a very large counter
 * value -- confirm against the register width.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        unsigned long long power;
        intel_wakeref_t wakeref;
        u32 units;

        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;

        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
                return -ENODEV;

        units = (power & 0x1f00) >> 8; /* energy status units exponent */
        with_intel_runtime_pm(dev_priv, wakeref)
                power = I915_READ(MCH_SECP_NRG_STTS);

        power = (1000000 * power) >> units; /* convert to uJ */
        seq_printf(m, "%llu", power);

        return 0;
}
2685
/*
 * i915_runtime_pm_status - debugfs: dump runtime power management state.
 *
 * Reports the driver wakeref state, GT idleness, IRQ enable state, the
 * PM usage count (when CONFIG_PM is set), the PCI power state, and --
 * with wakeref debugging enabled -- every outstanding wakeref holder.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct pci_dev *pdev = dev_priv->drm.pdev;

        /* Note: only a warning; the rest of the dump still runs. */
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");

        seq_printf(m, "Runtime power status: %s\n",
                   enableddisabled(!dev_priv->power_domains.wakeref));

        seq_printf(m, "GPU idle: %s (epoch %u)\n",
                   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
        seq_printf(m, "Usage count: %d\n",
                   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
        seq_printf(m, "PCI device power state: %s [%d]\n",
                   pci_power_name(pdev->current_state),
                   pdev->current_state);

        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
                struct drm_printer p = drm_seq_file_printer(m);

                print_intel_runtime_pm_wakeref(dev_priv, &p);
        }

        return 0;
}
2719
2720 static int i915_power_domain_info(struct seq_file *m, void *unused)
2721 {
2722         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2723         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2724         int i;
2725
2726         mutex_lock(&power_domains->lock);
2727
2728         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2729         for (i = 0; i < power_domains->power_well_count; i++) {
2730                 struct i915_power_well *power_well;
2731                 enum intel_display_power_domain power_domain;
2732
2733                 power_well = &power_domains->power_wells[i];
2734                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2735                            power_well->count);
2736
2737                 for_each_power_domain(power_domain, power_well->desc->domains)
2738                         seq_printf(m, "  %-23s %d\n",
2739                                  intel_display_power_domain_str(power_domain),
2740                                  power_domains->domain_use_count[power_domain]);
2741         }
2742
2743         mutex_unlock(&power_domains->lock);
2744
2745         return 0;
2746 }
2747
/*
 * i915_dmc_info - debugfs: dump DMC (CSR) firmware state.
 *
 * Prints whether the firmware payload loaded, its path and version, the
 * DC-state transition counters for the current platform, and the CSR
 * program/SSP/HTP registers.  Returns -ENODEV on hardware without CSR.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct intel_csr *csr;

        if (!HAS_CSR(dev_priv))
                return -ENODEV;

        csr = &dev_priv->csr;

        wakeref = intel_runtime_pm_get(dev_priv);

        seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
        seq_printf(m, "path: %s\n", csr->fw_path);

        /* No payload: still dump the raw CSR registers below. */
        if (!csr->dmc_payload)
                goto out;

        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));

        /* Counter register offsets below are only known up to gen11. */
        if (WARN_ON(INTEL_GEN(dev_priv) > 11))
                goto out;

        seq_printf(m, "DC3 -> DC5 count: %d\n",
                   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
                                                    SKL_CSR_DC3_DC5_COUNT));
        if (!IS_GEN9_LP(dev_priv))
                seq_printf(m, "DC5 -> DC6 count: %d\n",
                           I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
        seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
        seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
        seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

        intel_runtime_pm_put(dev_priv, wakeref);

        return 0;
}
2789
2790 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2791                                  struct drm_display_mode *mode)
2792 {
2793         int i;
2794
2795         for (i = 0; i < tabs; i++)
2796                 seq_putc(m, '\t');
2797
2798         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2799                    mode->base.id, mode->name,
2800                    mode->vrefresh, mode->clock,
2801                    mode->hdisplay, mode->hsync_start,
2802                    mode->hsync_end, mode->htotal,
2803                    mode->vdisplay, mode->vsync_start,
2804                    mode->vsync_end, mode->vtotal,
2805                    mode->type, mode->flags);
2806 }
2807
2808 static void intel_encoder_info(struct seq_file *m,
2809                                struct intel_crtc *intel_crtc,
2810                                struct intel_encoder *intel_encoder)
2811 {
2812         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2813         struct drm_device *dev = &dev_priv->drm;
2814         struct drm_crtc *crtc = &intel_crtc->base;
2815         struct intel_connector *intel_connector;
2816         struct drm_encoder *encoder;
2817
2818         encoder = &intel_encoder->base;
2819         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2820                    encoder->base.id, encoder->name);
2821         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2822                 struct drm_connector *connector = &intel_connector->base;
2823                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2824                            connector->base.id,
2825                            connector->name,
2826                            drm_get_connector_status_name(connector->status));
2827                 if (connector->status == connector_status_connected) {
2828                         struct drm_display_mode *mode = &crtc->mode;
2829                         seq_printf(m, ", mode:\n");
2830                         intel_seq_print_mode(m, 2, mode);
2831                 } else {
2832                         seq_putc(m, '\n');
2833                 }
2834         }
2835 }
2836
2837 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2838 {
2839         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2840         struct drm_device *dev = &dev_priv->drm;
2841         struct drm_crtc *crtc = &intel_crtc->base;
2842         struct intel_encoder *intel_encoder;
2843         struct drm_plane_state *plane_state = crtc->primary->state;
2844         struct drm_framebuffer *fb = plane_state->fb;
2845
2846         if (fb)
2847                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2848                            fb->base.id, plane_state->src_x >> 16,
2849                            plane_state->src_y >> 16, fb->width, fb->height);
2850         else
2851                 seq_puts(m, "\tprimary plane disabled\n");
2852         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2853                 intel_encoder_info(m, intel_crtc, intel_encoder);
2854 }
2855
2856 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2857 {
2858         struct drm_display_mode *mode = panel->fixed_mode;
2859
2860         seq_printf(m, "\tfixed mode:\n");
2861         intel_seq_print_mode(m, 2, mode);
2862 }
2863
2864 static void intel_dp_info(struct seq_file *m,
2865                           struct intel_connector *intel_connector)
2866 {
2867         struct intel_encoder *intel_encoder = intel_connector->encoder;
2868         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2869
2870         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2871         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2872         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2873                 intel_panel_info(m, &intel_connector->panel);
2874
2875         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2876                                 &intel_dp->aux);
2877 }
2878
2879 static void intel_dp_mst_info(struct seq_file *m,
2880                           struct intel_connector *intel_connector)
2881 {
2882         struct intel_encoder *intel_encoder = intel_connector->encoder;
2883         struct intel_dp_mst_encoder *intel_mst =
2884                 enc_to_mst(&intel_encoder->base);
2885         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2886         struct intel_dp *intel_dp = &intel_dig_port->dp;
2887         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2888                                         intel_connector->port);
2889
2890         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2891 }
2892
2893 static void intel_hdmi_info(struct seq_file *m,
2894                             struct intel_connector *intel_connector)
2895 {
2896         struct intel_encoder *intel_encoder = intel_connector->encoder;
2897         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2898
2899         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2900 }
2901
/* LVDS connectors: only the fixed panel mode is interesting. */
static void intel_lvds_info(struct seq_file *m,
                            struct intel_connector *intel_connector)
{
        intel_panel_info(m, &intel_connector->panel);
}
2907
2908 static void intel_connector_info(struct seq_file *m,
2909                                  struct drm_connector *connector)
2910 {
2911         struct intel_connector *intel_connector = to_intel_connector(connector);
2912         struct intel_encoder *intel_encoder = intel_connector->encoder;
2913         struct drm_display_mode *mode;
2914
2915         seq_printf(m, "connector %d: type %s, status: %s\n",
2916                    connector->base.id, connector->name,
2917                    drm_get_connector_status_name(connector->status));
2918
2919         if (connector->status == connector_status_disconnected)
2920                 return;
2921
2922         seq_printf(m, "\tname: %s\n", connector->display_info.name);
2923         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2924                    connector->display_info.width_mm,
2925                    connector->display_info.height_mm);
2926         seq_printf(m, "\tsubpixel order: %s\n",
2927                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2928         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2929
2930         if (!intel_encoder)
2931                 return;
2932
2933         switch (connector->connector_type) {
2934         case DRM_MODE_CONNECTOR_DisplayPort:
2935         case DRM_MODE_CONNECTOR_eDP:
2936                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2937                         intel_dp_mst_info(m, intel_connector);
2938                 else
2939                         intel_dp_info(m, intel_connector);
2940                 break;
2941         case DRM_MODE_CONNECTOR_LVDS:
2942                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2943                         intel_lvds_info(m, intel_connector);
2944                 break;
2945         case DRM_MODE_CONNECTOR_HDMIA:
2946                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2947                     intel_encoder->type == INTEL_OUTPUT_DDI)
2948                         intel_hdmi_info(m, intel_connector);
2949                 break;
2950         default:
2951                 break;
2952         }
2953
2954         seq_printf(m, "\tmodes:\n");
2955         list_for_each_entry(mode, &connector->modes, head)
2956                 intel_seq_print_mode(m, 2, mode);
2957 }
2958
/* Three-letter tag for a DRM plane type ("unknown" if out of range). */
static const char *plane_type(enum drm_plane_type type)
{
        switch (type) {
        case DRM_PLANE_TYPE_OVERLAY:
                return "OVL";
        case DRM_PLANE_TYPE_PRIMARY:
                return "PRI";
        case DRM_PLANE_TYPE_CURSOR:
                return "CUR";
        /*
         * Deliberately omitting default: to generate compiler warnings
         * when a new drm_plane_type gets added.
         */
        }

        return "unknown";
}
2976
/*
 * Format a plane rotation bitmask into buf as a space-separated list of
 * set DRM_MODE_ROTATE_/REFLECT_ flags followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
        /*
         * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
        snprintf(buf, bufsize,
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
                 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
                 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
}
2993
/*
 * Print every plane attached to a crtc: type, crtc position/size,
 * 16.16 fixed-point source rectangle, pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_plane *intel_plane;

        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
                struct drm_plane_state *state;
                struct drm_plane *plane = &intel_plane->base;
                struct drm_format_name_buf format_name;
                char rot_str[48];

                if (!plane->state) {
                        seq_puts(m, "plane->state is NULL!\n");
                        continue;
                }

                state = plane->state;

                if (state->fb) {
                        drm_get_format_name(state->fb->format->format,
                                            &format_name);
                } else {
                        sprintf(format_name.str, "N/A");
                }

                plane_rotation(rot_str, sizeof(rot_str), state->rotation);

                /*
                 * src_* are 16.16 fixed point; (frac * 15625) >> 10 is
                 * frac * 1000000 / 65536, i.e. the fractional part as
                 * four decimal digits.
                 */
                seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
                           plane->base.id,
                           plane_type(intel_plane->base.type),
                           state->crtc_x, state->crtc_y,
                           state->crtc_w, state->crtc_h,
                           (state->src_x >> 16),
                           ((state->src_x & 0xffff) * 15625) >> 10,
                           (state->src_y >> 16),
                           ((state->src_y & 0xffff) * 15625) >> 10,
                           (state->src_w >> 16),
                           ((state->src_w & 0xffff) * 15625) >> 10,
                           (state->src_h >> 16),
                           ((state->src_h & 0xffff) * 15625) >> 10,
                           format_name.str,
                           rot_str);
        }
}
3039
3040 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3041 {
3042         struct intel_crtc_state *pipe_config;
3043         int num_scalers = intel_crtc->num_scalers;
3044         int i;
3045
3046         pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3047
3048         /* Not all platformas have a scaler */
3049         if (num_scalers) {
3050                 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3051                            num_scalers,
3052                            pipe_config->scaler_state.scaler_users,
3053                            pipe_config->scaler_state.scaler_id);
3054
3055                 for (i = 0; i < num_scalers; i++) {
3056                         struct intel_scaler *sc =
3057                                         &pipe_config->scaler_state.scalers[i];
3058
3059                         seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3060                                    i, yesno(sc->in_use), sc->mode);
3061                 }
3062                 seq_puts(m, "\n");
3063         } else {
3064                 seq_puts(m, "\tNo scalers available on this platform\n");
3065         }
3066 }
3067
3068 static int i915_display_info(struct seq_file *m, void *unused)
3069 {
3070         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3071         struct drm_device *dev = &dev_priv->drm;
3072         struct intel_crtc *crtc;
3073         struct drm_connector *connector;
3074         struct drm_connector_list_iter conn_iter;
3075         intel_wakeref_t wakeref;
3076
3077         wakeref = intel_runtime_pm_get(dev_priv);
3078
3079         seq_printf(m, "CRTC info\n");
3080         seq_printf(m, "---------\n");
3081         for_each_intel_crtc(dev, crtc) {
3082                 struct intel_crtc_state *pipe_config;
3083
3084                 drm_modeset_lock(&crtc->base.mutex, NULL);
3085                 pipe_config = to_intel_crtc_state(crtc->base.state);
3086
3087                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3088                            crtc->base.base.id, pipe_name(crtc->pipe),
3089                            yesno(pipe_config->base.active),
3090                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3091                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3092
3093                 if (pipe_config->base.active) {
3094                         struct intel_plane *cursor =
3095                                 to_intel_plane(crtc->base.cursor);
3096
3097                         intel_crtc_info(m, crtc);
3098
3099                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3100                                    yesno(cursor->base.state->visible),
3101                                    cursor->base.state->crtc_x,
3102                                    cursor->base.state->crtc_y,
3103                                    cursor->base.state->crtc_w,
3104                                    cursor->base.state->crtc_h,
3105                                    cursor->cursor.base);
3106                         intel_scaler_info(m, crtc);
3107                         intel_plane_info(m, crtc);
3108                 }
3109
3110                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3111                            yesno(!crtc->cpu_fifo_underrun_disabled),
3112                            yesno(!crtc->pch_fifo_underrun_disabled));
3113                 drm_modeset_unlock(&crtc->base.mutex);
3114         }
3115
3116         seq_printf(m, "\n");
3117         seq_printf(m, "Connector info\n");
3118         seq_printf(m, "--------------\n");
3119         mutex_lock(&dev->mode_config.mutex);
3120         drm_connector_list_iter_begin(dev, &conn_iter);
3121         drm_for_each_connector_iter(connector, &conn_iter)
3122                 intel_connector_info(m, connector);
3123         drm_connector_list_iter_end(&conn_iter);
3124         mutex_unlock(&dev->mode_config.mutex);
3125
3126         intel_runtime_pm_put(dev_priv, wakeref);
3127
3128         return 0;
3129 }
3130
3131 static int i915_engine_info(struct seq_file *m, void *unused)
3132 {
3133         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3134         struct intel_engine_cs *engine;
3135         intel_wakeref_t wakeref;
3136         enum intel_engine_id id;
3137         struct drm_printer p;
3138
3139         wakeref = intel_runtime_pm_get(dev_priv);
3140
3141         seq_printf(m, "GT awake? %s (epoch %u)\n",
3142                    yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3143         seq_printf(m, "Global active requests: %d\n",
3144                    dev_priv->gt.active_requests);
3145         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3146                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3147
3148         p = drm_seq_file_printer(m);
3149         for_each_engine(engine, dev_priv, id)
3150                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3151
3152         intel_runtime_pm_put(dev_priv, wakeref);
3153
3154         return 0;
3155 }
3156
3157 static int i915_rcs_topology(struct seq_file *m, void *unused)
3158 {
3159         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3160         struct drm_printer p = drm_seq_file_printer(m);
3161
3162         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3163
3164         return 0;
3165 }
3166
3167 static int i915_shrinker_info(struct seq_file *m, void *unused)
3168 {
3169         struct drm_i915_private *i915 = node_to_i915(m->private);
3170
3171         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3172         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3173
3174         return 0;
3175 }
3176
3177 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3178 {
3179         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3180         struct drm_device *dev = &dev_priv->drm;
3181         int i;
3182
3183         drm_modeset_lock_all(dev);
3184         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3185                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3186
3187                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3188                            pll->info->id);
3189                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3190                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3191                 seq_printf(m, " tracked hardware state:\n");
3192                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3193                 seq_printf(m, " dpll_md: 0x%08x\n",
3194                            pll->state.hw_state.dpll_md);
3195                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3196                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3197                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3198                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3199                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3200                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3201                            pll->state.hw_state.mg_refclkin_ctl);
3202                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3203                            pll->state.hw_state.mg_clktop2_coreclkctl1);
3204                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3205                            pll->state.hw_state.mg_clktop2_hsclkctl);
3206                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
3207                            pll->state.hw_state.mg_pll_div0);
3208                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
3209                            pll->state.hw_state.mg_pll_div1);
3210                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
3211                            pll->state.hw_state.mg_pll_lf);
3212                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3213                            pll->state.hw_state.mg_pll_frac_lock);
3214                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3215                            pll->state.hw_state.mg_pll_ssc);
3216                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
3217                            pll->state.hw_state.mg_pll_bias);
3218                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3219                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
3220         }
3221         drm_modeset_unlock_all(dev);
3222
3223         return 0;
3224 }
3225
3226 static int i915_wa_registers(struct seq_file *m, void *unused)
3227 {
3228         struct drm_i915_private *i915 = node_to_i915(m->private);
3229         const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3230         struct i915_wa *wa;
3231         unsigned int i;
3232
3233         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3234         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3235                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3236                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3237
3238         return 0;
3239 }
3240
3241 static int i915_ipc_status_show(struct seq_file *m, void *data)
3242 {
3243         struct drm_i915_private *dev_priv = m->private;
3244
3245         seq_printf(m, "Isochronous Priority Control: %s\n",
3246                         yesno(dev_priv->ipc_enabled));
3247         return 0;
3248 }
3249
3250 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3251 {
3252         struct drm_i915_private *dev_priv = inode->i_private;
3253
3254         if (!HAS_IPC(dev_priv))
3255                 return -ENODEV;
3256
3257         return single_open(file, i915_ipc_status_show, dev_priv);
3258 }
3259
3260 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3261                                      size_t len, loff_t *offp)
3262 {
3263         struct seq_file *m = file->private_data;
3264         struct drm_i915_private *dev_priv = m->private;
3265         intel_wakeref_t wakeref;
3266         bool enable;
3267         int ret;
3268
3269         ret = kstrtobool_from_user(ubuf, len, &enable);
3270         if (ret < 0)
3271                 return ret;
3272
3273         with_intel_runtime_pm(dev_priv, wakeref) {
3274                 if (!dev_priv->ipc_enabled && enable)
3275                         DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3276                 dev_priv->wm.distrust_bios_wm = true;
3277                 dev_priv->ipc_enabled = enable;
3278                 intel_enable_ipc(dev_priv);
3279         }
3280
3281         return len;
3282 }
3283
/* i915_ipc_status debugfs file: read shows IPC state, write toggles it. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3292
3293 static int i915_ddb_info(struct seq_file *m, void *unused)
3294 {
3295         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3296         struct drm_device *dev = &dev_priv->drm;
3297         struct skl_ddb_entry *entry;
3298         struct intel_crtc *crtc;
3299
3300         if (INTEL_GEN(dev_priv) < 9)
3301                 return -ENODEV;
3302
3303         drm_modeset_lock_all(dev);
3304
3305         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3306
3307         for_each_intel_crtc(&dev_priv->drm, crtc) {
3308                 struct intel_crtc_state *crtc_state =
3309                         to_intel_crtc_state(crtc->base.state);
3310                 enum pipe pipe = crtc->pipe;
3311                 enum plane_id plane_id;
3312
3313                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3314
3315                 for_each_plane_id_on_crtc(crtc, plane_id) {
3316                         entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3317                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3318                                    entry->start, entry->end,
3319                                    skl_ddb_entry_size(entry));
3320                 }
3321
3322                 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3323                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3324                            entry->end, skl_ddb_entry_size(entry));
3325         }
3326
3327         drm_modeset_unlock_all(dev);
3328
3329         return 0;
3330 }
3331
/*
 * Print the DRRS (Display Refresh Rate Switching) status for one CRTC:
 * the connectors routed to it, the DRRS type reported by the VBT, and —
 * when the current CRTC state has DRRS — the idleness state and the
 * refresh rate currently selected.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently driven by this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* What the video BIOS table claims about DRRS support. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex is held around all drrs->dp/state reads below. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unexpected rate type: report it and bail out. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3406
3407 static int i915_drrs_status(struct seq_file *m, void *unused)
3408 {
3409         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3410         struct drm_device *dev = &dev_priv->drm;
3411         struct intel_crtc *intel_crtc;
3412         int active_crtc_cnt = 0;
3413
3414         drm_modeset_lock_all(dev);
3415         for_each_intel_crtc(dev, intel_crtc) {
3416                 if (intel_crtc->base.state->active) {
3417                         active_crtc_cnt++;
3418                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3419
3420                         drrs_status_per_crtc(m, dev, intel_crtc);
3421                 }
3422         }
3423         drm_modeset_unlock_all(dev);
3424
3425         if (!active_crtc_cnt)
3426                 seq_puts(m, "No active crtc found\n");
3427
3428         return 0;
3429 }
3430
3431 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3432 {
3433         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3434         struct drm_device *dev = &dev_priv->drm;
3435         struct intel_encoder *intel_encoder;
3436         struct intel_digital_port *intel_dig_port;
3437         struct drm_connector *connector;
3438         struct drm_connector_list_iter conn_iter;
3439
3440         drm_connector_list_iter_begin(dev, &conn_iter);
3441         drm_for_each_connector_iter(connector, &conn_iter) {
3442                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3443                         continue;
3444
3445                 intel_encoder = intel_attached_encoder(connector);
3446                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3447                         continue;
3448
3449                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3450                 if (!intel_dig_port->dp.can_mst)
3451                         continue;
3452
3453                 seq_printf(m, "MST Source Port %c\n",
3454                            port_name(intel_dig_port->base.port));
3455                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3456         }
3457         drm_connector_list_iter_end(&conn_iter);
3458
3459         return 0;
3460 }
3461
3462 static ssize_t i915_displayport_test_active_write(struct file *file,
3463                                                   const char __user *ubuf,
3464                                                   size_t len, loff_t *offp)
3465 {
3466         char *input_buffer;
3467         int status = 0;
3468         struct drm_device *dev;
3469         struct drm_connector *connector;
3470         struct drm_connector_list_iter conn_iter;
3471         struct intel_dp *intel_dp;
3472         int val = 0;
3473
3474         dev = ((struct seq_file *)file->private_data)->private;
3475
3476         if (len == 0)
3477                 return 0;
3478
3479         input_buffer = memdup_user_nul(ubuf, len);
3480         if (IS_ERR(input_buffer))
3481                 return PTR_ERR(input_buffer);
3482
3483         DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3484
3485         drm_connector_list_iter_begin(dev, &conn_iter);
3486         drm_for_each_connector_iter(connector, &conn_iter) {
3487                 struct intel_encoder *encoder;
3488
3489                 if (connector->connector_type !=
3490                     DRM_MODE_CONNECTOR_DisplayPort)
3491                         continue;
3492
3493                 encoder = to_intel_encoder(connector->encoder);
3494                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3495                         continue;
3496
3497                 if (encoder && connector->status == connector_status_connected) {
3498                         intel_dp = enc_to_intel_dp(&encoder->base);
3499                         status = kstrtoint(input_buffer, 10, &val);
3500                         if (status < 0)
3501                                 break;
3502                         DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3503                         /* To prevent erroneous activation of the compliance
3504                          * testing code, only accept an actual value of 1 here
3505                          */
3506                         if (val == 1)
3507                                 intel_dp->compliance.test_active = 1;
3508                         else
3509                                 intel_dp->compliance.test_active = 0;
3510                 }
3511         }
3512         drm_connector_list_iter_end(&conn_iter);
3513         kfree(input_buffer);
3514         if (status < 0)
3515                 return status;
3516
3517         *offp += len;
3518         return len;
3519 }
3520
3521 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3522 {
3523         struct drm_i915_private *dev_priv = m->private;
3524         struct drm_device *dev = &dev_priv->drm;
3525         struct drm_connector *connector;
3526         struct drm_connector_list_iter conn_iter;
3527         struct intel_dp *intel_dp;
3528
3529         drm_connector_list_iter_begin(dev, &conn_iter);
3530         drm_for_each_connector_iter(connector, &conn_iter) {
3531                 struct intel_encoder *encoder;
3532
3533                 if (connector->connector_type !=
3534                     DRM_MODE_CONNECTOR_DisplayPort)
3535                         continue;
3536
3537                 encoder = to_intel_encoder(connector->encoder);
3538                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3539                         continue;
3540
3541                 if (encoder && connector->status == connector_status_connected) {
3542                         intel_dp = enc_to_intel_dp(&encoder->base);
3543                         if (intel_dp->compliance.test_active)
3544                                 seq_puts(m, "1");
3545                         else
3546                                 seq_puts(m, "0");
3547                 } else
3548                         seq_puts(m, "0");
3549         }
3550         drm_connector_list_iter_end(&conn_iter);
3551
3552         return 0;
3553 }
3554
3555 static int i915_displayport_test_active_open(struct inode *inode,
3556                                              struct file *file)
3557 {
3558         return single_open(file, i915_displayport_test_active_show,
3559                            inode->i_private);
3560 }
3561
/* dp_test_active debugfs file: read shows test state, write arms/disarms. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3570
3571 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3572 {
3573         struct drm_i915_private *dev_priv = m->private;
3574         struct drm_device *dev = &dev_priv->drm;
3575         struct drm_connector *connector;
3576         struct drm_connector_list_iter conn_iter;
3577         struct intel_dp *intel_dp;
3578
3579         drm_connector_list_iter_begin(dev, &conn_iter);
3580         drm_for_each_connector_iter(connector, &conn_iter) {
3581                 struct intel_encoder *encoder;
3582
3583                 if (connector->connector_type !=
3584                     DRM_MODE_CONNECTOR_DisplayPort)
3585                         continue;
3586
3587                 encoder = to_intel_encoder(connector->encoder);
3588                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3589                         continue;
3590
3591                 if (encoder && connector->status == connector_status_connected) {
3592                         intel_dp = enc_to_intel_dp(&encoder->base);
3593                         if (intel_dp->compliance.test_type ==
3594                             DP_TEST_LINK_EDID_READ)
3595                                 seq_printf(m, "%lx",
3596                                            intel_dp->compliance.test_data.edid);
3597                         else if (intel_dp->compliance.test_type ==
3598                                  DP_TEST_LINK_VIDEO_PATTERN) {
3599                                 seq_printf(m, "hdisplay: %d\n",
3600                                            intel_dp->compliance.test_data.hdisplay);
3601                                 seq_printf(m, "vdisplay: %d\n",
3602                                            intel_dp->compliance.test_data.vdisplay);
3603                                 seq_printf(m, "bpc: %u\n",
3604                                            intel_dp->compliance.test_data.bpc);
3605                         }
3606                 } else
3607                         seq_puts(m, "0");
3608         }
3609         drm_connector_list_iter_end(&conn_iter);
3610
3611         return 0;
3612 }
3613 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3614
3615 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3616 {
3617         struct drm_i915_private *dev_priv = m->private;
3618         struct drm_device *dev = &dev_priv->drm;
3619         struct drm_connector *connector;
3620         struct drm_connector_list_iter conn_iter;
3621         struct intel_dp *intel_dp;
3622
3623         drm_connector_list_iter_begin(dev, &conn_iter);
3624         drm_for_each_connector_iter(connector, &conn_iter) {
3625                 struct intel_encoder *encoder;
3626
3627                 if (connector->connector_type !=
3628                     DRM_MODE_CONNECTOR_DisplayPort)
3629                         continue;
3630
3631                 encoder = to_intel_encoder(connector->encoder);
3632                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3633                         continue;
3634
3635                 if (encoder && connector->status == connector_status_connected) {
3636                         intel_dp = enc_to_intel_dp(&encoder->base);
3637                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3638                 } else
3639                         seq_puts(m, "0");
3640         }
3641         drm_connector_list_iter_end(&conn_iter);
3642
3643         return 0;
3644 }
3645 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3646
3647 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3648 {
3649         struct drm_i915_private *dev_priv = m->private;
3650         struct drm_device *dev = &dev_priv->drm;
3651         int level;
3652         int num_levels;
3653
3654         if (IS_CHERRYVIEW(dev_priv))
3655                 num_levels = 3;
3656         else if (IS_VALLEYVIEW(dev_priv))
3657                 num_levels = 1;
3658         else if (IS_G4X(dev_priv))
3659                 num_levels = 3;
3660         else
3661                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3662
3663         drm_modeset_lock_all(dev);
3664
3665         for (level = 0; level < num_levels; level++) {
3666                 unsigned int latency = wm[level];
3667
3668                 /*
3669                  * - WM1+ latency values in 0.5us units
3670                  * - latencies are in us on gen9/vlv/chv
3671                  */
3672                 if (INTEL_GEN(dev_priv) >= 9 ||
3673                     IS_VALLEYVIEW(dev_priv) ||
3674                     IS_CHERRYVIEW(dev_priv) ||
3675                     IS_G4X(dev_priv))
3676                         latency *= 10;
3677                 else if (level > 0)
3678                         latency *= 5;
3679
3680                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3681                            level, wm[level], latency / 10, latency % 10);
3682         }
3683
3684         drm_modeset_unlock_all(dev);
3685 }
3686
3687 static int pri_wm_latency_show(struct seq_file *m, void *data)
3688 {
3689         struct drm_i915_private *dev_priv = m->private;
3690         const uint16_t *latencies;
3691
3692         if (INTEL_GEN(dev_priv) >= 9)
3693                 latencies = dev_priv->wm.skl_latency;
3694         else
3695                 latencies = dev_priv->wm.pri_latency;
3696
3697         wm_latency_show(m, latencies);
3698
3699         return 0;
3700 }
3701
3702 static int spr_wm_latency_show(struct seq_file *m, void *data)
3703 {
3704         struct drm_i915_private *dev_priv = m->private;
3705         const uint16_t *latencies;
3706
3707         if (INTEL_GEN(dev_priv) >= 9)
3708                 latencies = dev_priv->wm.skl_latency;
3709         else
3710                 latencies = dev_priv->wm.spr_latency;
3711
3712         wm_latency_show(m, latencies);
3713
3714         return 0;
3715 }
3716
3717 static int cur_wm_latency_show(struct seq_file *m, void *data)
3718 {
3719         struct drm_i915_private *dev_priv = m->private;
3720         const uint16_t *latencies;
3721
3722         if (INTEL_GEN(dev_priv) >= 9)
3723                 latencies = dev_priv->wm.skl_latency;
3724         else
3725                 latencies = dev_priv->wm.cur_latency;
3726
3727         wm_latency_show(m, latencies);
3728
3729         return 0;
3730 }
3731
3732 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3733 {
3734         struct drm_i915_private *dev_priv = inode->i_private;
3735
3736         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3737                 return -ENODEV;
3738
3739         return single_open(file, pri_wm_latency_show, dev_priv);
3740 }
3741
3742 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3743 {
3744         struct drm_i915_private *dev_priv = inode->i_private;
3745
3746         if (HAS_GMCH_DISPLAY(dev_priv))
3747                 return -ENODEV;
3748
3749         return single_open(file, spr_wm_latency_show, dev_priv);
3750 }
3751
3752 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3753 {
3754         struct drm_i915_private *dev_priv = inode->i_private;
3755
3756         if (HAS_GMCH_DISPLAY(dev_priv))
3757                 return -ENODEV;
3758
3759         return single_open(file, cur_wm_latency_show, dev_priv);
3760 }
3761
/*
 * Parse up to 8 space-separated u16 latency values from userspace and
 * store them into the given watermark latency table @wm. The number of
 * values must match the platform's watermark level count exactly;
 * otherwise -EINVAL is returned. Returns @len on success.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, uint16_t wm[8])
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        uint16_t new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
        char tmp[32];

        /* Per-platform count of valid watermark levels. */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        /* Leave room for the NUL terminator appended below. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
                     &new[0], &new[1], &new[2], &new[3],
                     &new[4], &new[5], &new[6], &new[7]);
        /* Require exactly num_levels values - no more, no fewer. */
        if (ret != num_levels)
                return -EINVAL;

        /* Update the table under the modeset locks so readers see a
         * consistent set of latencies.
         */
        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++)
                wm[level] = new[level];

        drm_modeset_unlock_all(dev);

        return len;
}
3806
3807
3808 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3809                                     size_t len, loff_t *offp)
3810 {
3811         struct seq_file *m = file->private_data;
3812         struct drm_i915_private *dev_priv = m->private;
3813         uint16_t *latencies;
3814
3815         if (INTEL_GEN(dev_priv) >= 9)
3816                 latencies = dev_priv->wm.skl_latency;
3817         else
3818                 latencies = dev_priv->wm.pri_latency;
3819
3820         return wm_latency_write(file, ubuf, len, offp, latencies);
3821 }
3822
3823 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3824                                     size_t len, loff_t *offp)
3825 {
3826         struct seq_file *m = file->private_data;
3827         struct drm_i915_private *dev_priv = m->private;
3828         uint16_t *latencies;
3829
3830         if (INTEL_GEN(dev_priv) >= 9)
3831                 latencies = dev_priv->wm.skl_latency;
3832         else
3833                 latencies = dev_priv->wm.spr_latency;
3834
3835         return wm_latency_write(file, ubuf, len, offp, latencies);
3836 }
3837
3838 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3839                                     size_t len, loff_t *offp)
3840 {
3841         struct seq_file *m = file->private_data;
3842         struct drm_i915_private *dev_priv = m->private;
3843         uint16_t *latencies;
3844
3845         if (INTEL_GEN(dev_priv) >= 9)
3846                 latencies = dev_priv->wm.skl_latency;
3847         else
3848                 latencies = dev_priv->wm.cur_latency;
3849
3850         return wm_latency_write(file, ubuf, len, offp, latencies);
3851 }
3852
/* seq_file-backed debugfs fops for the primary plane WM latency file. */
static const struct file_operations i915_pri_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = pri_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = pri_wm_latency_write
};
3861
/* seq_file-backed debugfs fops for the sprite plane WM latency file. */
static const struct file_operations i915_spr_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = spr_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = spr_wm_latency_write
};
3870
/* seq_file-backed debugfs fops for the cursor plane WM latency file. */
static const struct file_operations i915_cur_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = cur_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = cur_wm_latency_write
};
3879
3880 static int
3881 i915_wedged_get(void *data, u64 *val)
3882 {
3883         struct drm_i915_private *dev_priv = data;
3884
3885         *val = i915_terminally_wedged(&dev_priv->gpu_error);
3886
3887         return 0;
3888 }
3889
/*
 * Manually declare engines hung and trigger error handling/reset.
 * @val is a mask of engines to mark as stalled before raising the error.
 * Blocks until the reset handoff has completed.
 */
static int
i915_wedged_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;
        struct intel_engine_cs *engine;
        unsigned int tmp;

        /*
         * There is no safeguard against this debugfs entry colliding
         * with the hangcheck calling same i915_handle_error() in
         * parallel, causing an explosion. For now we assume that the
         * test harness is responsible enough not to inject gpu hangs
         * while it is writing to 'i915_wedged'
         */

        if (i915_reset_backoff(&i915->gpu_error))
                return -EAGAIN;

        /* Mark each selected engine as freshly stalled so hangcheck
         * treats it as hung.
         */
        for_each_engine_masked(engine, i915, val, tmp) {
                engine->hangcheck.seqno = intel_engine_get_seqno(engine);
                engine->hangcheck.stalled = true;
        }

        i915_handle_error(i915, val, I915_ERROR_CAPTURE,
                          "Manually set wedged engine mask = %llx", val);

        /* Wait for the reset to be handed off before returning to
         * userspace.
         */
        wait_on_bit(&i915->gpu_error.flags,
                    I915_RESET_HANDOFF,
                    TASK_UNINTERRUPTIBLE);

        return 0;
}
3922
/* debugfs attribute: i915_wedged, read/written as a decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
                        i915_wedged_get, i915_wedged_set,
                        "%llu\n");
3926
/*
 * Common helper for the fault-injection irq debugfs files: wait for the
 * GPU to idle under struct_mutex, store @val into @irq, then flush the
 * idle worker so the irq state is disarmed. Returns 0 or a negative
 * errno (-EINTR from the lock/wait, or a wait-for-idle failure).
 */
static int
fault_irq_set(struct drm_i915_private *i915,
              unsigned long *irq,
              unsigned long val)
{
        int err;

        err = mutex_lock_interruptible(&i915->drm.struct_mutex);
        if (err)
                return err;

        err = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_LOCKED |
                                     I915_WAIT_INTERRUPTIBLE,
                                     MAX_SCHEDULE_TIMEOUT);
        if (err)
                goto err_unlock;

        /* Update the mask while idle, then drop the lock before the
         * potentially-blocking drain below.
         */
        *irq = val;
        mutex_unlock(&i915->drm.struct_mutex);

        /* Flush idle worker to disarm irq */
        drain_delayed_work(&i915->gt.idle_work);

        return 0;

err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
3957
3958 static int
3959 i915_ring_missed_irq_get(void *data, u64 *val)
3960 {
3961         struct drm_i915_private *dev_priv = data;
3962
3963         *val = dev_priv->gpu_error.missed_irq_rings;
3964         return 0;
3965 }
3966
3967 static int
3968 i915_ring_missed_irq_set(void *data, u64 val)
3969 {
3970         struct drm_i915_private *i915 = data;
3971
3972         return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
3973 }
3974
/* debugfs attribute: i915_ring_missed_irq, formatted as a hex mask. */
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
                        i915_ring_missed_irq_get, i915_ring_missed_irq_set,
                        "0x%08llx\n");
3978
3979 static int
3980 i915_ring_test_irq_get(void *data, u64 *val)
3981 {
3982         struct drm_i915_private *dev_priv = data;
3983
3984         *val = dev_priv->gpu_error.test_irq_rings;
3985
3986         return 0;
3987 }
3988
/*
 * Set the test-irq ring mask used to fake missed interrupts. Rejected
 * when GuC submission is active or on gen11+, where per-engine
 * interrupt masking is not possible.
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        /* GuC keeps the user interrupt permanently enabled for submission */
        if (USES_GUC_SUBMISSION(i915))
                return -ENODEV;

        /*
         * From icl, we can no longer individually mask interrupt generation
         * from each engine.
         */
        if (INTEL_GEN(i915) >= 11)
                return -ENODEV;

        /* Clamp the request to engines that actually exist. */
        val &= INTEL_INFO(i915)->ring_mask;
        DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

        return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}
4010
/* debugfs attribute: i915_ring_test_irq, formatted as a hex mask. */
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
                        i915_ring_test_irq_get, i915_ring_test_irq_set,
                        "0x%08llx\n");
4014
/*
 * Bit flags accepted by the i915_drop_caches debugfs file; each bit
 * requests one kind of cache/state dropping in i915_drop_caches_set().
 * DROP_ALL is the union of every supported bit.
 */
#define DROP_UNBOUND    BIT(0)
#define DROP_BOUND      BIT(1)
#define DROP_RETIRE     BIT(2)
#define DROP_ACTIVE     BIT(3)
#define DROP_FREED      BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE       BIT(6)
#define DROP_RESET_ACTIVE       BIT(7)
#define DROP_RESET_SEQNO        BIT(8)
#define DROP_ALL (DROP_UNBOUND  | \
                  DROP_BOUND    | \
                  DROP_RETIRE   | \
                  DROP_ACTIVE   | \
                  DROP_FREED    | \
                  DROP_SHRINK_ALL |\
                  DROP_IDLE     | \
                  DROP_RESET_ACTIVE | \
                  DROP_RESET_SEQNO)
4033 static int
4034 i915_drop_caches_get(void *data, u64 *val)
4035 {
4036         *val = DROP_ALL;
4037
4038         return 0;
4039 }
4040
/*
 * Drop the caches/state selected by the DROP_* bits in @val: wedge or
 * wait for the GPU, retire requests, shrink bound/unbound objects,
 * drain the idle worker and free dead objects. Holds a runtime-pm
 * wakeref across the whole operation.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;
        intel_wakeref_t wakeref;
        int ret = 0;

        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);
        wakeref = intel_runtime_pm_get(i915);

        /* Force a wedge first if the engines are busy and a reset of
         * active requests was requested.
         */
        if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
                i915_gem_set_wedged(i915);

        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
        if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
                ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
                if (ret)
                        goto out;

                if (val & DROP_ACTIVE)
                        ret = i915_gem_wait_for_idle(i915,
                                                     I915_WAIT_INTERRUPTIBLE |
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);

                if (val & DROP_RETIRE)
                        i915_retire_requests(i915);

                mutex_unlock(&i915->drm.struct_mutex);
        }

        /* If we wedged above (or were already wedged), kick off a full
         * reset and wait for the handoff to complete.
         */
        if (val & DROP_RESET_ACTIVE &&
            i915_terminally_wedged(&i915->gpu_error)) {
                i915_handle_error(i915, ALL_ENGINES, 0, NULL);
                wait_on_bit(&i915->gpu_error.flags,
                            I915_RESET_HANDOFF,
                            TASK_UNINTERRUPTIBLE);
        }

        /* Run the shrinkers inside an fs_reclaim section to mimic
         * direct-reclaim conditions.
         */
        fs_reclaim_acquire(GFP_KERNEL);
        if (val & DROP_BOUND)
                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

        if (val & DROP_UNBOUND)
                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

        if (val & DROP_SHRINK_ALL)
                i915_gem_shrink_all(i915);
        fs_reclaim_release(GFP_KERNEL);

        /* Keep flushing/draining until the GT actually goes to sleep. */
        if (val & DROP_IDLE) {
                do {
                        if (READ_ONCE(i915->gt.active_requests))
                                flush_delayed_work(&i915->gt.retire_work);
                        drain_delayed_work(&i915->gt.idle_work);
                } while (READ_ONCE(i915->gt.awake));
        }

        if (val & DROP_FREED)
                i915_gem_drain_freed_objects(i915);

out:
        intel_runtime_pm_put(i915, wakeref);

        return ret;
}
4109
/* debugfs attribute: i915_drop_caches, hex mask of DROP_* flags. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
                        i915_drop_caches_get, i915_drop_caches_set,
                        "0x%08llx\n");
4113
/*
 * Read the current uncore cache-sharing policy from GEN6_MBCUNIT_SNPCR.
 * Only valid on gen6/7; snpcr is pre-initialised to 0 so *val is well
 * defined even if the register read is skipped.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
        struct drm_i915_private *dev_priv = data;
        intel_wakeref_t wakeref;
        u32 snpcr = 0;

        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;

        with_intel_runtime_pm(dev_priv, wakeref)
                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

        return 0;
}
4131
/*
 * Set the uncore cache-sharing policy (0-3) by read-modify-writing the
 * SNPCR field of GEN6_MBCUNIT_SNPCR. Only valid on gen6/7.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        intel_wakeref_t wakeref;

        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;

        /* The SNPCR field only holds values 0-3. */
        if (val > 3)
                return -EINVAL;

        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
        with_intel_runtime_pm(dev_priv, wakeref) {
                u32 snpcr;

                /* Update the cache sharing policy here as well */
                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
                snpcr &= ~GEN6_MBC_SNPCR_MASK;
                snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
                I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
        }

        return 0;
}
4157
4158 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4159                         i915_cache_sharing_get, i915_cache_sharing_set,
4160                         "%llu\n");
4161
/*
 * Fill in @sseu with the runtime slice/subslice/EU power-gating status
 * on Cherryview, read from the CHV power signal registers.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
                                          struct sseu_dev_info *sseu)
{
#define SS_MAX 2
        const int ss_max = SS_MAX;
        u32 sig1[SS_MAX], sig2[SS_MAX];
        int ss;

        sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
        sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
        sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
        sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

        for (ss = 0; ss < ss_max; ss++) {
                unsigned int eu_cnt;

                if (sig1[ss] & CHV_SS_PG_ENABLE)
                        /* skip disabled subslice */
                        continue;

                /* CHV has a single slice; accumulate the live subslices. */
                sseu->slice_mask = BIT(0);
                sseu->subslice_mask[0] |= BIT(ss);
                /* Each PG_ENABLE bit gates a pair of EUs. */
                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
                sseu->eu_total += eu_cnt;
                sseu->eu_per_subslice = max_t(unsigned int,
                                              sseu->eu_per_subslice, eu_cnt);
        }
#undef SS_MAX
}
4194
/*
 * Fill in @sseu with the runtime slice/subslice/EU power-gating status
 * on gen10+, based on the per-slice PGCTL ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
                                     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                /*
                 * FIXME: Valid SS Mask respects the spec and read
                 * only valid bits for those registers, excluding reserved
                 * although this seems wrong because it would leave many
                 * subslices without ACK.
                 */
                s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
                        GEN10_PGCTL_VALID_SS_MASK(s);
                eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
                eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
        }

        /* ACK bit masks for the even (SSA) and odd (SSB) subslices. */
        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);
                sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                /* skip disabled subslice */
                                continue;

                        /* Each ACK bit represents an EU pair. */
                        eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
                                               eu_mask[ss % 2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
4250
/*
 * Fill in @sseu with the runtime slice/subslice/EU power-gating status
 * on gen9, based on the per-slice PGCTL ACK registers.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
                                    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
                eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
                eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
        }

        /* ACK bit masks for the even (SSA) and odd (SSB) subslices. */
        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);

                /* Gen9 big-core parts don't power-gate subslices, so
                 * just copy the static mask.
                 */
                if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask[s] =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (IS_GEN9_LP(dev_priv)) {
                                if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                        /* skip disabled subslice */
                                        continue;

                                sseu->subslice_mask[s] |= BIT(ss);
                        }

                        /* Each ACK bit represents an EU pair. */
                        eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
                                               eu_mask[ss%2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
4306
/*
 * Fill in @sseu with the runtime slice status on Broadwell; subslice
 * and EU counts are derived from the static device info, minus fused
 * off EUs.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
                                         struct sseu_dev_info *sseu)
{
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
        int s;

        sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

        if (sseu->slice_mask) {
                sseu->eu_per_subslice =
                        RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        sseu->subslice_mask[s] =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
                }
                sseu->eu_total = sseu->eu_per_subslice *
                                 sseu_subslice_total(sseu);

                /* subtract fused off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

                        sseu->eu_total -= hweight8(subslice_7eu);
                }
        }
}
4334
/*
 * Print an SSEU summary to the seq_file. @is_available_info selects the
 * "Available" (static device info) vs "Enabled" (runtime status) label;
 * the capability lines at the end are printed for available info only.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
                                 const struct sseu_dev_info *sseu)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const char *type = is_available_info ? "Available" : "Enabled";
        int s;

        seq_printf(m, "  %s Slice Mask: %04x\n", type,
                   sseu->slice_mask);
        seq_printf(m, "  %s Slice Total: %u\n", type,
                   hweight8(sseu->slice_mask));
        seq_printf(m, "  %s Subslice Total: %u\n", type,
                   sseu_subslice_total(sseu));
        for (s = 0; s < fls(sseu->slice_mask); s++) {
                seq_printf(m, "  %s Slice%i subslices: %u\n", type,
                           s, hweight8(sseu->subslice_mask[s]));
        }
        seq_printf(m, "  %s EU Total: %u\n", type,
                   sseu->eu_total);
        seq_printf(m, "  %s EU Per Subslice: %u\n", type,
                   sseu->eu_per_subslice);

        /* Capability flags only make sense for the static info. */
        if (!is_available_info)
                return;

        seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
        if (HAS_POOLED_EU(dev_priv))
                seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

        seq_printf(m, "  Has Slice Power Gating: %s\n",
                   yesno(sseu->has_slice_pg));
        seq_printf(m, "  Has Subslice Power Gating: %s\n",
                   yesno(sseu->has_subslice_pg));
        seq_printf(m, "  Has EU Power Gating: %s\n",
                   yesno(sseu->has_eu_pg));
}
4371
/*
 * debugfs show handler: dump both the static SSEU device info and the
 * runtime SSEU status (gathered under a runtime-pm wakeref) for gen8+.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct sseu_dev_info sseu;
        intel_wakeref_t wakeref;

        if (INTEL_GEN(dev_priv) < 8)
                return -ENODEV;

        seq_puts(m, "SSEU Device Info\n");
        i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

        seq_puts(m, "SSEU Device Status\n");
        /* Start from a zeroed status and copy over the static maxima. */
        memset(&sseu, 0, sizeof(sseu));
        sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
        sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
        sseu.max_eus_per_subslice =
                RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

        with_intel_runtime_pm(dev_priv, wakeref) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_sseu_device_status(dev_priv, &sseu);
                else if (IS_BROADWELL(dev_priv))
                        broadwell_sseu_device_status(dev_priv, &sseu);
                else if (IS_GEN(dev_priv, 9))
                        gen9_sseu_device_status(dev_priv, &sseu);
                else if (INTEL_GEN(dev_priv) >= 10)
                        gen10_sseu_device_status(dev_priv, &sseu);
        }

        i915_print_sseu_info(m, false, &sseu);

        return 0;
}
4406
4407 static int i915_forcewake_open(struct inode *inode, struct file *file)
4408 {
4409         struct drm_i915_private *i915 = inode->i_private;
4410
4411         if (INTEL_GEN(i915) < 6)
4412                 return 0;
4413
4414         intel_runtime_pm_get(i915);
4415         intel_uncore_forcewake_user_get(i915);
4416
4417         return 0;
4418 }
4419
4420 static int i915_forcewake_release(struct inode *inode, struct file *file)
4421 {
4422         struct drm_i915_private *i915 = inode->i_private;
4423
4424         if (INTEL_GEN(i915) < 6)
4425                 return 0;
4426
4427         intel_uncore_forcewake_user_put(i915);
4428         intel_runtime_pm_put_unchecked(i915);
4429
4430         return 0;
4431 }
4432
/* debugfs fops: holding i915_forcewake_user open pins forcewake. */
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
4438
/*
 * Show the HPD storm detection threshold and whether a storm is
 * currently detected (i.e. the reenable work is still pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;

        /* Synchronize with everything first in case there's been an HPD
         * storm, but we haven't finished handling it in the kernel yet
         */
        synchronize_irq(dev_priv->drm.irq);
        flush_work(&dev_priv->hotplug.dig_port_work);
        flush_work(&dev_priv->hotplug.hotplug_work);

        seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
        seq_printf(m, "Detected: %s\n",
                   yesno(delayed_work_pending(&hotplug->reenable_work)));

        return 0;
}
4457
/*
 * Set the HPD storm detection threshold. Accepts a decimal count, or
 * the string "reset" to restore HPD_STORM_DEFAULT_THRESHOLD. Resets the
 * per-pin storm statistics and re-enables HPD if a storm was active.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
                                        const char __user *ubuf, size_t len,
                                        loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;
        unsigned int new_threshold;
        int i;
        char *newline;
        char tmp[16];

        /* Leave room for the NUL terminator appended below. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        /* Strip newline, if any */
        newline = strchr(tmp, '\n');
        if (newline)
                *newline = '\0';

        if (strcmp(tmp, "reset") == 0)
                new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        else if (kstrtouint(tmp, 10, &new_threshold) != 0)
                return -EINVAL;

        if (new_threshold > 0)
                DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
                              new_threshold);
        else
                DRM_DEBUG_KMS("Disabling HPD storm detection\n");

        spin_lock_irq(&dev_priv->irq_lock);
        hotplug->hpd_storm_threshold = new_threshold;
        /* Reset the HPD storm stats so we don't accidentally trigger a storm */
        for_each_hpd_pin(i)
                hotplug->stats[i].count = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Re-enable hpd immediately if we were in an irq storm */
        flush_delayed_work(&dev_priv->hotplug.reenable_work);

        return len;
}
4506
4507 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4508 {
4509         return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4510 }
4511
/* seq_file-backed debugfs fops for the HPD storm control file. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
        .owner = THIS_MODULE,
        .open = i915_hpd_storm_ctl_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_hpd_storm_ctl_write
};
4520
4521 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4522 {
4523         struct drm_i915_private *dev_priv = m->private;
4524
4525         seq_printf(m, "Enabled: %s\n",
4526                    yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4527
4528         return 0;
4529 }
4530
4531 static int
4532 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4533 {
4534         return single_open(file, i915_hpd_short_storm_ctl_show,
4535                            inode->i_private);
4536 }
4537
/*
 * Enable/disable HPD short-pulse storm detection. Accepts a boolean
 * string, or "reset" to restore the platform default (enabled unless
 * the device has DP MST). Resets per-pin storm statistics and
 * re-enables HPD if a storm was active.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
                                              const char __user *ubuf,
                                              size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;
        char *newline;
        char tmp[16];
        int i;
        bool new_state;

        /* Leave room for the NUL terminator appended below. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        /* Strip newline, if any */
        newline = strchr(tmp, '\n');
        if (newline)
                *newline = '\0';

        /* Reset to the "default" state for this system */
        if (strcmp(tmp, "reset") == 0)
                new_state = !HAS_DP_MST(dev_priv);
        else if (kstrtobool(tmp, &new_state) != 0)
                return -EINVAL;

        DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
                      new_state ? "En" : "Dis");

        spin_lock_irq(&dev_priv->irq_lock);
        hotplug->hpd_short_storm_enabled = new_state;
        /* Reset the HPD storm stats so we don't accidentally trigger a storm */
        for_each_hpd_pin(i)
                hotplug->stats[i].count = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Re-enable hpd immediately if we were in an irq storm */
        flush_delayed_work(&dev_priv->hotplug.reenable_work);

        return len;
}
4584
/* seq_file-backed debugfs fops for the HPD short-storm control file. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
        .owner = THIS_MODULE,
        .open = i915_hpd_short_storm_ctl_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_hpd_short_storm_ctl_write,
};
4593
/*
 * i915_drrs_ctl_set - manually force DRRS on or off via debugfs
 * @data: the drm_i915_private device
 * @val: nonzero to enable DRRS, zero to disable it
 *
 * Walks every CRTC and, for each active one whose state has DRRS, toggles
 * DRRS on the eDP output(s) driven by that CRTC.  Returns 0 on success,
 * -ENODEV on pre-gen7 hardware, or a negative error code if taking a
 * modeset lock or waiting for a pending commit is interrupted.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;

        /* This control only handles gen7+ platforms */
        if (INTEL_GEN(dev_priv) < 7)
                return -ENODEV;

        for_each_intel_crtc(dev, crtc) {
                struct drm_connector_list_iter conn_iter;
                struct intel_crtc_state *crtc_state;
                struct drm_connector *connector;
                struct drm_crtc_commit *commit;
                int ret;

                /* Hold the CRTC lock so crtc->base.state stays stable below */
                ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
                if (ret)
                        return ret;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                /* Nothing to do for inactive CRTCs or states without DRRS */
                if (!crtc_state->base.active ||
                    !crtc_state->has_drrs)
                        goto out;

                /* Wait for any in-flight commit so hardware matches the state */
                commit = crtc_state->base.commit;
                if (commit) {
                        ret = wait_for_completion_interruptible(&commit->hw_done);
                        if (ret)
                                goto out;
                }

                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_encoder *encoder;
                        struct intel_dp *intel_dp;

                        /* Only connectors driven by this CRTC are relevant */
                        if (!(crtc_state->base.connector_mask &
                              drm_connector_mask(connector)))
                                continue;

                        /* DRRS is only toggled on eDP outputs here */
                        encoder = intel_attached_encoder(connector);
                        if (encoder->type != INTEL_OUTPUT_EDP)
                                continue;

                        DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
                                                val ? "en" : "dis", val);

                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (val)
                                intel_edp_drrs_enable(intel_dp,
                                                      crtc_state);
                        else
                                intel_edp_drrs_disable(intel_dp,
                                                       crtc_state);
                }
                drm_connector_list_iter_end(&conn_iter);

out:
                /* ret is 0 here unless the commit wait above failed */
                drm_modeset_unlock(&crtc->base.mutex);
                if (ret)
                        return ret;
        }

        return 0;
}
4661
/* i915_drrs_ctl: write-only attribute, value parsed as %llu into i915_drrs_ctl_set() */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4663
/*
 * i915_fifo_underrun_reset_write - re-arm FIFO underrun reporting
 * @filp: debugfs file; private_data is the drm_i915_private device
 * @ubuf: user buffer holding a boolean string
 * @cnt: number of bytes in @ubuf
 * @ppos: file position (unused)
 *
 * Writing a true value re-arms FIFO underrun reporting on every active
 * CRTC and resets the FBC underrun state.  Returns @cnt on success or a
 * negative error code.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
                               const char __user *ubuf,
                               size_t cnt, loff_t *ppos)
{
        struct drm_i915_private *dev_priv = filp->private_data;
        struct intel_crtc *intel_crtc;
        struct drm_device *dev = &dev_priv->drm;
        int ret;
        bool reset;

        ret = kstrtobool_from_user(ubuf, cnt, &reset);
        if (ret)
                return ret;

        /* Writing a false value is accepted but is a no-op */
        if (!reset)
                return cnt;

        for_each_intel_crtc(dev, intel_crtc) {
                struct drm_crtc_commit *commit;
                struct intel_crtc_state *crtc_state;

                /* Lock the CRTC so its state can't change while we re-arm */
                ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
                if (ret)
                        return ret;

                crtc_state = to_intel_crtc_state(intel_crtc->base.state);
                commit = crtc_state->base.commit;
                if (commit) {
                        /* Wait until the pending commit has fully reached HW */
                        ret = wait_for_completion_interruptible(&commit->hw_done);
                        if (!ret)
                                ret = wait_for_completion_interruptible(&commit->flip_done);
                }

                if (!ret && crtc_state->base.active) {
                        DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
                                      pipe_name(intel_crtc->pipe));

                        intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
                }

                drm_modeset_unlock(&intel_crtc->base.mutex);

                if (ret)
                        return ret;
        }

        ret = intel_fbc_reset_underrun(dev_priv);
        if (ret)
                return ret;

        return cnt;
}
4717
/* Write-only debugfs file backed by i915_fifo_underrun_reset_write() */
static const struct file_operations i915_fifo_underrun_reset_ops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .write = i915_fifo_underrun_reset_write,
        .llseek = default_llseek,
};
4724
4725 static const struct drm_info_list i915_debugfs_list[] = {
4726         {"i915_capabilities", i915_capabilities, 0},
4727         {"i915_gem_objects", i915_gem_object_info, 0},
4728         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4729         {"i915_gem_stolen", i915_gem_stolen_list_info },
4730         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4731         {"i915_gem_interrupt", i915_interrupt_info, 0},
4732         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4733         {"i915_guc_info", i915_guc_info, 0},
4734         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4735         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4736         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4737         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4738         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4739         {"i915_frequency_info", i915_frequency_info, 0},
4740         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4741         {"i915_reset_info", i915_reset_info, 0},
4742         {"i915_drpc_info", i915_drpc_info, 0},
4743         {"i915_emon_status", i915_emon_status, 0},
4744         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4745         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4746         {"i915_fbc_status", i915_fbc_status, 0},
4747         {"i915_ips_status", i915_ips_status, 0},
4748         {"i915_sr_status", i915_sr_status, 0},
4749         {"i915_opregion", i915_opregion, 0},
4750         {"i915_vbt", i915_vbt, 0},
4751         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4752         {"i915_context_status", i915_context_status, 0},
4753         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4754         {"i915_swizzle_info", i915_swizzle_info, 0},
4755         {"i915_llc", i915_llc, 0},
4756         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4757         {"i915_energy_uJ", i915_energy_uJ, 0},
4758         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4759         {"i915_power_domain_info", i915_power_domain_info, 0},
4760         {"i915_dmc_info", i915_dmc_info, 0},
4761         {"i915_display_info", i915_display_info, 0},
4762         {"i915_engine_info", i915_engine_info, 0},
4763         {"i915_rcs_topology", i915_rcs_topology, 0},
4764         {"i915_shrinker_info", i915_shrinker_info, 0},
4765         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4766         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4767         {"i915_wa_registers", i915_wa_registers, 0},
4768         {"i915_ddb_info", i915_ddb_info, 0},
4769         {"i915_sseu_status", i915_sseu_status, 0},
4770         {"i915_drrs_status", i915_drrs_status, 0},
4771         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4772 };
4773 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4774
4775 static const struct i915_debugfs_files {
4776         const char *name;
4777         const struct file_operations *fops;
4778 } i915_debugfs_files[] = {
4779         {"i915_wedged", &i915_wedged_fops},
4780         {"i915_cache_sharing", &i915_cache_sharing_fops},
4781         {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4782         {"i915_ring_test_irq", &i915_ring_test_irq_fops},
4783         {"i915_gem_drop_caches", &i915_drop_caches_fops},
4784 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4785         {"i915_error_state", &i915_error_state_fops},
4786         {"i915_gpu_info", &i915_gpu_info_fops},
4787 #endif
4788         {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4789         {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4790         {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4791         {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4792         {"i915_fbc_false_color", &i915_fbc_false_color_fops},
4793         {"i915_dp_test_data", &i915_displayport_test_data_fops},
4794         {"i915_dp_test_type", &i915_displayport_test_type_fops},
4795         {"i915_dp_test_active", &i915_displayport_test_active_fops},
4796         {"i915_guc_log_level", &i915_guc_log_level_fops},
4797         {"i915_guc_log_relay", &i915_guc_log_relay_fops},
4798         {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4799         {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4800         {"i915_ipc_status", &i915_ipc_status_fops},
4801         {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4802         {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4803 };
4804
4805 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4806 {
4807         struct drm_minor *minor = dev_priv->drm.primary;
4808         struct dentry *ent;
4809         int i;
4810
4811         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4812                                   minor->debugfs_root, to_i915(minor->dev),
4813                                   &i915_forcewake_fops);
4814         if (!ent)
4815                 return -ENOMEM;
4816
4817         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4818                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4819                                           S_IRUGO | S_IWUSR,
4820                                           minor->debugfs_root,
4821                                           to_i915(minor->dev),
4822                                           i915_debugfs_files[i].fops);
4823                 if (!ent)
4824                         return -ENOMEM;
4825         }
4826
4827         return drm_debugfs_create_files(i915_debugfs_list,
4828                                         I915_DEBUGFS_ENTRIES,
4829                                         minor->debugfs_root, minor);
4830 }
4831
/* Describes one contiguous range of DPCD registers dumped by i915_dpcd_show() */
struct dpcd_block {
        /* DPCD dump start address. */
        unsigned int offset;
        /* DPCD dump end address, inclusive. If unset, .size will be used. */
        unsigned int end;
        /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
        size_t size;
        /* Only valid for eDP. */
        bool edp;
};
4842
/*
 * DPCD ranges dumped by the per-connector i915_dpcd debugfs file.
 * Each entry is limited to the 16-byte read buffer in i915_dpcd_show().
 */
static const struct dpcd_block i915_dpcd_debug[] = {
        { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
        { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
        { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
        { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
        { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
        { .offset = DP_SET_POWER },
        { .offset = DP_EDP_DPCD_REV },
        { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
        { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
        { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4855
4856 static int i915_dpcd_show(struct seq_file *m, void *data)
4857 {
4858         struct drm_connector *connector = m->private;
4859         struct intel_dp *intel_dp =
4860                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4861         uint8_t buf[16];
4862         ssize_t err;
4863         int i;
4864
4865         if (connector->status != connector_status_connected)
4866                 return -ENODEV;
4867
4868         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4869                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4870                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4871
4872                 if (b->edp &&
4873                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4874                         continue;
4875
4876                 /* low tech for now */
4877                 if (WARN_ON(size > sizeof(buf)))
4878                         continue;
4879
4880                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4881                 if (err < 0)
4882                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4883                 else
4884                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4885         }
4886
4887         return 0;
4888 }
4889 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4890
4891 static int i915_panel_show(struct seq_file *m, void *data)
4892 {
4893         struct drm_connector *connector = m->private;
4894         struct intel_dp *intel_dp =
4895                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4896
4897         if (connector->status != connector_status_connected)
4898                 return -ENODEV;
4899
4900         seq_printf(m, "Panel power up delay: %d\n",
4901                    intel_dp->panel_power_up_delay);
4902         seq_printf(m, "Panel power down delay: %d\n",
4903                    intel_dp->panel_power_down_delay);
4904         seq_printf(m, "Backlight on delay: %d\n",
4905                    intel_dp->backlight_on_delay);
4906         seq_printf(m, "Backlight off delay: %d\n",
4907                    intel_dp->backlight_off_delay);
4908
4909         return 0;
4910 }
4911 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4912
4913 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4914 {
4915         struct drm_connector *connector = m->private;
4916         struct intel_connector *intel_connector = to_intel_connector(connector);
4917
4918         if (connector->status != connector_status_connected)
4919                 return -ENODEV;
4920
4921         /* HDCP is supported by connector */
4922         if (!intel_connector->hdcp.shim)
4923                 return -EINVAL;
4924
4925         seq_printf(m, "%s:%d HDCP version: ", connector->name,
4926                    connector->base.id);
4927         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4928                    "None" : "HDCP1.4");
4929         seq_puts(m, "\n");
4930
4931         return 0;
4932 }
4933 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4934
/*
 * i915_dsc_fec_support_show - report DSC/FEC state for a DP connector
 * @m: seq_file to print to
 * @data: unused
 *
 * Prints whether DSC is enabled on the CRTC driving this connector and
 * whether the sink advertises DSC support (FEC support is only printed
 * for non-eDP sinks).  Returns 0 on success, -ENODEV if the connector is
 * disconnected or has no CRTC, -EINTR if interrupted while locking, or
 * another negative error code from the locking helpers.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
        struct drm_connector *connector = m->private;
        struct drm_device *dev = connector->dev;
        struct drm_crtc *crtc;
        struct intel_dp *intel_dp;
        struct drm_modeset_acquire_ctx ctx;
        struct intel_crtc_state *crtc_state = NULL;
        int ret = 0;
        bool try_again = false;

        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

        do {
                try_again = false;
                /* connection_mutex guards connector->state */
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                                       &ctx);
                if (ret) {
                        ret = -EINTR;
                        break;
                }
                crtc = connector->state->crtc;
                if (connector->status != connector_status_connected || !crtc) {
                        ret = -ENODEV;
                        break;
                }
                ret = drm_modeset_lock(&crtc->mutex, &ctx);
                if (ret == -EDEADLK) {
                        /* Deadlock: drop all locks and retry the sequence */
                        ret = drm_modeset_backoff(&ctx);
                        if (!ret) {
                                try_again = true;
                                continue;
                        }
                        break;
                } else if (ret) {
                        break;
                }
                intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
                crtc_state = to_intel_crtc_state(crtc->state);
                seq_printf(m, "DSC_Enabled: %s\n",
                           yesno(crtc_state->dsc_params.compression_enable));
                seq_printf(m, "DSC_Sink_Support: %s\n",
                           yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
                /* FEC line is only emitted for non-eDP sinks */
                if (!intel_dp_is_edp(intel_dp))
                        seq_printf(m, "FEC_Sink_Support: %s\n",
                                   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
        } while (try_again);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
4988
4989 static ssize_t i915_dsc_fec_support_write(struct file *file,
4990                                           const char __user *ubuf,
4991                                           size_t len, loff_t *offp)
4992 {
4993         bool dsc_enable = false;
4994         int ret;
4995         struct drm_connector *connector =
4996                 ((struct seq_file *)file->private_data)->private;
4997         struct intel_encoder *encoder = intel_attached_encoder(connector);
4998         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4999
5000         if (len == 0)
5001                 return 0;
5002
5003         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
5004                          len);
5005
5006         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
5007         if (ret < 0)
5008                 return ret;
5009
5010         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
5011                          (dsc_enable) ? "true" : "false");
5012         intel_dp->force_dsc_en = dsc_enable;
5013
5014         *offp += len;
5015         return len;
5016 }
5017
/* Open handler: bind the seq_file show callback to this connector's data */
static int i915_dsc_fec_support_open(struct inode *inode,
                                     struct file *file)
{
        return single_open(file, i915_dsc_fec_support_show,
                           inode->i_private);
}
5024
/*
 * Read/write debugfs file: reads show DSC/FEC state via seq_file, writes
 * force DSC on/off via i915_dsc_fec_support_write().
 */
static const struct file_operations i915_dsc_fec_support_fops = {
        .owner = THIS_MODULE,
        .open = i915_dsc_fec_support_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_dsc_fec_support_write
};
5033
5034 /**
5035  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5036  * @connector: pointer to a registered drm_connector
5037  *
5038  * Cleanup will be done by drm_connector_unregister() through a call to
5039  * drm_debugfs_connector_remove().
5040  *
5041  * Returns 0 on success, negative error codes on error.
5042  */
5043 int i915_debugfs_connector_add(struct drm_connector *connector)
5044 {
5045         struct dentry *root = connector->debugfs_entry;
5046         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5047
5048         /* The connector must have been registered beforehands. */
5049         if (!root)
5050                 return -ENODEV;
5051
5052         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5053             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5054                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5055                                     connector, &i915_dpcd_fops);
5056
5057         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
5058                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5059                                     connector, &i915_panel_fops);
5060                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5061                                     connector, &i915_psr_sink_status_fops);
5062         }
5063
5064         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5065             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5066             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5067                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5068                                     connector, &i915_hdcp_sink_capability_fops);
5069         }
5070
5071         if (INTEL_GEN(dev_priv) >= 10 &&
5072             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5073              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
5074                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
5075                                     connector, &i915_dsc_fec_support_fops);
5076
5077         return 0;
5078 }