Merge tag 'rtc-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37         return to_i915(node->minor->dev);
38 }
39
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42         struct drm_i915_private *dev_priv = node_to_i915(m->private);
43         const struct intel_device_info *info = INTEL_INFO(dev_priv);
44         struct drm_printer p = drm_seq_file_printer(m);
45
46         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49
50         intel_device_info_dump_flags(info, &p);
51         intel_device_info_dump_runtime(info, &p);
52         intel_driver_caps_print(&dev_priv->caps, &p);
53
54         kernel_param_lock(THIS_MODULE);
55         i915_params_dump(&i915_modparams, &p);
56         kernel_param_unlock(THIS_MODULE);
57
58         return 0;
59 }
60
/* '*' while the object is still busy on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68         return obj->pin_global ? 'p' : ' ';
69 }
70
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73         switch (i915_gem_object_get_tiling(obj)) {
74         default:
75         case I915_TILING_NONE: return ' ';
76         case I915_TILING_X: return 'X';
77         case I915_TILING_Y: return 'Y';
78         }
79 }
80
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83         return obj->userfault_count ? 'g' : ' ';
84 }
85
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88         return obj->mm.mapping ? 'M' : ' ';
89 }
90
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93         u64 size = 0;
94         struct i915_vma *vma;
95
96         for_each_ggtt_vma(vma, obj) {
97                 if (drm_mm_node_allocated(&vma->node))
98                         size += vma->node.size;
99         }
100
101         return size;
102 }
103
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107         size_t x = 0;
108
109         switch (page_sizes) {
110         case 0:
111                 return "";
112         case I915_GTT_PAGE_SIZE_4K:
113                 return "4K";
114         case I915_GTT_PAGE_SIZE_64K:
115                 return "64K";
116         case I915_GTT_PAGE_SIZE_2M:
117                 return "2M";
118         default:
119                 if (!buf)
120                         return "M";
121
122                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123                         x += snprintf(buf + x, len - x, "2M, ");
124                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125                         x += snprintf(buf + x, len - x, "64K, ");
126                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127                         x += snprintf(buf + x, len - x, "4K, ");
128                 buf[x-2] = '\0';
129
130                 return buf;
131         }
132 }
133
/*
 * Print a one-line summary of a GEM object plus per-vma detail: state
 * flags, size, read/write domains, cache level, each bound vma (offset,
 * size, page sizes, GGTT view, fence) and - where applicable - the
 * stolen offset, the last engine to write the object, and its
 * frontbuffer bits.  Caller must hold struct_mutex.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flag order: active, pinned-global, tiling, userfault, mapped. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned vma across every address space, not just GGTT. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only vma that actually own address space are interesting. */
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224         const struct drm_i915_gem_object *a =
225                 *(const struct drm_i915_gem_object **)A;
226         const struct drm_i915_gem_object *b =
227                 *(const struct drm_i915_gem_object **)B;
228
229         if (a->stolen->start < b->stolen->start)
230                 return -1;
231         if (a->stolen->start > b->stolen->start)
232                 return 1;
233         return 0;
234 }
235
/*
 * debugfs: list every object backed by stolen memory, sorted by stolen
 * offset.  Object pointers are snapshotted under the obj_lock spinlock
 * into a kvmalloc'ed array so that describe_obj() (which may sleep) can
 * run afterwards without the spinlock held.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Sizing hint only; the real count is re-derived under the lock. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		/* Never overrun the snapshot array. */
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound stolen objects contribute no GTT size. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/* Accumulator for per-client GEM memory accounting (sizes in bytes). */
struct file_stats {
	/* Owner filter for ppgtt vma; NULL for kernel-wide accounting. */
	struct drm_i915_file_private *file_priv;
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* all objects / objects with no binding */
	u64 global, shared;	/* GGTT-bound / flink- or dmabuf-exported */
	u64 active, inactive;	/* bound size split by GPU activity */
};
306
/*
 * Accumulate one object's memory usage into the struct file_stats passed
 * via @data.  Used both as an idr_for_each() callback (hence the unused
 * @id) and called directly with id == 0.  Caller must hold struct_mutex.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	/* flink-named or dma-buf exported objects count as shared. */
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only bill ppgtt vma to the file owning the address
			 * space; with a NULL file_priv (kernel accounting)
			 * this skips all client ppgtt vma - presumably
			 * intentional, verify against callers. */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		/* Active/inactive split only covers vma that passed the
		 * ownership filter above. */
		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit one summary line for a stats accumulator, but only when it saw at
 * least one object.  A macro (not a function) so the struct can be passed
 * by value directly from each call-site.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
356
357 static void print_batch_pool_stats(struct seq_file *m,
358                                    struct drm_i915_private *dev_priv)
359 {
360         struct drm_i915_gem_object *obj;
361         struct file_stats stats;
362         struct intel_engine_cs *engine;
363         enum intel_engine_id id;
364         int j;
365
366         memset(&stats, 0, sizeof(stats));
367
368         for_each_engine(engine, dev_priv, id) {
369                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370                         list_for_each_entry(obj,
371                                             &engine->batch_pool.cache_list[j],
372                                             batch_pool_link)
373                                 per_file_stats(0, obj, &stats);
374                 }
375         }
376
377         print_file_stats(m, "[k]batch pool", stats);
378 }
379
380 static int per_file_ctx_stats(int idx, void *ptr, void *data)
381 {
382         struct i915_gem_context *ctx = ptr;
383         struct intel_engine_cs *engine;
384         enum intel_engine_id id;
385
386         for_each_engine(engine, ctx->i915, id) {
387                 struct intel_context *ce = to_intel_context(ctx, engine);
388
389                 if (ce->state)
390                         per_file_stats(0, ce->state->obj, data);
391                 if (ce->ring)
392                         per_file_stats(0, ce->ring->vma->obj, data);
393         }
394
395         return 0;
396 }
397
398 static void print_context_stats(struct seq_file *m,
399                                 struct drm_i915_private *dev_priv)
400 {
401         struct drm_device *dev = &dev_priv->drm;
402         struct file_stats stats;
403         struct drm_file *file;
404
405         memset(&stats, 0, sizeof(stats));
406
407         mutex_lock(&dev->struct_mutex);
408         if (dev_priv->kernel_context)
409                 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410
411         list_for_each_entry(file, &dev->filelist, lhead) {
412                 struct drm_i915_file_private *fpriv = file->driver_priv;
413                 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414         }
415         mutex_unlock(&dev->struct_mutex);
416
417         print_file_stats(m, "[k]contexts", stats);
418 }
419
/*
 * debugfs: summarise GEM memory usage - global totals, per-category
 * breakdowns (unbound/bound/purgeable/mapped/huge/display), the GTT
 * size, then per-client statistics keyed by process name.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* Walk both object lists under obj_lock; the purgeable/mapped/huge
	 * counters accumulate across the unbound AND bound lists. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Anything above the base 4K page counts as huge. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		/* pin_global marks objects pinned for display/scanout. */
		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->base.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* filelist_mutex guards dev->filelist; struct_mutex is re-taken
	 * per client below for the object idr walk. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the pid of the oldest request's context; fall back
		 * to the pid that opened the file. */
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560         struct drm_info_node *node = m->private;
561         struct drm_i915_private *dev_priv = node_to_i915(node);
562         struct drm_device *dev = &dev_priv->drm;
563         struct drm_i915_gem_object **objects;
564         struct drm_i915_gem_object *obj;
565         u64 total_obj_size, total_gtt_size;
566         unsigned long nobject, n;
567         int count, ret;
568
569         nobject = READ_ONCE(dev_priv->mm.object_count);
570         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571         if (!objects)
572                 return -ENOMEM;
573
574         ret = mutex_lock_interruptible(&dev->struct_mutex);
575         if (ret)
576                 return ret;
577
578         count = 0;
579         spin_lock(&dev_priv->mm.obj_lock);
580         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581                 objects[count++] = obj;
582                 if (count == nobject)
583                         break;
584         }
585         spin_unlock(&dev_priv->mm.obj_lock);
586
587         total_obj_size = total_gtt_size = 0;
588         for (n = 0;  n < count; n++) {
589                 obj = objects[n];
590
591                 seq_puts(m, "   ");
592                 describe_obj(m, obj);
593                 seq_putc(m, '\n');
594                 total_obj_size += obj->base.size;
595                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596         }
597
598         mutex_unlock(&dev->struct_mutex);
599
600         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601                    count, total_obj_size, total_gtt_size);
602         kvfree(objects);
603
604         return 0;
605 }
606
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609         struct drm_i915_private *dev_priv = node_to_i915(m->private);
610         struct drm_device *dev = &dev_priv->drm;
611         struct drm_i915_gem_object *obj;
612         struct intel_engine_cs *engine;
613         enum intel_engine_id id;
614         int total = 0;
615         int ret, j;
616
617         ret = mutex_lock_interruptible(&dev->struct_mutex);
618         if (ret)
619                 return ret;
620
621         for_each_engine(engine, dev_priv, id) {
622                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623                         int count;
624
625                         count = 0;
626                         list_for_each_entry(obj,
627                                             &engine->batch_pool.cache_list[j],
628                                             batch_pool_link)
629                                 count++;
630                         seq_printf(m, "%s cache[%d]: %d objects\n",
631                                    engine->name, j, count);
632
633                         list_for_each_entry(obj,
634                                             &engine->batch_pool.cache_list[j],
635                                             batch_pool_link) {
636                                 seq_puts(m, "   ");
637                                 describe_obj(m, obj);
638                                 seq_putc(m, '\n');
639                         }
640
641                         total += count;
642                 }
643         }
644
645         seq_printf(m, "total: %d\n", total);
646
647         mutex_unlock(&dev->struct_mutex);
648
649         return 0;
650 }
651
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping powered-down pipes), then the port, misc and PCU interrupt
 * registers.  Register read order is preserved as written.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		/* Reading registers of a powered-down pipe would fault;
		 * take a power reference or skip the pipe entirely. */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704         struct drm_i915_private *dev_priv = node_to_i915(m->private);
705         struct intel_engine_cs *engine;
706         enum intel_engine_id id;
707         int i, pipe;
708
709         intel_runtime_pm_get(dev_priv);
710
711         if (IS_CHERRYVIEW(dev_priv)) {
712                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713                            I915_READ(GEN8_MASTER_IRQ));
714
715                 seq_printf(m, "Display IER:\t%08x\n",
716                            I915_READ(VLV_IER));
717                 seq_printf(m, "Display IIR:\t%08x\n",
718                            I915_READ(VLV_IIR));
719                 seq_printf(m, "Display IIR_RW:\t%08x\n",
720                            I915_READ(VLV_IIR_RW));
721                 seq_printf(m, "Display IMR:\t%08x\n",
722                            I915_READ(VLV_IMR));
723                 for_each_pipe(dev_priv, pipe) {
724                         enum intel_display_power_domain power_domain;
725
726                         power_domain = POWER_DOMAIN_PIPE(pipe);
727                         if (!intel_display_power_get_if_enabled(dev_priv,
728                                                                 power_domain)) {
729                                 seq_printf(m, "Pipe %c power disabled\n",
730                                            pipe_name(pipe));
731                                 continue;
732                         }
733
734                         seq_printf(m, "Pipe %c stat:\t%08x\n",
735                                    pipe_name(pipe),
736                                    I915_READ(PIPESTAT(pipe)));
737
738                         intel_display_power_put(dev_priv, power_domain);
739                 }
740
741                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742                 seq_printf(m, "Port hotplug:\t%08x\n",
743                            I915_READ(PORT_HOTPLUG_EN));
744                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745                            I915_READ(VLV_DPFLIPSTAT));
746                 seq_printf(m, "DPINVGTT:\t%08x\n",
747                            I915_READ(DPINVGTT));
748                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749
750                 for (i = 0; i < 4; i++) {
751                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752                                    i, I915_READ(GEN8_GT_IMR(i)));
753                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754                                    i, I915_READ(GEN8_GT_IIR(i)));
755                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756                                    i, I915_READ(GEN8_GT_IER(i)));
757                 }
758
759                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760                            I915_READ(GEN8_PCU_IMR));
761                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762                            I915_READ(GEN8_PCU_IIR));
763                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764                            I915_READ(GEN8_PCU_IER));
765         } else if (INTEL_GEN(dev_priv) >= 11) {
766                 seq_printf(m, "Master Interrupt Control:  %08x\n",
767                            I915_READ(GEN11_GFX_MSTR_IRQ));
768
769                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783                            I915_READ(GEN11_DISPLAY_INT_CTL));
784
785                 gen8_display_interrupt_info(m);
786         } else if (INTEL_GEN(dev_priv) >= 8) {
787                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788                            I915_READ(GEN8_MASTER_IRQ));
789
790                 for (i = 0; i < 4; i++) {
791                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792                                    i, I915_READ(GEN8_GT_IMR(i)));
793                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794                                    i, I915_READ(GEN8_GT_IIR(i)));
795                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796                                    i, I915_READ(GEN8_GT_IER(i)));
797                 }
798
799                 gen8_display_interrupt_info(m);
800         } else if (IS_VALLEYVIEW(dev_priv)) {
801                 seq_printf(m, "Display IER:\t%08x\n",
802                            I915_READ(VLV_IER));
803                 seq_printf(m, "Display IIR:\t%08x\n",
804                            I915_READ(VLV_IIR));
805                 seq_printf(m, "Display IIR_RW:\t%08x\n",
806                            I915_READ(VLV_IIR_RW));
807                 seq_printf(m, "Display IMR:\t%08x\n",
808                            I915_READ(VLV_IMR));
809                 for_each_pipe(dev_priv, pipe) {
810                         enum intel_display_power_domain power_domain;
811
812                         power_domain = POWER_DOMAIN_PIPE(pipe);
813                         if (!intel_display_power_get_if_enabled(dev_priv,
814                                                                 power_domain)) {
815                                 seq_printf(m, "Pipe %c power disabled\n",
816                                            pipe_name(pipe));
817                                 continue;
818                         }
819
820                         seq_printf(m, "Pipe %c stat:\t%08x\n",
821                                    pipe_name(pipe),
822                                    I915_READ(PIPESTAT(pipe)));
823                         intel_display_power_put(dev_priv, power_domain);
824                 }
825
826                 seq_printf(m, "Master IER:\t%08x\n",
827                            I915_READ(VLV_MASTER_IER));
828
829                 seq_printf(m, "Render IER:\t%08x\n",
830                            I915_READ(GTIER));
831                 seq_printf(m, "Render IIR:\t%08x\n",
832                            I915_READ(GTIIR));
833                 seq_printf(m, "Render IMR:\t%08x\n",
834                            I915_READ(GTIMR));
835
836                 seq_printf(m, "PM IER:\t\t%08x\n",
837                            I915_READ(GEN6_PMIER));
838                 seq_printf(m, "PM IIR:\t\t%08x\n",
839                            I915_READ(GEN6_PMIIR));
840                 seq_printf(m, "PM IMR:\t\t%08x\n",
841                            I915_READ(GEN6_PMIMR));
842
843                 seq_printf(m, "Port hotplug:\t%08x\n",
844                            I915_READ(PORT_HOTPLUG_EN));
845                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846                            I915_READ(VLV_DPFLIPSTAT));
847                 seq_printf(m, "DPINVGTT:\t%08x\n",
848                            I915_READ(DPINVGTT));
849
850         } else if (!HAS_PCH_SPLIT(dev_priv)) {
851                 seq_printf(m, "Interrupt enable:    %08x\n",
852                            I915_READ(IER));
853                 seq_printf(m, "Interrupt identity:  %08x\n",
854                            I915_READ(IIR));
855                 seq_printf(m, "Interrupt mask:      %08x\n",
856                            I915_READ(IMR));
857                 for_each_pipe(dev_priv, pipe)
858                         seq_printf(m, "Pipe %c stat:         %08x\n",
859                                    pipe_name(pipe),
860                                    I915_READ(PIPESTAT(pipe)));
861         } else {
862                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
863                            I915_READ(DEIER));
864                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
865                            I915_READ(DEIIR));
866                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
867                            I915_READ(DEIMR));
868                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
869                            I915_READ(SDEIER));
870                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
871                            I915_READ(SDEIIR));
872                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
873                            I915_READ(SDEIMR));
874                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
875                            I915_READ(GTIER));
876                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
877                            I915_READ(GTIIR));
878                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
879                            I915_READ(GTIMR));
880         }
881
882         if (INTEL_GEN(dev_priv) >= 11) {
883                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894                            I915_READ(GEN11_GUC_SG_INTR_MASK));
895                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902         } else if (INTEL_GEN(dev_priv) >= 6) {
903                 for_each_engine(engine, dev_priv, id) {
904                         seq_printf(m,
905                                    "Graphics Interrupt mask (%s):       %08x\n",
906                                    engine->name, I915_READ_IMR(engine));
907                 }
908         }
909
910         intel_runtime_pm_put(dev_priv);
911
912         return 0;
913 }
914
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917         struct drm_i915_private *dev_priv = node_to_i915(m->private);
918         struct drm_device *dev = &dev_priv->drm;
919         int i, ret;
920
921         ret = mutex_lock_interruptible(&dev->struct_mutex);
922         if (ret)
923                 return ret;
924
925         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926         for (i = 0; i < dev_priv->num_fence_regs; i++) {
927                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928
929                 seq_printf(m, "Fence %d, pin count = %d, object = ",
930                            i, dev_priv->fence_regs[i].pin_count);
931                 if (!vma)
932                         seq_puts(m, "unused");
933                 else
934                         describe_obj(m, vma->obj);
935                 seq_putc(m, '\n');
936         }
937
938         mutex_unlock(&dev->struct_mutex);
939         return 0;
940 }
941
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * Render a captured GPU error state into text and copy the slice starting
 * at *pos into the user buffer.  The whole record is re-rendered on every
 * read; the error_state_buf machinery handles windowing via 'start'.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	/* No error state captured (or already consumed) -> EOF. */
	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	/*
	 * The buffer already begins at the requested offset, so copy from
	 * position 0 of it (tmp), then advance the real file position by
	 * the buffer's start plus what was actually copied.
	 */
	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
972
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975         i915_gpu_state_put(file->private_data);
976         return 0;
977 }
978
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981         struct drm_i915_private *i915 = inode->i_private;
982         struct i915_gpu_state *gpu;
983
984         intel_runtime_pm_get(i915);
985         gpu = i915_capture_gpu_state(i915);
986         intel_runtime_pm_put(i915);
987         if (!gpu)
988                 return -ENOMEM;
989
990         file->private_data = gpu;
991         return 0;
992 }
993
/*
 * i915_gpu_info: read-only debugfs file that captures a fresh GPU state
 * snapshot on open and streams its textual rendering to userspace.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1001
1002 static ssize_t
1003 i915_error_state_write(struct file *filp,
1004                        const char __user *ubuf,
1005                        size_t cnt,
1006                        loff_t *ppos)
1007 {
1008         struct i915_gpu_state *error = filp->private_data;
1009
1010         if (!error)
1011                 return 0;
1012
1013         DRM_DEBUG_DRIVER("Resetting error state\n");
1014         i915_reset_error_state(error->i915);
1015
1016         return cnt;
1017 }
1018
1019 static int i915_error_state_open(struct inode *inode, struct file *file)
1020 {
1021         file->private_data = i915_first_error_state(inode->i_private);
1022         return 0;
1023 }
1024
/*
 * i915_error_state: read the captured GPU error state as text; any write
 * clears it.  Shares read/release handlers with i915_gpu_info.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1033 #endif
1034
1035 static int
1036 i915_next_seqno_set(void *data, u64 val)
1037 {
1038         struct drm_i915_private *dev_priv = data;
1039         struct drm_device *dev = &dev_priv->drm;
1040         int ret;
1041
1042         ret = mutex_lock_interruptible(&dev->struct_mutex);
1043         if (ret)
1044                 return ret;
1045
1046         intel_runtime_pm_get(dev_priv);
1047         ret = i915_gem_set_global_seqno(dev, val);
1048         intel_runtime_pm_put(dev_priv);
1049
1050         mutex_unlock(&dev->struct_mutex);
1051
1052         return ret;
1053 }
1054
/* Write-only attribute (no getter): accepts a hex u64 next-seqno value. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
1058
/*
 * Dump GPU frequency / P-state information.  The layout is heavily
 * generation-dependent: ILK uses MEMSWCTL/MEMSTAT, VLV/CHV go through the
 * punit, and gen6+ reads the RPS register block (which requires forcewake).
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* punit accesses are serialised by pcu_lock. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton/Geminilake expose the caps at different offsets. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* Requested frequency field moved between generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		/* PM interrupt registers moved in the gen8 split. */
		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		/*
		 * Decode the three hardware frequency caps (RPN/RP1/RP0);
		 * GEN9_LP packs them in a different byte order, and
		 * gen9_bc/gen10+ report in 50MHz units (GEN9_FREQ_SCALER).
		 */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1265
/*
 * Pretty-print a sampled INSTDONE snapshot.  Only the fields that exist
 * on the running generation are emitted: gen2/3 have just the base
 * register, SC_INSTDONE appears from gen4, and the per-slice/subslice
 * sampler/row registers from gen7.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
1293
/*
 * Dump hangcheck state: global reset/wedge flags, per-engine seqno and
 * ACTHD progress versus the hangcheck's last sample, waiters, and (for the
 * render engine) an INSTDONE snapshot alongside the accumulated one.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the hardware state with the device awake. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter tree under its irq-safe lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1387
1388 static int i915_reset_info(struct seq_file *m, void *unused)
1389 {
1390         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1391         struct i915_gpu_error *error = &dev_priv->gpu_error;
1392         struct intel_engine_cs *engine;
1393         enum intel_engine_id id;
1394
1395         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1396
1397         for_each_engine(engine, dev_priv, id) {
1398                 seq_printf(m, "%s = %u\n", engine->name,
1399                            i915_reset_engine_count(error, engine));
1400         }
1401
1402         return 0;
1403 }
1404
/*
 * Report Ironlake render-standby (DRPC) state: boost/P-state configuration
 * from MEMMODECTL, VID levels from CRSTANDVID, and the current RS state
 * decoded from RSTDBYCTL.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1461
1462 static int i915_forcewake_domains(struct seq_file *m, void *data)
1463 {
1464         struct drm_i915_private *i915 = node_to_i915(m->private);
1465         struct intel_uncore_forcewake_domain *fw_domain;
1466         unsigned int tmp;
1467
1468         seq_printf(m, "user.bypass_count = %u\n",
1469                    i915->uncore.user_forcewake.count);
1470
1471         for_each_fw_domain(fw_domain, i915, tmp)
1472                 seq_printf(m, "%s.wake_count = %u\n",
1473                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1474                            READ_ONCE(fw_domain->wake_count));
1475
1476         return 0;
1477 }
1478
1479 static void print_rc6_res(struct seq_file *m,
1480                           const char *title,
1481                           const i915_reg_t reg)
1482 {
1483         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1484
1485         seq_printf(m, "%s %u (%llu us)\n",
1486                    title, I915_READ(reg),
1487                    intel_rc6_residency_us(dev_priv, reg));
1488 }
1489
1490 static int vlv_drpc_info(struct seq_file *m)
1491 {
1492         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1493         u32 rcctl1, pw_status;
1494
1495         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1496         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1497
1498         seq_printf(m, "RC6 Enabled: %s\n",
1499                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1500                                         GEN6_RC_CTL_EI_MODE(1))));
1501         seq_printf(m, "Render Power Well: %s\n",
1502                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1503         seq_printf(m, "Media Power Well: %s\n",
1504                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1505
1506         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1507         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1508
1509         return i915_forcewake_domains(m, NULL);
1510 }
1511
/*
 * GEN6+ DRPC report: which RC states are enabled in GEN6_RC_CONTROL,
 * the current RC state from GEN6_GT_CORE_STATUS, gen9+ power-well
 * gating, RC6 residency counters, and (gen6/7 only) the RC6 voltage
 * IDs obtained from the PCU. Ends with the forcewake domain dump.
 */
static int gen6_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 gt_core_status, rcctl1, rc6vids = 0;
        u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

        /* Raw read without forcewake; emit the trace event manually since
         * I915_READ_FW bypasses the traced accessor. */
        gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        if (INTEL_GEN(dev_priv) >= 9) {
                /* Power-well gating registers only exist on gen9+ */
                gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
                gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
        }

        if (INTEL_GEN(dev_priv) <= 7) {
                /* RC6 voltage IDs are only readable via pcode on gen6/7;
                 * pcu_lock serialises the mailbox transaction. */
                mutex_lock(&dev_priv->pcu_lock);
                sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
                                       &rc6vids);
                mutex_unlock(&dev_priv->pcu_lock);
        }

        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Well Gating Enabled: %s\n",
                        yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
                seq_printf(m, "Media Well Gating Enabled: %s\n",
                        yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
        }
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_puts(m, "Current RC state: ");
        /* Decode the RC state field of GT_CORE_STATUS. */
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                /* RC0 with the core power-down bit set still means idle. */
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_puts(m, "Core Power Down\n");
                else
                        seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
                seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_puts(m, "RC7\n");
                break;
        default:
                seq_puts(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
        if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Power Well: %s\n",
                        (gen9_powergate_status &
                         GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
                seq_printf(m, "Media Power Well: %s\n",
                        (gen9_powergate_status &
                         GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
        }

        /* Not exactly sure what this is */
        print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
                      GEN6_GT_GFX_RC6_LOCKED);
        print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
        print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
        print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

        if (INTEL_GEN(dev_priv) <= 7) {
                /* rc6vids packs three 8-bit voltage IDs: RC6 at bits 7:0,
                 * RC6+ at 15:8, RC6++ at 23:16. */
                seq_printf(m, "RC6   voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
                seq_printf(m, "RC6+  voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
                seq_printf(m, "RC6++ voltage: %dmV\n",
                           GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        }

        return i915_forcewake_domains(m, NULL);
}
1599
1600 static int i915_drpc_info(struct seq_file *m, void *unused)
1601 {
1602         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1603         int err;
1604
1605         intel_runtime_pm_get(dev_priv);
1606
1607         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1608                 err = vlv_drpc_info(m);
1609         else if (INTEL_GEN(dev_priv) >= 6)
1610                 err = gen6_drpc_info(m);
1611         else
1612                 err = ironlake_drpc_info(m);
1613
1614         intel_runtime_pm_put(dev_priv);
1615
1616         return err;
1617 }
1618
1619 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1620 {
1621         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1622
1623         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1624                    dev_priv->fb_tracking.busy_bits);
1625
1626         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1627                    dev_priv->fb_tracking.flip_bits);
1628
1629         return 0;
1630 }
1631
/*
 * Report framebuffer-compression (FBC) status: whether FBC is active
 * (or why it is disabled), any pending FBC worker, and whether the
 * hardware is actually compressing right now.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_fbc *fbc = &dev_priv->fbc;

        if (!HAS_FBC(dev_priv))
                return -ENODEV;

        /* Wake the device for register access, then serialise against
         * FBC enable/disable with fbc->lock. */
        intel_runtime_pm_get(dev_priv);
        mutex_lock(&fbc->lock);

        if (intel_fbc_is_active(dev_priv))
                seq_puts(m, "FBC enabled\n");
        else
                seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

        if (fbc->work.scheduled)
                seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
                           fbc->work.scheduled_vblank,
                           drm_crtc_vblank_count(&fbc->crtc->base));

        if (intel_fbc_is_active(dev_priv)) {
                u32 mask;

                /* The compression-status bits live in different registers
                 * (with different masks) per platform generation. */
                if (INTEL_GEN(dev_priv) >= 8)
                        mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
                else if (INTEL_GEN(dev_priv) >= 7)
                        mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
                else if (INTEL_GEN(dev_priv) >= 5)
                        mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
                else if (IS_G4X(dev_priv))
                        mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
                else
                        mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
                                                        FBC_STAT_COMPRESSED);

                /* Any non-zero status bits mean compression is happening. */
                seq_printf(m, "Compressing: %s\n", yesno(mask));
        }

        mutex_unlock(&fbc->lock);
        intel_runtime_pm_put(dev_priv);

        return 0;
}
1676
1677 static int i915_fbc_false_color_get(void *data, u64 *val)
1678 {
1679         struct drm_i915_private *dev_priv = data;
1680
1681         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1682                 return -ENODEV;
1683
1684         *val = dev_priv->fbc.false_color;
1685
1686         return 0;
1687 }
1688
1689 static int i915_fbc_false_color_set(void *data, u64 val)
1690 {
1691         struct drm_i915_private *dev_priv = data;
1692         u32 reg;
1693
1694         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1695                 return -ENODEV;
1696
1697         mutex_lock(&dev_priv->fbc.lock);
1698
1699         reg = I915_READ(ILK_DPFC_CONTROL);
1700         dev_priv->fbc.false_color = val;
1701
1702         I915_WRITE(ILK_DPFC_CONTROL, val ?
1703                    (reg | FBC_CTL_FALSE_COLOR) :
1704                    (reg & ~FBC_CTL_FALSE_COLOR));
1705
1706         mutex_unlock(&dev_priv->fbc.lock);
1707         return 0;
1708 }
1709
/* debugfs file ops for i915_fbc_false_color: single u64 value,
 * printed/parsed as "%llu\n". */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
                        i915_fbc_false_color_get, i915_fbc_false_color_set,
                        "%llu\n");
1713
1714 static int i915_ips_status(struct seq_file *m, void *unused)
1715 {
1716         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1717
1718         if (!HAS_IPS(dev_priv))
1719                 return -ENODEV;
1720
1721         intel_runtime_pm_get(dev_priv);
1722
1723         seq_printf(m, "Enabled by kernel parameter: %s\n",
1724                    yesno(i915_modparams.enable_ips));
1725
1726         if (INTEL_GEN(dev_priv) >= 8) {
1727                 seq_puts(m, "Currently: unknown\n");
1728         } else {
1729                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1730                         seq_puts(m, "Currently: enabled\n");
1731                 else
1732                         seq_puts(m, "Currently: disabled\n");
1733         }
1734
1735         intel_runtime_pm_put(dev_priv);
1736
1737         return 0;
1738 }
1739
1740 static int i915_sr_status(struct seq_file *m, void *unused)
1741 {
1742         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1743         bool sr_enabled = false;
1744
1745         intel_runtime_pm_get(dev_priv);
1746         intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1747
1748         if (INTEL_GEN(dev_priv) >= 9)
1749                 /* no global SR status; inspect per-plane WM */;
1750         else if (HAS_PCH_SPLIT(dev_priv))
1751                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1752         else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1753                  IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1754                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1755         else if (IS_I915GM(dev_priv))
1756                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1757         else if (IS_PINEVIEW(dev_priv))
1758                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1759         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1760                 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1761
1762         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1763         intel_runtime_pm_put(dev_priv);
1764
1765         seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1766
1767         return 0;
1768 }
1769
1770 static int i915_emon_status(struct seq_file *m, void *unused)
1771 {
1772         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1773         struct drm_device *dev = &dev_priv->drm;
1774         unsigned long temp, chipset, gfx;
1775         int ret;
1776
1777         if (!IS_GEN5(dev_priv))
1778                 return -ENODEV;
1779
1780         ret = mutex_lock_interruptible(&dev->struct_mutex);
1781         if (ret)
1782                 return ret;
1783
1784         temp = i915_mch_val(dev_priv);
1785         chipset = i915_chipset_val(dev_priv);
1786         gfx = i915_gfx_val(dev_priv);
1787         mutex_unlock(&dev->struct_mutex);
1788
1789         seq_printf(m, "GMCH temp: %ld\n", temp);
1790         seq_printf(m, "Chipset power: %ld\n", chipset);
1791         seq_printf(m, "GFX power: %ld\n", gfx);
1792         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1793
1794         return 0;
1795 }
1796
/*
 * Print the GPU-frequency to CPU/ring-frequency mapping table from the
 * PCU, one row per GPU frequency step. LLC platforms only.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        unsigned int max_gpu_freq, min_gpu_freq;
        int gpu_freq, ia_freq;
        int ret;

        if (!HAS_LLC(dev_priv))
                return -ENODEV;

        intel_runtime_pm_get(dev_priv);

        /* pcu_lock serialises the pcode mailbox transactions below. */
        ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
        if (ret)
                goto out;

        min_gpu_freq = rps->min_freq;
        max_gpu_freq = rps->max_freq;
        if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq /= GEN9_FREQ_SCALER;
                max_gpu_freq /= GEN9_FREQ_SCALER;
        }

        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

        for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
                /* ia_freq is in/out: pcode replaces it with the CPU freq
                 * (bits 7:0) and ring freq (bits 15:8), in 100 MHz units. */
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           intel_gpu_freq(dev_priv, (gpu_freq *
                                                     (IS_GEN9_BC(dev_priv) ||
                                                      INTEL_GEN(dev_priv) >= 10 ?
                                                      GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }

        mutex_unlock(&dev_priv->pcu_lock);

out:
        intel_runtime_pm_put(dev_priv);
        return ret;
}
1844
1845 static int i915_opregion(struct seq_file *m, void *unused)
1846 {
1847         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1848         struct drm_device *dev = &dev_priv->drm;
1849         struct intel_opregion *opregion = &dev_priv->opregion;
1850         int ret;
1851
1852         ret = mutex_lock_interruptible(&dev->struct_mutex);
1853         if (ret)
1854                 goto out;
1855
1856         if (opregion->header)
1857                 seq_write(m, opregion->header, OPREGION_SIZE);
1858
1859         mutex_unlock(&dev->struct_mutex);
1860
1861 out:
1862         return 0;
1863 }
1864
1865 static int i915_vbt(struct seq_file *m, void *unused)
1866 {
1867         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1868
1869         if (opregion->vbt)
1870                 seq_write(m, opregion->vbt, opregion->vbt_size);
1871
1872         return 0;
1873 }
1874
/*
 * List every framebuffer: the fbdev/fbcon framebuffer (when fbdev
 * emulation is built in) followed by all user-created ones, with
 * geometry, format and backing-object details for each.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_framebuffer *fbdev_fb = NULL;
        struct drm_framebuffer *drm_fb;
        int ret;

        /* struct_mutex protects the object state printed by describe_obj. */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
        if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
                /* Remember the fbdev fb so it is skipped in the list below. */
                fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

                seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fbdev_fb->base.width,
                           fbdev_fb->base.height,
                           fbdev_fb->base.format->depth,
                           fbdev_fb->base.format->cpp[0] * 8,
                           fbdev_fb->base.modifier,
                           drm_framebuffer_read_refcount(&fbdev_fb->base));
                describe_obj(m, fbdev_fb->obj);
                seq_putc(m, '\n');
        }
#endif

        /* fb_lock protects the device's framebuffer list. */
        mutex_lock(&dev->mode_config.fb_lock);
        drm_for_each_fb(drm_fb, dev) {
                struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
                /* Already printed above as the fbcon framebuffer. */
                if (fb == fbdev_fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.format->depth,
                           fb->base.format->cpp[0] * 8,
                           fb->base.modifier,
                           drm_framebuffer_read_refcount(&fb->base));
                describe_obj(m, fb->obj);
                seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
1924
1925 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1926 {
1927         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1928                    ring->space, ring->head, ring->tail, ring->emit);
1929 }
1930
/*
 * List every GEM context: the owning process (or "kernel"/"deleted"),
 * the remap-slice flag, and per engine the context-state object and
 * ringbuffer geometry.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        int ret;

        /* contexts.list and the per-context state are protected by
         * struct_mutex. */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
                seq_printf(m, "HW context %u ", ctx->hw_id);
                if (ctx->pid) {
                        struct task_struct *task;

                        /* Take a task reference so ->comm stays valid. */
                        task = get_pid_task(ctx->pid, PIDTYPE_PID);
                        if (task) {
                                seq_printf(m, "(%s [%d]) ",
                                           task->comm, task->pid);
                                put_task_struct(task);
                        }
                } else if (IS_ERR(ctx->file_priv)) {
                        /* Owning file was closed; context awaiting teardown. */
                        seq_puts(m, "(deleted) ");
                } else {
                        seq_puts(m, "(kernel) ");
                }

                /* 'R' when remap_slice is set for this context, 'r' otherwise. */
                seq_putc(m, ctx->remap_slice ? 'R' : 'r');
                seq_putc(m, '\n');

                for_each_engine(engine, dev_priv, id) {
                        struct intel_context *ce =
                                to_intel_context(ctx, engine);

                        seq_printf(m, "%s: ", engine->name);
                        if (ce->state)
                                describe_obj(m, ce->state->obj);
                        if (ce->ring)
                                describe_ctx_ring(m, ce->ring);
                        seq_putc(m, '\n');
                }

                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}
1983
1984 static const char *swizzle_string(unsigned swizzle)
1985 {
1986         switch (swizzle) {
1987         case I915_BIT_6_SWIZZLE_NONE:
1988                 return "none";
1989         case I915_BIT_6_SWIZZLE_9:
1990                 return "bit9";
1991         case I915_BIT_6_SWIZZLE_9_10:
1992                 return "bit9/bit10";
1993         case I915_BIT_6_SWIZZLE_9_11:
1994                 return "bit9/bit11";
1995         case I915_BIT_6_SWIZZLE_9_10_11:
1996                 return "bit9/bit10/bit11";
1997         case I915_BIT_6_SWIZZLE_9_17:
1998                 return "bit9/bit17";
1999         case I915_BIT_6_SWIZZLE_9_10_17:
2000                 return "bit9/bit10/bit17";
2001         case I915_BIT_6_SWIZZLE_UNKNOWN:
2002                 return "unknown";
2003         }
2004
2005         return "bug";
2006 }
2007
/*
 * Report the detected bit-6 swizzle modes for X and Y tiling, plus the
 * raw memory-controller/arbiter registers the detection derives from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);

        intel_runtime_pm_get(dev_priv);

        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

        /* The relevant DRAM-configuration registers differ by generation. */
        if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
                           I915_READ(DCC2));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           I915_READ16(C1DRB3));
        } else if (INTEL_GEN(dev_priv) >= 6) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C1));
                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
                /* The arbiter-mode register moved on gen8. */
                if (INTEL_GEN(dev_priv) >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
                        seq_printf(m, "ARB_MODE = 0x%08x\n",
                                   I915_READ(ARB_MODE));
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           I915_READ(DISP_ARB_CTL));
        }

        if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                seq_puts(m, "L-shaped memory detected\n");

        intel_runtime_pm_put(dev_priv);

        return 0;
}
2054
2055 static int per_file_ctx(int id, void *ptr, void *data)
2056 {
2057         struct i915_gem_context *ctx = ptr;
2058         struct seq_file *m = data;
2059         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2060
2061         if (!ppgtt) {
2062                 seq_printf(m, "  no ppgtt for context %d\n",
2063                            ctx->user_handle);
2064                 return 0;
2065         }
2066
2067         if (i915_gem_context_is_default(ctx))
2068                 seq_puts(m, "  default context:\n");
2069         else
2070                 seq_printf(m, "  context %d:\n", ctx->user_handle);
2071         ppgtt->debug_dump(ppgtt, m);
2072
2073         return 0;
2074 }
2075
2076 static void gen8_ppgtt_info(struct seq_file *m,
2077                             struct drm_i915_private *dev_priv)
2078 {
2079         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2080         struct intel_engine_cs *engine;
2081         enum intel_engine_id id;
2082         int i;
2083
2084         if (!ppgtt)
2085                 return;
2086
2087         for_each_engine(engine, dev_priv, id) {
2088                 seq_printf(m, "%s\n", engine->name);
2089                 for (i = 0; i < 4; i++) {
2090                         u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2091                         pdp <<= 32;
2092                         pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2093                         seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2094                 }
2095         }
2096 }
2097
2098 static void gen6_ppgtt_info(struct seq_file *m,
2099                             struct drm_i915_private *dev_priv)
2100 {
2101         struct intel_engine_cs *engine;
2102         enum intel_engine_id id;
2103
2104         if (IS_GEN6(dev_priv))
2105                 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2106
2107         for_each_engine(engine, dev_priv, id) {
2108                 seq_printf(m, "%s\n", engine->name);
2109                 if (IS_GEN7(dev_priv))
2110                         seq_printf(m, "GFX_MODE: 0x%08x\n",
2111                                    I915_READ(RING_MODE_GEN7(engine)));
2112                 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2113                            I915_READ(RING_PP_DIR_BASE(engine)));
2114                 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2115                            I915_READ(RING_PP_DIR_BASE_READ(engine)));
2116                 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2117                            I915_READ(RING_PP_DIR_DCLV(engine)));
2118         }
2119         if (dev_priv->mm.aliasing_ppgtt) {
2120                 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2121
2122                 seq_puts(m, "aliasing PPGTT:\n");
2123                 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2124
2125                 ppgtt->debug_dump(ppgtt, m);
2126         }
2127
2128         seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2129 }
2130
/*
 * PPGTT debugfs entry point: dump the generation-specific PPGTT
 * registers, then walk every open DRM file and dump each of its
 * contexts' PPGTTs via per_file_ctx().
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_file *file;
        int ret;

        /* filelist_mutex protects dev->filelist; struct_mutex protects
         * the GEM/PPGTT state read below. Taken in that order. */
        mutex_lock(&dev->filelist_mutex);
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out_unlock;

        intel_runtime_pm_get(dev_priv);

        if (INTEL_GEN(dev_priv) >= 8)
                gen8_ppgtt_info(m, dev_priv);
        else if (INTEL_GEN(dev_priv) >= 6)
                gen6_ppgtt_info(m, dev_priv);

        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;

                /* Take a reference so ->comm stays valid while printing. */
                task = get_pid_task(file->pid, PIDTYPE_PID);
                if (!task) {
                        ret = -ESRCH;
                        goto out_rpm;
                }
                seq_printf(m, "\nproc: %s\n", task->comm);
                put_task_struct(task);
                /* Dump every context owned by this file. */
                idr_for_each(&file_priv->context_idr, per_file_ctx,
                             (void *)(unsigned long)m);
        }

out_rpm:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
out_unlock:
        mutex_unlock(&dev->filelist_mutex);
        return ret;
}
2172
2173 static int count_irq_waiters(struct drm_i915_private *i915)
2174 {
2175         struct intel_engine_cs *engine;
2176         enum intel_engine_id id;
2177         int count = 0;
2178
2179         for_each_engine(engine, i915, id)
2180                 count += intel_engine_has_waiter(engine);
2181
2182         return count;
2183 }
2184
2185 static const char *rps_power_to_str(unsigned int power)
2186 {
2187         static const char * const strings[] = {
2188                 [LOW_POWER] = "low power",
2189                 [BETWEEN] = "mixed",
2190                 [HIGH_POWER] = "high power",
2191         };
2192
2193         if (power >= ARRAY_SIZE(strings) || !strings[power])
2194                 return "unknown";
2195
2196         return strings[power];
2197 }
2198
/*
 * RPS (render P-state) boost report: current/limit frequencies,
 * outstanding boosts per client, and — while the GPU is busy — the
 * autotuning up/down evaluation-interval statistics.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct drm_file *file;

        seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s [%d requests]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
        seq_printf(m, "Frequency requested %d\n",
                   intel_gpu_freq(dev_priv, rps->cur_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, rps->min_freq),
                   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
                   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
                   intel_gpu_freq(dev_priv, rps->max_freq));
        seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
                   intel_gpu_freq(dev_priv, rps->idle_freq),
                   intel_gpu_freq(dev_priv, rps->efficient_freq),
                   intel_gpu_freq(dev_priv, rps->boost_freq));

        /* Per-client boost counts: walk the open files under filelist_mutex;
         * pid_task() only needs RCU (no task reference is taken). */
        mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;

                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "%s [%d]: %d boosts\n",
                           task ? task->comm : "<unknown>",
                           task ? task->pid : -1,
                           atomic_read(&file_priv->rps_client.boosts));
                rcu_read_unlock();
        }
        seq_printf(m, "Kernel (anonymous) boosts: %d\n",
                   atomic_read(&rps->boosts));
        mutex_unlock(&dev->filelist_mutex);

        /* The autotuning counters are only meaningful while RPS is
         * enabled and requests are in flight. */
        if (INTEL_GEN(dev_priv) >= 6 &&
            rps->enabled &&
            dev_priv->gt.active_requests) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;

                /* Hold forcewake across the four raw reads for a
                 * consistent snapshot. */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
                rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
                rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
                rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
                rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
                           rps_power_to_str(rps->power));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
                           rps->up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
                           rps->down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }

        return 0;
}
2268
2269 static int i915_llc(struct seq_file *m, void *data)
2270 {
2271         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2272         const bool edram = INTEL_GEN(dev_priv) > 8;
2273
2274         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2275         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2276                    intel_uncore_edram_size(dev_priv)/1024/1024);
2277
2278         return 0;
2279 }
2280
/*
 * i915_huc_load_status_info - debugfs dump of HuC firmware load state.
 *
 * Dumps the cached firmware descriptor, then reads the live HUC_STATUS2
 * register.  Returns -ENODEV on hardware without a HuC.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	/* Hold a runtime-pm wakeref across the register read. */
	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}
2298
/*
 * i915_guc_load_status_info - debugfs dump of GuC firmware load state.
 *
 * Dumps the cached firmware descriptor, then decodes the live GUC_STATUS
 * register and prints the 16 software scratch registers.  Returns -ENODEV
 * on hardware without a GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Hold a runtime-pm wakeref across the register reads. */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2330
2331 static const char *
2332 stringify_guc_log_type(enum guc_log_buffer_type type)
2333 {
2334         switch (type) {
2335         case GUC_ISR_LOG_BUFFER:
2336                 return "ISR";
2337         case GUC_DPC_LOG_BUFFER:
2338                 return "DPC";
2339         case GUC_CRASH_DUMP_LOG_BUFFER:
2340                 return "CRASH";
2341         default:
2342                 MISSING_CASE(type);
2343         }
2344
2345         return "";
2346 }
2347
/*
 * i915_guc_log_info - print GuC log relay statistics into a seq_file.
 *
 * Prints nothing but a notice when the log relay is not enabled;
 * otherwise dumps the relay-full count and per-buffer-type flush /
 * overflow statistics.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	/* One stats line per log buffer type (ISR/DPC/CRASH). */
	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2371
2372 static void i915_guc_client_info(struct seq_file *m,
2373                                  struct drm_i915_private *dev_priv,
2374                                  struct intel_guc_client *client)
2375 {
2376         struct intel_engine_cs *engine;
2377         enum intel_engine_id id;
2378         uint64_t tot = 0;
2379
2380         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2381                 client->priority, client->stage_id, client->proc_desc_offset);
2382         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2383                 client->doorbell_id, client->doorbell_offset);
2384
2385         for_each_engine(engine, dev_priv, id) {
2386                 u64 submissions = client->submissions[id];
2387                 tot += submissions;
2388                 seq_printf(m, "\tSubmissions: %llu %s\n",
2389                                 submissions, engine->name);
2390         }
2391         seq_printf(m, "\tTotal: %llu\n", tot);
2392 }
2393
/*
 * i915_guc_info - top-level debugfs summary of GuC state.
 *
 * Always prints log-relay stats when GuC is in use; the doorbell map and
 * per-client details are only meaningful (and only printed) when GuC
 * submission is enabled.  Returns -ENODEV when GuC is not used.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* With submission enabled the execbuf client must exist. */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	/* The preempt client is optional (depends on preemption support). */
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2425
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor.
 *
 * Walks the CPU mapping of the stage-descriptor pool, printing doorbell,
 * process-descriptor and workqueue details for each active entry, plus
 * the per-engine execlist context belonging to the execbuf client.
 * Returns -ENODEV when GuC submission is not in use.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots not currently assigned to a client. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* LRC state for each engine the execbuf client uses. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2479
/*
 * i915_guc_log_dump - hex-dump a GuC log buffer to debugfs.
 *
 * Dumps either the regular GuC log or the load-error log; which one is
 * selected by the debugfs entry's ->data (non-NULL means load-error).
 * Silently prints nothing when the selected buffer does not exist.
 * Returns -ENODEV without a GuC, or a pin error.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	/* Map write-combined; we only read, but the buffer is uncached. */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/*
	 * Four dwords per line.  NOTE(review): i+1..i+3 are read without a
	 * bound check — presumably object sizes are always a multiple of
	 * 16 bytes (page-aligned GEM objects); confirm before changing.
	 */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2518
/*
 * i915_guc_log_level_get - debugfs attribute getter for the GuC log level.
 * Returns -ENODEV when GuC is not in use.
 */
static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	*val = intel_guc_log_level_get(&dev_priv->guc.log);

	return 0;
}
2530
/*
 * i915_guc_log_level_set - debugfs attribute setter for the GuC log level.
 * Returns -ENODEV when GuC is not in use, else the result of the
 * underlying level change.
 */
static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	return intel_guc_log_level_set(&dev_priv->guc.log, val);
}
2540
/* debugfs file wiring i915_guc_log_level_{get,set} to a single u64 value. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2544
/*
 * i915_guc_log_relay_open - open callback for the GuC log relay control
 * file; stashes the log in file->private_data for the write handler.
 * Returns -ENODEV when GuC is not in use.
 */
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	file->private_data = &dev_priv->guc.log;

	return intel_guc_log_relay_open(&dev_priv->guc.log);
}
2556
/*
 * i915_guc_log_relay_write - any write to the relay control file forces a
 * flush of the GuC log; the written bytes themselves are ignored.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	/* Claim the whole write was consumed. */
	return cnt;
}
2569
/*
 * i915_guc_log_relay_release - close callback; tears down the relay
 * opened by i915_guc_log_relay_open().
 */
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	intel_guc_log_relay_close(&dev_priv->guc.log);

	return 0;
}
2578
/* File operations for the GuC log relay debugfs control file. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2585
2586 static const char *psr2_live_status(u32 val)
2587 {
2588         static const char * const live_status[] = {
2589                 "IDLE",
2590                 "CAPTURE",
2591                 "CAPTURE_FS",
2592                 "SLEEP",
2593                 "BUFON_FW",
2594                 "ML_UP",
2595                 "SU_STANDBY",
2596                 "FAST_SLEEP",
2597                 "DEEP_SLEEP",
2598                 "BUF_ON",
2599                 "TG_ON"
2600         };
2601
2602         val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2603         if (val < ARRAY_SIZE(live_status))
2604                 return live_status[val];
2605
2606         return "unknown";
2607 }
2608
2609 static const char *psr_sink_status(u8 val)
2610 {
2611         static const char * const sink_status[] = {
2612                 "inactive",
2613                 "transition to active, capture and display",
2614                 "active, display from RFB",
2615                 "active, capture and display on sink device timings",
2616                 "transition to inactive, capture and display, timing re-sync",
2617                 "reserved",
2618                 "reserved",
2619                 "sink internal error"
2620         };
2621
2622         val &= DP_PSR_SINK_STATE_MASK;
2623         if (val < ARRAY_SIZE(sink_status))
2624                 return sink_status[val];
2625
2626         return "unknown";
2627 }
2628
2629 static int i915_edp_psr_status(struct seq_file *m, void *data)
2630 {
2631         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2632         u32 psrperf = 0;
2633         u32 stat[3];
2634         enum pipe pipe;
2635         bool enabled = false;
2636         bool sink_support;
2637
2638         if (!HAS_PSR(dev_priv))
2639                 return -ENODEV;
2640
2641         sink_support = dev_priv->psr.sink_support;
2642         seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2643         if (!sink_support)
2644                 return 0;
2645
2646         intel_runtime_pm_get(dev_priv);
2647
2648         mutex_lock(&dev_priv->psr.lock);
2649         seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2650         seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2651                    dev_priv->psr.busy_frontbuffer_bits);
2652         seq_printf(m, "Re-enable work scheduled: %s\n",
2653                    yesno(work_busy(&dev_priv->psr.work.work)));
2654
2655         if (HAS_DDI(dev_priv)) {
2656                 if (dev_priv->psr.psr2_enabled)
2657                         enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2658                 else
2659                         enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2660         } else {
2661                 for_each_pipe(dev_priv, pipe) {
2662                         enum transcoder cpu_transcoder =
2663                                 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2664                         enum intel_display_power_domain power_domain;
2665
2666                         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2667                         if (!intel_display_power_get_if_enabled(dev_priv,
2668                                                                 power_domain))
2669                                 continue;
2670
2671                         stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2672                                 VLV_EDP_PSR_CURR_STATE_MASK;
2673                         if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2674                             (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2675                                 enabled = true;
2676
2677                         intel_display_power_put(dev_priv, power_domain);
2678                 }
2679         }
2680
2681         seq_printf(m, "Main link in standby mode: %s\n",
2682                    yesno(dev_priv->psr.link_standby));
2683
2684         seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2685
2686         if (!HAS_DDI(dev_priv))
2687                 for_each_pipe(dev_priv, pipe) {
2688                         if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2689                             (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2690                                 seq_printf(m, " pipe %c", pipe_name(pipe));
2691                 }
2692         seq_puts(m, "\n");
2693
2694         /*
2695          * VLV/CHV PSR has no kind of performance counter
2696          * SKL+ Perf counter is reset to 0 everytime DC state is entered
2697          */
2698         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2699                 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2700                         EDP_PSR_PERF_CNT_MASK;
2701
2702                 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2703         }
2704         if (dev_priv->psr.psr2_enabled) {
2705                 u32 psr2 = I915_READ(EDP_PSR2_STATUS);
2706
2707                 seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
2708                            psr2, psr2_live_status(psr2));
2709         }
2710
2711         if (dev_priv->psr.enabled) {
2712                 struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
2713                 u8 val;
2714
2715                 if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
2716                         seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
2717                                    psr_sink_status(val));
2718         }
2719         mutex_unlock(&dev_priv->psr.lock);
2720
2721         if (READ_ONCE(dev_priv->psr.debug)) {
2722                 seq_printf(m, "Last attempted entry at: %lld\n",
2723                            dev_priv->psr.last_entry_attempt);
2724                 seq_printf(m, "Last exit at: %lld\n",
2725                            dev_priv->psr.last_exit);
2726         }
2727
2728         intel_runtime_pm_put(dev_priv);
2729         return 0;
2730 }
2731
/*
 * i915_edp_psr_debug_set - debugfs attribute setter toggling PSR debug
 * interrupt reporting.  Returns -ENODEV when PSR cannot be used.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));

	/* Touching the IRQ control registers requires the device awake. */
	intel_runtime_pm_get(dev_priv);
	intel_psr_irq_control(dev_priv, !!val);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
2748
/*
 * i915_edp_psr_debug_get - debugfs attribute getter for the PSR debug
 * flag.  Returns -ENODEV when PSR cannot be used.
 */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	*val = READ_ONCE(dev_priv->psr.debug);
	return 0;
}
2760
/* debugfs file wiring i915_edp_psr_debug_{get,set} to a single u64 value. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2764
/*
 * i915_sink_crc - read a frame CRC from the first active eDP sink.
 *
 * Walks the connector list looking for an active eDP connector, waits
 * for any pending commit on its CRTC, then asks the sink for its CRC
 * over DPCD and prints it as 6 hex bytes.  Uses the drm_modeset_lock
 * acquire-context protocol: -EDEADLK triggers a backoff-and-retry.
 * Returns 0 on success, -ENODEV when no suitable connector was found,
 * or a locking/DPCD error.
 *
 * Note the `continue` paths below deliberately keep any locks taken so
 * far; they are all dropped in bulk at `out` via drm_modeset_drop_locks.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* Deadlock with another acquire context: back off and retry. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2844
/*
 * i915_energy_uJ - report GPU energy consumption in microjoules.
 *
 * Combines the RAPL energy-unit exponent from MSR_RAPL_POWER_UNIT with
 * the MCH energy-status counter.  Returns -ENODEV pre-gen6 or when the
 * MSR read fails.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 hold the energy-status unit exponent (2^-units J). */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2871
/*
 * i915_runtime_pm_status - debugfs summary of runtime power management.
 *
 * Note: when runtime PM is unsupported this prints a notice but
 * deliberately continues — the remaining fields are still informative.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2896
2897 static int i915_power_domain_info(struct seq_file *m, void *unused)
2898 {
2899         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2900         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2901         int i;
2902
2903         mutex_lock(&power_domains->lock);
2904
2905         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2906         for (i = 0; i < power_domains->power_well_count; i++) {
2907                 struct i915_power_well *power_well;
2908                 enum intel_display_power_domain power_domain;
2909
2910                 power_well = &power_domains->power_wells[i];
2911                 seq_printf(m, "%-25s %d\n", power_well->name,
2912                            power_well->count);
2913
2914                 for_each_power_domain(power_domain, power_well->domains)
2915                         seq_printf(m, "  %-23s %d\n",
2916                                  intel_display_power_domain_str(power_domain),
2917                                  power_domains->domain_use_count[power_domain]);
2918         }
2919
2920         mutex_unlock(&power_domains->lock);
2921
2922         return 0;
2923 }
2924
/*
 * i915_dmc_info - debugfs dump of DMC/CSR firmware state.
 *
 * Prints firmware path and version, plus DC-state transition counters on
 * the platforms/firmware revisions that expose them.  Returns -ENODEV on
 * hardware without CSR.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Register reads below need the device awake. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counters only exist on these platform/firmware combinations. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2966
2967 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2968                                  struct drm_display_mode *mode)
2969 {
2970         int i;
2971
2972         for (i = 0; i < tabs; i++)
2973                 seq_putc(m, '\t');
2974
2975         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2976                    mode->base.id, mode->name,
2977                    mode->vrefresh, mode->clock,
2978                    mode->hdisplay, mode->hsync_start,
2979                    mode->hsync_end, mode->htotal,
2980                    mode->vdisplay, mode->vsync_start,
2981                    mode->vsync_end, mode->vtotal,
2982                    mode->type, mode->flags);
2983 }
2984
/*
 * intel_encoder_info - print one encoder and each connector attached to
 * it; for connected connectors, also dump the CRTC's current mode.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* Mode comes from the CRTC, not the connector. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
3013
/*
 * intel_crtc_info - print one CRTC's primary-plane framebuffer details
 * and recurse into every encoder attached to it.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; shift to pixels. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
3032
3033 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3034 {
3035         struct drm_display_mode *mode = panel->fixed_mode;
3036
3037         seq_printf(m, "\tfixed mode:\n");
3038         intel_seq_print_mode(m, 2, mode);
3039 }
3040
/*
 * intel_dp_info - print DisplayPort connector details: DPCD revision,
 * audio support, the fixed panel mode for eDP, and downstream port info.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3055
3056 static void intel_dp_mst_info(struct seq_file *m,
3057                           struct intel_connector *intel_connector)
3058 {
3059         struct intel_encoder *intel_encoder = intel_connector->encoder;
3060         struct intel_dp_mst_encoder *intel_mst =
3061                 enc_to_mst(&intel_encoder->base);
3062         struct intel_digital_port *intel_dig_port = intel_mst->primary;
3063         struct intel_dp *intel_dp = &intel_dig_port->dp;
3064         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3065                                         intel_connector->port);
3066
3067         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3068 }
3069
/* Print HDMI-specific connector state (currently just audio support). */
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}
3078
/* LVDS connectors only have their fixed panel mode to report. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3084
3085 static void intel_connector_info(struct seq_file *m,
3086                                  struct drm_connector *connector)
3087 {
3088         struct intel_connector *intel_connector = to_intel_connector(connector);
3089         struct intel_encoder *intel_encoder = intel_connector->encoder;
3090         struct drm_display_mode *mode;
3091
3092         seq_printf(m, "connector %d: type %s, status: %s\n",
3093                    connector->base.id, connector->name,
3094                    drm_get_connector_status_name(connector->status));
3095         if (connector->status == connector_status_connected) {
3096                 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3097                 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3098                            connector->display_info.width_mm,
3099                            connector->display_info.height_mm);
3100                 seq_printf(m, "\tsubpixel order: %s\n",
3101                            drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3102                 seq_printf(m, "\tCEA rev: %d\n",
3103                            connector->display_info.cea_rev);
3104         }
3105
3106         if (!intel_encoder)
3107                 return;
3108
3109         switch (connector->connector_type) {
3110         case DRM_MODE_CONNECTOR_DisplayPort:
3111         case DRM_MODE_CONNECTOR_eDP:
3112                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3113                         intel_dp_mst_info(m, intel_connector);
3114                 else
3115                         intel_dp_info(m, intel_connector);
3116                 break;
3117         case DRM_MODE_CONNECTOR_LVDS:
3118                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3119                         intel_lvds_info(m, intel_connector);
3120                 break;
3121         case DRM_MODE_CONNECTOR_HDMIA:
3122                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3123                     intel_encoder->type == INTEL_OUTPUT_DDI)
3124                         intel_hdmi_info(m, intel_connector);
3125                 break;
3126         default:
3127                 break;
3128         }
3129
3130         seq_printf(m, "\tmodes:\n");
3131         list_for_each_entry(mode, &connector->modes, head)
3132                 intel_seq_print_mode(m, 2, mode);
3133 }
3134
/* Map a drm_plane_type to the short tag used in the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3152
/*
 * Format a plane rotation bitmask as human-readable text.
 *
 * NOTE(review): returns a pointer into a single static buffer, so the
 * function is not reentrant — concurrent debugfs readers would race on
 * the buffer contents.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3172
3173 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3174 {
3175         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3176         struct drm_device *dev = &dev_priv->drm;
3177         struct intel_plane *intel_plane;
3178
3179         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3180                 struct drm_plane_state *state;
3181                 struct drm_plane *plane = &intel_plane->base;
3182                 struct drm_format_name_buf format_name;
3183
3184                 if (!plane->state) {
3185                         seq_puts(m, "plane->state is NULL!\n");
3186                         continue;
3187                 }
3188
3189                 state = plane->state;
3190
3191                 if (state->fb) {
3192                         drm_get_format_name(state->fb->format->format,
3193                                             &format_name);
3194                 } else {
3195                         sprintf(format_name.str, "N/A");
3196                 }
3197
3198                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3199                            plane->base.id,
3200                            plane_type(intel_plane->base.type),
3201                            state->crtc_x, state->crtc_y,
3202                            state->crtc_w, state->crtc_h,
3203                            (state->src_x >> 16),
3204                            ((state->src_x & 0xffff) * 15625) >> 10,
3205                            (state->src_y >> 16),
3206                            ((state->src_y & 0xffff) * 15625) >> 10,
3207                            (state->src_w >> 16),
3208                            ((state->src_w & 0xffff) * 15625) >> 10,
3209                            (state->src_h >> 16),
3210                            ((state->src_h & 0xffff) * 15625) >> 10,
3211                            format_name.str,
3212                            plane_rotation(state->rotation));
3213         }
3214 }
3215
/* Print pipe scaler usage for @intel_crtc. */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3243
3244 static int i915_display_info(struct seq_file *m, void *unused)
3245 {
3246         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3247         struct drm_device *dev = &dev_priv->drm;
3248         struct intel_crtc *crtc;
3249         struct drm_connector *connector;
3250         struct drm_connector_list_iter conn_iter;
3251
3252         intel_runtime_pm_get(dev_priv);
3253         seq_printf(m, "CRTC info\n");
3254         seq_printf(m, "---------\n");
3255         for_each_intel_crtc(dev, crtc) {
3256                 struct intel_crtc_state *pipe_config;
3257
3258                 drm_modeset_lock(&crtc->base.mutex, NULL);
3259                 pipe_config = to_intel_crtc_state(crtc->base.state);
3260
3261                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3262                            crtc->base.base.id, pipe_name(crtc->pipe),
3263                            yesno(pipe_config->base.active),
3264                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3265                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3266
3267                 if (pipe_config->base.active) {
3268                         struct intel_plane *cursor =
3269                                 to_intel_plane(crtc->base.cursor);
3270
3271                         intel_crtc_info(m, crtc);
3272
3273                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3274                                    yesno(cursor->base.state->visible),
3275                                    cursor->base.state->crtc_x,
3276                                    cursor->base.state->crtc_y,
3277                                    cursor->base.state->crtc_w,
3278                                    cursor->base.state->crtc_h,
3279                                    cursor->cursor.base);
3280                         intel_scaler_info(m, crtc);
3281                         intel_plane_info(m, crtc);
3282                 }
3283
3284                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3285                            yesno(!crtc->cpu_fifo_underrun_disabled),
3286                            yesno(!crtc->pch_fifo_underrun_disabled));
3287                 drm_modeset_unlock(&crtc->base.mutex);
3288         }
3289
3290         seq_printf(m, "\n");
3291         seq_printf(m, "Connector info\n");
3292         seq_printf(m, "--------------\n");
3293         mutex_lock(&dev->mode_config.mutex);
3294         drm_connector_list_iter_begin(dev, &conn_iter);
3295         drm_for_each_connector_iter(connector, &conn_iter)
3296                 intel_connector_info(m, connector);
3297         drm_connector_list_iter_end(&conn_iter);
3298         mutex_unlock(&dev->mode_config.mutex);
3299
3300         intel_runtime_pm_put(dev_priv);
3301
3302         return 0;
3303 }
3304
3305 static int i915_engine_info(struct seq_file *m, void *unused)
3306 {
3307         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3308         struct intel_engine_cs *engine;
3309         enum intel_engine_id id;
3310         struct drm_printer p;
3311
3312         intel_runtime_pm_get(dev_priv);
3313
3314         seq_printf(m, "GT awake? %s (epoch %u)\n",
3315                    yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3316         seq_printf(m, "Global active requests: %d\n",
3317                    dev_priv->gt.active_requests);
3318         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3319                    dev_priv->info.cs_timestamp_frequency_khz);
3320
3321         p = drm_seq_file_printer(m);
3322         for_each_engine(engine, dev_priv, id)
3323                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3324
3325         intel_runtime_pm_put(dev_priv);
3326
3327         return 0;
3328 }
3329
/* debugfs: dump the device's sseu (slice/subslice/EU) topology. */
static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);

	return 0;
}
3339
/* debugfs: report the GEM shrinker's tuning parameters. */
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
3349
3350 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3351 {
3352         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3353         struct drm_device *dev = &dev_priv->drm;
3354         int i;
3355
3356         drm_modeset_lock_all(dev);
3357         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3358                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3359
3360                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3361                            pll->info->id);
3362                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3363                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3364                 seq_printf(m, " tracked hardware state:\n");
3365                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3366                 seq_printf(m, " dpll_md: 0x%08x\n",
3367                            pll->state.hw_state.dpll_md);
3368                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3369                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3370                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3371                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3372                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3373                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3374                            pll->state.hw_state.mg_refclkin_ctl);
3375                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3376                            pll->state.hw_state.mg_clktop2_coreclkctl1);
3377                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3378                            pll->state.hw_state.mg_clktop2_hsclkctl);
3379                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
3380                            pll->state.hw_state.mg_pll_div0);
3381                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
3382                            pll->state.hw_state.mg_pll_div1);
3383                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
3384                            pll->state.hw_state.mg_pll_lf);
3385                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3386                            pll->state.hw_state.mg_pll_frac_lock);
3387                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3388                            pll->state.hw_state.mg_pll_ssc);
3389                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
3390                            pll->state.hw_state.mg_pll_bias);
3391                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3392                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
3393         }
3394         drm_modeset_unlock_all(dev);
3395
3396         return 0;
3397 }
3398
3399 static int i915_wa_registers(struct seq_file *m, void *unused)
3400 {
3401         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3402         struct i915_workarounds *workarounds = &dev_priv->workarounds;
3403         int i;
3404
3405         intel_runtime_pm_get(dev_priv);
3406
3407         seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3408         for (i = 0; i < workarounds->count; ++i) {
3409                 i915_reg_t addr;
3410                 u32 mask, value, read;
3411                 bool ok;
3412
3413                 addr = workarounds->reg[i].addr;
3414                 mask = workarounds->reg[i].mask;
3415                 value = workarounds->reg[i].value;
3416                 read = I915_READ(addr);
3417                 ok = (value & mask) == (read & mask);
3418                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3419                            i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3420         }
3421
3422         intel_runtime_pm_put(dev_priv);
3423
3424         return 0;
3425 }
3426
/* debugfs read side: report whether IPC is currently enabled. */
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
			yesno(dev_priv->ipc_enabled));
	return 0;
}
3435
/* Only allow opening the IPC file on hardware that has IPC. */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}
3445
/*
 * debugfs write side: parse a boolean from userspace and enable/disable
 * IPC.  Watermarks only become correct on the next commit, hence the
 * DRM_INFO warning and the distrust_bios_wm flag.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* Force a full watermark recalculation on the next atomic commit */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3468
/* debugfs i915_ipc_status: seq_file read, write toggles IPC. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3477
/* debugfs: dump the DDB (display data buffer) allocation per pipe/plane. */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	/* The skl_hw DDB state this reads only exists on gen9+ */
	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* The cursor plane entry is tracked separately */
		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3515
/*
 * Print DRRS (Dynamic Refresh Rate Switching) status for one CRTC:
 * the connector(s) driven by it, the VBT-reported DRRS type, and —
 * when the current CRTC state has DRRS — the live refresh-rate state
 * under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently assigned to this CRTC */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex protects drrs->dp and the refresh-rate state */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3590
3591 static int i915_drrs_status(struct seq_file *m, void *unused)
3592 {
3593         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3594         struct drm_device *dev = &dev_priv->drm;
3595         struct intel_crtc *intel_crtc;
3596         int active_crtc_cnt = 0;
3597
3598         drm_modeset_lock_all(dev);
3599         for_each_intel_crtc(dev, intel_crtc) {
3600                 if (intel_crtc->base.state->active) {
3601                         active_crtc_cnt++;
3602                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3603
3604                         drrs_status_per_crtc(m, dev, intel_crtc);
3605                 }
3606         }
3607         drm_modeset_unlock_all(dev);
3608
3609         if (!active_crtc_cnt)
3610                 seq_puts(m, "No active crtc found\n");
3611
3612         return 0;
3613 }
3614
/* debugfs: dump the MST topology under every MST-capable DP source port. */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST branch connectors; only source ports are wanted */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3645
/*
 * debugfs write side for DP compliance testing: copy the user buffer,
 * parse an integer, and arm/disarm compliance test_active on every
 * connected (non-MST) DisplayPort connector.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* NUL-terminated copy of the user buffer for kstrtoint() */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3704
3705 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3706 {
3707         struct drm_i915_private *dev_priv = m->private;
3708         struct drm_device *dev = &dev_priv->drm;
3709         struct drm_connector *connector;
3710         struct drm_connector_list_iter conn_iter;
3711         struct intel_dp *intel_dp;
3712
3713         drm_connector_list_iter_begin(dev, &conn_iter);
3714         drm_for_each_connector_iter(connector, &conn_iter) {
3715                 struct intel_encoder *encoder;
3716
3717                 if (connector->connector_type !=
3718                     DRM_MODE_CONNECTOR_DisplayPort)
3719                         continue;
3720
3721                 encoder = to_intel_encoder(connector->encoder);
3722                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3723                         continue;
3724
3725                 if (encoder && connector->status == connector_status_connected) {
3726                         intel_dp = enc_to_intel_dp(&encoder->base);
3727                         if (intel_dp->compliance.test_active)
3728                                 seq_puts(m, "1");
3729                         else
3730                                 seq_puts(m, "0");
3731                 } else
3732                         seq_puts(m, "0");
3733         }
3734         drm_connector_list_iter_end(&conn_iter);
3735
3736         return 0;
3737 }
3738
/* debugfs open: bind the show callback via single_open(). */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}
3745
/* debugfs i915_dp_test_active: seq_file read, write arms the test. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3754
/*
 * Dump DP compliance test data for every connected (non-MST) DisplayPort
 * connector.  For EDID-read tests the raw test data is printed as hex;
 * for video-pattern tests the requested hdisplay/vdisplay/bpc are printed;
 * disconnected DP connectors contribute a literal "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		/* Only DisplayPort connectors carry compliance state. */
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST streams are skipped entirely. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3798
/*
 * Print the pending DP compliance test type (as hex) for every connected
 * non-MST DisplayPort connector; "0" for disconnected ones.  Mirrors the
 * iteration logic of i915_displayport_test_data_show().
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST streams are skipped, as above. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3830
/*
 * Print one "WMn <raw> (<X.Y> usec)" line per watermark level.
 *
 * The number of valid levels is platform dependent (3 on CHV/G4X, 1 on
 * VLV, otherwise ilk_wm_max_level() + 1).  Raw values are scaled to
 * tenths of a microsecond for display: gen9+/vlv/chv/g4x store latencies
 * in 1us units (*10), older ilk-style platforms store WM1+ in 0.5us
 * units (*5) with WM0 already in 1us units.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Hold all modeset locks so the wm values can't change mid-dump. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3870
3871 static int pri_wm_latency_show(struct seq_file *m, void *data)
3872 {
3873         struct drm_i915_private *dev_priv = m->private;
3874         const uint16_t *latencies;
3875
3876         if (INTEL_GEN(dev_priv) >= 9)
3877                 latencies = dev_priv->wm.skl_latency;
3878         else
3879                 latencies = dev_priv->wm.pri_latency;
3880
3881         wm_latency_show(m, latencies);
3882
3883         return 0;
3884 }
3885
3886 static int spr_wm_latency_show(struct seq_file *m, void *data)
3887 {
3888         struct drm_i915_private *dev_priv = m->private;
3889         const uint16_t *latencies;
3890
3891         if (INTEL_GEN(dev_priv) >= 9)
3892                 latencies = dev_priv->wm.skl_latency;
3893         else
3894                 latencies = dev_priv->wm.spr_latency;
3895
3896         wm_latency_show(m, latencies);
3897
3898         return 0;
3899 }
3900
3901 static int cur_wm_latency_show(struct seq_file *m, void *data)
3902 {
3903         struct drm_i915_private *dev_priv = m->private;
3904         const uint16_t *latencies;
3905
3906         if (INTEL_GEN(dev_priv) >= 9)
3907                 latencies = dev_priv->wm.skl_latency;
3908         else
3909                 latencies = dev_priv->wm.cur_latency;
3910
3911         wm_latency_show(m, latencies);
3912
3913         return 0;
3914 }
3915
3916 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3917 {
3918         struct drm_i915_private *dev_priv = inode->i_private;
3919
3920         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3921                 return -ENODEV;
3922
3923         return single_open(file, pri_wm_latency_show, dev_priv);
3924 }
3925
3926 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3927 {
3928         struct drm_i915_private *dev_priv = inode->i_private;
3929
3930         if (HAS_GMCH_DISPLAY(dev_priv))
3931                 return -ENODEV;
3932
3933         return single_open(file, spr_wm_latency_show, dev_priv);
3934 }
3935
3936 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3937 {
3938         struct drm_i915_private *dev_priv = inode->i_private;
3939
3940         if (HAS_GMCH_DISPLAY(dev_priv))
3941                 return -ENODEV;
3942
3943         return single_open(file, cur_wm_latency_show, dev_priv);
3944 }
3945
/*
 * Parse user-supplied watermark latencies and store them into @wm.
 *
 * The input must contain exactly num_levels decimal values (num_levels is
 * platform dependent, mirroring wm_latency_show()); any other count is
 * rejected with -EINVAL.  The update is done under the modeset locks so
 * readers never see a partially written table.  Returns @len on success.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reserve room for the terminating NUL below. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* sscanf stops at the first non-matching token; partial input fails. */
	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3990
3991
3992 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3993                                     size_t len, loff_t *offp)
3994 {
3995         struct seq_file *m = file->private_data;
3996         struct drm_i915_private *dev_priv = m->private;
3997         uint16_t *latencies;
3998
3999         if (INTEL_GEN(dev_priv) >= 9)
4000                 latencies = dev_priv->wm.skl_latency;
4001         else
4002                 latencies = dev_priv->wm.pri_latency;
4003
4004         return wm_latency_write(file, ubuf, len, offp, latencies);
4005 }
4006
4007 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4008                                     size_t len, loff_t *offp)
4009 {
4010         struct seq_file *m = file->private_data;
4011         struct drm_i915_private *dev_priv = m->private;
4012         uint16_t *latencies;
4013
4014         if (INTEL_GEN(dev_priv) >= 9)
4015                 latencies = dev_priv->wm.skl_latency;
4016         else
4017                 latencies = dev_priv->wm.spr_latency;
4018
4019         return wm_latency_write(file, ubuf, len, offp, latencies);
4020 }
4021
4022 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4023                                     size_t len, loff_t *offp)
4024 {
4025         struct seq_file *m = file->private_data;
4026         struct drm_i915_private *dev_priv = m->private;
4027         uint16_t *latencies;
4028
4029         if (INTEL_GEN(dev_priv) >= 9)
4030                 latencies = dev_priv->wm.skl_latency;
4031         else
4032                 latencies = dev_priv->wm.cur_latency;
4033
4034         return wm_latency_write(file, ubuf, len, offp, latencies);
4035 }
4036
/* fops for the primary/sprite/cursor watermark latency debugfs files. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4063
/* Report whether the GPU is terminally wedged (read side of i915_wedged). */
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}
4073
/*
 * Manually inject a GPU hang from debugfs.  @val is interpreted as an
 * engine mask: each selected engine has its hangcheck state marked as
 * stalled before i915_handle_error() is invoked, then we block until
 * the reset handoff completes.  Returns -EAGAIN while a reset is
 * already pending.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Pretend each selected engine has already stalled at its seqno. */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Don't return until the reset worker has picked up the error. */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
4110
/*
 * Common setter for the fault-injection irq masks (missed_irq_rings /
 * test_irq_rings).  Waits for the GPU to idle under struct_mutex before
 * storing @val into @irq, then flushes the idle worker so any armed
 * fake-irq timers are disarmed.  Returns 0 or a -errno from locking or
 * the idle wait.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4140
/* Read back the mask of rings flagged as having missed interrupts. */
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

/* Set the missed-irq ring mask once the GPU is idle (see fault_irq_set). */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
4161
/* Read back the mask of rings with interrupt delivery suppressed for test. */
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

/*
 * Set the test-irq ring mask.  The mask is clamped to the rings that
 * actually exist on this device before being applied via fault_irq_set().
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4186
/* Bit flags accepted by the i915_gem_drop_caches debugfs file. */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
/* Reading the file reports the full set of supported flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4208
/*
 * Drop GEM caches as selected by the DROP_* bits in @val, in a specific
 * order: idle/retire under struct_mutex first, then the shrinker passes
 * under fs_reclaim tracking, then the idle worker drain and freed-object
 * drain.  NOTE(review): a failed DROP_ACTIVE wait does not abort the
 * remaining steps; ret is only returned at the end — presumably
 * intentional best-effort behaviour, verify before changing.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	/* Mark the shrinker passes as reclaim context for lockdep. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4260
/*
 * Read the MBC snoop-control field (gen6/gen7 only).  Takes a runtime-pm
 * wakeref around the MMIO read and extracts the SNPCR field from
 * GEN6_MBCUNIT_SNPCR.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
4280
/*
 * Write the MBC snoop-control field (gen6/gen7 only).  @val must be in
 * [0, 3]; the SNPCR field is read-modify-written under a runtime-pm
 * wakeref.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4309
/*
 * Fill @sseu with the live slice/subslice/EU power state on Cherryview
 * by decoding the CHV_POWER_SS{0,1}_SIG{1,2} registers.  CHV has a
 * single slice with up to two subslices; each subslice contributes 2 EUs
 * per EU-group bit that is not power-gated.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4342
/*
 * Fill @sseu with the live slice/subslice/EU power state on gen10+ by
 * decoding the per-slice PGCTL ACK registers.  EU counts are derived
 * from the hweight of the acknowledged EU bits, two EUs per ack bit.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserverd
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* eu_mask[0] covers subslices 0/2, eu_mask[1] covers subslices 1/3. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Two EUs per acknowledged EU-pair bit. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4398
/*
 * Fill @sseu with the live slice/subslice/EU power state on gen9,
 * decoding the GEN9_*_PGCTL_ACK registers.  On gen9 big-core the
 * subslice mask is taken from static device info; on gen9 LP it is
 * reconstructed from the per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* eu_mask[0] covers subslices 0/2, eu_mask[1] covers subslices 1/3. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Two EUs per acknowledged EU-pair bit. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4454
/*
 * Fill @sseu with the live slice state on Broadwell.  Only the slice
 * mask is read from hardware (GEN8_GT_SLICE_INFO); subslice and EU
 * counts come from static device info, with fused-off 7-EU subslices
 * subtracted from the total.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4482
/*
 * Print one sseu_dev_info block to the seq_file.  @is_available_info
 * selects the "Available" (static capabilities) vs "Enabled" (runtime
 * status) label; the power-gating/pooled-EU capability lines are only
 * emitted for the "Available" variant.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4519
/*
 * debugfs show function for i915_sseu_status: prints the static SSEU
 * capabilities followed by the current runtime SSEU state, gathered by
 * the platform-specific *_sseu_device_status() helper under a
 * runtime-pm wakeref.  gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the limits from static info; the helpers fill in the rest. */
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4556
4557 static int i915_forcewake_open(struct inode *inode, struct file *file)
4558 {
4559         struct drm_i915_private *i915 = inode->i_private;
4560
4561         if (INTEL_GEN(i915) < 6)
4562                 return 0;
4563
4564         intel_runtime_pm_get(i915);
4565         intel_uncore_forcewake_user_get(i915);
4566
4567         return 0;
4568 }
4569
4570 static int i915_forcewake_release(struct inode *inode, struct file *file)
4571 {
4572         struct drm_i915_private *i915 = inode->i_private;
4573
4574         if (INTEL_GEN(i915) < 6)
4575                 return 0;
4576
4577         intel_uncore_forcewake_user_put(i915);
4578         intel_runtime_pm_put(i915);
4579
4580         return 0;
4581 }
4582
/* fops for i915_forcewake_user: forcewake is held while the file is open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4588
/*
 * Show the current HPD storm detection threshold and whether a storm
 * is currently detected (inferred from the pending reenable work).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4600
/*
 * Set the HPD storm detection threshold.  Accepts a decimal number
 * (0 disables storm detection) or the literal string "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD.  Per-pin storm counters are cleared
 * under the irq lock so the new threshold starts from a clean slate,
 * and any in-progress storm recovery is flushed so hpd is re-enabled
 * immediately.  Returns @len on success.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Reserve room for the terminating NUL below. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4649
/* Open handler: bind the seq_file show routine to this device's private data. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4654
/*
 * debugfs file ops for "i915_hpd_storm_ctl": read shows the current threshold
 * via seq_file, write updates it (see i915_hpd_storm_ctl_write).
 */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4663
4664 static int i915_drrs_ctl_set(void *data, u64 val)
4665 {
4666         struct drm_i915_private *dev_priv = data;
4667         struct drm_device *dev = &dev_priv->drm;
4668         struct intel_crtc *intel_crtc;
4669         struct intel_encoder *encoder;
4670         struct intel_dp *intel_dp;
4671
4672         if (INTEL_GEN(dev_priv) < 7)
4673                 return -ENODEV;
4674
4675         drm_modeset_lock_all(dev);
4676         for_each_intel_crtc(dev, intel_crtc) {
4677                 if (!intel_crtc->base.state->active ||
4678                                         !intel_crtc->config->has_drrs)
4679                         continue;
4680
4681                 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4682                         if (encoder->type != INTEL_OUTPUT_EDP)
4683                                 continue;
4684
4685                         DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4686                                                 val ? "en" : "dis", val);
4687
4688                         intel_dp = enc_to_intel_dp(&encoder->base);
4689                         if (val)
4690                                 intel_edp_drrs_enable(intel_dp,
4691                                                         intel_crtc->config);
4692                         else
4693                                 intel_edp_drrs_disable(intel_dp,
4694                                                         intel_crtc->config);
4695                 }
4696         }
4697         drm_modeset_unlock_all(dev);
4698
4699         return 0;
4700 }
4701
4702 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4703
/*
 * Re-arm FIFO underrun reporting after an underrun has been reported once.
 *
 * Writing a true boolean value (per kstrtobool) walks every CRTC and, under
 * that CRTC's modeset lock, waits for any in-flight atomic commit to reach
 * both hw_done and flip_done before re-arming underrun reporting on active
 * pipes. Finally FBC underrun reporting is reset as well. Writing a false
 * value is accepted and does nothing. Returns the byte count consumed on
 * success or a negative error code (lock/wait interruption propagates).
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* Only a truthy write triggers the reset; anything else is a no-op. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Interruptible: a signal aborts the whole operation. */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/*
			 * Wait for the pending commit to fully land (hardware
			 * programming and the flip) before touching underrun
			 * reporting for this pipe.
			 */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Drop the lock before propagating any wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4757
/*
 * debugfs file ops for "i915_fifo_underrun_reset": write-only control that
 * re-arms FIFO underrun reporting (see i915_fifo_underrun_reset_write).
 */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4764
/*
 * Read-only seq_file entries registered via drm_debugfs_create_files().
 * Each entry is {name, show callback, driver_features, data}; the optional
 * data pointer is passed through to the callback (used by the guc_log_dump
 * entries to select the error log).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4816
/*
 * Writable debugfs entries, each with its own file_operations; registered
 * one by one in i915_debugfs_register(). Error-state files are compiled in
 * only when error capture support is enabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4847
4848 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4849 {
4850         struct drm_minor *minor = dev_priv->drm.primary;
4851         struct dentry *ent;
4852         int ret, i;
4853
4854         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4855                                   minor->debugfs_root, to_i915(minor->dev),
4856                                   &i915_forcewake_fops);
4857         if (!ent)
4858                 return -ENOMEM;
4859
4860         ret = intel_pipe_crc_create(minor);
4861         if (ret)
4862                 return ret;
4863
4864         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4865                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4866                                           S_IRUGO | S_IWUSR,
4867                                           minor->debugfs_root,
4868                                           to_i915(minor->dev),
4869                                           i915_debugfs_files[i].fops);
4870                 if (!ent)
4871                         return -ENOMEM;
4872         }
4873
4874         return drm_debugfs_create_files(i915_debugfs_list,
4875                                         I915_DEBUGFS_ENTRIES,
4876                                         minor->debugfs_root, minor);
4877 }
4878
/* Describes one contiguous DPCD register range to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4889
/*
 * DPCD ranges dumped by the per-connector "i915_dpcd" debugfs file.
 * Entries without .edp are dumped for both DP and eDP connectors.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4902
4903 static int i915_dpcd_show(struct seq_file *m, void *data)
4904 {
4905         struct drm_connector *connector = m->private;
4906         struct intel_dp *intel_dp =
4907                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4908         uint8_t buf[16];
4909         ssize_t err;
4910         int i;
4911
4912         if (connector->status != connector_status_connected)
4913                 return -ENODEV;
4914
4915         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4916                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4917                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4918
4919                 if (b->edp &&
4920                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4921                         continue;
4922
4923                 /* low tech for now */
4924                 if (WARN_ON(size > sizeof(buf)))
4925                         continue;
4926
4927                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4928                 if (err <= 0) {
4929                         DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4930                                   size, b->offset, err);
4931                         continue;
4932                 }
4933
4934                 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4935         }
4936
4937         return 0;
4938 }
4939 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4940
4941 static int i915_panel_show(struct seq_file *m, void *data)
4942 {
4943         struct drm_connector *connector = m->private;
4944         struct intel_dp *intel_dp =
4945                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4946
4947         if (connector->status != connector_status_connected)
4948                 return -ENODEV;
4949
4950         seq_printf(m, "Panel power up delay: %d\n",
4951                    intel_dp->panel_power_up_delay);
4952         seq_printf(m, "Panel power down delay: %d\n",
4953                    intel_dp->panel_power_down_delay);
4954         seq_printf(m, "Backlight on delay: %d\n",
4955                    intel_dp->backlight_on_delay);
4956         seq_printf(m, "Backlight off delay: %d\n",
4957                    intel_dp->backlight_off_delay);
4958
4959         return 0;
4960 }
4961 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4962
4963 /**
4964  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4965  * @connector: pointer to a registered drm_connector
4966  *
4967  * Cleanup will be done by drm_connector_unregister() through a call to
4968  * drm_debugfs_connector_remove().
4969  *
4970  * Returns 0 on success, negative error codes on error.
4971  */
4972 int i915_debugfs_connector_add(struct drm_connector *connector)
4973 {
4974         struct dentry *root = connector->debugfs_entry;
4975
4976         /* The connector must have been registered beforehands. */
4977         if (!root)
4978                 return -ENODEV;
4979
4980         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4981             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4982                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4983                                     connector, &i915_dpcd_fops);
4984
4985         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4986                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4987                                     connector, &i915_panel_fops);
4988
4989         return 0;
4990 }