Merge tag 'drm-intel-next-2018-06-06' of git://anongit.freedesktop.org/drm/drm-intel...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
/* Resolve a debugfs info node back to the owning i915 device. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
39
/*
 * debugfs entry: dump static device capabilities — generation, platform
 * name, PCH type, device-info flags, runtime info, driver caps — and the
 * current module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
60
/* '*' when the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68         return obj->pin_global ? 'p' : ' ';
69 }
70
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73         switch (i915_gem_object_get_tiling(obj)) {
74         default:
75         case I915_TILING_NONE: return ' ';
76         case I915_TILING_X: return 'X';
77         case I915_TILING_Y: return 'Y';
78         }
79 }
80
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83         return obj->userfault_count ? 'g' : ' ';
84 }
85
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88         return obj->mm.mapping ? 'M' : ' ';
89 }
90
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93         u64 size = 0;
94         struct i915_vma *vma;
95
96         for_each_ggtt_vma(vma, obj) {
97                 if (drm_mm_node_allocated(&vma->node))
98                         size += vma->node.size;
99         }
100
101         return size;
102 }
103
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107         size_t x = 0;
108
109         switch (page_sizes) {
110         case 0:
111                 return "";
112         case I915_GTT_PAGE_SIZE_4K:
113                 return "4K";
114         case I915_GTT_PAGE_SIZE_64K:
115                 return "64K";
116         case I915_GTT_PAGE_SIZE_2M:
117                 return "2M";
118         default:
119                 if (!buf)
120                         return "M";
121
122                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123                         x += snprintf(buf + x, len - x, "2M, ");
124                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125                         x += snprintf(buf + x, len - x, "64K, ");
126                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127                         x += snprintf(buf + x, len - x, "4K, ");
128                 buf[x-2] = '\0';
129
130                 return buf;
131         }
132 }
133
/*
 * Print a one-line summary of a GEM object: status flags, size, read/write
 * domains, cache level, every bound vma (with GGTT view details and fence),
 * stolen offset, last write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below); the vma list and fence
 * state are walked without further locking.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: total pin count, printed before the per-vma details. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that actually holds GTT space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224         const struct drm_i915_gem_object *a =
225                 *(const struct drm_i915_gem_object **)A;
226         const struct drm_i915_gem_object *b =
227                 *(const struct drm_i915_gem_object **)B;
228
229         if (a->stolen->start < b->stolen->start)
230                 return -1;
231         if (a->stolen->start > b->stolen->start)
232                 return 1;
233         return 0;
234 }
235
/*
 * debugfs entry: list every object backed by stolen memory, sorted by
 * stolen offset, followed by object/size totals.
 *
 * Object pointers are snapshotted under mm.obj_lock into a temporary
 * array (sized from a racy READ_ONCE of object_count, hence the
 * "count == total" early exits), then sorted and described at leisure
 * under struct_mutex.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects contribute no GTT size, only object size. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/* Accumulated GEM object statistics for one client (or one category). */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner, used to filter ppgtt vma */
	unsigned long count;   /* number of objects accounted */
	u64 total, unbound;    /* total size; size of objects with no binding */
	u64 global, shared;    /* GGTT-bound size; named/dma-buf-exported size */
	u64 active, inactive;  /* bound size split by GPU activity */
};
306
/*
 * idr_for_each() callback: fold one object (@ptr) into the struct
 * file_stats pointed to by @data. @id is unused. ppgtt vma belonging to
 * a different client than stats->file_priv are skipped.
 *
 * Requires struct_mutex (asserted) for the vma list walk.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppgtt space owned by this client. */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit one summary line for @stats (a struct file_stats, taken by value)
 * tagged with @name; silent when no objects were counted.
 * NOTE: a macro so the struct can be passed directly; @stats is expanded
 * several times, so pass a plain variable, not an expression with side
 * effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
356
357 static void print_batch_pool_stats(struct seq_file *m,
358                                    struct drm_i915_private *dev_priv)
359 {
360         struct drm_i915_gem_object *obj;
361         struct file_stats stats;
362         struct intel_engine_cs *engine;
363         enum intel_engine_id id;
364         int j;
365
366         memset(&stats, 0, sizeof(stats));
367
368         for_each_engine(engine, dev_priv, id) {
369                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370                         list_for_each_entry(obj,
371                                             &engine->batch_pool.cache_list[j],
372                                             batch_pool_link)
373                                 per_file_stats(0, obj, &stats);
374                 }
375         }
376
377         print_file_stats(m, "[k]batch pool", stats);
378 }
379
/*
 * idr_for_each() callback: account the backing objects (context state
 * and ring buffer) of one GEM context (@ptr) for every engine into the
 * struct file_stats pointed to by @data. @idx is unused.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Accumulate statistics over the kernel context plus every client's
 * contexts and print them as a single "[k]contexts" summary line.
 * Takes struct_mutex; caller must hold filelist_mutex for the
 * dev->filelist walk.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * debugfs entry: global GEM memory accounting — unbound/bound/purgeable/
 * mapped/huge/display totals, GGTT size, supported page sizes, batch-pool
 * stats, then a per-client breakdown keyed by process name.
 *
 * Locking: struct_mutex for the object walks and per-file idr scans;
 * mm.obj_lock for the bound/unbound lists; filelist_mutex for the
 * client list; RCU for the task name lookup.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* purgeable/mapped/huge counters deliberately carry over both lists */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560         struct drm_info_node *node = m->private;
561         struct drm_i915_private *dev_priv = node_to_i915(node);
562         struct drm_device *dev = &dev_priv->drm;
563         struct drm_i915_gem_object **objects;
564         struct drm_i915_gem_object *obj;
565         u64 total_obj_size, total_gtt_size;
566         unsigned long nobject, n;
567         int count, ret;
568
569         nobject = READ_ONCE(dev_priv->mm.object_count);
570         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571         if (!objects)
572                 return -ENOMEM;
573
574         ret = mutex_lock_interruptible(&dev->struct_mutex);
575         if (ret)
576                 return ret;
577
578         count = 0;
579         spin_lock(&dev_priv->mm.obj_lock);
580         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581                 objects[count++] = obj;
582                 if (count == nobject)
583                         break;
584         }
585         spin_unlock(&dev_priv->mm.obj_lock);
586
587         total_obj_size = total_gtt_size = 0;
588         for (n = 0;  n < count; n++) {
589                 obj = objects[n];
590
591                 seq_puts(m, "   ");
592                 describe_obj(m, obj);
593                 seq_putc(m, '\n');
594                 total_obj_size += obj->base.size;
595                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596         }
597
598         mutex_unlock(&dev->struct_mutex);
599
600         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601                    count, total_obj_size, total_gtt_size);
602         kvfree(objects);
603
604         return 0;
605 }
606
/*
 * debugfs entry: for every engine and every batch-pool cache bucket,
 * print the object count and then describe each object, finishing with
 * a grand total. Holds struct_mutex throughout (describe_obj requires it).
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: count so the header precedes the list. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
651
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down), then the DE port, DE misc and
 * PCU interrupt registers. Helper shared by the gen8 and gen11 branches of
 * i915_interrupt_info(); caller holds the runtime-pm wakeref.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		/* Reading registers of a powered-down pipe would fault. */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704         struct drm_i915_private *dev_priv = node_to_i915(m->private);
705         struct intel_engine_cs *engine;
706         enum intel_engine_id id;
707         int i, pipe;
708
709         intel_runtime_pm_get(dev_priv);
710
711         if (IS_CHERRYVIEW(dev_priv)) {
712                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713                            I915_READ(GEN8_MASTER_IRQ));
714
715                 seq_printf(m, "Display IER:\t%08x\n",
716                            I915_READ(VLV_IER));
717                 seq_printf(m, "Display IIR:\t%08x\n",
718                            I915_READ(VLV_IIR));
719                 seq_printf(m, "Display IIR_RW:\t%08x\n",
720                            I915_READ(VLV_IIR_RW));
721                 seq_printf(m, "Display IMR:\t%08x\n",
722                            I915_READ(VLV_IMR));
723                 for_each_pipe(dev_priv, pipe) {
724                         enum intel_display_power_domain power_domain;
725
726                         power_domain = POWER_DOMAIN_PIPE(pipe);
727                         if (!intel_display_power_get_if_enabled(dev_priv,
728                                                                 power_domain)) {
729                                 seq_printf(m, "Pipe %c power disabled\n",
730                                            pipe_name(pipe));
731                                 continue;
732                         }
733
734                         seq_printf(m, "Pipe %c stat:\t%08x\n",
735                                    pipe_name(pipe),
736                                    I915_READ(PIPESTAT(pipe)));
737
738                         intel_display_power_put(dev_priv, power_domain);
739                 }
740
741                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742                 seq_printf(m, "Port hotplug:\t%08x\n",
743                            I915_READ(PORT_HOTPLUG_EN));
744                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745                            I915_READ(VLV_DPFLIPSTAT));
746                 seq_printf(m, "DPINVGTT:\t%08x\n",
747                            I915_READ(DPINVGTT));
748                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749
750                 for (i = 0; i < 4; i++) {
751                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752                                    i, I915_READ(GEN8_GT_IMR(i)));
753                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754                                    i, I915_READ(GEN8_GT_IIR(i)));
755                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756                                    i, I915_READ(GEN8_GT_IER(i)));
757                 }
758
759                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760                            I915_READ(GEN8_PCU_IMR));
761                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762                            I915_READ(GEN8_PCU_IIR));
763                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764                            I915_READ(GEN8_PCU_IER));
765         } else if (INTEL_GEN(dev_priv) >= 11) {
766                 seq_printf(m, "Master Interrupt Control:  %08x\n",
767                            I915_READ(GEN11_GFX_MSTR_IRQ));
768
769                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783                            I915_READ(GEN11_DISPLAY_INT_CTL));
784
785                 gen8_display_interrupt_info(m);
786         } else if (INTEL_GEN(dev_priv) >= 8) {
787                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788                            I915_READ(GEN8_MASTER_IRQ));
789
790                 for (i = 0; i < 4; i++) {
791                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792                                    i, I915_READ(GEN8_GT_IMR(i)));
793                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794                                    i, I915_READ(GEN8_GT_IIR(i)));
795                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796                                    i, I915_READ(GEN8_GT_IER(i)));
797                 }
798
799                 gen8_display_interrupt_info(m);
800         } else if (IS_VALLEYVIEW(dev_priv)) {
801                 seq_printf(m, "Display IER:\t%08x\n",
802                            I915_READ(VLV_IER));
803                 seq_printf(m, "Display IIR:\t%08x\n",
804                            I915_READ(VLV_IIR));
805                 seq_printf(m, "Display IIR_RW:\t%08x\n",
806                            I915_READ(VLV_IIR_RW));
807                 seq_printf(m, "Display IMR:\t%08x\n",
808                            I915_READ(VLV_IMR));
809                 for_each_pipe(dev_priv, pipe) {
810                         enum intel_display_power_domain power_domain;
811
812                         power_domain = POWER_DOMAIN_PIPE(pipe);
813                         if (!intel_display_power_get_if_enabled(dev_priv,
814                                                                 power_domain)) {
815                                 seq_printf(m, "Pipe %c power disabled\n",
816                                            pipe_name(pipe));
817                                 continue;
818                         }
819
820                         seq_printf(m, "Pipe %c stat:\t%08x\n",
821                                    pipe_name(pipe),
822                                    I915_READ(PIPESTAT(pipe)));
823                         intel_display_power_put(dev_priv, power_domain);
824                 }
825
826                 seq_printf(m, "Master IER:\t%08x\n",
827                            I915_READ(VLV_MASTER_IER));
828
829                 seq_printf(m, "Render IER:\t%08x\n",
830                            I915_READ(GTIER));
831                 seq_printf(m, "Render IIR:\t%08x\n",
832                            I915_READ(GTIIR));
833                 seq_printf(m, "Render IMR:\t%08x\n",
834                            I915_READ(GTIMR));
835
836                 seq_printf(m, "PM IER:\t\t%08x\n",
837                            I915_READ(GEN6_PMIER));
838                 seq_printf(m, "PM IIR:\t\t%08x\n",
839                            I915_READ(GEN6_PMIIR));
840                 seq_printf(m, "PM IMR:\t\t%08x\n",
841                            I915_READ(GEN6_PMIMR));
842
843                 seq_printf(m, "Port hotplug:\t%08x\n",
844                            I915_READ(PORT_HOTPLUG_EN));
845                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846                            I915_READ(VLV_DPFLIPSTAT));
847                 seq_printf(m, "DPINVGTT:\t%08x\n",
848                            I915_READ(DPINVGTT));
849
850         } else if (!HAS_PCH_SPLIT(dev_priv)) {
851                 seq_printf(m, "Interrupt enable:    %08x\n",
852                            I915_READ(IER));
853                 seq_printf(m, "Interrupt identity:  %08x\n",
854                            I915_READ(IIR));
855                 seq_printf(m, "Interrupt mask:      %08x\n",
856                            I915_READ(IMR));
857                 for_each_pipe(dev_priv, pipe)
858                         seq_printf(m, "Pipe %c stat:         %08x\n",
859                                    pipe_name(pipe),
860                                    I915_READ(PIPESTAT(pipe)));
861         } else {
862                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
863                            I915_READ(DEIER));
864                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
865                            I915_READ(DEIIR));
866                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
867                            I915_READ(DEIMR));
868                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
869                            I915_READ(SDEIER));
870                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
871                            I915_READ(SDEIIR));
872                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
873                            I915_READ(SDEIMR));
874                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
875                            I915_READ(GTIER));
876                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
877                            I915_READ(GTIIR));
878                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
879                            I915_READ(GTIMR));
880         }
881
882         if (INTEL_GEN(dev_priv) >= 11) {
883                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894                            I915_READ(GEN11_GUC_SG_INTR_MASK));
895                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902         } else if (INTEL_GEN(dev_priv) >= 6) {
903                 for_each_engine(engine, dev_priv, id) {
904                         seq_printf(m,
905                                    "Graphics Interrupt mask (%s):       %08x\n",
906                                    engine->name, I915_READ_IMR(engine));
907                 }
908         }
909
910         intel_runtime_pm_put(dev_priv);
911
912         return 0;
913 }
914
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917         struct drm_i915_private *dev_priv = node_to_i915(m->private);
918         struct drm_device *dev = &dev_priv->drm;
919         int i, ret;
920
921         ret = mutex_lock_interruptible(&dev->struct_mutex);
922         if (ret)
923                 return ret;
924
925         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926         for (i = 0; i < dev_priv->num_fence_regs; i++) {
927                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928
929                 seq_printf(m, "Fence %d, pin count = %d, object = ",
930                            i, dev_priv->fence_regs[i].pin_count);
931                 if (!vma)
932                         seq_puts(m, "unused");
933                 else
934                         describe_obj(m, vma->obj);
935                 seq_putc(m, '\n');
936         }
937
938         mutex_unlock(&dev->struct_mutex);
939         return 0;
940 }
941
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * Read handler shared by the error-state and gpu-info debugfs files.
 * Formats the captured GPU state into a temporary string buffer and copies
 * the requested window of it to userspace.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	/* No capture attached at open time -> report EOF. */
	if (!error)
		return 0;

	/* Buffer is windowed: it starts formatting at *pos for count bytes. */
	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	/*
	 * str.buf already corresponds to offset str.start of the full text,
	 * so copy from the beginning of the buffer (tmp = 0), not from *pos.
	 */
	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	/* Advance the file position by the bytes actually copied. */
	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
972
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975         i915_gpu_state_put(file->private_data);
976         return 0;
977 }
978
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981         struct drm_i915_private *i915 = inode->i_private;
982         struct i915_gpu_state *gpu;
983
984         intel_runtime_pm_get(i915);
985         gpu = i915_capture_gpu_state(i915);
986         intel_runtime_pm_put(i915);
987         if (!gpu)
988                 return -ENOMEM;
989
990         file->private_data = gpu;
991         return 0;
992 }
993
/*
 * debugfs "i915_gpu_info" file: read-only view of a GPU-state snapshot
 * captured afresh on each open (see i915_gpu_info_open).
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1001
1002 static ssize_t
1003 i915_error_state_write(struct file *filp,
1004                        const char __user *ubuf,
1005                        size_t cnt,
1006                        loff_t *ppos)
1007 {
1008         struct i915_gpu_state *error = filp->private_data;
1009
1010         if (!error)
1011                 return 0;
1012
1013         DRM_DEBUG_DRIVER("Resetting error state\n");
1014         i915_reset_error_state(error->i915);
1015
1016         return cnt;
1017 }
1018
1019 static int i915_error_state_open(struct inode *inode, struct file *file)
1020 {
1021         file->private_data = i915_first_error_state(inode->i_private);
1022         return 0;
1023 }
1024
/*
 * debugfs "i915_error_state" file: read dumps the recorded error capture,
 * any write clears it (see i915_error_state_write).
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1033 #endif
1034
1035 static int
1036 i915_next_seqno_set(void *data, u64 val)
1037 {
1038         struct drm_i915_private *dev_priv = data;
1039         struct drm_device *dev = &dev_priv->drm;
1040         int ret;
1041
1042         ret = mutex_lock_interruptible(&dev->struct_mutex);
1043         if (ret)
1044                 return ret;
1045
1046         intel_runtime_pm_get(dev_priv);
1047         ret = i915_gem_set_global_seqno(dev, val);
1048         intel_runtime_pm_put(dev_priv);
1049
1050         mutex_unlock(&dev->struct_mutex);
1051
1052         return ret;
1053 }
1054
/* Write-only attribute (no getter): accepts "0x%llx" formatted seqno. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
1058
/*
 * Dump the GPU frequency / RPS state.  The layout depends on the platform
 * generation: ILK (gen5) memory-frequency registers, VLV/CHV punit state,
 * or the gen6+ RPS register block.  CD/pixel clock limits are printed for
 * all platforms at the end.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* pcu_lock serializes the punit mailbox access below. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts keep these caps in different registers. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* Requested frequency field moves/widens across generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		/* pm_isr/pm_iir were not read on gen11+ (see above). */
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		/* RPN (lowest) lives in different bits on GEN9_LP parts. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1278
1279 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1280                                struct seq_file *m,
1281                                struct intel_instdone *instdone)
1282 {
1283         int slice;
1284         int subslice;
1285
1286         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1287                    instdone->instdone);
1288
1289         if (INTEL_GEN(dev_priv) <= 3)
1290                 return;
1291
1292         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1293                    instdone->slice_common);
1294
1295         if (INTEL_GEN(dev_priv) <= 6)
1296                 return;
1297
1298         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1299                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1300                            slice, subslice, instdone->sampler[slice][subslice]);
1301
1302         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1303                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1304                            slice, subslice, instdone->row[slice][subslice]);
1305 }
1306
/*
 * Dump the hangcheck state: global reset flags, the hangcheck timer, and
 * per-engine seqno/ACTHD progress compared against the last hangcheck
 * sample, plus the current waiter list for each engine.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/*
	 * Snapshot the hardware state up front, under a runtime-pm wakeref,
	 * so that the (potentially slow) printing below reads a consistent
	 * set of values.
	 */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	/* INSTDONE is only sampled for the render engine (RCS). */
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter tree under its lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1400
1401 static int i915_reset_info(struct seq_file *m, void *unused)
1402 {
1403         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1404         struct i915_gpu_error *error = &dev_priv->gpu_error;
1405         struct intel_engine_cs *engine;
1406         enum intel_engine_id id;
1407
1408         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1409
1410         for_each_engine(engine, dev_priv, id) {
1411                 seq_printf(m, "%s = %u\n", engine->name,
1412                            i915_reset_engine_count(error, engine));
1413         }
1414
1415         return 0;
1416 }
1417
1418 static int ironlake_drpc_info(struct seq_file *m)
1419 {
1420         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1421         u32 rgvmodectl, rstdbyctl;
1422         u16 crstandvid;
1423
1424         rgvmodectl = I915_READ(MEMMODECTL);
1425         rstdbyctl = I915_READ(RSTDBYCTL);
1426         crstandvid = I915_READ16(CRSTANDVID);
1427
1428         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1429         seq_printf(m, "Boost freq: %d\n",
1430                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1431                    MEMMODE_BOOST_FREQ_SHIFT);
1432         seq_printf(m, "HW control enabled: %s\n",
1433                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1434         seq_printf(m, "SW control enabled: %s\n",
1435                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1436         seq_printf(m, "Gated voltage change: %s\n",
1437                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1438         seq_printf(m, "Starting frequency: P%d\n",
1439                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1440         seq_printf(m, "Max P-state: P%d\n",
1441                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1442         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1443         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1444         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1445         seq_printf(m, "Render standby enabled: %s\n",
1446                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1447         seq_puts(m, "Current RS state: ");
1448         switch (rstdbyctl & RSX_STATUS_MASK) {
1449         case RSX_STATUS_ON:
1450                 seq_puts(m, "on\n");
1451                 break;
1452         case RSX_STATUS_RC1:
1453                 seq_puts(m, "RC1\n");
1454                 break;
1455         case RSX_STATUS_RC1E:
1456                 seq_puts(m, "RC1E\n");
1457                 break;
1458         case RSX_STATUS_RS1:
1459                 seq_puts(m, "RS1\n");
1460                 break;
1461         case RSX_STATUS_RS2:
1462                 seq_puts(m, "RS2 (RC6)\n");
1463                 break;
1464         case RSX_STATUS_RS3:
1465                 seq_puts(m, "RC3 (RC6+)\n");
1466                 break;
1467         default:
1468                 seq_puts(m, "unknown\n");
1469                 break;
1470         }
1471
1472         return 0;
1473 }
1474
1475 static int i915_forcewake_domains(struct seq_file *m, void *data)
1476 {
1477         struct drm_i915_private *i915 = node_to_i915(m->private);
1478         struct intel_uncore_forcewake_domain *fw_domain;
1479         unsigned int tmp;
1480
1481         seq_printf(m, "user.bypass_count = %u\n",
1482                    i915->uncore.user_forcewake.count);
1483
1484         for_each_fw_domain(fw_domain, i915, tmp)
1485                 seq_printf(m, "%s.wake_count = %u\n",
1486                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1487                            READ_ONCE(fw_domain->wake_count));
1488
1489         return 0;
1490 }
1491
1492 static void print_rc6_res(struct seq_file *m,
1493                           const char *title,
1494                           const i915_reg_t reg)
1495 {
1496         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1497
1498         seq_printf(m, "%s %u (%llu us)\n",
1499                    title, I915_READ(reg),
1500                    intel_rc6_residency_us(dev_priv, reg));
1501 }
1502
1503 static int vlv_drpc_info(struct seq_file *m)
1504 {
1505         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1506         u32 rcctl1, pw_status;
1507
1508         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1509         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1510
1511         seq_printf(m, "RC6 Enabled: %s\n",
1512                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1513                                         GEN6_RC_CTL_EI_MODE(1))));
1514         seq_printf(m, "Render Power Well: %s\n",
1515                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1516         seq_printf(m, "Media Power Well: %s\n",
1517                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1518
1519         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1520         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1521
1522         return i915_forcewake_domains(m, NULL);
1523 }
1524
/*
 * gen6_drpc_info - dump RC6 / render C-state status for gen6+ platforms.
 *
 * Reads the RC control and GT core status registers, plus the gen9+
 * power-gating registers and the gen6/7 RC6 voltage IDs (fetched from
 * the PCU), and prints a human-readable summary.  Finishes by appending
 * the forcewake domain counts.  Returns 0 or a seq_file error.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/*
	 * Raw (_FW) read bypasses the normal tracing, so emit the trace
	 * event by hand to keep register traces complete.
	 */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs come from the PCU; pcu_lock serialises mailbox use. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC state field sampled above. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Each RC6 level's voltage ID occupies one byte of rc6vids. */
	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1612
1613 static int i915_drpc_info(struct seq_file *m, void *unused)
1614 {
1615         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1616         int err;
1617
1618         intel_runtime_pm_get(dev_priv);
1619
1620         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1621                 err = vlv_drpc_info(m);
1622         else if (INTEL_GEN(dev_priv) >= 6)
1623                 err = gen6_drpc_info(m);
1624         else
1625                 err = ironlake_drpc_info(m);
1626
1627         intel_runtime_pm_put(dev_priv);
1628
1629         return err;
1630 }
1631
1632 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1633 {
1634         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1635
1636         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1637                    dev_priv->fb_tracking.busy_bits);
1638
1639         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1640                    dev_priv->fb_tracking.flip_bits);
1641
1642         return 0;
1643 }
1644
/*
 * i915_fbc_status - report framebuffer compression state.
 *
 * Prints whether FBC is active (or why it is disabled), any pending
 * FBC worker, and — while active — whether the hardware is currently
 * compressing, probing the gen-appropriate status register.
 * Returns 0, or -ENODEV on hardware without FBC.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	/* Keep the device awake and the FBC state stable while we read. */
	intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (fbc->work.scheduled)
		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
			   fbc->work.scheduled_vblank,
			   drm_crtc_vblank_count(&fbc->crtc->base));

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compressed-segment status register moved across gens. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		/* Any non-zero mask means compression activity. */
		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
1689
1690 static int i915_fbc_false_color_get(void *data, u64 *val)
1691 {
1692         struct drm_i915_private *dev_priv = data;
1693
1694         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1695                 return -ENODEV;
1696
1697         *val = dev_priv->fbc.false_color;
1698
1699         return 0;
1700 }
1701
1702 static int i915_fbc_false_color_set(void *data, u64 val)
1703 {
1704         struct drm_i915_private *dev_priv = data;
1705         u32 reg;
1706
1707         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1708                 return -ENODEV;
1709
1710         mutex_lock(&dev_priv->fbc.lock);
1711
1712         reg = I915_READ(ILK_DPFC_CONTROL);
1713         dev_priv->fbc.false_color = val;
1714
1715         I915_WRITE(ILK_DPFC_CONTROL, val ?
1716                    (reg | FBC_CTL_FALSE_COLOR) :
1717                    (reg & ~FBC_CTL_FALSE_COLOR));
1718
1719         mutex_unlock(&dev_priv->fbc.lock);
1720         return 0;
1721 }
1722
/* debugfs file ops: expose the FBC false-color bit as a decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1726
1727 static int i915_ips_status(struct seq_file *m, void *unused)
1728 {
1729         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1730
1731         if (!HAS_IPS(dev_priv))
1732                 return -ENODEV;
1733
1734         intel_runtime_pm_get(dev_priv);
1735
1736         seq_printf(m, "Enabled by kernel parameter: %s\n",
1737                    yesno(i915_modparams.enable_ips));
1738
1739         if (INTEL_GEN(dev_priv) >= 8) {
1740                 seq_puts(m, "Currently: unknown\n");
1741         } else {
1742                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1743                         seq_puts(m, "Currently: enabled\n");
1744                 else
1745                         seq_puts(m, "Currently: disabled\n");
1746         }
1747
1748         intel_runtime_pm_put(dev_priv);
1749
1750         return 0;
1751 }
1752
1753 static int i915_sr_status(struct seq_file *m, void *unused)
1754 {
1755         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1756         bool sr_enabled = false;
1757
1758         intel_runtime_pm_get(dev_priv);
1759         intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1760
1761         if (INTEL_GEN(dev_priv) >= 9)
1762                 /* no global SR status; inspect per-plane WM */;
1763         else if (HAS_PCH_SPLIT(dev_priv))
1764                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1765         else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1766                  IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1767                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1768         else if (IS_I915GM(dev_priv))
1769                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1770         else if (IS_PINEVIEW(dev_priv))
1771                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1772         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1773                 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1774
1775         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1776         intel_runtime_pm_put(dev_priv);
1777
1778         seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1779
1780         return 0;
1781 }
1782
1783 static int i915_emon_status(struct seq_file *m, void *unused)
1784 {
1785         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1786         struct drm_device *dev = &dev_priv->drm;
1787         unsigned long temp, chipset, gfx;
1788         int ret;
1789
1790         if (!IS_GEN5(dev_priv))
1791                 return -ENODEV;
1792
1793         ret = mutex_lock_interruptible(&dev->struct_mutex);
1794         if (ret)
1795                 return ret;
1796
1797         temp = i915_mch_val(dev_priv);
1798         chipset = i915_chipset_val(dev_priv);
1799         gfx = i915_gfx_val(dev_priv);
1800         mutex_unlock(&dev->struct_mutex);
1801
1802         seq_printf(m, "GMCH temp: %ld\n", temp);
1803         seq_printf(m, "Chipset power: %ld\n", chipset);
1804         seq_printf(m, "GFX power: %ld\n", gfx);
1805         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1806
1807         return 0;
1808 }
1809
/*
 * i915_ring_freq_table - print the GPU-to-CPU/ring frequency mapping.
 *
 * For each GPU frequency step, asks the PCU (via pcode) for the
 * matching effective CPU and ring frequencies and prints one table
 * row.  Returns 0, -ENODEV without LLC, or the interrupted-lock error.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	/* The ring/IA mapping only exists on parts with a shared LLC. */
	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* pcu_lock serialises the pcode mailbox reads below. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode takes the GPU freq in and returns packed IA/ring
		 * frequencies in the same dword (bytes 0 and 1). */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* ret is 0 on the success path (set by mutex_lock_interruptible). */
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1857
1858 static int i915_opregion(struct seq_file *m, void *unused)
1859 {
1860         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1861         struct drm_device *dev = &dev_priv->drm;
1862         struct intel_opregion *opregion = &dev_priv->opregion;
1863         int ret;
1864
1865         ret = mutex_lock_interruptible(&dev->struct_mutex);
1866         if (ret)
1867                 goto out;
1868
1869         if (opregion->header)
1870                 seq_write(m, opregion->header, OPREGION_SIZE);
1871
1872         mutex_unlock(&dev->struct_mutex);
1873
1874 out:
1875         return 0;
1876 }
1877
1878 static int i915_vbt(struct seq_file *m, void *unused)
1879 {
1880         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1881
1882         if (opregion->vbt)
1883                 seq_write(m, opregion->vbt, opregion->vbt_size);
1884
1885         return 0;
1886 }
1887
/*
 * i915_gem_framebuffer_info - list every framebuffer and its backing object.
 *
 * Prints the fbdev framebuffer first (when fbdev emulation is built in),
 * then every user-created framebuffer, each with geometry, format and a
 * description of the backing GEM object.  Returns 0 or the error from
 * an interrupted struct_mutex acquisition.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* struct_mutex protects the object state read by describe_obj(). */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the mode_config framebuffer list. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* The fbdev framebuffer was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1937
1938 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1939 {
1940         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1941                    ring->space, ring->head, ring->tail, ring->emit);
1942 }
1943
/*
 * i915_context_status - list every GEM context and its per-engine state.
 *
 * For each context on the device list, prints its hw id, the owning
 * process (or "(deleted)"/"(kernel)"), the remap-slice flag, and for
 * every engine a description of the context image object and ring.
 * Returns 0 or the error from an interrupted struct_mutex acquisition.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* The pid may outlive the task; task can be NULL. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file_priv is poisoned when the owner closed the fd. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if this context still needs an sLLC remap. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1996
1997 static const char *swizzle_string(unsigned swizzle)
1998 {
1999         switch (swizzle) {
2000         case I915_BIT_6_SWIZZLE_NONE:
2001                 return "none";
2002         case I915_BIT_6_SWIZZLE_9:
2003                 return "bit9";
2004         case I915_BIT_6_SWIZZLE_9_10:
2005                 return "bit9/bit10";
2006         case I915_BIT_6_SWIZZLE_9_11:
2007                 return "bit9/bit11";
2008         case I915_BIT_6_SWIZZLE_9_10_11:
2009                 return "bit9/bit10/bit11";
2010         case I915_BIT_6_SWIZZLE_9_17:
2011                 return "bit9/bit17";
2012         case I915_BIT_6_SWIZZLE_9_10_17:
2013                 return "bit9/bit10/bit17";
2014         case I915_BIT_6_SWIZZLE_UNKNOWN:
2015                 return "unknown";
2016         }
2017
2018         return "bug";
2019 }
2020
/*
 * i915_swizzle_info - report bit-6 swizzling configuration.
 *
 * Prints the detected X/Y tiling swizzle modes and the gen-specific
 * DRAM/arbiter registers they were derived from, plus whether the
 * L-shaped-memory quirk was detected.  Always returns 0.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* The registers describing DRAM channel config differ per gen. */
	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2067
2068 static int per_file_ctx(int id, void *ptr, void *data)
2069 {
2070         struct i915_gem_context *ctx = ptr;
2071         struct seq_file *m = data;
2072         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2073
2074         if (!ppgtt) {
2075                 seq_printf(m, "  no ppgtt for context %d\n",
2076                            ctx->user_handle);
2077                 return 0;
2078         }
2079
2080         if (i915_gem_context_is_default(ctx))
2081                 seq_puts(m, "  default context:\n");
2082         else
2083                 seq_printf(m, "  context %d:\n", ctx->user_handle);
2084         ppgtt->debug_dump(ppgtt, m);
2085
2086         return 0;
2087 }
2088
/*
 * gen8_ppgtt_info - dump each engine's page-directory pointers.
 *
 * Only prints anything when an aliasing PPGTT exists.  Each PDP is a
 * 64-bit value split across an upper/lower register pair per engine.
 */
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			/* Assemble the 64-bit PDP from the UDW/LDW pair. */
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
2110
/*
 * gen6_ppgtt_info - dump gen6/7 per-engine PPGTT registers and the
 * aliasing PPGTT, if one exists.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* On gen6 GFX_MODE is global; on gen7 it is per-ring (below). */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2143
/*
 * i915_ppgtt_info - dump PPGTT state globally and per open file.
 *
 * Prints the gen-appropriate global PPGTT registers, then walks every
 * open DRM file and dumps each of its contexts' PPGTTs.  Returns 0 on
 * success, -EINTR/-ERESTARTSYS if the lock was interrupted, or -ESRCH
 * if an owning task has exited.
 *
 * Lock order: filelist_mutex -> struct_mutex (released in reverse).
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/*
		 * NOTE(review): the (void *)(unsigned long) round-trip of
		 * the seq_file pointer looks redundant — per_file_ctx()
		 * just casts it back; plain 'm' would presumably do.
		 */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2185
2186 static int count_irq_waiters(struct drm_i915_private *i915)
2187 {
2188         struct intel_engine_cs *engine;
2189         enum intel_engine_id id;
2190         int count = 0;
2191
2192         for_each_engine(engine, i915, id)
2193                 count += intel_engine_has_waiter(engine);
2194
2195         return count;
2196 }
2197
2198 static const char *rps_power_to_str(unsigned int power)
2199 {
2200         static const char * const strings[] = {
2201                 [LOW_POWER] = "low power",
2202                 [BETWEEN] = "mixed",
2203                 [HIGH_POWER] = "high power",
2204         };
2205
2206         if (power >= ARRAY_SIZE(strings) || !strings[power])
2207                 return "unknown";
2208
2209         return strings[power];
2210 }
2211
/*
 * Summarise RPS (render P-state) frequency scaling: current and limit
 * frequencies, outstanding client boosts, and — while the GPU is busy —
 * a snapshot of the autotuning up/down evaluation-interval counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        struct drm_file *file;

        seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s [%d requests]\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
        seq_printf(m, "Frequency requested %d\n",
                   intel_gpu_freq(dev_priv, rps->cur_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, rps->min_freq),
                   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
                   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
                   intel_gpu_freq(dev_priv, rps->max_freq));
        seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
                   intel_gpu_freq(dev_priv, rps->idle_freq),
                   intel_gpu_freq(dev_priv, rps->efficient_freq),
                   intel_gpu_freq(dev_priv, rps->boost_freq));

        /* Per-client boost counts; filelist_mutex guards dev->filelist. */
        mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;

                /* RCU protects the pid -> task_struct lookup. */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "%s [%d]: %d boosts\n",
                           task ? task->comm : "<unknown>",
                           task ? task->pid : -1,
                           atomic_read(&file_priv->rps_client.boosts));
                rcu_read_unlock();
        }
        seq_printf(m, "Kernel (anonymous) boosts: %d\n",
                   atomic_read(&rps->boosts));
        mutex_unlock(&dev->filelist_mutex);

        /* Only sample autotuning registers while RPS is active and busy. */
        if (INTEL_GEN(dev_priv) >= 6 &&
            rps->enabled &&
            dev_priv->gt.active_requests) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;

                /* _FW reads require an explicit forcewake section. */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
                rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
                rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
                rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
                rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
                           rps_power_to_str(rps->power));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
                           rps->up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
                           rps->down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }

        return 0;
}
2281
2282 static int i915_llc(struct seq_file *m, void *data)
2283 {
2284         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2285         const bool edram = INTEL_GEN(dev_priv) > 8;
2286
2287         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2288         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2289                    intel_uncore_edram_size(dev_priv)/1024/1024);
2290
2291         return 0;
2292 }
2293
/* Dump HuC firmware state plus the live HUC_STATUS2 register. */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_printer p;

        if (!HAS_HUC(dev_priv))
                return -ENODEV;

        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->huc.fw, &p);

        /* The register read requires the device to be awake. */
        intel_runtime_pm_get(dev_priv);
        seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
        intel_runtime_pm_put(dev_priv);

        return 0;
}
2311
/*
 * Dump GuC firmware state plus the live GUC_STATUS register decoded
 * into its bootrom/uKernel/MIA fields, and the 16 scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_printer p;
        u32 tmp, i;

        if (!HAS_GUC(dev_priv))
                return -ENODEV;

        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->guc.fw, &p);

        /* Register reads below need the device awake. */
        intel_runtime_pm_get(dev_priv);

        tmp = I915_READ(GUC_STATUS);

        seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
        seq_printf(m, "\tBootrom status = 0x%x\n",
                (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
        seq_printf(m, "\tuKernel status = 0x%x\n",
                (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
        seq_printf(m, "\tMIA Core status = 0x%x\n",
                (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
        seq_puts(m, "\nScratch registers:\n");
        for (i = 0; i < 16; i++)
                seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

        intel_runtime_pm_put(dev_priv);

        return 0;
}
2343
2344 static const char *
2345 stringify_guc_log_type(enum guc_log_buffer_type type)
2346 {
2347         switch (type) {
2348         case GUC_ISR_LOG_BUFFER:
2349                 return "ISR";
2350         case GUC_DPC_LOG_BUFFER:
2351                 return "DPC";
2352         case GUC_CRASH_DUMP_LOG_BUFFER:
2353                 return "CRASH";
2354         default:
2355                 MISSING_CASE(type);
2356         }
2357
2358         return "";
2359 }
2360
/*
 * Print GuC log relay statistics: relay-full count plus flush and
 * overflow counts per log buffer type. Only meaningful while the log
 * relay is enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
                              struct drm_i915_private *dev_priv)
{
        struct intel_guc_log *log = &dev_priv->guc.log;
        enum guc_log_buffer_type type;

        if (!intel_guc_log_relay_enabled(log)) {
                seq_puts(m, "GuC log relay disabled\n");
                return;
        }

        seq_puts(m, "GuC logging stats:\n");

        seq_printf(m, "\tRelay full count: %u\n",
                   log->relay.full_count);

        for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
                           stringify_guc_log_type(type),
                           log->stats[type].flush,
                           log->stats[type].sampled_overflow);
        }
}
2384
2385 static void i915_guc_client_info(struct seq_file *m,
2386                                  struct drm_i915_private *dev_priv,
2387                                  struct intel_guc_client *client)
2388 {
2389         struct intel_engine_cs *engine;
2390         enum intel_engine_id id;
2391         uint64_t tot = 0;
2392
2393         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2394                 client->priority, client->stage_id, client->proc_desc_offset);
2395         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2396                 client->doorbell_id, client->doorbell_offset);
2397
2398         for_each_engine(engine, dev_priv, id) {
2399                 u64 submissions = client->submissions[id];
2400                 tot += submissions;
2401                 seq_printf(m, "\tSubmissions: %llu %s\n",
2402                                 submissions, engine->name);
2403         }
2404         seq_printf(m, "\tTotal: %llu\n", tot);
2405 }
2406
2407 static int i915_guc_info(struct seq_file *m, void *data)
2408 {
2409         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2410         const struct intel_guc *guc = &dev_priv->guc;
2411
2412         if (!USES_GUC(dev_priv))
2413                 return -ENODEV;
2414
2415         i915_guc_log_info(m, dev_priv);
2416
2417         if (!USES_GUC_SUBMISSION(dev_priv))
2418                 return 0;
2419
2420         GEM_BUG_ON(!guc->execbuf_client);
2421
2422         seq_printf(m, "\nDoorbell map:\n");
2423         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2424         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2425
2426         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2427         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2428         if (guc->preempt_client) {
2429                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2430                            guc->preempt_client);
2431                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2432         }
2433
2434         /* Add more as required ... */
2435
2436         return 0;
2437 }
2438
/*
 * Walk the GuC stage descriptor pool and print every active
 * descriptor, including the per-engine execlist context state for the
 * engines used by the execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_guc *guc = &dev_priv->guc;
        struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
        struct intel_guc_client *client = guc->execbuf_client;
        unsigned int tmp;
        int index;

        if (!USES_GUC_SUBMISSION(dev_priv))
                return -ENODEV;

        for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
                struct intel_engine_cs *engine;

                /* Skip descriptors that are not currently in use. */
                if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
                        continue;

                seq_printf(m, "GuC stage descriptor %u:\n", index);
                seq_printf(m, "\tIndex: %u\n", desc->stage_id);
                seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
                seq_printf(m, "\tPriority: %d\n", desc->priority);
                seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
                seq_printf(m, "\tEngines used: 0x%x\n",
                           desc->engines_used);
                seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
                           desc->db_trigger_phy,
                           desc->db_trigger_cpu,
                           desc->db_trigger_uk);
                seq_printf(m, "\tProcess descriptor: 0x%x\n",
                           desc->process_desc);
                seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
                           desc->wq_addr, desc->wq_size);
                seq_putc(m, '\n');

                /* Dump the execlist context for each engine this client uses. */
                for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
                        u32 guc_engine_id = engine->guc_id;
                        struct guc_execlist_context *lrc =
                                                &desc->lrc[guc_engine_id];

                        seq_printf(m, "\t%s LRC:\n", engine->name);
                        seq_printf(m, "\t\tContext desc: 0x%x\n",
                                   lrc->context_desc);
                        seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
                        seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
                        seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
                        seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
                        seq_putc(m, '\n');
                }
        }

        return 0;
}
2492
2493 static int i915_guc_log_dump(struct seq_file *m, void *data)
2494 {
2495         struct drm_info_node *node = m->private;
2496         struct drm_i915_private *dev_priv = node_to_i915(node);
2497         bool dump_load_err = !!node->info_ent->data;
2498         struct drm_i915_gem_object *obj = NULL;
2499         u32 *log;
2500         int i = 0;
2501
2502         if (!HAS_GUC(dev_priv))
2503                 return -ENODEV;
2504
2505         if (dump_load_err)
2506                 obj = dev_priv->guc.load_err_log;
2507         else if (dev_priv->guc.log.vma)
2508                 obj = dev_priv->guc.log.vma->obj;
2509
2510         if (!obj)
2511                 return 0;
2512
2513         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2514         if (IS_ERR(log)) {
2515                 DRM_DEBUG("Failed to pin object\n");
2516                 seq_puts(m, "(log data unaccessible)\n");
2517                 return PTR_ERR(log);
2518         }
2519
2520         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2521                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2522                            *(log + i), *(log + i + 1),
2523                            *(log + i + 2), *(log + i + 3));
2524
2525         seq_putc(m, '\n');
2526
2527         i915_gem_object_unpin_map(obj);
2528
2529         return 0;
2530 }
2531
2532 static int i915_guc_log_level_get(void *data, u64 *val)
2533 {
2534         struct drm_i915_private *dev_priv = data;
2535
2536         if (!USES_GUC(dev_priv))
2537                 return -ENODEV;
2538
2539         *val = intel_guc_log_level_get(&dev_priv->guc.log);
2540
2541         return 0;
2542 }
2543
2544 static int i915_guc_log_level_set(void *data, u64 val)
2545 {
2546         struct drm_i915_private *dev_priv = data;
2547
2548         if (!USES_GUC(dev_priv))
2549                 return -ENODEV;
2550
2551         return intel_guc_log_level_set(&dev_priv->guc.log, val);
2552 }
2553
/* i915_guc_log_level: read/write the GuC log verbosity as a decimal. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
                        i915_guc_log_level_get, i915_guc_log_level_set,
                        "%lld\n");
2557
2558 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2559 {
2560         struct drm_i915_private *dev_priv = inode->i_private;
2561
2562         if (!USES_GUC(dev_priv))
2563                 return -ENODEV;
2564
2565         file->private_data = &dev_priv->guc.log;
2566
2567         return intel_guc_log_relay_open(&dev_priv->guc.log);
2568 }
2569
2570 static ssize_t
2571 i915_guc_log_relay_write(struct file *filp,
2572                          const char __user *ubuf,
2573                          size_t cnt,
2574                          loff_t *ppos)
2575 {
2576         struct intel_guc_log *log = filp->private_data;
2577
2578         intel_guc_log_relay_flush(log);
2579
2580         return cnt;
2581 }
2582
2583 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2584 {
2585         struct drm_i915_private *dev_priv = inode->i_private;
2586
2587         intel_guc_log_relay_close(&dev_priv->guc.log);
2588
2589         return 0;
2590 }
2591
/* File operations for the GuC log relay debugfs node. */
static const struct file_operations i915_guc_log_relay_fops = {
        .owner = THIS_MODULE,
        .open = i915_guc_log_relay_open,
        .write = i915_guc_log_relay_write,
        .release = i915_guc_log_relay_release,
};
2598
2599 static const char *psr2_live_status(u32 val)
2600 {
2601         static const char * const live_status[] = {
2602                 "IDLE",
2603                 "CAPTURE",
2604                 "CAPTURE_FS",
2605                 "SLEEP",
2606                 "BUFON_FW",
2607                 "ML_UP",
2608                 "SU_STANDBY",
2609                 "FAST_SLEEP",
2610                 "DEEP_SLEEP",
2611                 "BUF_ON",
2612                 "TG_ON"
2613         };
2614
2615         val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2616         if (val < ARRAY_SIZE(live_status))
2617                 return live_status[val];
2618
2619         return "unknown";
2620 }
2621
2622 static const char *psr_sink_status(u8 val)
2623 {
2624         static const char * const sink_status[] = {
2625                 "inactive",
2626                 "transition to active, capture and display",
2627                 "active, display from RFB",
2628                 "active, capture and display on sink device timings",
2629                 "transition to inactive, capture and display, timing re-sync",
2630                 "reserved",
2631                 "reserved",
2632                 "sink internal error"
2633         };
2634
2635         val &= DP_PSR_SINK_STATE_MASK;
2636         if (val < ARRAY_SIZE(sink_status))
2637                 return sink_status[val];
2638
2639         return "unknown";
2640 }
2641
/*
 * Report eDP PSR (Panel Self Refresh) state: sink support, software
 * enable state, busy frontbuffer bits, the live hardware status and,
 * when available, the sink's DPCD-reported status.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 psrperf = 0;
        bool enabled = false;
        bool sink_support;

        if (!HAS_PSR(dev_priv))
                return -ENODEV;

        sink_support = dev_priv->psr.sink_support;
        seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
        if (!sink_support)
                return 0;

        intel_runtime_pm_get(dev_priv);

        /* psr.lock guards the software PSR state read below. */
        mutex_lock(&dev_priv->psr.lock);
        seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
        seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
                   dev_priv->psr.busy_frontbuffer_bits);
        seq_printf(m, "Re-enable work scheduled: %s\n",
                   yesno(work_busy(&dev_priv->psr.work.work)));

        /* PSR1 and PSR2 have separate control registers. */
        if (dev_priv->psr.psr2_enabled)
                enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
        else
                enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

        seq_printf(m, "Main link in standby mode: %s\n",
                   yesno(dev_priv->psr.link_standby));

        seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

        /*
         * SKL+ Perf counter is reset to 0 everytime DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                psrperf = I915_READ(EDP_PSR_PERF_CNT) &
                        EDP_PSR_PERF_CNT_MASK;

                seq_printf(m, "Performance_Counter: %u\n", psrperf);
        }
        if (dev_priv->psr.psr2_enabled) {
                u32 psr2 = I915_READ(EDP_PSR2_STATUS);

                seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
                           psr2, psr2_live_status(psr2));
        }

        if (dev_priv->psr.enabled) {
                struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
                u8 val;

                /* Query the sink's own view of PSR state over DPCD. */
                if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
                        seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
                                   psr_sink_status(val));
        }
        mutex_unlock(&dev_priv->psr.lock);

        /* Entry/exit timestamps are only tracked while PSR debug is on. */
        if (READ_ONCE(dev_priv->psr.debug)) {
                seq_printf(m, "Last attempted entry at: %lld\n",
                           dev_priv->psr.last_entry_attempt);
                seq_printf(m, "Last exit at: %lld\n",
                           dev_priv->psr.last_exit);
        }

        intel_runtime_pm_put(dev_priv);
        return 0;
}
2712
2713 static int
2714 i915_edp_psr_debug_set(void *data, u64 val)
2715 {
2716         struct drm_i915_private *dev_priv = data;
2717
2718         if (!CAN_PSR(dev_priv))
2719                 return -ENODEV;
2720
2721         DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2722
2723         intel_runtime_pm_get(dev_priv);
2724         intel_psr_irq_control(dev_priv, !!val);
2725         intel_runtime_pm_put(dev_priv);
2726
2727         return 0;
2728 }
2729
2730 static int
2731 i915_edp_psr_debug_get(void *data, u64 *val)
2732 {
2733         struct drm_i915_private *dev_priv = data;
2734
2735         if (!CAN_PSR(dev_priv))
2736                 return -ENODEV;
2737
2738         *val = READ_ONCE(dev_priv->psr.debug);
2739         return 0;
2740 }
2741
/* i915_edp_psr_debug: toggle PSR debug interrupt reporting. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
                        i915_edp_psr_debug_get, i915_edp_psr_debug_set,
                        "%llu\n");
2745
/*
 * Read the sink CRC from the first active eDP connector. Modeset
 * locks are taken through an acquire context; on -EDEADLK the whole
 * attempt is backed off and retried.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp = NULL;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        u8 crc[6];

        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

        drm_connector_list_iter_begin(dev, &conn_iter);

        for_each_intel_connector_iter(connector, &conn_iter) {
                struct drm_crtc *crtc;
                struct drm_connector_state *state;
                struct intel_crtc_state *crtc_state;

                /* Sink CRC is only read over eDP here. */
                if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
                        continue;

retry:
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
                if (ret)
                        goto err;

                /* Skip connectors with no encoder bound or inactive CRTC;
                 * locks already taken are released via the ctx at "out". */
                state = connector->base.state;
                if (!state->best_encoder)
                        continue;

                crtc = state->crtc;
                ret = drm_modeset_lock(&crtc->mutex, &ctx);
                if (ret)
                        goto err;

                crtc_state = to_intel_crtc_state(crtc->state);
                if (!crtc_state->base.active)
                        continue;

                /*
                 * We need to wait for all crtc updates to complete, to make
                 * sure any pending modesets and plane updates are completed.
                 */
                if (crtc_state->base.commit) {
                        ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

                        if (ret)
                                goto err;
                }

                intel_dp = enc_to_intel_dp(state->best_encoder);

                ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
                if (ret)
                        goto err;

                seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
                           crc[0], crc[1], crc[2],
                           crc[3], crc[4], crc[5]);
                goto out;

err:
                /* -EDEADLK signals a lock-order conflict: back off, retry. */
                if (ret == -EDEADLK) {
                        ret = drm_modeset_backoff(&ctx);
                        if (!ret)
                                goto retry;
                }
                goto out;
        }
        /* Loop completed without finding an active eDP connector. */
        ret = -ENODEV;
out:
        drm_connector_list_iter_end(&conn_iter);
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
2825
/*
 * Report accumulated GPU energy consumption in microjoules, derived
 * from the MCH energy-status counter scaled by the RAPL energy-unit
 * exponent in MSR_RAPL_POWER_UNIT.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        unsigned long long power;
        u32 units;

        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;

        intel_runtime_pm_get(dev_priv);

        /* rdmsrl_safe fails gracefully where the MSR does not exist. */
        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
                intel_runtime_pm_put(dev_priv);
                return -ENODEV;
        }

        /* Bits 12:8 of the RAPL power-unit MSR: energy unit = 1/2^units J. */
        units = (power & 0x1f00) >> 8;
        power = I915_READ(MCH_SECP_NRG_STTS);
        power = (1000000 * power) >> units; /* convert to uJ */

        intel_runtime_pm_put(dev_priv);

        seq_printf(m, "%llu", power);

        return 0;
}
2852
2853 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2854 {
2855         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2856         struct pci_dev *pdev = dev_priv->drm.pdev;
2857
2858         if (!HAS_RUNTIME_PM(dev_priv))
2859                 seq_puts(m, "Runtime power management not supported\n");
2860
2861         seq_printf(m, "GPU idle: %s (epoch %u)\n",
2862                    yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2863         seq_printf(m, "IRQs disabled: %s\n",
2864                    yesno(!intel_irqs_enabled(dev_priv)));
2865 #ifdef CONFIG_PM
2866         seq_printf(m, "Usage count: %d\n",
2867                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2868 #else
2869         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2870 #endif
2871         seq_printf(m, "PCI device power state: %s [%d]\n",
2872                    pci_power_name(pdev->current_state),
2873                    pdev->current_state);
2874
2875         return 0;
2876 }
2877
2878 static int i915_power_domain_info(struct seq_file *m, void *unused)
2879 {
2880         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2881         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2882         int i;
2883
2884         mutex_lock(&power_domains->lock);
2885
2886         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2887         for (i = 0; i < power_domains->power_well_count; i++) {
2888                 struct i915_power_well *power_well;
2889                 enum intel_display_power_domain power_domain;
2890
2891                 power_well = &power_domains->power_wells[i];
2892                 seq_printf(m, "%-25s %d\n", power_well->name,
2893                            power_well->count);
2894
2895                 for_each_power_domain(power_domain, power_well->domains)
2896                         seq_printf(m, "  %-23s %d\n",
2897                                  intel_display_power_domain_str(power_domain),
2898                                  power_domains->domain_use_count[power_domain]);
2899         }
2900
2901         mutex_unlock(&power_domains->lock);
2902
2903         return 0;
2904 }
2905
/*
 * Report DMC (CSR) firmware state: load status, path, version, the
 * DC-state transition counters on platforms that expose them, and the
 * raw program/SSP/HTP registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_csr *csr;

        if (!HAS_CSR(dev_priv))
                return -ENODEV;

        csr = &dev_priv->csr;

        intel_runtime_pm_get(dev_priv);

        seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
        seq_printf(m, "path: %s\n", csr->fw_path);

        /* Without a loaded payload, only the raw registers are printed. */
        if (!csr->dmc_payload)
                goto out;

        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));

        /* DC counters exist only on KBL, SKL (fw >= 1.6) and BXT (fw >= 1.4). */
        if (IS_KABYLAKE(dev_priv) ||
            (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(SKL_CSR_DC3_DC5_COUNT));
                seq_printf(m, "DC5 -> DC6 count: %d\n",
                           I915_READ(SKL_CSR_DC5_DC6_COUNT));
        } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(BXT_CSR_DC3_DC5_COUNT));
        }

out:
        seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
        seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
        seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

        intel_runtime_pm_put(dev_priv);

        return 0;
}
2947
2948 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2949                                  struct drm_display_mode *mode)
2950 {
2951         int i;
2952
2953         for (i = 0; i < tabs; i++)
2954                 seq_putc(m, '\t');
2955
2956         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2957                    mode->base.id, mode->name,
2958                    mode->vrefresh, mode->clock,
2959                    mode->hdisplay, mode->hsync_start,
2960                    mode->hsync_end, mode->htotal,
2961                    mode->vdisplay, mode->vsync_start,
2962                    mode->vsync_end, mode->vtotal,
2963                    mode->type, mode->flags);
2964 }
2965
2966 static void intel_encoder_info(struct seq_file *m,
2967                                struct intel_crtc *intel_crtc,
2968                                struct intel_encoder *intel_encoder)
2969 {
2970         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2971         struct drm_device *dev = &dev_priv->drm;
2972         struct drm_crtc *crtc = &intel_crtc->base;
2973         struct intel_connector *intel_connector;
2974         struct drm_encoder *encoder;
2975
2976         encoder = &intel_encoder->base;
2977         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2978                    encoder->base.id, encoder->name);
2979         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2980                 struct drm_connector *connector = &intel_connector->base;
2981                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2982                            connector->base.id,
2983                            connector->name,
2984                            drm_get_connector_status_name(connector->status));
2985                 if (connector->status == connector_status_connected) {
2986                         struct drm_display_mode *mode = &crtc->mode;
2987                         seq_printf(m, ", mode:\n");
2988                         intel_seq_print_mode(m, 2, mode);
2989                 } else {
2990                         seq_putc(m, '\n');
2991                 }
2992         }
2993 }
2994
2995 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2996 {
2997         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2998         struct drm_device *dev = &dev_priv->drm;
2999         struct drm_crtc *crtc = &intel_crtc->base;
3000         struct intel_encoder *intel_encoder;
3001         struct drm_plane_state *plane_state = crtc->primary->state;
3002         struct drm_framebuffer *fb = plane_state->fb;
3003
3004         if (fb)
3005                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
3006                            fb->base.id, plane_state->src_x >> 16,
3007                            plane_state->src_y >> 16, fb->width, fb->height);
3008         else
3009                 seq_puts(m, "\tprimary plane disabled\n");
3010         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3011                 intel_encoder_info(m, intel_crtc, intel_encoder);
3012 }
3013
3014 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3015 {
3016         struct drm_display_mode *mode = panel->fixed_mode;
3017
3018         seq_printf(m, "\tfixed mode:\n");
3019         intel_seq_print_mode(m, 2, mode);
3020 }
3021
3022 static void intel_dp_info(struct seq_file *m,
3023                           struct intel_connector *intel_connector)
3024 {
3025         struct intel_encoder *intel_encoder = intel_connector->encoder;
3026         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3027
3028         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3029         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3030         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3031                 intel_panel_info(m, &intel_connector->panel);
3032
3033         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3034                                 &intel_dp->aux);
3035 }
3036
3037 static void intel_dp_mst_info(struct seq_file *m,
3038                           struct intel_connector *intel_connector)
3039 {
3040         struct intel_encoder *intel_encoder = intel_connector->encoder;
3041         struct intel_dp_mst_encoder *intel_mst =
3042                 enc_to_mst(&intel_encoder->base);
3043         struct intel_digital_port *intel_dig_port = intel_mst->primary;
3044         struct intel_dp *intel_dp = &intel_dig_port->dp;
3045         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3046                                         intel_connector->port);
3047
3048         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3049 }
3050
3051 static void intel_hdmi_info(struct seq_file *m,
3052                             struct intel_connector *intel_connector)
3053 {
3054         struct intel_encoder *intel_encoder = intel_connector->encoder;
3055         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3056
3057         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3058 }
3059
/* LVDS connectors only have a fixed panel mode to report. */
static void intel_lvds_info(struct seq_file *m,
                            struct intel_connector *intel_connector)
{
        intel_panel_info(m, &intel_connector->panel);
}
3065
3066 static void intel_connector_info(struct seq_file *m,
3067                                  struct drm_connector *connector)
3068 {
3069         struct intel_connector *intel_connector = to_intel_connector(connector);
3070         struct intel_encoder *intel_encoder = intel_connector->encoder;
3071         struct drm_display_mode *mode;
3072
3073         seq_printf(m, "connector %d: type %s, status: %s\n",
3074                    connector->base.id, connector->name,
3075                    drm_get_connector_status_name(connector->status));
3076         if (connector->status == connector_status_connected) {
3077                 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3078                 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3079                            connector->display_info.width_mm,
3080                            connector->display_info.height_mm);
3081                 seq_printf(m, "\tsubpixel order: %s\n",
3082                            drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3083                 seq_printf(m, "\tCEA rev: %d\n",
3084                            connector->display_info.cea_rev);
3085         }
3086
3087         if (!intel_encoder)
3088                 return;
3089
3090         switch (connector->connector_type) {
3091         case DRM_MODE_CONNECTOR_DisplayPort:
3092         case DRM_MODE_CONNECTOR_eDP:
3093                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3094                         intel_dp_mst_info(m, intel_connector);
3095                 else
3096                         intel_dp_info(m, intel_connector);
3097                 break;
3098         case DRM_MODE_CONNECTOR_LVDS:
3099                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3100                         intel_lvds_info(m, intel_connector);
3101                 break;
3102         case DRM_MODE_CONNECTOR_HDMIA:
3103                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3104                     intel_encoder->type == INTEL_OUTPUT_DDI)
3105                         intel_hdmi_info(m, intel_connector);
3106                 break;
3107         default:
3108                 break;
3109         }
3110
3111         seq_printf(m, "\tmodes:\n");
3112         list_for_each_entry(mode, &connector->modes, head)
3113                 intel_seq_print_mode(m, 2, mode);
3114 }
3115
/* Map a drm_plane_type to the short tag used in the plane dump below. */
static const char *plane_type(enum drm_plane_type type)
{
        switch (type) {
        case DRM_PLANE_TYPE_OVERLAY:
                return "OVL";
        case DRM_PLANE_TYPE_PRIMARY:
                return "PRI";
        case DRM_PLANE_TYPE_CURSOR:
                return "CUR";
        /*
         * Deliberately omitting default: to generate compiler warnings
         * when a new drm_plane_type gets added.
         */
        }

        return "unknown";
}
3133
/*
 * Format a plane rotation bitmask as a human-readable string.
 *
 * NOTE(review): the result lives in a single static buffer, so the
 * function is not reentrant and the returned string is overwritten by
 * the next call — acceptable for this single-threaded debugfs dump,
 * but callers must consume the string immediately.
 */
static const char *plane_rotation(unsigned int rotation)
{
        static char buf[48];
        /*
         * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
        snprintf(buf, sizeof(buf),
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
                 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
                 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);

        return buf;
}
3153
3154 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3155 {
3156         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3157         struct drm_device *dev = &dev_priv->drm;
3158         struct intel_plane *intel_plane;
3159
3160         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3161                 struct drm_plane_state *state;
3162                 struct drm_plane *plane = &intel_plane->base;
3163                 struct drm_format_name_buf format_name;
3164
3165                 if (!plane->state) {
3166                         seq_puts(m, "plane->state is NULL!\n");
3167                         continue;
3168                 }
3169
3170                 state = plane->state;
3171
3172                 if (state->fb) {
3173                         drm_get_format_name(state->fb->format->format,
3174                                             &format_name);
3175                 } else {
3176                         sprintf(format_name.str, "N/A");
3177                 }
3178
3179                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3180                            plane->base.id,
3181                            plane_type(intel_plane->base.type),
3182                            state->crtc_x, state->crtc_y,
3183                            state->crtc_w, state->crtc_h,
3184                            (state->src_x >> 16),
3185                            ((state->src_x & 0xffff) * 15625) >> 10,
3186                            (state->src_y >> 16),
3187                            ((state->src_y & 0xffff) * 15625) >> 10,
3188                            (state->src_w >> 16),
3189                            ((state->src_w & 0xffff) * 15625) >> 10,
3190                            (state->src_h >> 16),
3191                            ((state->src_h & 0xffff) * 15625) >> 10,
3192                            format_name.str,
3193                            plane_rotation(state->rotation));
3194         }
3195 }
3196
/* Print pipe scaler usage for a CRTC (scaler count, users and per-scaler state). */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct intel_crtc_state *pipe_config;
        int num_scalers = intel_crtc->num_scalers;
        int i;

        pipe_config = to_intel_crtc_state(intel_crtc->base.state);

        /* Not all platforms have a scaler */
        if (num_scalers) {
                seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
                           num_scalers,
                           pipe_config->scaler_state.scaler_users,
                           pipe_config->scaler_state.scaler_id);

                for (i = 0; i < num_scalers; i++) {
                        struct intel_scaler *sc =
                                        &pipe_config->scaler_state.scalers[i];

                        seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
                                   i, yesno(sc->in_use), sc->mode);
                }
                seq_puts(m, "\n");
        } else {
                seq_puts(m, "\tNo scalers available on this platform\n");
        }
}
3224
3225 static int i915_display_info(struct seq_file *m, void *unused)
3226 {
3227         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3228         struct drm_device *dev = &dev_priv->drm;
3229         struct intel_crtc *crtc;
3230         struct drm_connector *connector;
3231         struct drm_connector_list_iter conn_iter;
3232
3233         intel_runtime_pm_get(dev_priv);
3234         seq_printf(m, "CRTC info\n");
3235         seq_printf(m, "---------\n");
3236         for_each_intel_crtc(dev, crtc) {
3237                 struct intel_crtc_state *pipe_config;
3238
3239                 drm_modeset_lock(&crtc->base.mutex, NULL);
3240                 pipe_config = to_intel_crtc_state(crtc->base.state);
3241
3242                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3243                            crtc->base.base.id, pipe_name(crtc->pipe),
3244                            yesno(pipe_config->base.active),
3245                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3246                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3247
3248                 if (pipe_config->base.active) {
3249                         struct intel_plane *cursor =
3250                                 to_intel_plane(crtc->base.cursor);
3251
3252                         intel_crtc_info(m, crtc);
3253
3254                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3255                                    yesno(cursor->base.state->visible),
3256                                    cursor->base.state->crtc_x,
3257                                    cursor->base.state->crtc_y,
3258                                    cursor->base.state->crtc_w,
3259                                    cursor->base.state->crtc_h,
3260                                    cursor->cursor.base);
3261                         intel_scaler_info(m, crtc);
3262                         intel_plane_info(m, crtc);
3263                 }
3264
3265                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3266                            yesno(!crtc->cpu_fifo_underrun_disabled),
3267                            yesno(!crtc->pch_fifo_underrun_disabled));
3268                 drm_modeset_unlock(&crtc->base.mutex);
3269         }
3270
3271         seq_printf(m, "\n");
3272         seq_printf(m, "Connector info\n");
3273         seq_printf(m, "--------------\n");
3274         mutex_lock(&dev->mode_config.mutex);
3275         drm_connector_list_iter_begin(dev, &conn_iter);
3276         drm_for_each_connector_iter(connector, &conn_iter)
3277                 intel_connector_info(m, connector);
3278         drm_connector_list_iter_end(&conn_iter);
3279         mutex_unlock(&dev->mode_config.mutex);
3280
3281         intel_runtime_pm_put(dev_priv);
3282
3283         return 0;
3284 }
3285
3286 static int i915_engine_info(struct seq_file *m, void *unused)
3287 {
3288         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3289         struct intel_engine_cs *engine;
3290         enum intel_engine_id id;
3291         struct drm_printer p;
3292
3293         intel_runtime_pm_get(dev_priv);
3294
3295         seq_printf(m, "GT awake? %s (epoch %u)\n",
3296                    yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3297         seq_printf(m, "Global active requests: %d\n",
3298                    dev_priv->gt.active_requests);
3299         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3300                    dev_priv->info.cs_timestamp_frequency_khz);
3301
3302         p = drm_seq_file_printer(m);
3303         for_each_engine(engine, dev_priv, id)
3304                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3305
3306         intel_runtime_pm_put(dev_priv);
3307
3308         return 0;
3309 }
3310
3311 static int i915_rcs_topology(struct seq_file *m, void *unused)
3312 {
3313         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3314         struct drm_printer p = drm_seq_file_printer(m);
3315
3316         intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3317
3318         return 0;
3319 }
3320
3321 static int i915_shrinker_info(struct seq_file *m, void *unused)
3322 {
3323         struct drm_i915_private *i915 = node_to_i915(m->private);
3324
3325         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3326         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3327
3328         return 0;
3329 }
3330
3331 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3332 {
3333         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3334         struct drm_device *dev = &dev_priv->drm;
3335         int i;
3336
3337         drm_modeset_lock_all(dev);
3338         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3339                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3340
3341                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3342                            pll->info->id);
3343                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3344                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3345                 seq_printf(m, " tracked hardware state:\n");
3346                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3347                 seq_printf(m, " dpll_md: 0x%08x\n",
3348                            pll->state.hw_state.dpll_md);
3349                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3350                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3351                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3352                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3353                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3354                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3355                            pll->state.hw_state.mg_refclkin_ctl);
3356                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3357                            pll->state.hw_state.mg_clktop2_coreclkctl1);
3358                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3359                            pll->state.hw_state.mg_clktop2_hsclkctl);
3360                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
3361                            pll->state.hw_state.mg_pll_div0);
3362                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
3363                            pll->state.hw_state.mg_pll_div1);
3364                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
3365                            pll->state.hw_state.mg_pll_lf);
3366                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3367                            pll->state.hw_state.mg_pll_frac_lock);
3368                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3369                            pll->state.hw_state.mg_pll_ssc);
3370                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
3371                            pll->state.hw_state.mg_pll_bias);
3372                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3373                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
3374         }
3375         drm_modeset_unlock_all(dev);
3376
3377         return 0;
3378 }
3379
3380 static int i915_wa_registers(struct seq_file *m, void *unused)
3381 {
3382         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3383         struct i915_workarounds *workarounds = &dev_priv->workarounds;
3384         int i;
3385
3386         intel_runtime_pm_get(dev_priv);
3387
3388         seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3389         for (i = 0; i < workarounds->count; ++i) {
3390                 i915_reg_t addr;
3391                 u32 mask, value, read;
3392                 bool ok;
3393
3394                 addr = workarounds->reg[i].addr;
3395                 mask = workarounds->reg[i].mask;
3396                 value = workarounds->reg[i].value;
3397                 read = I915_READ(addr);
3398                 ok = (value & mask) == (read & mask);
3399                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3400                            i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3401         }
3402
3403         intel_runtime_pm_put(dev_priv);
3404
3405         return 0;
3406 }
3407
3408 static int i915_ipc_status_show(struct seq_file *m, void *data)
3409 {
3410         struct drm_i915_private *dev_priv = m->private;
3411
3412         seq_printf(m, "Isochronous Priority Control: %s\n",
3413                         yesno(dev_priv->ipc_enabled));
3414         return 0;
3415 }
3416
3417 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3418 {
3419         struct drm_i915_private *dev_priv = inode->i_private;
3420
3421         if (!HAS_IPC(dev_priv))
3422                 return -ENODEV;
3423
3424         return single_open(file, i915_ipc_status_show, dev_priv);
3425 }
3426
/*
 * debugfs write hook: parse a boolean from userspace and force IPC on or
 * off. Returns the number of bytes consumed or a negative errno on a
 * parse failure.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        int ret;
        bool enable;

        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;

        /* Hold a runtime PM reference across the hardware update. */
        intel_runtime_pm_get(dev_priv);
        if (!dev_priv->ipc_enabled && enable)
                DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
        /* BIOS-programmed watermarks can't be trusted once IPC is toggled. */
        dev_priv->wm.distrust_bios_wm = true;
        dev_priv->ipc_enabled = enable;
        intel_enable_ipc(dev_priv);
        intel_runtime_pm_put(dev_priv);

        return len;
}
3449
/* File operations for the IPC status debugfs node (readable and writable). */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3458
3459 static int i915_ddb_info(struct seq_file *m, void *unused)
3460 {
3461         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3462         struct drm_device *dev = &dev_priv->drm;
3463         struct skl_ddb_allocation *ddb;
3464         struct skl_ddb_entry *entry;
3465         enum pipe pipe;
3466         int plane;
3467
3468         if (INTEL_GEN(dev_priv) < 9)
3469                 return -ENODEV;
3470
3471         drm_modeset_lock_all(dev);
3472
3473         ddb = &dev_priv->wm.skl_hw.ddb;
3474
3475         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3476
3477         for_each_pipe(dev_priv, pipe) {
3478                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3479
3480                 for_each_universal_plane(dev_priv, pipe, plane) {
3481                         entry = &ddb->plane[pipe][plane];
3482                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3483                                    entry->start, entry->end,
3484                                    skl_ddb_entry_size(entry));
3485                 }
3486
3487                 entry = &ddb->plane[pipe][PLANE_CURSOR];
3488                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3489                            entry->end, skl_ddb_entry_size(entry));
3490         }
3491
3492         drm_modeset_unlock_all(dev);
3493
3494         return 0;
3495 }
3496
/*
 * Print DRRS (Display Refresh Rate Switching) status for one CRTC:
 * the connector(s) it drives, the VBT-declared DRRS type, and - when
 * DRRS is active - the current refresh-rate state under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Name the connector(s) currently driven by this CRTC. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Report what the VBT claims about DRRS support. */
        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                /* drrs->mutex protects dp/refresh_rate_type/busy bits. */
                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                                        drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                                drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3571
3572 static int i915_drrs_status(struct seq_file *m, void *unused)
3573 {
3574         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3575         struct drm_device *dev = &dev_priv->drm;
3576         struct intel_crtc *intel_crtc;
3577         int active_crtc_cnt = 0;
3578
3579         drm_modeset_lock_all(dev);
3580         for_each_intel_crtc(dev, intel_crtc) {
3581                 if (intel_crtc->base.state->active) {
3582                         active_crtc_cnt++;
3583                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3584
3585                         drrs_status_per_crtc(m, dev, intel_crtc);
3586                 }
3587         }
3588         drm_modeset_unlock_all(dev);
3589
3590         if (!active_crtc_cnt)
3591                 seq_puts(m, "No active crtc found\n");
3592
3593         return 0;
3594 }
3595
3596 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3597 {
3598         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3599         struct drm_device *dev = &dev_priv->drm;
3600         struct intel_encoder *intel_encoder;
3601         struct intel_digital_port *intel_dig_port;
3602         struct drm_connector *connector;
3603         struct drm_connector_list_iter conn_iter;
3604
3605         drm_connector_list_iter_begin(dev, &conn_iter);
3606         drm_for_each_connector_iter(connector, &conn_iter) {
3607                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3608                         continue;
3609
3610                 intel_encoder = intel_attached_encoder(connector);
3611                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3612                         continue;
3613
3614                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3615                 if (!intel_dig_port->dp.can_mst)
3616                         continue;
3617
3618                 seq_printf(m, "MST Source Port %c\n",
3619                            port_name(intel_dig_port->base.port));
3620                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3621         }
3622         drm_connector_list_iter_end(&conn_iter);
3623
3624         return 0;
3625 }
3626
/*
 * debugfs write hook: accepts a decimal integer; writing exactly 1 arms
 * compliance-test handling (test_active) on every connected DP connector,
 * any other value disarms it. Returns bytes consumed or a negative errno.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
                                                  const char __user *ubuf,
                                                  size_t len, loff_t *offp)
{
        char *input_buffer;
        int status = 0;
        struct drm_device *dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;
        int val = 0;

        dev = ((struct seq_file *)file->private_data)->private;

        if (len == 0)
                return 0;

        /* Copy and NUL-terminate the user buffer in one step. */
        input_buffer = memdup_user_nul(ubuf, len);
        if (IS_ERR(input_buffer))
                return PTR_ERR(input_buffer);

        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip MST connectors; only SST DP encoders are handled. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        status = kstrtoint(input_buffer, 10, &val);
                        if (status < 0)
                                break;
                        DRM_DEBUG_DRIVER("Got %d for test active\n", val);
                        /* To prevent erroneous activation of the compliance
                         * testing code, only accept an actual value of 1 here
                         */
                        if (val == 1)
                                intel_dp->compliance.test_active = 1;
                        else
                                intel_dp->compliance.test_active = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        kfree(input_buffer);
        if (status < 0)
                return status;

        *offp += len;
        return len;
}
3685
3686 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3687 {
3688         struct drm_i915_private *dev_priv = m->private;
3689         struct drm_device *dev = &dev_priv->drm;
3690         struct drm_connector *connector;
3691         struct drm_connector_list_iter conn_iter;
3692         struct intel_dp *intel_dp;
3693
3694         drm_connector_list_iter_begin(dev, &conn_iter);
3695         drm_for_each_connector_iter(connector, &conn_iter) {
3696                 struct intel_encoder *encoder;
3697
3698                 if (connector->connector_type !=
3699                     DRM_MODE_CONNECTOR_DisplayPort)
3700                         continue;
3701
3702                 encoder = to_intel_encoder(connector->encoder);
3703                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3704                         continue;
3705
3706                 if (encoder && connector->status == connector_status_connected) {
3707                         intel_dp = enc_to_intel_dp(&encoder->base);
3708                         if (intel_dp->compliance.test_active)
3709                                 seq_puts(m, "1");
3710                         else
3711                                 seq_puts(m, "0");
3712                 } else
3713                         seq_puts(m, "0");
3714         }
3715         drm_connector_list_iter_end(&conn_iter);
3716
3717         return 0;
3718 }
3719
3720 static int i915_displayport_test_active_open(struct inode *inode,
3721                                              struct file *file)
3722 {
3723         return single_open(file, i915_displayport_test_active_show,
3724                            inode->i_private);
3725 }
3726
/* File operations for the DP compliance test_active node (read/write). */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3735
/*
 * Show the data payload of the last DP compliance test request per
 * connected DP connector: the stored EDID test value for EDID-read
 * tests, or the requested timing/bpc for video-pattern tests.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip MST connectors; only SST DP encoders are handled. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (intel_dp->compliance.test_type ==
                            DP_TEST_LINK_EDID_READ)
                                seq_printf(m, "%lx",
                                           intel_dp->compliance.test_data.edid);
                        else if (intel_dp->compliance.test_type ==
                                 DP_TEST_LINK_VIDEO_PATTERN) {
                                seq_printf(m, "hdisplay: %d\n",
                                           intel_dp->compliance.test_data.hdisplay);
                                seq_printf(m, "vdisplay: %d\n",
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
                        }
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3779
3780 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3781 {
3782         struct drm_i915_private *dev_priv = m->private;
3783         struct drm_device *dev = &dev_priv->drm;
3784         struct drm_connector *connector;
3785         struct drm_connector_list_iter conn_iter;
3786         struct intel_dp *intel_dp;
3787
3788         drm_connector_list_iter_begin(dev, &conn_iter);
3789         drm_for_each_connector_iter(connector, &conn_iter) {
3790                 struct intel_encoder *encoder;
3791
3792                 if (connector->connector_type !=
3793                     DRM_MODE_CONNECTOR_DisplayPort)
3794                         continue;
3795
3796                 encoder = to_intel_encoder(connector->encoder);
3797                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3798                         continue;
3799
3800                 if (encoder && connector->status == connector_status_connected) {
3801                         intel_dp = enc_to_intel_dp(&encoder->base);
3802                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3803                 } else
3804                         seq_puts(m, "0");
3805         }
3806         drm_connector_list_iter_end(&conn_iter);
3807
3808         return 0;
3809 }
3810 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3811
3812 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3813 {
3814         struct drm_i915_private *dev_priv = m->private;
3815         struct drm_device *dev = &dev_priv->drm;
3816         int level;
3817         int num_levels;
3818
3819         if (IS_CHERRYVIEW(dev_priv))
3820                 num_levels = 3;
3821         else if (IS_VALLEYVIEW(dev_priv))
3822                 num_levels = 1;
3823         else if (IS_G4X(dev_priv))
3824                 num_levels = 3;
3825         else
3826                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3827
3828         drm_modeset_lock_all(dev);
3829
3830         for (level = 0; level < num_levels; level++) {
3831                 unsigned int latency = wm[level];
3832
3833                 /*
3834                  * - WM1+ latency values in 0.5us units
3835                  * - latencies are in us on gen9/vlv/chv
3836                  */
3837                 if (INTEL_GEN(dev_priv) >= 9 ||
3838                     IS_VALLEYVIEW(dev_priv) ||
3839                     IS_CHERRYVIEW(dev_priv) ||
3840                     IS_G4X(dev_priv))
3841                         latency *= 10;
3842                 else if (level > 0)
3843                         latency *= 5;
3844
3845                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3846                            level, wm[level], latency / 10, latency % 10);
3847         }
3848
3849         drm_modeset_unlock_all(dev);
3850 }
3851
3852 static int pri_wm_latency_show(struct seq_file *m, void *data)
3853 {
3854         struct drm_i915_private *dev_priv = m->private;
3855         const uint16_t *latencies;
3856
3857         if (INTEL_GEN(dev_priv) >= 9)
3858                 latencies = dev_priv->wm.skl_latency;
3859         else
3860                 latencies = dev_priv->wm.pri_latency;
3861
3862         wm_latency_show(m, latencies);
3863
3864         return 0;
3865 }
3866
3867 static int spr_wm_latency_show(struct seq_file *m, void *data)
3868 {
3869         struct drm_i915_private *dev_priv = m->private;
3870         const uint16_t *latencies;
3871
3872         if (INTEL_GEN(dev_priv) >= 9)
3873                 latencies = dev_priv->wm.skl_latency;
3874         else
3875                 latencies = dev_priv->wm.spr_latency;
3876
3877         wm_latency_show(m, latencies);
3878
3879         return 0;
3880 }
3881
3882 static int cur_wm_latency_show(struct seq_file *m, void *data)
3883 {
3884         struct drm_i915_private *dev_priv = m->private;
3885         const uint16_t *latencies;
3886
3887         if (INTEL_GEN(dev_priv) >= 9)
3888                 latencies = dev_priv->wm.skl_latency;
3889         else
3890                 latencies = dev_priv->wm.cur_latency;
3891
3892         wm_latency_show(m, latencies);
3893
3894         return 0;
3895 }
3896
/* Primary watermark latencies: exposed on gen5+ and g4x only. */
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

/* Sprite watermark latencies: not available on GMCH display platforms. */
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

/* Cursor watermark latencies: not available on GMCH display platforms. */
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
3926
3927 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3928                                 size_t len, loff_t *offp, uint16_t wm[8])
3929 {
3930         struct seq_file *m = file->private_data;
3931         struct drm_i915_private *dev_priv = m->private;
3932         struct drm_device *dev = &dev_priv->drm;
3933         uint16_t new[8] = { 0 };
3934         int num_levels;
3935         int level;
3936         int ret;
3937         char tmp[32];
3938
3939         if (IS_CHERRYVIEW(dev_priv))
3940                 num_levels = 3;
3941         else if (IS_VALLEYVIEW(dev_priv))
3942                 num_levels = 1;
3943         else if (IS_G4X(dev_priv))
3944                 num_levels = 3;
3945         else
3946                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3947
3948         if (len >= sizeof(tmp))
3949                 return -EINVAL;
3950
3951         if (copy_from_user(tmp, ubuf, len))
3952                 return -EFAULT;
3953
3954         tmp[len] = '\0';
3955
3956         ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3957                      &new[0], &new[1], &new[2], &new[3],
3958                      &new[4], &new[5], &new[6], &new[7]);
3959         if (ret != num_levels)
3960                 return -EINVAL;
3961
3962         drm_modeset_lock_all(dev);
3963
3964         for (level = 0; level < num_levels; level++)
3965                 wm[level] = new[level];
3966
3967         drm_modeset_unlock_all(dev);
3968
3969         return len;
3970 }
3971
3972
3973 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3974                                     size_t len, loff_t *offp)
3975 {
3976         struct seq_file *m = file->private_data;
3977         struct drm_i915_private *dev_priv = m->private;
3978         uint16_t *latencies;
3979
3980         if (INTEL_GEN(dev_priv) >= 9)
3981                 latencies = dev_priv->wm.skl_latency;
3982         else
3983                 latencies = dev_priv->wm.pri_latency;
3984
3985         return wm_latency_write(file, ubuf, len, offp, latencies);
3986 }
3987
3988 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3989                                     size_t len, loff_t *offp)
3990 {
3991         struct seq_file *m = file->private_data;
3992         struct drm_i915_private *dev_priv = m->private;
3993         uint16_t *latencies;
3994
3995         if (INTEL_GEN(dev_priv) >= 9)
3996                 latencies = dev_priv->wm.skl_latency;
3997         else
3998                 latencies = dev_priv->wm.spr_latency;
3999
4000         return wm_latency_write(file, ubuf, len, offp, latencies);
4001 }
4002
4003 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4004                                     size_t len, loff_t *offp)
4005 {
4006         struct seq_file *m = file->private_data;
4007         struct drm_i915_private *dev_priv = m->private;
4008         uint16_t *latencies;
4009
4010         if (INTEL_GEN(dev_priv) >= 9)
4011                 latencies = dev_priv->wm.skl_latency;
4012         else
4013                 latencies = dev_priv->wm.cur_latency;
4014
4015         return wm_latency_write(file, ubuf, len, offp, latencies);
4016 }
4017
/* debugfs files exposing the pri/spr/cur watermark latencies (read/write). */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4044
4045 static int
4046 i915_wedged_get(void *data, u64 *val)
4047 {
4048         struct drm_i915_private *dev_priv = data;
4049
4050         *val = i915_terminally_wedged(&dev_priv->gpu_error);
4051
4052         return 0;
4053 }
4054
/*
 * Manually declare the engines in the @val mask hung and kick off error
 * handling (with error capture), then wait for the reset handoff to
 * complete before returning to userspace.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Mark each selected engine as stalled at its current seqno. */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Block until the error handler has handed the reset over. */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
4091
/*
 * Common setter for the missed-irq/test-irq debugfs masks: wait for the
 * GPU to idle under struct_mutex, store @val into @irq, then drain the
 * idle worker so the interrupt state is disarmed before returning.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4121
4122 static int
4123 i915_ring_missed_irq_get(void *data, u64 *val)
4124 {
4125         struct drm_i915_private *dev_priv = data;
4126
4127         *val = dev_priv->gpu_error.missed_irq_rings;
4128         return 0;
4129 }
4130
4131 static int
4132 i915_ring_missed_irq_set(void *data, u64 val)
4133 {
4134         struct drm_i915_private *i915 = data;
4135
4136         return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4137 }
4138
4139 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4140                         i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4141                         "0x%08llx\n");
4142
4143 static int
4144 i915_ring_test_irq_get(void *data, u64 *val)
4145 {
4146         struct drm_i915_private *dev_priv = data;
4147
4148         *val = dev_priv->gpu_error.test_irq_rings;
4149
4150         return 0;
4151 }
4152
4153 static int
4154 i915_ring_test_irq_set(void *data, u64 val)
4155 {
4156         struct drm_i915_private *i915 = data;
4157
4158         val &= INTEL_INFO(i915)->ring_mask;
4159         DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4160
4161         return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4162 }
4163
4164 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4165                         i915_ring_test_irq_get, i915_ring_test_irq_set,
4166                         "0x%08llx\n");
4167
/* Bit flags accepted by the i915_drop_caches debugfs file. */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
4182 static int
4183 i915_drop_caches_get(void *data, u64 *val)
4184 {
4185         *val = DROP_ALL;
4186
4187         return 0;
4188 }
4189
/*
 * Write handler for i915_drop_caches: flush/drop the GEM state selected
 * by the DROP_* bits in @val. Used by tests to reach a known idle,
 * low-memory state.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	/* Run the shrinkers inside an fs_reclaim (lockdep) section. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/*
		 * Keep retiring outstanding requests and draining the
		 * idle worker until the GT reports itself asleep.
		 */
		do {
			if (READ_ONCE(dev_priv->gt.active_requests))
				flush_delayed_work(&dev_priv->gt.retire_work);
			drain_delayed_work(&dev_priv->gt.idle_work);
		} while (READ_ONCE(dev_priv->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4246
/*
 * Report the current uncore cache sharing policy: the SNPCR field of
 * GEN6_MBCUNIT_SNPCR. gen6/gen7 only.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Hold a runtime-pm reference across the register read. */
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
4266
/*
 * Set the uncore cache sharing policy (valid range 0-3) by rewriting the
 * SNPCR field of GEN6_MBCUNIT_SNPCR. gen6/gen7 only.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4295
/*
 * Read the CHV power-gating signal registers and accumulate into @sseu
 * the slice/subslice/EU configuration that is currently powered up.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice; record each powered subslice. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each clear PG_ENABLE bit accounts for a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4328
/*
 * Read the gen10 slice/subslice/EU power-gate ACK registers and fill
 * @sseu with what is currently powered up. Subslice masks for enabled
 * slices are copied from the static device info (see FIXME below).
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserverd
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits covering the two EU halves of each subslice pair. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit represents an enabled pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4384
/*
 * Read the gen9 power-gate control ACK registers and fill @sseu with the
 * currently powered-up slice/subslice/EU configuration. On gen9 big core
 * the subslice mask is copied from the static device info; on gen9 LP it
 * is derived from the per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits covering the two EU halves of each subslice pair. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each ACK bit represents an enabled pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4440
/*
 * Fill @sseu for Broadwell: only the slice mask is read back from
 * GEN8_GT_SLICE_INFO; subslice masks and EU counts come from the static
 * device info, with the EUs fused off in 7-EU subslices subtracted.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4468
4469 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4470                                  const struct sseu_dev_info *sseu)
4471 {
4472         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4473         const char *type = is_available_info ? "Available" : "Enabled";
4474         int s;
4475
4476         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4477                    sseu->slice_mask);
4478         seq_printf(m, "  %s Slice Total: %u\n", type,
4479                    hweight8(sseu->slice_mask));
4480         seq_printf(m, "  %s Subslice Total: %u\n", type,
4481                    sseu_subslice_total(sseu));
4482         for (s = 0; s < fls(sseu->slice_mask); s++) {
4483                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4484                            s, hweight8(sseu->subslice_mask[s]));
4485         }
4486         seq_printf(m, "  %s EU Total: %u\n", type,
4487                    sseu->eu_total);
4488         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4489                    sseu->eu_per_subslice);
4490
4491         if (!is_available_info)
4492                 return;
4493
4494         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4495         if (HAS_POOLED_EU(dev_priv))
4496                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4497
4498         seq_printf(m, "  Has Slice Power Gating: %s\n",
4499                    yesno(sseu->has_slice_pg));
4500         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4501                    yesno(sseu->has_subslice_pg));
4502         seq_printf(m, "  Has EU Power Gating: %s\n",
4503                    yesno(sseu->has_eu_pg));
4504 }
4505
4506 static int i915_sseu_status(struct seq_file *m, void *unused)
4507 {
4508         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4509         struct sseu_dev_info sseu;
4510
4511         if (INTEL_GEN(dev_priv) < 8)
4512                 return -ENODEV;
4513
4514         seq_puts(m, "SSEU Device Info\n");
4515         i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4516
4517         seq_puts(m, "SSEU Device Status\n");
4518         memset(&sseu, 0, sizeof(sseu));
4519         sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4520         sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4521         sseu.max_eus_per_subslice =
4522                 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4523
4524         intel_runtime_pm_get(dev_priv);
4525
4526         if (IS_CHERRYVIEW(dev_priv)) {
4527                 cherryview_sseu_device_status(dev_priv, &sseu);
4528         } else if (IS_BROADWELL(dev_priv)) {
4529                 broadwell_sseu_device_status(dev_priv, &sseu);
4530         } else if (IS_GEN9(dev_priv)) {
4531                 gen9_sseu_device_status(dev_priv, &sseu);
4532         } else if (INTEL_GEN(dev_priv) >= 10) {
4533                 gen10_sseu_device_status(dev_priv, &sseu);
4534         }
4535
4536         intel_runtime_pm_put(dev_priv);
4537
4538         i915_print_sseu_info(m, false, &sseu);
4539
4540         return 0;
4541 }
4542
/*
 * Opening i915_forcewake_user holds runtime-pm and user forcewake
 * references for as long as the file is open. No-op before gen6.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

/* Drop the references taken by i915_forcewake_open(). */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4574
4575 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4576 {
4577         struct drm_i915_private *dev_priv = m->private;
4578         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4579
4580         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4581         seq_printf(m, "Detected: %s\n",
4582                    yesno(delayed_work_pending(&hotplug->reenable_work)));
4583
4584         return 0;
4585 }
4586
4587 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4588                                         const char __user *ubuf, size_t len,
4589                                         loff_t *offp)
4590 {
4591         struct seq_file *m = file->private_data;
4592         struct drm_i915_private *dev_priv = m->private;
4593         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4594         unsigned int new_threshold;
4595         int i;
4596         char *newline;
4597         char tmp[16];
4598
4599         if (len >= sizeof(tmp))
4600                 return -EINVAL;
4601
4602         if (copy_from_user(tmp, ubuf, len))
4603                 return -EFAULT;
4604
4605         tmp[len] = '\0';
4606
4607         /* Strip newline, if any */
4608         newline = strchr(tmp, '\n');
4609         if (newline)
4610                 *newline = '\0';
4611
4612         if (strcmp(tmp, "reset") == 0)
4613                 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4614         else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4615                 return -EINVAL;
4616
4617         if (new_threshold > 0)
4618                 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4619                               new_threshold);
4620         else
4621                 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4622
4623         spin_lock_irq(&dev_priv->irq_lock);
4624         hotplug->hpd_storm_threshold = new_threshold;
4625         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4626         for_each_hpd_pin(i)
4627                 hotplug->stats[i].count = 0;
4628         spin_unlock_irq(&dev_priv->irq_lock);
4629
4630         /* Re-enable hpd immediately if we were in an irq storm */
4631         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4632
4633         return len;
4634 }
4635
/* seq_file open boilerplate for i915_hpd_storm_ctl. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4640
/*
 * File operations for the i915_hpd_storm_ctl debugfs entry: reads go through
 * the seq_file show routine, writes update the HPD storm threshold.
 */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4649
4650 static int i915_drrs_ctl_set(void *data, u64 val)
4651 {
4652         struct drm_i915_private *dev_priv = data;
4653         struct drm_device *dev = &dev_priv->drm;
4654         struct intel_crtc *intel_crtc;
4655         struct intel_encoder *encoder;
4656         struct intel_dp *intel_dp;
4657
4658         if (INTEL_GEN(dev_priv) < 7)
4659                 return -ENODEV;
4660
4661         drm_modeset_lock_all(dev);
4662         for_each_intel_crtc(dev, intel_crtc) {
4663                 if (!intel_crtc->base.state->active ||
4664                                         !intel_crtc->config->has_drrs)
4665                         continue;
4666
4667                 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4668                         if (encoder->type != INTEL_OUTPUT_EDP)
4669                                 continue;
4670
4671                         DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4672                                                 val ? "en" : "dis", val);
4673
4674                         intel_dp = enc_to_intel_dp(&encoder->base);
4675                         if (val)
4676                                 intel_edp_drrs_enable(intel_dp,
4677                                                         intel_crtc->config);
4678                         else
4679                                 intel_edp_drrs_disable(intel_dp,
4680                                                         intel_crtc->config);
4681                 }
4682         }
4683         drm_modeset_unlock_all(dev);
4684
4685         return 0;
4686 }
4687
4688 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4689
/*
 * Re-arm FIFO underrun reporting on all pipes.  Writing a truthy value waits
 * for any pending commit on each crtc to complete (hw_done then flip_done),
 * re-arms underrun interrupts on active pipes, and finally resets the FBC
 * underrun state.  A falsy write is accepted but does nothing.
 *
 * Returns @cnt on success, or a negative errno (parse failure, interrupted
 * wait/lock, or intel_fbc_reset_underrun() failure).
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Take only this crtc's lock; interruptible so ^C works. */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for any in-flight commit to fully land first. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Always drop the lock before propagating a wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4743
/* Write-only debugfs entry; simple_open stashes i_private as private_data. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4750
4751 static const struct drm_info_list i915_debugfs_list[] = {
4752         {"i915_capabilities", i915_capabilities, 0},
4753         {"i915_gem_objects", i915_gem_object_info, 0},
4754         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4755         {"i915_gem_stolen", i915_gem_stolen_list_info },
4756         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4757         {"i915_gem_interrupt", i915_interrupt_info, 0},
4758         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4759         {"i915_guc_info", i915_guc_info, 0},
4760         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4761         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4762         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4763         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4764         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4765         {"i915_frequency_info", i915_frequency_info, 0},
4766         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4767         {"i915_reset_info", i915_reset_info, 0},
4768         {"i915_drpc_info", i915_drpc_info, 0},
4769         {"i915_emon_status", i915_emon_status, 0},
4770         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4771         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4772         {"i915_fbc_status", i915_fbc_status, 0},
4773         {"i915_ips_status", i915_ips_status, 0},
4774         {"i915_sr_status", i915_sr_status, 0},
4775         {"i915_opregion", i915_opregion, 0},
4776         {"i915_vbt", i915_vbt, 0},
4777         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4778         {"i915_context_status", i915_context_status, 0},
4779         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4780         {"i915_swizzle_info", i915_swizzle_info, 0},
4781         {"i915_ppgtt_info", i915_ppgtt_info, 0},
4782         {"i915_llc", i915_llc, 0},
4783         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4784         {"i915_sink_crc_eDP1", i915_sink_crc, 0},
4785         {"i915_energy_uJ", i915_energy_uJ, 0},
4786         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4787         {"i915_power_domain_info", i915_power_domain_info, 0},
4788         {"i915_dmc_info", i915_dmc_info, 0},
4789         {"i915_display_info", i915_display_info, 0},
4790         {"i915_engine_info", i915_engine_info, 0},
4791         {"i915_rcs_topology", i915_rcs_topology, 0},
4792         {"i915_shrinker_info", i915_shrinker_info, 0},
4793         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4794         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4795         {"i915_wa_registers", i915_wa_registers, 0},
4796         {"i915_ddb_info", i915_ddb_info, 0},
4797         {"i915_sseu_status", i915_sseu_status, 0},
4798         {"i915_drrs_status", i915_drrs_status, 0},
4799         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4800 };
4801 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4802
/*
 * Writable debugfs entries with bespoke file_operations; created one by one
 * in i915_debugfs_register() with mode S_IRUGO | S_IWUSR.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	/* Error-capture entries only exist when the capture code is built in. */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4833
4834 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4835 {
4836         struct drm_minor *minor = dev_priv->drm.primary;
4837         struct dentry *ent;
4838         int ret, i;
4839
4840         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4841                                   minor->debugfs_root, to_i915(minor->dev),
4842                                   &i915_forcewake_fops);
4843         if (!ent)
4844                 return -ENOMEM;
4845
4846         ret = intel_pipe_crc_create(minor);
4847         if (ret)
4848                 return ret;
4849
4850         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4851                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4852                                           S_IRUGO | S_IWUSR,
4853                                           minor->debugfs_root,
4854                                           to_i915(minor->dev),
4855                                           i915_debugfs_files[i].fops);
4856                 if (!ent)
4857                         return -ENOMEM;
4858         }
4859
4860         return drm_debugfs_create_files(i915_debugfs_list,
4861                                         I915_DEBUGFS_ENTRIES,
4862                                         minor->debugfs_root, minor);
4863 }
4864
/* Describes one contiguous DPCD register range to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4875
/* DPCD ranges dumped for the i915_dpcd connector debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4888
4889 static int i915_dpcd_show(struct seq_file *m, void *data)
4890 {
4891         struct drm_connector *connector = m->private;
4892         struct intel_dp *intel_dp =
4893                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4894         uint8_t buf[16];
4895         ssize_t err;
4896         int i;
4897
4898         if (connector->status != connector_status_connected)
4899                 return -ENODEV;
4900
4901         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4902                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4903                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4904
4905                 if (b->edp &&
4906                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4907                         continue;
4908
4909                 /* low tech for now */
4910                 if (WARN_ON(size > sizeof(buf)))
4911                         continue;
4912
4913                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4914                 if (err <= 0) {
4915                         DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4916                                   size, b->offset, err);
4917                         continue;
4918                 }
4919
4920                 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4921         }
4922
4923         return 0;
4924 }
4925 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4926
/*
 * Dump the eDP panel power-sequencing and backlight delays (in ms) for a
 * connected panel.  Returns -ENODEV when the connector is not connected.
 */
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
4948
4949 /**
4950  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4951  * @connector: pointer to a registered drm_connector
4952  *
4953  * Cleanup will be done by drm_connector_unregister() through a call to
4954  * drm_debugfs_connector_remove().
4955  *
4956  * Returns 0 on success, negative error codes on error.
4957  */
4958 int i915_debugfs_connector_add(struct drm_connector *connector)
4959 {
4960         struct dentry *root = connector->debugfs_entry;
4961
4962         /* The connector must have been registered beforehands. */
4963         if (!root)
4964                 return -ENODEV;
4965
4966         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4967             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4968                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4969                                     connector, &i915_dpcd_fops);
4970
4971         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4972                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4973                                     connector, &i915_panel_fops);
4974
4975         return 0;
4976 }