drm/i915: Extract intel_get_cagf
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37         return to_i915(node->minor->dev);
38 }
39
/*
 * Print one i915 module parameter as "i915.<name>=<value>".
 *
 * @type is a stringised C type name (produced by the #T expansion in
 * i915_capabilities()).  The __builtin_strcmp() calls on string literals are
 * folded at compile time, which is why this must be __always_inline: each
 * call site collapses to a single seq_printf(), and any type without a
 * matching branch trips BUILD_BUG() at build time rather than at runtime.
 */
static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}
56
/*
 * debugfs: dump the device generation, platform, PCH type, every device-info
 * feature flag and the current value of every i915 module parameter.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	/* One "<flag>: yes/no" line per device-info feature flag */
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	/* Hold the param lock so values cannot change mid-dump */
	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
78
/* '*' while the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
83
84 static char get_pin_flag(struct drm_i915_gem_object *obj)
85 {
86         return obj->pin_global ? 'p' : ' ';
87 }
88
89 static char get_tiling_flag(struct drm_i915_gem_object *obj)
90 {
91         switch (i915_gem_object_get_tiling(obj)) {
92         default:
93         case I915_TILING_NONE: return ' ';
94         case I915_TILING_X: return 'X';
95         case I915_TILING_Y: return 'Y';
96         }
97 }
98
99 static char get_global_flag(struct drm_i915_gem_object *obj)
100 {
101         return obj->userfault_count ? 'g' : ' ';
102 }
103
104 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
105 {
106         return obj->mm.mapping ? 'M' : ' ';
107 }
108
109 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
110 {
111         u64 size = 0;
112         struct i915_vma *vma;
113
114         list_for_each_entry(vma, &obj->vma_list, obj_link) {
115                 if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
116                         size += vma->node.size;
117         }
118
119         return size;
120 }
121
122 static const char *
123 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
124 {
125         size_t x = 0;
126
127         switch (page_sizes) {
128         case 0:
129                 return "";
130         case I915_GTT_PAGE_SIZE_4K:
131                 return "4K";
132         case I915_GTT_PAGE_SIZE_64K:
133                 return "64K";
134         case I915_GTT_PAGE_SIZE_2M:
135                 return "2M";
136         default:
137                 if (!buf)
138                         return "M";
139
140                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
141                         x += snprintf(buf + x, len - x, "2M, ");
142                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
143                         x += snprintf(buf + x, len - x, "64K, ");
144                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
145                         x += snprintf(buf + x, len - x, "4K, ");
146                 buf[x-2] = '\0';
147
148                 return buf;
149         }
150 }
151
/*
 * Print a one-line description of @obj to the seq_file: status flags, size,
 * cache domains, each allocated GTT binding (with GGTT view and fence
 * details), stolen offset, last write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below) so the vma list is stable.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flag letters, size in KiB, read/write domains, cache level */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned vma before describing bindings */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* One "(...)" clause per allocated binding, GGTT ("g") or ppGTT */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT bindings also carry a view description */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
239
240 static int obj_rank_by_stolen(const void *A, const void *B)
241 {
242         const struct drm_i915_gem_object *a =
243                 *(const struct drm_i915_gem_object **)A;
244         const struct drm_i915_gem_object *b =
245                 *(const struct drm_i915_gem_object **)B;
246
247         if (a->stolen->start < b->stolen->start)
248                 return -1;
249         if (a->stolen->start > b->stolen->start)
250                 return 1;
251         return 0;
252 }
253
/*
 * debugfs: list every object backed by stolen memory, sorted by stolen
 * offset, followed by totals.
 *
 * Objects are snapshotted into a local array under the obj_lock spinlock
 * (struct_mutex alone does not protect the mm lists), then described at
 * leisure while holding struct_mutex only.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Unlocked snapshot of the object count; bounds the array below */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects occupy no GTT space, so only their size counts */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
316
/*
 * Per-client accumulator filled by per_file_stats() and printed with
 * print_file_stats().  All sizes are in bytes.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* client being accounted; NULL for kernel-owned stats */
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* total object size; portion with no GTT binding */
	u64 global, shared;	/* GGTT-bound size; flinked/dma-buf exported size */
	u64 active, inactive;	/* bound size split by GPU activity */
};
324
/*
 * idr_for_each() callback: accumulate one object (@ptr) into the
 * struct file_stats pointed to by @data.
 *
 * ppGTT bindings belonging to a different client than stats->file_priv are
 * skipped so per-process address-space usage is attributed to its owner;
 * GGTT bindings always count towards ->global.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Not this client's ppGTT: don't count it here */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
361
/*
 * Emit a one-line summary of @stats labelled @name, skipping clients that
 * own no objects.  @stats is a struct file_stats taken by value.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
374
/*
 * Accumulate and print usage statistics for every object held in the
 * per-engine batch-buffer pools (labelled "[k]batch pool").
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	/* file_priv left NULL: these are kernel-owned objects */
	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
397
/*
 * idr_for_each() callback: fold the backing objects of a context's
 * per-engine state images and ring buffers into the file_stats at @data.
 */
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}
412
/*
 * Accumulate and print usage statistics for every GEM context in the
 * system — the kernel context plus each open file's contexts — labelled
 * "[k]contexts".  Takes struct_mutex around the context walk; the caller
 * holds filelist_mutex (see i915_gem_object_info()).
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
434
/*
 * debugfs: global GEM object accounting — totals for unbound, bound,
 * purgeable, mapped, huge-paged and display-pinned objects, GGTT capacity,
 * batch-pool/context stats and a per-client usage breakdown.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* The unbound/bound lists are protected by obj_lock, not struct_mutex */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* purgeable/mapped/huge totals keep accumulating across both lists */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   ggtt->base.total, ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* Per-client breakdown: walk the open files under filelist_mutex */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
572
573 static int i915_gem_gtt_info(struct seq_file *m, void *data)
574 {
575         struct drm_info_node *node = m->private;
576         struct drm_i915_private *dev_priv = node_to_i915(node);
577         struct drm_device *dev = &dev_priv->drm;
578         struct drm_i915_gem_object **objects;
579         struct drm_i915_gem_object *obj;
580         u64 total_obj_size, total_gtt_size;
581         unsigned long nobject, n;
582         int count, ret;
583
584         nobject = READ_ONCE(dev_priv->mm.object_count);
585         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
586         if (!objects)
587                 return -ENOMEM;
588
589         ret = mutex_lock_interruptible(&dev->struct_mutex);
590         if (ret)
591                 return ret;
592
593         count = 0;
594         spin_lock(&dev_priv->mm.obj_lock);
595         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
596                 objects[count++] = obj;
597                 if (count == nobject)
598                         break;
599         }
600         spin_unlock(&dev_priv->mm.obj_lock);
601
602         total_obj_size = total_gtt_size = 0;
603         for (n = 0;  n < count; n++) {
604                 obj = objects[n];
605
606                 seq_puts(m, "   ");
607                 describe_obj(m, obj);
608                 seq_putc(m, '\n');
609                 total_obj_size += obj->base.size;
610                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
611         }
612
613         mutex_unlock(&dev->struct_mutex);
614
615         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
616                    count, total_obj_size, total_gtt_size);
617         kvfree(objects);
618
619         return 0;
620 }
621
/*
 * debugfs: for each engine's batch-buffer pool, print the number of cached
 * objects per size bucket, describe each object, and print a grand total.
 * Holds struct_mutex for the whole walk to keep the pool lists stable.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: count, so the header precedes the list */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second pass: describe each cached object */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
666
/*
 * Print the engine's current breadcrumb seqno followed by one line per
 * thread currently waiting on that engine, taken from the breadcrumbs
 * waiter rbtree under its irq-safe lock.
 */
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	seq_printf(m, "Current sequence (%s): %x\n",
		   engine->name, intel_engine_get_seqno(engine));

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);
}
685
686 static int i915_gem_seqno_info(struct seq_file *m, void *data)
687 {
688         struct drm_i915_private *dev_priv = node_to_i915(m->private);
689         struct intel_engine_cs *engine;
690         enum intel_engine_id id;
691
692         for_each_engine(engine, dev_priv, id)
693                 i915_ring_seqno_info(m, engine);
694
695         return 0;
696 }
697
698
699 static int i915_interrupt_info(struct seq_file *m, void *data)
700 {
701         struct drm_i915_private *dev_priv = node_to_i915(m->private);
702         struct intel_engine_cs *engine;
703         enum intel_engine_id id;
704         int i, pipe;
705
706         intel_runtime_pm_get(dev_priv);
707
708         if (IS_CHERRYVIEW(dev_priv)) {
709                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
710                            I915_READ(GEN8_MASTER_IRQ));
711
712                 seq_printf(m, "Display IER:\t%08x\n",
713                            I915_READ(VLV_IER));
714                 seq_printf(m, "Display IIR:\t%08x\n",
715                            I915_READ(VLV_IIR));
716                 seq_printf(m, "Display IIR_RW:\t%08x\n",
717                            I915_READ(VLV_IIR_RW));
718                 seq_printf(m, "Display IMR:\t%08x\n",
719                            I915_READ(VLV_IMR));
720                 for_each_pipe(dev_priv, pipe) {
721                         enum intel_display_power_domain power_domain;
722
723                         power_domain = POWER_DOMAIN_PIPE(pipe);
724                         if (!intel_display_power_get_if_enabled(dev_priv,
725                                                                 power_domain)) {
726                                 seq_printf(m, "Pipe %c power disabled\n",
727                                            pipe_name(pipe));
728                                 continue;
729                         }
730
731                         seq_printf(m, "Pipe %c stat:\t%08x\n",
732                                    pipe_name(pipe),
733                                    I915_READ(PIPESTAT(pipe)));
734
735                         intel_display_power_put(dev_priv, power_domain);
736                 }
737
738                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
739                 seq_printf(m, "Port hotplug:\t%08x\n",
740                            I915_READ(PORT_HOTPLUG_EN));
741                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
742                            I915_READ(VLV_DPFLIPSTAT));
743                 seq_printf(m, "DPINVGTT:\t%08x\n",
744                            I915_READ(DPINVGTT));
745                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
746
747                 for (i = 0; i < 4; i++) {
748                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
749                                    i, I915_READ(GEN8_GT_IMR(i)));
750                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
751                                    i, I915_READ(GEN8_GT_IIR(i)));
752                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
753                                    i, I915_READ(GEN8_GT_IER(i)));
754                 }
755
756                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
757                            I915_READ(GEN8_PCU_IMR));
758                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
759                            I915_READ(GEN8_PCU_IIR));
760                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
761                            I915_READ(GEN8_PCU_IER));
762         } else if (INTEL_GEN(dev_priv) >= 8) {
763                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
764                            I915_READ(GEN8_MASTER_IRQ));
765
766                 for (i = 0; i < 4; i++) {
767                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
768                                    i, I915_READ(GEN8_GT_IMR(i)));
769                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
770                                    i, I915_READ(GEN8_GT_IIR(i)));
771                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
772                                    i, I915_READ(GEN8_GT_IER(i)));
773                 }
774
775                 for_each_pipe(dev_priv, pipe) {
776                         enum intel_display_power_domain power_domain;
777
778                         power_domain = POWER_DOMAIN_PIPE(pipe);
779                         if (!intel_display_power_get_if_enabled(dev_priv,
780                                                                 power_domain)) {
781                                 seq_printf(m, "Pipe %c power disabled\n",
782                                            pipe_name(pipe));
783                                 continue;
784                         }
785                         seq_printf(m, "Pipe %c IMR:\t%08x\n",
786                                    pipe_name(pipe),
787                                    I915_READ(GEN8_DE_PIPE_IMR(pipe)));
788                         seq_printf(m, "Pipe %c IIR:\t%08x\n",
789                                    pipe_name(pipe),
790                                    I915_READ(GEN8_DE_PIPE_IIR(pipe)));
791                         seq_printf(m, "Pipe %c IER:\t%08x\n",
792                                    pipe_name(pipe),
793                                    I915_READ(GEN8_DE_PIPE_IER(pipe)));
794
795                         intel_display_power_put(dev_priv, power_domain);
796                 }
797
798                 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
799                            I915_READ(GEN8_DE_PORT_IMR));
800                 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
801                            I915_READ(GEN8_DE_PORT_IIR));
802                 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
803                            I915_READ(GEN8_DE_PORT_IER));
804
805                 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
806                            I915_READ(GEN8_DE_MISC_IMR));
807                 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
808                            I915_READ(GEN8_DE_MISC_IIR));
809                 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
810                            I915_READ(GEN8_DE_MISC_IER));
811
812                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
813                            I915_READ(GEN8_PCU_IMR));
814                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
815                            I915_READ(GEN8_PCU_IIR));
816                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
817                            I915_READ(GEN8_PCU_IER));
818         } else if (IS_VALLEYVIEW(dev_priv)) {
819                 seq_printf(m, "Display IER:\t%08x\n",
820                            I915_READ(VLV_IER));
821                 seq_printf(m, "Display IIR:\t%08x\n",
822                            I915_READ(VLV_IIR));
823                 seq_printf(m, "Display IIR_RW:\t%08x\n",
824                            I915_READ(VLV_IIR_RW));
825                 seq_printf(m, "Display IMR:\t%08x\n",
826                            I915_READ(VLV_IMR));
827                 for_each_pipe(dev_priv, pipe) {
828                         enum intel_display_power_domain power_domain;
829
830                         power_domain = POWER_DOMAIN_PIPE(pipe);
831                         if (!intel_display_power_get_if_enabled(dev_priv,
832                                                                 power_domain)) {
833                                 seq_printf(m, "Pipe %c power disabled\n",
834                                            pipe_name(pipe));
835                                 continue;
836                         }
837
838                         seq_printf(m, "Pipe %c stat:\t%08x\n",
839                                    pipe_name(pipe),
840                                    I915_READ(PIPESTAT(pipe)));
841                         intel_display_power_put(dev_priv, power_domain);
842                 }
843
844                 seq_printf(m, "Master IER:\t%08x\n",
845                            I915_READ(VLV_MASTER_IER));
846
847                 seq_printf(m, "Render IER:\t%08x\n",
848                            I915_READ(GTIER));
849                 seq_printf(m, "Render IIR:\t%08x\n",
850                            I915_READ(GTIIR));
851                 seq_printf(m, "Render IMR:\t%08x\n",
852                            I915_READ(GTIMR));
853
854                 seq_printf(m, "PM IER:\t\t%08x\n",
855                            I915_READ(GEN6_PMIER));
856                 seq_printf(m, "PM IIR:\t\t%08x\n",
857                            I915_READ(GEN6_PMIIR));
858                 seq_printf(m, "PM IMR:\t\t%08x\n",
859                            I915_READ(GEN6_PMIMR));
860
861                 seq_printf(m, "Port hotplug:\t%08x\n",
862                            I915_READ(PORT_HOTPLUG_EN));
863                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
864                            I915_READ(VLV_DPFLIPSTAT));
865                 seq_printf(m, "DPINVGTT:\t%08x\n",
866                            I915_READ(DPINVGTT));
867
868         } else if (!HAS_PCH_SPLIT(dev_priv)) {
869                 seq_printf(m, "Interrupt enable:    %08x\n",
870                            I915_READ(IER));
871                 seq_printf(m, "Interrupt identity:  %08x\n",
872                            I915_READ(IIR));
873                 seq_printf(m, "Interrupt mask:      %08x\n",
874                            I915_READ(IMR));
875                 for_each_pipe(dev_priv, pipe)
876                         seq_printf(m, "Pipe %c stat:         %08x\n",
877                                    pipe_name(pipe),
878                                    I915_READ(PIPESTAT(pipe)));
879         } else {
880                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
881                            I915_READ(DEIER));
882                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
883                            I915_READ(DEIIR));
884                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
885                            I915_READ(DEIMR));
886                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
887                            I915_READ(SDEIER));
888                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
889                            I915_READ(SDEIIR));
890                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
891                            I915_READ(SDEIMR));
892                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
893                            I915_READ(GTIER));
894                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
895                            I915_READ(GTIIR));
896                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
897                            I915_READ(GTIMR));
898         }
899         for_each_engine(engine, dev_priv, id) {
900                 if (INTEL_GEN(dev_priv) >= 6) {
901                         seq_printf(m,
902                                    "Graphics Interrupt mask (%s):       %08x\n",
903                                    engine->name, I915_READ_IMR(engine));
904                 }
905                 i915_ring_seqno_info(m, engine);
906         }
907         intel_runtime_pm_put(dev_priv);
908
909         return 0;
910 }
911
912 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
913 {
914         struct drm_i915_private *dev_priv = node_to_i915(m->private);
915         struct drm_device *dev = &dev_priv->drm;
916         int i, ret;
917
918         ret = mutex_lock_interruptible(&dev->struct_mutex);
919         if (ret)
920                 return ret;
921
922         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
923         for (i = 0; i < dev_priv->num_fence_regs; i++) {
924                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
925
926                 seq_printf(m, "Fence %d, pin count = %d, object = ",
927                            i, dev_priv->fence_regs[i].pin_count);
928                 if (!vma)
929                         seq_puts(m, "unused");
930                 else
931                         describe_obj(m, vma->obj);
932                 seq_putc(m, '\n');
933         }
934
935         mutex_unlock(&dev->struct_mutex);
936         return 0;
937 }
938
939 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * Read a textual rendering of a captured GPU error state.
 * The state is serialized into a freshly initialised error-state string
 * buffer sized for this read window, then copied out to userspace.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	/* No state captured (or none attached at open): report EOF. */
	if (!error)
		return 0;

	/* Buffer is initialised to start at *pos, spanning at most count bytes. */
	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	/*
	 * The buffer already begins at the requested file offset (see
	 * buf_init above), so copy from offset 0 of the buffer here and
	 * advance the real file position manually below.
	 */
	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
969
970 static int gpu_state_release(struct inode *inode, struct file *file)
971 {
972         i915_gpu_state_put(file->private_data);
973         return 0;
974 }
975
976 static int i915_gpu_info_open(struct inode *inode, struct file *file)
977 {
978         struct drm_i915_private *i915 = inode->i_private;
979         struct i915_gpu_state *gpu;
980
981         intel_runtime_pm_get(i915);
982         gpu = i915_capture_gpu_state(i915);
983         intel_runtime_pm_put(i915);
984         if (!gpu)
985                 return -ENOMEM;
986
987         file->private_data = gpu;
988         return 0;
989 }
990
/*
 * debugfs "i915_gpu_info": open captures a live snapshot of the GPU state,
 * read serializes it as text, release drops the snapshot reference.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
998
999 static ssize_t
1000 i915_error_state_write(struct file *filp,
1001                        const char __user *ubuf,
1002                        size_t cnt,
1003                        loff_t *ppos)
1004 {
1005         struct i915_gpu_state *error = filp->private_data;
1006
1007         if (!error)
1008                 return 0;
1009
1010         DRM_DEBUG_DRIVER("Resetting error state\n");
1011         i915_reset_error_state(error->i915);
1012
1013         return cnt;
1014 }
1015
1016 static int i915_error_state_open(struct inode *inode, struct file *file)
1017 {
1018         file->private_data = i915_first_error_state(inode->i_private);
1019         return 0;
1020 }
1021
/*
 * debugfs "i915_error_state": read dumps the recorded error state as text;
 * any write resets (clears) the recorded state.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1030 #endif
1031
1032 static int
1033 i915_next_seqno_set(void *data, u64 val)
1034 {
1035         struct drm_i915_private *dev_priv = data;
1036         struct drm_device *dev = &dev_priv->drm;
1037         int ret;
1038
1039         ret = mutex_lock_interruptible(&dev->struct_mutex);
1040         if (ret)
1041                 return ret;
1042
1043         ret = i915_gem_set_global_seqno(dev, val);
1044         mutex_unlock(&dev->struct_mutex);
1045
1046         return ret;
1047 }
1048
/* Write-only attribute (NULL getter): reads are unsupported by design. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
1052
/*
 * Dump the GPU frequency / RPS state for the current platform.
 * Three hardware families are handled: ILK (gen5) MEMSWCTL-based
 * P-states, VLV/CHV punit-based frequency control, and gen6+ RPS.
 *
 * NOTE: I915_READ/I915_READ16 implicitly reference the local `dev_priv`
 * variable, so its name must not change.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	/* Keep the device awake while poking frequency registers. */
	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* punit accesses are serialised by pcu_lock. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* BXT/GLK expose the state cap / perf status elsewhere. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* Requested frequency field moved/widened over generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		/* Decode the actual (CAGF) frequency from RPSTAT1. */
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		/* PM interrupt registers moved into GT bank 2 on gen8+. */
		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		/* gen9 widened the ratio field to 9 bits. */
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		/* RPN (lowest) lives in different halves of the cap on BXT. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1259
1260 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1261                                struct seq_file *m,
1262                                struct intel_instdone *instdone)
1263 {
1264         int slice;
1265         int subslice;
1266
1267         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1268                    instdone->instdone);
1269
1270         if (INTEL_GEN(dev_priv) <= 3)
1271                 return;
1272
1273         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1274                    instdone->slice_common);
1275
1276         if (INTEL_GEN(dev_priv) <= 6)
1277                 return;
1278
1279         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1280                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1281                            slice, subslice, instdone->sampler[slice][subslice]);
1282
1283         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1284                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1285                            slice, subslice, instdone->row[slice][subslice]);
1286 }
1287
/*
 * Dump the hangcheck machinery's view of the GPU: global reset flags,
 * per-engine seqno/ACTHD progress versus hangcheck's last sample, any
 * waiters parked on each engine, and the RCS instdone snapshot.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Report any in-flight reset/recovery state up front. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Wake the device just long enough to snapshot engine state. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		/* Current progress vs hangcheck's last recorded sample. */
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter rbtree under its lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		/* instdone is only captured for the render engine. */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1382
1383 static int i915_reset_info(struct seq_file *m, void *unused)
1384 {
1385         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1386         struct i915_gpu_error *error = &dev_priv->gpu_error;
1387         struct intel_engine_cs *engine;
1388         enum intel_engine_id id;
1389
1390         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1391
1392         for_each_engine(engine, dev_priv, id) {
1393                 seq_printf(m, "%s = %u\n", engine->name,
1394                            i915_reset_engine_count(error, engine));
1395         }
1396
1397         return 0;
1398 }
1399
1400 static int ironlake_drpc_info(struct seq_file *m)
1401 {
1402         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1403         u32 rgvmodectl, rstdbyctl;
1404         u16 crstandvid;
1405
1406         rgvmodectl = I915_READ(MEMMODECTL);
1407         rstdbyctl = I915_READ(RSTDBYCTL);
1408         crstandvid = I915_READ16(CRSTANDVID);
1409
1410         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1411         seq_printf(m, "Boost freq: %d\n",
1412                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1413                    MEMMODE_BOOST_FREQ_SHIFT);
1414         seq_printf(m, "HW control enabled: %s\n",
1415                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1416         seq_printf(m, "SW control enabled: %s\n",
1417                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1418         seq_printf(m, "Gated voltage change: %s\n",
1419                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1420         seq_printf(m, "Starting frequency: P%d\n",
1421                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1422         seq_printf(m, "Max P-state: P%d\n",
1423                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1424         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1425         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1426         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1427         seq_printf(m, "Render standby enabled: %s\n",
1428                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1429         seq_puts(m, "Current RS state: ");
1430         switch (rstdbyctl & RSX_STATUS_MASK) {
1431         case RSX_STATUS_ON:
1432                 seq_puts(m, "on\n");
1433                 break;
1434         case RSX_STATUS_RC1:
1435                 seq_puts(m, "RC1\n");
1436                 break;
1437         case RSX_STATUS_RC1E:
1438                 seq_puts(m, "RC1E\n");
1439                 break;
1440         case RSX_STATUS_RS1:
1441                 seq_puts(m, "RS1\n");
1442                 break;
1443         case RSX_STATUS_RS2:
1444                 seq_puts(m, "RS2 (RC6)\n");
1445                 break;
1446         case RSX_STATUS_RS3:
1447                 seq_puts(m, "RC3 (RC6+)\n");
1448                 break;
1449         default:
1450                 seq_puts(m, "unknown\n");
1451                 break;
1452         }
1453
1454         return 0;
1455 }
1456
1457 static int i915_forcewake_domains(struct seq_file *m, void *data)
1458 {
1459         struct drm_i915_private *i915 = node_to_i915(m->private);
1460         struct intel_uncore_forcewake_domain *fw_domain;
1461         unsigned int tmp;
1462
1463         seq_printf(m, "user.bypass_count = %u\n",
1464                    i915->uncore.user_forcewake.count);
1465
1466         for_each_fw_domain(fw_domain, i915, tmp)
1467                 seq_printf(m, "%s.wake_count = %u\n",
1468                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1469                            READ_ONCE(fw_domain->wake_count));
1470
1471         return 0;
1472 }
1473
1474 static void print_rc6_res(struct seq_file *m,
1475                           const char *title,
1476                           const i915_reg_t reg)
1477 {
1478         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1479
1480         seq_printf(m, "%s %u (%llu us)\n",
1481                    title, I915_READ(reg),
1482                    intel_rc6_residency_us(dev_priv, reg));
1483 }
1484
1485 static int vlv_drpc_info(struct seq_file *m)
1486 {
1487         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1488         u32 rcctl1, pw_status;
1489
1490         pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1491         rcctl1 = I915_READ(GEN6_RC_CONTROL);
1492
1493         seq_printf(m, "RC6 Enabled: %s\n",
1494                    yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1495                                         GEN6_RC_CTL_EI_MODE(1))));
1496         seq_printf(m, "Render Power Well: %s\n",
1497                    (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1498         seq_printf(m, "Media Power Well: %s\n",
1499                    (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1500
1501         print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1502         print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1503
1504         return i915_forcewake_domains(m, NULL);
1505 }
1506
/*
 * Dump RC (render C-state) information for gen6+ (VLV/CHV use
 * vlv_drpc_info instead): RC enable bits, the current RC state, RC6
 * residency counters and RC6 voltage IDs, followed by the forcewake
 * domain counts.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	/*
	 * If anyone holds a render forcewake reference, the GT is kept
	 * awake and the state read below is not representative.
	 */
	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	/* Raw read without forcewake; emit the register trace manually. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs come from the pcode mailbox, under pcu_lock. */
	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->pcu_lock);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 distinguishes fully on from core-power-down. */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Three 8-bit voltage IDs are packed into rc6vids. */
	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}
1601
1602 static int i915_drpc_info(struct seq_file *m, void *unused)
1603 {
1604         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1605         int err;
1606
1607         intel_runtime_pm_get(dev_priv);
1608
1609         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1610                 err = vlv_drpc_info(m);
1611         else if (INTEL_GEN(dev_priv) >= 6)
1612                 err = gen6_drpc_info(m);
1613         else
1614                 err = ironlake_drpc_info(m);
1615
1616         intel_runtime_pm_put(dev_priv);
1617
1618         return err;
1619 }
1620
1621 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1622 {
1623         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1624
1625         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1626                    dev_priv->fb_tracking.busy_bits);
1627
1628         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1629                    dev_priv->fb_tracking.flip_bits);
1630
1631         return 0;
1632 }
1633
1634 static int i915_fbc_status(struct seq_file *m, void *unused)
1635 {
1636         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1637
1638         if (!HAS_FBC(dev_priv)) {
1639                 seq_puts(m, "FBC unsupported on this chipset\n");
1640                 return 0;
1641         }
1642
1643         intel_runtime_pm_get(dev_priv);
1644         mutex_lock(&dev_priv->fbc.lock);
1645
1646         if (intel_fbc_is_active(dev_priv))
1647                 seq_puts(m, "FBC enabled\n");
1648         else
1649                 seq_printf(m, "FBC disabled: %s\n",
1650                            dev_priv->fbc.no_fbc_reason);
1651
1652         if (intel_fbc_is_active(dev_priv)) {
1653                 u32 mask;
1654
1655                 if (INTEL_GEN(dev_priv) >= 8)
1656                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1657                 else if (INTEL_GEN(dev_priv) >= 7)
1658                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1659                 else if (INTEL_GEN(dev_priv) >= 5)
1660                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1661                 else if (IS_G4X(dev_priv))
1662                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1663                 else
1664                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1665                                                         FBC_STAT_COMPRESSED);
1666
1667                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1668         }
1669
1670         mutex_unlock(&dev_priv->fbc.lock);
1671         intel_runtime_pm_put(dev_priv);
1672
1673         return 0;
1674 }
1675
1676 static int i915_fbc_false_color_get(void *data, u64 *val)
1677 {
1678         struct drm_i915_private *dev_priv = data;
1679
1680         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1681                 return -ENODEV;
1682
1683         *val = dev_priv->fbc.false_color;
1684
1685         return 0;
1686 }
1687
1688 static int i915_fbc_false_color_set(void *data, u64 val)
1689 {
1690         struct drm_i915_private *dev_priv = data;
1691         u32 reg;
1692
1693         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1694                 return -ENODEV;
1695
1696         mutex_lock(&dev_priv->fbc.lock);
1697
1698         reg = I915_READ(ILK_DPFC_CONTROL);
1699         dev_priv->fbc.false_color = val;
1700
1701         I915_WRITE(ILK_DPFC_CONTROL, val ?
1702                    (reg | FBC_CTL_FALSE_COLOR) :
1703                    (reg & ~FBC_CTL_FALSE_COLOR));
1704
1705         mutex_unlock(&dev_priv->fbc.lock);
1706         return 0;
1707 }
1708
/* debugfs fops wiring up the false-color get/set helpers above (%llu). */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1712
1713 static int i915_ips_status(struct seq_file *m, void *unused)
1714 {
1715         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1716
1717         if (!HAS_IPS(dev_priv)) {
1718                 seq_puts(m, "not supported\n");
1719                 return 0;
1720         }
1721
1722         intel_runtime_pm_get(dev_priv);
1723
1724         seq_printf(m, "Enabled by kernel parameter: %s\n",
1725                    yesno(i915_modparams.enable_ips));
1726
1727         if (INTEL_GEN(dev_priv) >= 8) {
1728                 seq_puts(m, "Currently: unknown\n");
1729         } else {
1730                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1731                         seq_puts(m, "Currently: enabled\n");
1732                 else
1733                         seq_puts(m, "Currently: disabled\n");
1734         }
1735
1736         intel_runtime_pm_put(dev_priv);
1737
1738         return 0;
1739 }
1740
/*
 * debugfs i915_sr_status: report whether self-refresh is enabled,
 * reading the platform-appropriate watermark/control register.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	/* Keep the device awake and the display powered for the reads. */
	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1770
1771 static int i915_emon_status(struct seq_file *m, void *unused)
1772 {
1773         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1774         struct drm_device *dev = &dev_priv->drm;
1775         unsigned long temp, chipset, gfx;
1776         int ret;
1777
1778         if (!IS_GEN5(dev_priv))
1779                 return -ENODEV;
1780
1781         ret = mutex_lock_interruptible(&dev->struct_mutex);
1782         if (ret)
1783                 return ret;
1784
1785         temp = i915_mch_val(dev_priv);
1786         chipset = i915_chipset_val(dev_priv);
1787         gfx = i915_gfx_val(dev_priv);
1788         mutex_unlock(&dev->struct_mutex);
1789
1790         seq_printf(m, "GMCH temp: %ld\n", temp);
1791         seq_printf(m, "Chipset power: %ld\n", chipset);
1792         seq_printf(m, "GFX power: %ld\n", gfx);
1793         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1794
1795         return 0;
1796 }
1797
/*
 * debugfs i915_ring_freq_table: on LLC platforms, query pcode for the
 * effective CPU and ring frequency at each GPU frequency step between
 * the soft limits.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	/* pcode access requires pcu_lock. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = rps->min_freq_softlimit;
		max_gpu_freq = rps->max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode overwrites ia_freq with two packed byte fields. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      IS_CANNONLAKE(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1848
1849 static int i915_opregion(struct seq_file *m, void *unused)
1850 {
1851         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1852         struct drm_device *dev = &dev_priv->drm;
1853         struct intel_opregion *opregion = &dev_priv->opregion;
1854         int ret;
1855
1856         ret = mutex_lock_interruptible(&dev->struct_mutex);
1857         if (ret)
1858                 goto out;
1859
1860         if (opregion->header)
1861                 seq_write(m, opregion->header, OPREGION_SIZE);
1862
1863         mutex_unlock(&dev->struct_mutex);
1864
1865 out:
1866         return 0;
1867 }
1868
1869 static int i915_vbt(struct seq_file *m, void *unused)
1870 {
1871         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1872
1873         if (opregion->vbt)
1874                 seq_write(m, opregion->vbt, opregion->vbt_size);
1875
1876         return 0;
1877 }
1878
/*
 * debugfs i915_gem_framebuffer_info: describe the fbdev framebuffer (if
 * compiled in and present) and every user framebuffer — dimensions,
 * format, modifier, refcount and the backing GEM object.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* Report the fbdev framebuffer first; it is skipped in the loop below. */
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the framebuffer list while we walk it. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1928
/* One-line summary of a context ring: free space, head and tail offsets. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
		   ring->space, ring->head, ring->tail);
}
1934
/*
 * debugfs i915_context_status: list every GEM context with its owning
 * process, remap flag and per-engine context state/ringbuffer.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* Resolve the owning task; it may already have exited. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if ctx->remap_slice is set, 'r' otherwise. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1986
1987 static const char *swizzle_string(unsigned swizzle)
1988 {
1989         switch (swizzle) {
1990         case I915_BIT_6_SWIZZLE_NONE:
1991                 return "none";
1992         case I915_BIT_6_SWIZZLE_9:
1993                 return "bit9";
1994         case I915_BIT_6_SWIZZLE_9_10:
1995                 return "bit9/bit10";
1996         case I915_BIT_6_SWIZZLE_9_11:
1997                 return "bit9/bit11";
1998         case I915_BIT_6_SWIZZLE_9_10_11:
1999                 return "bit9/bit10/bit11";
2000         case I915_BIT_6_SWIZZLE_9_17:
2001                 return "bit9/bit17";
2002         case I915_BIT_6_SWIZZLE_9_10_17:
2003                 return "bit9/bit10/bit17";
2004         case I915_BIT_6_SWIZZLE_UNKNOWN:
2005                 return "unknown";
2006         }
2007
2008         return "bug";
2009 }
2010
/*
 * debugfs i915_swizzle_info: report the bit-6 swizzling mode chosen for
 * X/Y tiling plus the raw memory-controller registers it derives from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/*
		 * NOTE(review): the label reads "DDC" but the register is
		 * DCC — long-standing output, kept unchanged since
		 * userspace may parse it.
		 */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2057
2058 static int per_file_ctx(int id, void *ptr, void *data)
2059 {
2060         struct i915_gem_context *ctx = ptr;
2061         struct seq_file *m = data;
2062         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2063
2064         if (!ppgtt) {
2065                 seq_printf(m, "  no ppgtt for context %d\n",
2066                            ctx->user_handle);
2067                 return 0;
2068         }
2069
2070         if (i915_gem_context_is_default(ctx))
2071                 seq_puts(m, "  default context:\n");
2072         else
2073                 seq_printf(m, "  context %d:\n", ctx->user_handle);
2074         ppgtt->debug_dump(ppgtt, m);
2075
2076         return 0;
2077 }
2078
2079 static void gen8_ppgtt_info(struct seq_file *m,
2080                             struct drm_i915_private *dev_priv)
2081 {
2082         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2083         struct intel_engine_cs *engine;
2084         enum intel_engine_id id;
2085         int i;
2086
2087         if (!ppgtt)
2088                 return;
2089
2090         for_each_engine(engine, dev_priv, id) {
2091                 seq_printf(m, "%s\n", engine->name);
2092                 for (i = 0; i < 4; i++) {
2093                         u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2094                         pdp <<= 32;
2095                         pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2096                         seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2097                 }
2098         }
2099 }
2100
/*
 * Dump gen6/7 PPGTT state: per-engine page-directory registers, the
 * aliasing PPGTT (if any) and the ECOCHK register.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* gen6 has a single global GFX_MODE; gen7 is per-ring (below). */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2133
/*
 * debugfs i915_ppgtt_info: dump global PPGTT state and then each open
 * DRM file's per-context PPGTTs.
 *
 * filelist_mutex is taken before struct_mutex here; both are held for
 * the file walk.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			/* Owner has exited; abort the whole dump. */
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2175
2176 static int count_irq_waiters(struct drm_i915_private *i915)
2177 {
2178         struct intel_engine_cs *engine;
2179         enum intel_engine_id id;
2180         int count = 0;
2181
2182         for_each_engine(engine, i915, id)
2183                 count += intel_engine_has_waiter(engine);
2184
2185         return count;
2186 }
2187
2188 static const char *rps_power_to_str(unsigned int power)
2189 {
2190         static const char * const strings[] = {
2191                 [LOW_POWER] = "low power",
2192                 [BETWEEN] = "mixed",
2193                 [HIGH_POWER] = "high power",
2194         };
2195
2196         if (power >= ARRAY_SIZE(strings) || !strings[power])
2197                 return "unknown";
2198
2199         return strings[power];
2200 }
2201
/*
 * debugfs i915_rps_boost_info: dump RPS state — current and limit
 * frequencies, per-client boost counts and, while the GPU is busy, the
 * up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost counts; the file list needs filelist_mutex. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* pid_task() is RCU-protected; the task may be exiting. */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw counter reads, bracketed by an explicit forcewake. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2271
2272 static int i915_llc(struct seq_file *m, void *data)
2273 {
2274         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2275         const bool edram = INTEL_GEN(dev_priv) > 8;
2276
2277         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2278         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2279                    intel_uncore_edram_size(dev_priv)/1024/1024);
2280
2281         return 0;
2282 }
2283
2284 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2285 {
2286         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2287         struct drm_printer p;
2288
2289         if (!HAS_HUC_UCODE(dev_priv))
2290                 return 0;
2291
2292         p = drm_seq_file_printer(m);
2293         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2294
2295         intel_runtime_pm_get(dev_priv);
2296         seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2297         intel_runtime_pm_put(dev_priv);
2298
2299         return 0;
2300 }
2301
2302 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2303 {
2304         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2305         struct drm_printer p;
2306         u32 tmp, i;
2307
2308         if (!HAS_GUC_UCODE(dev_priv))
2309                 return 0;
2310
2311         p = drm_seq_file_printer(m);
2312         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2313
2314         intel_runtime_pm_get(dev_priv);
2315
2316         tmp = I915_READ(GUC_STATUS);
2317
2318         seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2319         seq_printf(m, "\tBootrom status = 0x%x\n",
2320                 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2321         seq_printf(m, "\tuKernel status = 0x%x\n",
2322                 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2323         seq_printf(m, "\tMIA Core status = 0x%x\n",
2324                 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2325         seq_puts(m, "\nScratch registers:\n");
2326         for (i = 0; i < 16; i++)
2327                 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2328
2329         intel_runtime_pm_put(dev_priv);
2330
2331         return 0;
2332 }
2333
2334 static void i915_guc_log_info(struct seq_file *m,
2335                               struct drm_i915_private *dev_priv)
2336 {
2337         struct intel_guc *guc = &dev_priv->guc;
2338
2339         seq_puts(m, "\nGuC logging stats:\n");
2340
2341         seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
2342                    guc->log.flush_count[GUC_ISR_LOG_BUFFER],
2343                    guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
2344
2345         seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
2346                    guc->log.flush_count[GUC_DPC_LOG_BUFFER],
2347                    guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
2348
2349         seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
2350                    guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
2351                    guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
2352
2353         seq_printf(m, "\tTotal flush interrupt count: %u\n",
2354                    guc->log.flush_interrupt_count);
2355
2356         seq_printf(m, "\tCapture miss count: %u\n",
2357                    guc->log.capture_miss_count);
2358 }
2359
2360 static void i915_guc_client_info(struct seq_file *m,
2361                                  struct drm_i915_private *dev_priv,
2362                                  struct intel_guc_client *client)
2363 {
2364         struct intel_engine_cs *engine;
2365         enum intel_engine_id id;
2366         uint64_t tot = 0;
2367
2368         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2369                 client->priority, client->stage_id, client->proc_desc_offset);
2370         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2371                 client->doorbell_id, client->doorbell_offset);
2372
2373         for_each_engine(engine, dev_priv, id) {
2374                 u64 submissions = client->submissions[id];
2375                 tot += submissions;
2376                 seq_printf(m, "\tSubmissions: %llu %s\n",
2377                                 submissions, engine->name);
2378         }
2379         seq_printf(m, "\tTotal: %llu\n", tot);
2380 }
2381
2382 static bool check_guc_submission(struct seq_file *m)
2383 {
2384         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2385         const struct intel_guc *guc = &dev_priv->guc;
2386
2387         if (!guc->execbuf_client) {
2388                 seq_printf(m, "GuC submission %s\n",
2389                            HAS_GUC_SCHED(dev_priv) ?
2390                            "disabled" :
2391                            "not supported");
2392                 return false;
2393         }
2394
2395         return true;
2396 }
2397
2398 static int i915_guc_info(struct seq_file *m, void *data)
2399 {
2400         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2401         const struct intel_guc *guc = &dev_priv->guc;
2402
2403         if (!check_guc_submission(m))
2404                 return 0;
2405
2406         seq_printf(m, "Doorbell map:\n");
2407         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2408         seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
2409
2410         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2411         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2412         seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
2413         i915_guc_client_info(m, dev_priv, guc->preempt_client);
2414
2415         i915_guc_log_info(m, dev_priv);
2416
2417         /* Add more as required ... */
2418
2419         return 0;
2420 }
2421
/*
 * i915_guc_stage_pool - debugfs: dump the active GuC stage descriptors.
 *
 * Walks the CPU mapping of the shared stage-descriptor pool and prints
 * every descriptor marked active, followed by the execlist context state
 * for each engine used by the execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	/* check_guc_submission() already prints why when submission is off. */
	if (!check_guc_submission(m))
		return 0;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip pool slots the GuC is not currently using. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* One execlist context per engine the client may submit to. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2475
/*
 * i915_guc_log_dump - debugfs: hexdump a GuC log buffer.
 *
 * Dumps either the runtime log or, when the node's info_ent->data is set,
 * the firmware load-error log. Output is four 32-bit words per line.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log buffer allocated: nothing to dump. */
	if (!obj)
		return 0;

	/* Pin and map the whole object so it can be read from the CPU. */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2511
2512 static int i915_guc_log_control_get(void *data, u64 *val)
2513 {
2514         struct drm_i915_private *dev_priv = data;
2515
2516         if (!dev_priv->guc.log.vma)
2517                 return -EINVAL;
2518
2519         *val = i915_modparams.guc_log_level;
2520
2521         return 0;
2522 }
2523
2524 static int i915_guc_log_control_set(void *data, u64 val)
2525 {
2526         struct drm_i915_private *dev_priv = data;
2527         int ret;
2528
2529         if (!dev_priv->guc.log.vma)
2530                 return -EINVAL;
2531
2532         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
2533         if (ret)
2534                 return ret;
2535
2536         intel_runtime_pm_get(dev_priv);
2537         ret = i915_guc_log_control(dev_priv, val);
2538         intel_runtime_pm_put(dev_priv);
2539
2540         mutex_unlock(&dev_priv->drm.struct_mutex);
2541         return ret;
2542 }
2543
/* debugfs fops: get/set the GuC log level, formatted as a decimal i64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");
2547
2548 static const char *psr2_live_status(u32 val)
2549 {
2550         static const char * const live_status[] = {
2551                 "IDLE",
2552                 "CAPTURE",
2553                 "CAPTURE_FS",
2554                 "SLEEP",
2555                 "BUFON_FW",
2556                 "ML_UP",
2557                 "SU_STANDBY",
2558                 "FAST_SLEEP",
2559                 "DEEP_SLEEP",
2560                 "BUF_ON",
2561                 "TG_ON"
2562         };
2563
2564         val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2565         if (val < ARRAY_SIZE(live_status))
2566                 return live_status[val];
2567
2568         return "unknown";
2569 }
2570
2571 static int i915_edp_psr_status(struct seq_file *m, void *data)
2572 {
2573         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2574         u32 psrperf = 0;
2575         u32 stat[3];
2576         enum pipe pipe;
2577         bool enabled = false;
2578
2579         if (!HAS_PSR(dev_priv)) {
2580                 seq_puts(m, "PSR not supported\n");
2581                 return 0;
2582         }
2583
2584         intel_runtime_pm_get(dev_priv);
2585
2586         mutex_lock(&dev_priv->psr.lock);
2587         seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2588         seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2589         seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2590         seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2591         seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2592                    dev_priv->psr.busy_frontbuffer_bits);
2593         seq_printf(m, "Re-enable work scheduled: %s\n",
2594                    yesno(work_busy(&dev_priv->psr.work.work)));
2595
2596         if (HAS_DDI(dev_priv)) {
2597                 if (dev_priv->psr.psr2_support)
2598                         enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2599                 else
2600                         enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2601         } else {
2602                 for_each_pipe(dev_priv, pipe) {
2603                         enum transcoder cpu_transcoder =
2604                                 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2605                         enum intel_display_power_domain power_domain;
2606
2607                         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2608                         if (!intel_display_power_get_if_enabled(dev_priv,
2609                                                                 power_domain))
2610                                 continue;
2611
2612                         stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2613                                 VLV_EDP_PSR_CURR_STATE_MASK;
2614                         if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2615                             (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2616                                 enabled = true;
2617
2618                         intel_display_power_put(dev_priv, power_domain);
2619                 }
2620         }
2621
2622         seq_printf(m, "Main link in standby mode: %s\n",
2623                    yesno(dev_priv->psr.link_standby));
2624
2625         seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2626
2627         if (!HAS_DDI(dev_priv))
2628                 for_each_pipe(dev_priv, pipe) {
2629                         if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2630                             (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2631                                 seq_printf(m, " pipe %c", pipe_name(pipe));
2632                 }
2633         seq_puts(m, "\n");
2634
2635         /*
2636          * VLV/CHV PSR has no kind of performance counter
2637          * SKL+ Perf counter is reset to 0 everytime DC state is entered
2638          */
2639         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2640                 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2641                         EDP_PSR_PERF_CNT_MASK;
2642
2643                 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2644         }
2645         if (dev_priv->psr.psr2_support) {
2646                 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
2647
2648                 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
2649                            psr2, psr2_live_status(psr2));
2650         }
2651         mutex_unlock(&dev_priv->psr.lock);
2652
2653         intel_runtime_pm_put(dev_priv);
2654         return 0;
2655 }
2656
/*
 * i915_sink_crc - debugfs: read a frame CRC from the eDP sink.
 *
 * Finds the first active eDP connector, waits for any pending commit on
 * its CRTC to complete, then asks the sink for its 6-byte CRC over DPCD
 * and prints it as hex.
 *
 * Returns 0 on success, -ENODEV when no suitable connector exists, or a
 * negative error from locking / CRC retrieval.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		/* Sink CRC is an eDP-only feature. */
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* -EDEADLK signals a lock-order inversion: back off, retry. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	ret = -ENODEV;
out:
	/* All locks taken through ctx are released here in one go. */
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2736
/*
 * i915_energy_uJ - debugfs: report GPU package energy in microjoules.
 *
 * Combines the MCH_SECP_NRG_STTS counter with the RAPL energy-unit
 * exponent read from MSR_RAPL_POWER_UNIT.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	/* The energy counter only exists on gen6+. */
	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* rdmsrl_safe() fails on CPUs that lack the RAPL MSR. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 hold the energy status unit (counter LSB = 1/2^units J). */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2763
/*
 * i915_runtime_pm_status - debugfs: summarize the device runtime-PM state.
 *
 * Prints GPU idleness, IRQ state, the runtime-PM usage count (when
 * CONFIG_PM is enabled) and the current PCI power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Deliberately falls through: the state below is still informative. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2787
2788 static int i915_power_domain_info(struct seq_file *m, void *unused)
2789 {
2790         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2791         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2792         int i;
2793
2794         mutex_lock(&power_domains->lock);
2795
2796         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2797         for (i = 0; i < power_domains->power_well_count; i++) {
2798                 struct i915_power_well *power_well;
2799                 enum intel_display_power_domain power_domain;
2800
2801                 power_well = &power_domains->power_wells[i];
2802                 seq_printf(m, "%-25s %d\n", power_well->name,
2803                            power_well->count);
2804
2805                 for_each_power_domain(power_domain, power_well->domains)
2806                         seq_printf(m, "  %-23s %d\n",
2807                                  intel_display_power_domain_str(power_domain),
2808                                  power_domains->domain_use_count[power_domain]);
2809         }
2810
2811         mutex_unlock(&power_domains->lock);
2812
2813         return 0;
2814 }
2815
/*
 * i915_dmc_info - debugfs: report DMC/CSR firmware status.
 *
 * Shows whether the CSR firmware is loaded, its path and version, the
 * DC-state transition counters where the firmware exposes them, and the
 * raw program-base/ssp/htp registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	/* Register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload, version/counters are meaningless. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counters exist on KBL, SKL with fw >= 1.6, BXT with fw >= 1.4. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2859
2860 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2861                                  struct drm_display_mode *mode)
2862 {
2863         int i;
2864
2865         for (i = 0; i < tabs; i++)
2866                 seq_putc(m, '\t');
2867
2868         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2869                    mode->base.id, mode->name,
2870                    mode->vrefresh, mode->clock,
2871                    mode->hdisplay, mode->hsync_start,
2872                    mode->hsync_end, mode->htotal,
2873                    mode->vdisplay, mode->vsync_start,
2874                    mode->vsync_end, mode->vtotal,
2875                    mode->type, mode->flags);
2876 }
2877
2878 static void intel_encoder_info(struct seq_file *m,
2879                                struct intel_crtc *intel_crtc,
2880                                struct intel_encoder *intel_encoder)
2881 {
2882         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2883         struct drm_device *dev = &dev_priv->drm;
2884         struct drm_crtc *crtc = &intel_crtc->base;
2885         struct intel_connector *intel_connector;
2886         struct drm_encoder *encoder;
2887
2888         encoder = &intel_encoder->base;
2889         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2890                    encoder->base.id, encoder->name);
2891         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2892                 struct drm_connector *connector = &intel_connector->base;
2893                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2894                            connector->base.id,
2895                            connector->name,
2896                            drm_get_connector_status_name(connector->status));
2897                 if (connector->status == connector_status_connected) {
2898                         struct drm_display_mode *mode = &crtc->mode;
2899                         seq_printf(m, ", mode:\n");
2900                         intel_seq_print_mode(m, 2, mode);
2901                 } else {
2902                         seq_putc(m, '\n');
2903                 }
2904         }
2905 }
2906
2907 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2908 {
2909         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2910         struct drm_device *dev = &dev_priv->drm;
2911         struct drm_crtc *crtc = &intel_crtc->base;
2912         struct intel_encoder *intel_encoder;
2913         struct drm_plane_state *plane_state = crtc->primary->state;
2914         struct drm_framebuffer *fb = plane_state->fb;
2915
2916         if (fb)
2917                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2918                            fb->base.id, plane_state->src_x >> 16,
2919                            plane_state->src_y >> 16, fb->width, fb->height);
2920         else
2921                 seq_puts(m, "\tprimary plane disabled\n");
2922         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2923                 intel_encoder_info(m, intel_crtc, intel_encoder);
2924 }
2925
2926 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2927 {
2928         struct drm_display_mode *mode = panel->fixed_mode;
2929
2930         seq_printf(m, "\tfixed mode:\n");
2931         intel_seq_print_mode(m, 2, mode);
2932 }
2933
2934 static void intel_dp_info(struct seq_file *m,
2935                           struct intel_connector *intel_connector)
2936 {
2937         struct intel_encoder *intel_encoder = intel_connector->encoder;
2938         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2939
2940         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2941         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2942         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2943                 intel_panel_info(m, &intel_connector->panel);
2944
2945         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2946                                 &intel_dp->aux);
2947 }
2948
2949 static void intel_dp_mst_info(struct seq_file *m,
2950                           struct intel_connector *intel_connector)
2951 {
2952         struct intel_encoder *intel_encoder = intel_connector->encoder;
2953         struct intel_dp_mst_encoder *intel_mst =
2954                 enc_to_mst(&intel_encoder->base);
2955         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2956         struct intel_dp *intel_dp = &intel_dig_port->dp;
2957         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2958                                         intel_connector->port);
2959
2960         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2961 }
2962
2963 static void intel_hdmi_info(struct seq_file *m,
2964                             struct intel_connector *intel_connector)
2965 {
2966         struct intel_encoder *intel_encoder = intel_connector->encoder;
2967         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2968
2969         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2970 }
2971
2972 static void intel_lvds_info(struct seq_file *m,
2973                             struct intel_connector *intel_connector)
2974 {
2975         intel_panel_info(m, &intel_connector->panel);
2976 }
2977
2978 static void intel_connector_info(struct seq_file *m,
2979                                  struct drm_connector *connector)
2980 {
2981         struct intel_connector *intel_connector = to_intel_connector(connector);
2982         struct intel_encoder *intel_encoder = intel_connector->encoder;
2983         struct drm_display_mode *mode;
2984
2985         seq_printf(m, "connector %d: type %s, status: %s\n",
2986                    connector->base.id, connector->name,
2987                    drm_get_connector_status_name(connector->status));
2988         if (connector->status == connector_status_connected) {
2989                 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2990                 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2991                            connector->display_info.width_mm,
2992                            connector->display_info.height_mm);
2993                 seq_printf(m, "\tsubpixel order: %s\n",
2994                            drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2995                 seq_printf(m, "\tCEA rev: %d\n",
2996                            connector->display_info.cea_rev);
2997         }
2998
2999         if (!intel_encoder)
3000                 return;
3001
3002         switch (connector->connector_type) {
3003         case DRM_MODE_CONNECTOR_DisplayPort:
3004         case DRM_MODE_CONNECTOR_eDP:
3005                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3006                         intel_dp_mst_info(m, intel_connector);
3007                 else
3008                         intel_dp_info(m, intel_connector);
3009                 break;
3010         case DRM_MODE_CONNECTOR_LVDS:
3011                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3012                         intel_lvds_info(m, intel_connector);
3013                 break;
3014         case DRM_MODE_CONNECTOR_HDMIA:
3015                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3016                     intel_encoder->type == INTEL_OUTPUT_DDI)
3017                         intel_hdmi_info(m, intel_connector);
3018                 break;
3019         default:
3020                 break;
3021         }
3022
3023         seq_printf(m, "\tmodes:\n");
3024         list_for_each_entry(mode, &connector->modes, head)
3025                 intel_seq_print_mode(m, 2, mode);
3026 }
3027
/* Map a DRM plane type to the short tag used in the plane dump below. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3045
3046 static const char *plane_rotation(unsigned int rotation)
3047 {
3048         static char buf[48];
3049         /*
3050          * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3051          * will print them all to visualize if the values are misused
3052          */
3053         snprintf(buf, sizeof(buf),
3054                  "%s%s%s%s%s%s(0x%08x)",
3055                  (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3056                  (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3057                  (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3058                  (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3059                  (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3060                  (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3061                  rotation);
3062
3063         return buf;
3064 }
3065
3066 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3067 {
3068         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3069         struct drm_device *dev = &dev_priv->drm;
3070         struct intel_plane *intel_plane;
3071
3072         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3073                 struct drm_plane_state *state;
3074                 struct drm_plane *plane = &intel_plane->base;
3075                 struct drm_format_name_buf format_name;
3076
3077                 if (!plane->state) {
3078                         seq_puts(m, "plane->state is NULL!\n");
3079                         continue;
3080                 }
3081
3082                 state = plane->state;
3083
3084                 if (state->fb) {
3085                         drm_get_format_name(state->fb->format->format,
3086                                             &format_name);
3087                 } else {
3088                         sprintf(format_name.str, "N/A");
3089                 }
3090
3091                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3092                            plane->base.id,
3093                            plane_type(intel_plane->base.type),
3094                            state->crtc_x, state->crtc_y,
3095                            state->crtc_w, state->crtc_h,
3096                            (state->src_x >> 16),
3097                            ((state->src_x & 0xffff) * 15625) >> 10,
3098                            (state->src_y >> 16),
3099                            ((state->src_y & 0xffff) * 15625) >> 10,
3100                            (state->src_w >> 16),
3101                            ((state->src_w & 0xffff) * 15625) >> 10,
3102                            (state->src_h >> 16),
3103                            ((state->src_h & 0xffff) * 15625) >> 10,
3104                            format_name.str,
3105                            plane_rotation(state->rotation));
3106         }
3107 }
3108
/*
 * Print the pipe scaler state for @intel_crtc: the user bitmask, the
 * active scaler id and the per-scaler in-use/mode flags.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3136
3137 static int i915_display_info(struct seq_file *m, void *unused)
3138 {
3139         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3140         struct drm_device *dev = &dev_priv->drm;
3141         struct intel_crtc *crtc;
3142         struct drm_connector *connector;
3143         struct drm_connector_list_iter conn_iter;
3144
3145         intel_runtime_pm_get(dev_priv);
3146         seq_printf(m, "CRTC info\n");
3147         seq_printf(m, "---------\n");
3148         for_each_intel_crtc(dev, crtc) {
3149                 struct intel_crtc_state *pipe_config;
3150
3151                 drm_modeset_lock(&crtc->base.mutex, NULL);
3152                 pipe_config = to_intel_crtc_state(crtc->base.state);
3153
3154                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3155                            crtc->base.base.id, pipe_name(crtc->pipe),
3156                            yesno(pipe_config->base.active),
3157                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3158                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3159
3160                 if (pipe_config->base.active) {
3161                         struct intel_plane *cursor =
3162                                 to_intel_plane(crtc->base.cursor);
3163
3164                         intel_crtc_info(m, crtc);
3165
3166                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3167                                    yesno(cursor->base.state->visible),
3168                                    cursor->base.state->crtc_x,
3169                                    cursor->base.state->crtc_y,
3170                                    cursor->base.state->crtc_w,
3171                                    cursor->base.state->crtc_h,
3172                                    cursor->cursor.base);
3173                         intel_scaler_info(m, crtc);
3174                         intel_plane_info(m, crtc);
3175                 }
3176
3177                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3178                            yesno(!crtc->cpu_fifo_underrun_disabled),
3179                            yesno(!crtc->pch_fifo_underrun_disabled));
3180                 drm_modeset_unlock(&crtc->base.mutex);
3181         }
3182
3183         seq_printf(m, "\n");
3184         seq_printf(m, "Connector info\n");
3185         seq_printf(m, "--------------\n");
3186         mutex_lock(&dev->mode_config.mutex);
3187         drm_connector_list_iter_begin(dev, &conn_iter);
3188         drm_for_each_connector_iter(connector, &conn_iter)
3189                 intel_connector_info(m, connector);
3190         drm_connector_list_iter_end(&conn_iter);
3191         mutex_unlock(&dev->mode_config.mutex);
3192
3193         intel_runtime_pm_put(dev_priv);
3194
3195         return 0;
3196 }
3197
3198 static int i915_engine_info(struct seq_file *m, void *unused)
3199 {
3200         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3201         struct intel_engine_cs *engine;
3202         enum intel_engine_id id;
3203         struct drm_printer p;
3204
3205         intel_runtime_pm_get(dev_priv);
3206
3207         seq_printf(m, "GT awake? %s\n",
3208                    yesno(dev_priv->gt.awake));
3209         seq_printf(m, "Global active requests: %d\n",
3210                    dev_priv->gt.active_requests);
3211         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3212                    dev_priv->info.cs_timestamp_frequency_khz);
3213
3214         p = drm_seq_file_printer(m);
3215         for_each_engine(engine, dev_priv, id)
3216                 intel_engine_dump(engine, &p);
3217
3218         intel_runtime_pm_put(dev_priv);
3219
3220         return 0;
3221 }
3222
3223 static int i915_shrinker_info(struct seq_file *m, void *unused)
3224 {
3225         struct drm_i915_private *i915 = node_to_i915(m->private);
3226
3227         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3228         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3229
3230         return 0;
3231 }
3232
3233 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3234 {
3235         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3236         struct drm_device *dev = &dev_priv->drm;
3237         int i;
3238
3239         drm_modeset_lock_all(dev);
3240         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3241                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3242
3243                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3244                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3245                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3246                 seq_printf(m, " tracked hardware state:\n");
3247                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3248                 seq_printf(m, " dpll_md: 0x%08x\n",
3249                            pll->state.hw_state.dpll_md);
3250                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3251                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3252                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3253         }
3254         drm_modeset_unlock_all(dev);
3255
3256         return 0;
3257 }
3258
3259 static int i915_wa_registers(struct seq_file *m, void *unused)
3260 {
3261         int i;
3262         int ret;
3263         struct intel_engine_cs *engine;
3264         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3265         struct drm_device *dev = &dev_priv->drm;
3266         struct i915_workarounds *workarounds = &dev_priv->workarounds;
3267         enum intel_engine_id id;
3268
3269         ret = mutex_lock_interruptible(&dev->struct_mutex);
3270         if (ret)
3271                 return ret;
3272
3273         intel_runtime_pm_get(dev_priv);
3274
3275         seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3276         for_each_engine(engine, dev_priv, id)
3277                 seq_printf(m, "HW whitelist count for %s: %d\n",
3278                            engine->name, workarounds->hw_whitelist_count[id]);
3279         for (i = 0; i < workarounds->count; ++i) {
3280                 i915_reg_t addr;
3281                 u32 mask, value, read;
3282                 bool ok;
3283
3284                 addr = workarounds->reg[i].addr;
3285                 mask = workarounds->reg[i].mask;
3286                 value = workarounds->reg[i].value;
3287                 read = I915_READ(addr);
3288                 ok = (value & mask) == (read & mask);
3289                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3290                            i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3291         }
3292
3293         intel_runtime_pm_put(dev_priv);
3294         mutex_unlock(&dev->struct_mutex);
3295
3296         return 0;
3297 }
3298
3299 static int i915_ipc_status_show(struct seq_file *m, void *data)
3300 {
3301         struct drm_i915_private *dev_priv = m->private;
3302
3303         seq_printf(m, "Isochronous Priority Control: %s\n",
3304                         yesno(dev_priv->ipc_enabled));
3305         return 0;
3306 }
3307
/* Open handler: refuse the file entirely on hardware without IPC. */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}
3317
3318 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3319                                      size_t len, loff_t *offp)
3320 {
3321         struct seq_file *m = file->private_data;
3322         struct drm_i915_private *dev_priv = m->private;
3323         int ret;
3324         bool enable;
3325
3326         ret = kstrtobool_from_user(ubuf, len, &enable);
3327         if (ret < 0)
3328                 return ret;
3329
3330         intel_runtime_pm_get(dev_priv);
3331         if (!dev_priv->ipc_enabled && enable)
3332                 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3333         dev_priv->wm.distrust_bios_wm = true;
3334         dev_priv->ipc_enabled = enable;
3335         intel_enable_ipc(dev_priv);
3336         intel_runtime_pm_put(dev_priv);
3337
3338         return len;
3339 }
3340
/* debugfs file ops for i915_ipc_status: seq_file-backed, writable. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3349
3350 static int i915_ddb_info(struct seq_file *m, void *unused)
3351 {
3352         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3353         struct drm_device *dev = &dev_priv->drm;
3354         struct skl_ddb_allocation *ddb;
3355         struct skl_ddb_entry *entry;
3356         enum pipe pipe;
3357         int plane;
3358
3359         if (INTEL_GEN(dev_priv) < 9)
3360                 return 0;
3361
3362         drm_modeset_lock_all(dev);
3363
3364         ddb = &dev_priv->wm.skl_hw.ddb;
3365
3366         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3367
3368         for_each_pipe(dev_priv, pipe) {
3369                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3370
3371                 for_each_universal_plane(dev_priv, pipe, plane) {
3372                         entry = &ddb->plane[pipe][plane];
3373                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3374                                    entry->start, entry->end,
3375                                    skl_ddb_entry_size(entry));
3376                 }
3377
3378                 entry = &ddb->plane[pipe][PLANE_CURSOR];
3379                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3380                            entry->end, skl_ddb_entry_size(entry));
3381         }
3382
3383         drm_modeset_unlock_all(dev);
3384
3385         return 0;
3386 }
3387
/*
 * Print DRRS (Display Refresh Rate Switching) state for one CRTC: the
 * connector(s) it drives, the VBT-advertised DRRS type and, when DRRS
 * is enabled on this CRTC, the current refresh-rate state.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name the connector(s) currently driven by this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex serializes against DRRS enable/disable. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3459
3460 static int i915_drrs_status(struct seq_file *m, void *unused)
3461 {
3462         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3463         struct drm_device *dev = &dev_priv->drm;
3464         struct intel_crtc *intel_crtc;
3465         int active_crtc_cnt = 0;
3466
3467         drm_modeset_lock_all(dev);
3468         for_each_intel_crtc(dev, intel_crtc) {
3469                 if (intel_crtc->base.state->active) {
3470                         active_crtc_cnt++;
3471                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3472
3473                         drrs_status_per_crtc(m, dev, intel_crtc);
3474                 }
3475         }
3476         drm_modeset_unlock_all(dev);
3477
3478         if (!active_crtc_cnt)
3479                 seq_puts(m, "No active crtc found\n");
3480
3481         return 0;
3482 }
3483
3484 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3485 {
3486         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3487         struct drm_device *dev = &dev_priv->drm;
3488         struct intel_encoder *intel_encoder;
3489         struct intel_digital_port *intel_dig_port;
3490         struct drm_connector *connector;
3491         struct drm_connector_list_iter conn_iter;
3492
3493         drm_connector_list_iter_begin(dev, &conn_iter);
3494         drm_for_each_connector_iter(connector, &conn_iter) {
3495                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3496                         continue;
3497
3498                 intel_encoder = intel_attached_encoder(connector);
3499                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3500                         continue;
3501
3502                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3503                 if (!intel_dig_port->dp.can_mst)
3504                         continue;
3505
3506                 seq_printf(m, "MST Source Port %c\n",
3507                            port_name(intel_dig_port->base.port));
3508                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3509         }
3510         drm_connector_list_iter_end(&conn_iter);
3511
3512         return 0;
3513 }
3514
3515 static ssize_t i915_displayport_test_active_write(struct file *file,
3516                                                   const char __user *ubuf,
3517                                                   size_t len, loff_t *offp)
3518 {
3519         char *input_buffer;
3520         int status = 0;
3521         struct drm_device *dev;
3522         struct drm_connector *connector;
3523         struct drm_connector_list_iter conn_iter;
3524         struct intel_dp *intel_dp;
3525         int val = 0;
3526
3527         dev = ((struct seq_file *)file->private_data)->private;
3528
3529         if (len == 0)
3530                 return 0;
3531
3532         input_buffer = memdup_user_nul(ubuf, len);
3533         if (IS_ERR(input_buffer))
3534                 return PTR_ERR(input_buffer);
3535
3536         DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3537
3538         drm_connector_list_iter_begin(dev, &conn_iter);
3539         drm_for_each_connector_iter(connector, &conn_iter) {
3540                 struct intel_encoder *encoder;
3541
3542                 if (connector->connector_type !=
3543                     DRM_MODE_CONNECTOR_DisplayPort)
3544                         continue;
3545
3546                 encoder = to_intel_encoder(connector->encoder);
3547                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3548                         continue;
3549
3550                 if (encoder && connector->status == connector_status_connected) {
3551                         intel_dp = enc_to_intel_dp(&encoder->base);
3552                         status = kstrtoint(input_buffer, 10, &val);
3553                         if (status < 0)
3554                                 break;
3555                         DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3556                         /* To prevent erroneous activation of the compliance
3557                          * testing code, only accept an actual value of 1 here
3558                          */
3559                         if (val == 1)
3560                                 intel_dp->compliance.test_active = 1;
3561                         else
3562                                 intel_dp->compliance.test_active = 0;
3563                 }
3564         }
3565         drm_connector_list_iter_end(&conn_iter);
3566         kfree(input_buffer);
3567         if (status < 0)
3568                 return status;
3569
3570         *offp += len;
3571         return len;
3572 }
3573
3574 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3575 {
3576         struct drm_device *dev = m->private;
3577         struct drm_connector *connector;
3578         struct drm_connector_list_iter conn_iter;
3579         struct intel_dp *intel_dp;
3580
3581         drm_connector_list_iter_begin(dev, &conn_iter);
3582         drm_for_each_connector_iter(connector, &conn_iter) {
3583                 struct intel_encoder *encoder;
3584
3585                 if (connector->connector_type !=
3586                     DRM_MODE_CONNECTOR_DisplayPort)
3587                         continue;
3588
3589                 encoder = to_intel_encoder(connector->encoder);
3590                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3591                         continue;
3592
3593                 if (encoder && connector->status == connector_status_connected) {
3594                         intel_dp = enc_to_intel_dp(&encoder->base);
3595                         if (intel_dp->compliance.test_active)
3596                                 seq_puts(m, "1");
3597                         else
3598                                 seq_puts(m, "0");
3599                 } else
3600                         seq_puts(m, "0");
3601         }
3602         drm_connector_list_iter_end(&conn_iter);
3603
3604         return 0;
3605 }
3606
/* single_open() wrapper handing the drm_device to the show callback. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}
3615
/* debugfs ops for i915_dp_test_active: seq_file-backed, writable. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3624
3625 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3626 {
3627         struct drm_device *dev = m->private;
3628         struct drm_connector *connector;
3629         struct drm_connector_list_iter conn_iter;
3630         struct intel_dp *intel_dp;
3631
3632         drm_connector_list_iter_begin(dev, &conn_iter);
3633         drm_for_each_connector_iter(connector, &conn_iter) {
3634                 struct intel_encoder *encoder;
3635
3636                 if (connector->connector_type !=
3637                     DRM_MODE_CONNECTOR_DisplayPort)
3638                         continue;
3639
3640                 encoder = to_intel_encoder(connector->encoder);
3641                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3642                         continue;
3643
3644                 if (encoder && connector->status == connector_status_connected) {
3645                         intel_dp = enc_to_intel_dp(&encoder->base);
3646                         if (intel_dp->compliance.test_type ==
3647                             DP_TEST_LINK_EDID_READ)
3648                                 seq_printf(m, "%lx",
3649                                            intel_dp->compliance.test_data.edid);
3650                         else if (intel_dp->compliance.test_type ==
3651                                  DP_TEST_LINK_VIDEO_PATTERN) {
3652                                 seq_printf(m, "hdisplay: %d\n",
3653                                            intel_dp->compliance.test_data.hdisplay);
3654                                 seq_printf(m, "vdisplay: %d\n",
3655                                            intel_dp->compliance.test_data.vdisplay);
3656                                 seq_printf(m, "bpc: %u\n",
3657                                            intel_dp->compliance.test_data.bpc);
3658                         }
3659                 } else
3660                         seq_puts(m, "0");
3661         }
3662         drm_connector_list_iter_end(&conn_iter);
3663
3664         return 0;
3665 }
/* single_open() wrapper handing the drm_device to the show callback. */
static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}
3674
/* debugfs ops for i915_dp_test_data: read-only seq_file. */
static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
3682
/*
 * Print the pending compliance test type (hex) for each DP connector.
 * Note the fallthrough: a connector with no encoder, or one that is
 * not connected, contributes "0" via the else branch below.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST "fake" connectors are skipped entirely. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3712
/* single_open() wrapper handing the drm_device to the show callback. */
static int i915_displayport_test_type_open(struct inode *inode,
				       struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}
3721
/* debugfs ops for i915_dp_test_type: read-only seq_file. */
static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
3729
3730 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3731 {
3732         struct drm_i915_private *dev_priv = m->private;
3733         struct drm_device *dev = &dev_priv->drm;
3734         int level;
3735         int num_levels;
3736
3737         if (IS_CHERRYVIEW(dev_priv))
3738                 num_levels = 3;
3739         else if (IS_VALLEYVIEW(dev_priv))
3740                 num_levels = 1;
3741         else if (IS_G4X(dev_priv))
3742                 num_levels = 3;
3743         else
3744                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3745
3746         drm_modeset_lock_all(dev);
3747
3748         for (level = 0; level < num_levels; level++) {
3749                 unsigned int latency = wm[level];
3750
3751                 /*
3752                  * - WM1+ latency values in 0.5us units
3753                  * - latencies are in us on gen9/vlv/chv
3754                  */
3755                 if (INTEL_GEN(dev_priv) >= 9 ||
3756                     IS_VALLEYVIEW(dev_priv) ||
3757                     IS_CHERRYVIEW(dev_priv) ||
3758                     IS_G4X(dev_priv))
3759                         latency *= 10;
3760                 else if (level > 0)
3761                         latency *= 5;
3762
3763                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3764                            level, wm[level], latency / 10, latency % 10);
3765         }
3766
3767         drm_modeset_unlock_all(dev);
3768 }
3769
3770 static int pri_wm_latency_show(struct seq_file *m, void *data)
3771 {
3772         struct drm_i915_private *dev_priv = m->private;
3773         const uint16_t *latencies;
3774
3775         if (INTEL_GEN(dev_priv) >= 9)
3776                 latencies = dev_priv->wm.skl_latency;
3777         else
3778                 latencies = dev_priv->wm.pri_latency;
3779
3780         wm_latency_show(m, latencies);
3781
3782         return 0;
3783 }
3784
3785 static int spr_wm_latency_show(struct seq_file *m, void *data)
3786 {
3787         struct drm_i915_private *dev_priv = m->private;
3788         const uint16_t *latencies;
3789
3790         if (INTEL_GEN(dev_priv) >= 9)
3791                 latencies = dev_priv->wm.skl_latency;
3792         else
3793                 latencies = dev_priv->wm.spr_latency;
3794
3795         wm_latency_show(m, latencies);
3796
3797         return 0;
3798 }
3799
3800 static int cur_wm_latency_show(struct seq_file *m, void *data)
3801 {
3802         struct drm_i915_private *dev_priv = m->private;
3803         const uint16_t *latencies;
3804
3805         if (INTEL_GEN(dev_priv) >= 9)
3806                 latencies = dev_priv->wm.skl_latency;
3807         else
3808                 latencies = dev_priv->wm.cur_latency;
3809
3810         wm_latency_show(m, latencies);
3811
3812         return 0;
3813 }
3814
3815 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3816 {
3817         struct drm_i915_private *dev_priv = inode->i_private;
3818
3819         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3820                 return -ENODEV;
3821
3822         return single_open(file, pri_wm_latency_show, dev_priv);
3823 }
3824
3825 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3826 {
3827         struct drm_i915_private *dev_priv = inode->i_private;
3828
3829         if (HAS_GMCH_DISPLAY(dev_priv))
3830                 return -ENODEV;
3831
3832         return single_open(file, spr_wm_latency_show, dev_priv);
3833 }
3834
3835 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3836 {
3837         struct drm_i915_private *dev_priv = inode->i_private;
3838
3839         if (HAS_GMCH_DISPLAY(dev_priv))
3840                 return -ENODEV;
3841
3842         return single_open(file, cur_wm_latency_show, dev_priv);
3843 }
3844
/*
 * Common write handler for the watermark latency debugfs files.
 *
 * Parses up to eight space-separated uint16 values from userspace and
 * copies them into @wm under the modeset locks. The write must supply
 * exactly as many values as the platform has watermark levels,
 * otherwise -EINVAL is returned.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* The number of watermark levels is platform dependent. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reject input that doesn't fit tmp[] including the trailing NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Require a value for every supported level, no more, no fewer. */
	if (ret != num_levels)
		return -EINVAL;

	/* The tables are consumed during modeset; update under lock. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3889
3890
3891 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3892                                     size_t len, loff_t *offp)
3893 {
3894         struct seq_file *m = file->private_data;
3895         struct drm_i915_private *dev_priv = m->private;
3896         uint16_t *latencies;
3897
3898         if (INTEL_GEN(dev_priv) >= 9)
3899                 latencies = dev_priv->wm.skl_latency;
3900         else
3901                 latencies = dev_priv->wm.pri_latency;
3902
3903         return wm_latency_write(file, ubuf, len, offp, latencies);
3904 }
3905
3906 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3907                                     size_t len, loff_t *offp)
3908 {
3909         struct seq_file *m = file->private_data;
3910         struct drm_i915_private *dev_priv = m->private;
3911         uint16_t *latencies;
3912
3913         if (INTEL_GEN(dev_priv) >= 9)
3914                 latencies = dev_priv->wm.skl_latency;
3915         else
3916                 latencies = dev_priv->wm.spr_latency;
3917
3918         return wm_latency_write(file, ubuf, len, offp, latencies);
3919 }
3920
3921 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3922                                     size_t len, loff_t *offp)
3923 {
3924         struct seq_file *m = file->private_data;
3925         struct drm_i915_private *dev_priv = m->private;
3926         uint16_t *latencies;
3927
3928         if (INTEL_GEN(dev_priv) >= 9)
3929                 latencies = dev_priv->wm.skl_latency;
3930         else
3931                 latencies = dev_priv->wm.cur_latency;
3932
3933         return wm_latency_write(file, ubuf, len, offp, latencies);
3934 }
3935
/* i915_pri_wm_latency: read/write access to the primary wm latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3944
/* i915_spr_wm_latency: read/write access to the sprite wm latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3953
/* i915_cur_wm_latency: read/write access to the cursor wm latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3962
3963 static int
3964 i915_wedged_get(void *data, u64 *val)
3965 {
3966         struct drm_i915_private *dev_priv = data;
3967
3968         *val = i915_terminally_wedged(&dev_priv->gpu_error);
3969
3970         return 0;
3971 }
3972
/*
 * Manually declare engines hung for testing.
 *
 * @val is a mask of engines to mark as hung; i915_handle_error() then
 * performs the reset. Blocks until the reset handoff completes.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Pretend hangcheck already saw each selected engine stalled. */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	/* Wait for the reset worker to pick up the handoff and finish. */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

/* i915_wedged: read the wedged state; write an engine mask to force a hang. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
4008
/*
 * fault_irq_set - update one of the fake-irq-fault masks
 * @i915: i915 device
 * @irq: pointer to the mask to update (missed_irq_rings or test_irq_rings)
 * @val: new mask value
 *
 * Idles the GPU under struct_mutex before updating the mask so the change
 * cannot race with in-flight requests, then flushes the idle worker so the
 * user interrupt is disarmed. Returns 0 or a negative error code.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4038
/* Report the mask of rings for which interrupts have been missed. */
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

/* Overwrite the missed-irq ring mask (idles the GPU first). */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
4059
/* Report the mask of rings with interrupt masking under test. */
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

/* Enable interrupt masking on the selected rings (idles the GPU first). */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Only bits corresponding to engines that exist are meaningful. */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4084
/* Flag bits accepted by the i915_drop_caches debugfs interface. */
#define DROP_UNBOUND    BIT(0)
#define DROP_BOUND      BIT(1)
#define DROP_RETIRE     BIT(2)
#define DROP_ACTIVE     BIT(3)
#define DROP_FREED      BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE       BIT(6)
#define DROP_ALL (DROP_UNBOUND  | \
		  DROP_BOUND    | \
		  DROP_RETIRE   | \
		  DROP_ACTIVE   | \
		  DROP_FREED    | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
4099 static int
4100 i915_drop_caches_get(void *data, u64 *val)
4101 {
4102         *val = DROP_ALL;
4103
4104         return 0;
4105 }
4106
/*
 * Write handler for i915_drop_caches: drop/flush the GPU caches and
 * object lists selected by the DROP_* bits in @val.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	/* Enter lockdep's fs_reclaim context around the shrinker calls. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED) {
		/* Let an RCU grace period elapse before draining freed objects. */
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4160
4161 static int
4162 i915_max_freq_get(void *data, u64 *val)
4163 {
4164         struct drm_i915_private *dev_priv = data;
4165
4166         if (INTEL_GEN(dev_priv) < 6)
4167                 return -ENODEV;
4168
4169         *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
4170         return 0;
4171 }
4172
/*
 * Set the max frequency softlimit (input in MHz). Values outside the
 * hardware range or below the current min softlimit are rejected.
 */
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->max_freq_softlimit = val;

	/* A failed frequency update is only logged, not reported to the user. */
	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
4216
4217 static int
4218 i915_min_freq_get(void *data, u64 *val)
4219 {
4220         struct drm_i915_private *dev_priv = data;
4221
4222         if (INTEL_GEN(dev_priv) < 6)
4223                 return -ENODEV;
4224
4225         *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
4226         return 0;
4227 }
4228
/*
 * Set the min frequency softlimit (input in MHz). Values outside the
 * hardware range or above the current max softlimit are rejected.
 */
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min ||
	    val > hw_max || val > rps->max_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->min_freq_softlimit = val;

	/* A failed frequency update is only logged, not reported to the user. */
	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
4273
4274 static int
4275 i915_cache_sharing_get(void *data, u64 *val)
4276 {
4277         struct drm_i915_private *dev_priv = data;
4278         u32 snpcr;
4279
4280         if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4281                 return -ENODEV;
4282
4283         intel_runtime_pm_get(dev_priv);
4284
4285         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4286
4287         intel_runtime_pm_put(dev_priv);
4288
4289         *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4290
4291         return 0;
4292 }
4293
/*
 * Set the cache-sharing policy: @val in [0, 3] is written into the
 * GEN6_MBCUNIT_SNPCR policy field (gen6/gen7 only).
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Only four policy values fit in the SNPCR field. */
	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4322
4323 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4324                                           struct sseu_dev_info *sseu)
4325 {
4326         int ss_max = 2;
4327         int ss;
4328         u32 sig1[ss_max], sig2[ss_max];
4329
4330         sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4331         sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4332         sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4333         sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4334
4335         for (ss = 0; ss < ss_max; ss++) {
4336                 unsigned int eu_cnt;
4337
4338                 if (sig1[ss] & CHV_SS_PG_ENABLE)
4339                         /* skip disabled subslice */
4340                         continue;
4341
4342                 sseu->slice_mask = BIT(0);
4343                 sseu->subslice_mask |= BIT(ss);
4344                 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4345                          ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4346                          ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4347                          ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4348                 sseu->eu_total += eu_cnt;
4349                 sseu->eu_per_subslice = max_t(unsigned int,
4350                                               sseu->eu_per_subslice, eu_cnt);
4351         }
4352 }
4353
4354 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4355                                      struct sseu_dev_info *sseu)
4356 {
4357         const struct intel_device_info *info = INTEL_INFO(dev_priv);
4358         int s_max = 6, ss_max = 4;
4359         int s, ss;
4360         u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];
4361
4362         for (s = 0; s < s_max; s++) {
4363                 /*
4364                  * FIXME: Valid SS Mask respects the spec and read
4365                  * only valid bits for those registers, excluding reserverd
4366                  * although this seems wrong because it would leave many
4367                  * subslices without ACK.
4368                  */
4369                 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4370                         GEN10_PGCTL_VALID_SS_MASK(s);
4371                 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4372                 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4373         }
4374
4375         eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4376                      GEN9_PGCTL_SSA_EU19_ACK |
4377                      GEN9_PGCTL_SSA_EU210_ACK |
4378                      GEN9_PGCTL_SSA_EU311_ACK;
4379         eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4380                      GEN9_PGCTL_SSB_EU19_ACK |
4381                      GEN9_PGCTL_SSB_EU210_ACK |
4382                      GEN9_PGCTL_SSB_EU311_ACK;
4383
4384         for (s = 0; s < s_max; s++) {
4385                 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4386                         /* skip disabled slice */
4387                         continue;
4388
4389                 sseu->slice_mask |= BIT(s);
4390                 sseu->subslice_mask = info->sseu.subslice_mask;
4391
4392                 for (ss = 0; ss < ss_max; ss++) {
4393                         unsigned int eu_cnt;
4394
4395                         if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4396                                 /* skip disabled subslice */
4397                                 continue;
4398
4399                         eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4400                                                eu_mask[ss % 2]);
4401                         sseu->eu_total += eu_cnt;
4402                         sseu->eu_per_subslice = max_t(unsigned int,
4403                                                       sseu->eu_per_subslice,
4404                                                       eu_cnt);
4405                 }
4406         }
4407 }
4408
4409 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4410                                     struct sseu_dev_info *sseu)
4411 {
4412         int s_max = 3, ss_max = 4;
4413         int s, ss;
4414         u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4415
4416         /* BXT has a single slice and at most 3 subslices. */
4417         if (IS_GEN9_LP(dev_priv)) {
4418                 s_max = 1;
4419                 ss_max = 3;
4420         }
4421
4422         for (s = 0; s < s_max; s++) {
4423                 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4424                 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4425                 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4426         }
4427
4428         eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4429                      GEN9_PGCTL_SSA_EU19_ACK |
4430                      GEN9_PGCTL_SSA_EU210_ACK |
4431                      GEN9_PGCTL_SSA_EU311_ACK;
4432         eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4433                      GEN9_PGCTL_SSB_EU19_ACK |
4434                      GEN9_PGCTL_SSB_EU210_ACK |
4435                      GEN9_PGCTL_SSB_EU311_ACK;
4436
4437         for (s = 0; s < s_max; s++) {
4438                 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4439                         /* skip disabled slice */
4440                         continue;
4441
4442                 sseu->slice_mask |= BIT(s);
4443
4444                 if (IS_GEN9_BC(dev_priv))
4445                         sseu->subslice_mask =
4446                                 INTEL_INFO(dev_priv)->sseu.subslice_mask;
4447
4448                 for (ss = 0; ss < ss_max; ss++) {
4449                         unsigned int eu_cnt;
4450
4451                         if (IS_GEN9_LP(dev_priv)) {
4452                                 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4453                                         /* skip disabled subslice */
4454                                         continue;
4455
4456                                 sseu->subslice_mask |= BIT(ss);
4457                         }
4458
4459                         eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4460                                                eu_mask[ss%2]);
4461                         sseu->eu_total += eu_cnt;
4462                         sseu->eu_per_subslice = max_t(unsigned int,
4463                                                       sseu->eu_per_subslice,
4464                                                       eu_cnt);
4465                 }
4466         }
4467 }
4468
/*
 * Derive the BDW SSEU status: the enabled slice mask comes from
 * GEN8_GT_SLICE_INFO; subslice/EU counts come from the static device
 * info, with fused-off 7-EU subslices subtracted.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		/* Subslice/EU masks are not read from hw; use the static topology. */
		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4493
/*
 * Print one sseu_dev_info block. @is_available_info selects the
 * "Available" (static device info) vs "Enabled" (live hw status)
 * labelling; capability flags are only printed for the former.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
		   sseu->subslice_mask);
	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
		   hweight8(sseu->subslice_mask));
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* The capability flags below only apply to the static device info. */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4529
/*
 * i915_sseu_status - report static and live slice/subslice/EU topology
 *
 * Prints the static topology from the device info, then the currently
 * powered-on topology read from hardware. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	/* Keep the device awake while reading the status registers. */
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4562
4563 static int i915_forcewake_open(struct inode *inode, struct file *file)
4564 {
4565         struct drm_i915_private *i915 = inode->i_private;
4566
4567         if (INTEL_GEN(i915) < 6)
4568                 return 0;
4569
4570         intel_runtime_pm_get(i915);
4571         intel_uncore_forcewake_user_get(i915);
4572
4573         return 0;
4574 }
4575
4576 static int i915_forcewake_release(struct inode *inode, struct file *file)
4577 {
4578         struct drm_i915_private *i915 = inode->i_private;
4579
4580         if (INTEL_GEN(i915) < 6)
4581                 return 0;
4582
4583         intel_uncore_forcewake_user_put(i915);
4584         intel_runtime_pm_put(i915);
4585
4586         return 0;
4587 }
4588
/*
 * i915_forcewake_user: while this file is held open, runtime pm and user
 * forcewake are held, keeping registers accessible for manual inspection.
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4594
4595 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4596 {
4597         struct drm_i915_private *dev_priv = m->private;
4598         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4599
4600         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4601         seq_printf(m, "Detected: %s\n",
4602                    yesno(delayed_work_pending(&hotplug->reenable_work)));
4603
4604         return 0;
4605 }
4606
/*
 * Write handler for i915_hpd_storm_ctl: accepts a decimal threshold, or
 * the string "reset" to restore the default. Updates the storm threshold
 * and clears the per-pin storm statistics.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Reject input that doesn't fit tmp[] including the trailing NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	/* The threshold and stats are read from the interrupt handler. */
	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4655
4656 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4657 {
4658         return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4659 }
4660
/* i915_hpd_storm_ctl: read/write control of HPD storm detection. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4669
4670 static const struct drm_info_list i915_debugfs_list[] = {
4671         {"i915_capabilities", i915_capabilities, 0},
4672         {"i915_gem_objects", i915_gem_object_info, 0},
4673         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4674         {"i915_gem_stolen", i915_gem_stolen_list_info },
4675         {"i915_gem_seqno", i915_gem_seqno_info, 0},
4676         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4677         {"i915_gem_interrupt", i915_interrupt_info, 0},
4678         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4679         {"i915_guc_info", i915_guc_info, 0},
4680         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4681         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4682         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4683         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4684         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4685         {"i915_frequency_info", i915_frequency_info, 0},
4686         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4687         {"i915_reset_info", i915_reset_info, 0},
4688         {"i915_drpc_info", i915_drpc_info, 0},
4689         {"i915_emon_status", i915_emon_status, 0},
4690         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4691         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4692         {"i915_fbc_status", i915_fbc_status, 0},
4693         {"i915_ips_status", i915_ips_status, 0},
4694         {"i915_sr_status", i915_sr_status, 0},
4695         {"i915_opregion", i915_opregion, 0},
4696         {"i915_vbt", i915_vbt, 0},
4697         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4698         {"i915_context_status", i915_context_status, 0},
4699         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4700         {"i915_swizzle_info", i915_swizzle_info, 0},
4701         {"i915_ppgtt_info", i915_ppgtt_info, 0},
4702         {"i915_llc", i915_llc, 0},
4703         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4704         {"i915_sink_crc_eDP1", i915_sink_crc, 0},
4705         {"i915_energy_uJ", i915_energy_uJ, 0},
4706         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4707         {"i915_power_domain_info", i915_power_domain_info, 0},
4708         {"i915_dmc_info", i915_dmc_info, 0},
4709         {"i915_display_info", i915_display_info, 0},
4710         {"i915_engine_info", i915_engine_info, 0},
4711         {"i915_shrinker_info", i915_shrinker_info, 0},
4712         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4713         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4714         {"i915_wa_registers", i915_wa_registers, 0},
4715         {"i915_ddb_info", i915_ddb_info, 0},
4716         {"i915_sseu_status", i915_sseu_status, 0},
4717         {"i915_drrs_status", i915_drrs_status, 0},
4718         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4719 };
4720 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4721
/*
 * Writable control files.  Each is registered individually with
 * debugfs_create_file() in i915_debugfs_register(), with dev_priv as
 * its private data.
 */
static const struct i915_debugfs_files {
        const char *name;
        const struct file_operations *fops;
} i915_debugfs_files[] = {
        {"i915_wedged", &i915_wedged_fops},
        {"i915_max_freq", &i915_max_freq_fops},
        {"i915_min_freq", &i915_min_freq_fops},
        {"i915_cache_sharing", &i915_cache_sharing_fops},
        {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
        {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
        /* Error capture files only exist when error capture is compiled in. */
        {"i915_error_state", &i915_error_state_fops},
        {"i915_gpu_info", &i915_gpu_info_fops},
#endif
        {"i915_next_seqno", &i915_next_seqno_fops},
        {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
        {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
        {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
        {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
        {"i915_fbc_false_color", &i915_fbc_false_color_fops},
        {"i915_dp_test_data", &i915_displayport_test_data_fops},
        {"i915_dp_test_type", &i915_displayport_test_type_fops},
        {"i915_dp_test_active", &i915_displayport_test_active_fops},
        {"i915_guc_log_control", &i915_guc_log_control_fops},
        {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
        {"i915_ipc_status", &i915_ipc_status_fops}
};
4750
4751 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4752 {
4753         struct drm_minor *minor = dev_priv->drm.primary;
4754         struct dentry *ent;
4755         int ret, i;
4756
4757         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4758                                   minor->debugfs_root, to_i915(minor->dev),
4759                                   &i915_forcewake_fops);
4760         if (!ent)
4761                 return -ENOMEM;
4762
4763         ret = intel_pipe_crc_create(minor);
4764         if (ret)
4765                 return ret;
4766
4767         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4768                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4769                                           S_IRUGO | S_IWUSR,
4770                                           minor->debugfs_root,
4771                                           to_i915(minor->dev),
4772                                           i915_debugfs_files[i].fops);
4773                 if (!ent)
4774                         return -ENOMEM;
4775         }
4776
4777         return drm_debugfs_create_files(i915_debugfs_list,
4778                                         I915_DEBUGFS_ENTRIES,
4779                                         minor->debugfs_root, minor);
4780 }
4781
/* Describes one contiguous range of DPCD registers to dump in i915_dpcd_show. */
struct dpcd_block {
        /* DPCD dump start address. */
        unsigned int offset;
        /* DPCD dump end address, inclusive. If unset, .size will be used. */
        unsigned int end;
        /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
        size_t size;
        /* Only valid for eDP. */
        bool edp;
};
4792
/*
 * Ranges dumped by the per-connector i915_dpcd debugfs file, in the
 * order they are printed.  Entries marked .edp are skipped for
 * non-eDP connectors.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
        { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
        { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
        { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
        { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
        { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
        { .offset = DP_SET_POWER },
        { .offset = DP_EDP_DPCD_REV },
        { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
        { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
        { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4805
4806 static int i915_dpcd_show(struct seq_file *m, void *data)
4807 {
4808         struct drm_connector *connector = m->private;
4809         struct intel_dp *intel_dp =
4810                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4811         uint8_t buf[16];
4812         ssize_t err;
4813         int i;
4814
4815         if (connector->status != connector_status_connected)
4816                 return -ENODEV;
4817
4818         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4819                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4820                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4821
4822                 if (b->edp &&
4823                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4824                         continue;
4825
4826                 /* low tech for now */
4827                 if (WARN_ON(size > sizeof(buf)))
4828                         continue;
4829
4830                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4831                 if (err <= 0) {
4832                         DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4833                                   size, b->offset, err);
4834                         continue;
4835                 }
4836
4837                 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4838         }
4839
4840         return 0;
4841 }
4842
/* Open the i915_dpcd file as a seq_file; i_private holds the drm_connector. */
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_dpcd_show, inode->i_private);
}
4847
/* Read-only seq_file fops for the per-connector i915_dpcd debugfs file. */
static const struct file_operations i915_dpcd_fops = {
        .owner = THIS_MODULE,
        .open = i915_dpcd_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
4855
4856 static int i915_panel_show(struct seq_file *m, void *data)
4857 {
4858         struct drm_connector *connector = m->private;
4859         struct intel_dp *intel_dp =
4860                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4861
4862         if (connector->status != connector_status_connected)
4863                 return -ENODEV;
4864
4865         seq_printf(m, "Panel power up delay: %d\n",
4866                    intel_dp->panel_power_up_delay);
4867         seq_printf(m, "Panel power down delay: %d\n",
4868                    intel_dp->panel_power_down_delay);
4869         seq_printf(m, "Backlight on delay: %d\n",
4870                    intel_dp->backlight_on_delay);
4871         seq_printf(m, "Backlight off delay: %d\n",
4872                    intel_dp->backlight_off_delay);
4873
4874         return 0;
4875 }
4876
/* Open the i915_panel_timings file as a seq_file; i_private holds the connector. */
static int i915_panel_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_panel_show, inode->i_private);
}
4881
/* Read-only seq_file fops for the per-connector i915_panel_timings file. */
static const struct file_operations i915_panel_fops = {
        .owner = THIS_MODULE,
        .open = i915_panel_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
4889
4890 /**
4891  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4892  * @connector: pointer to a registered drm_connector
4893  *
4894  * Cleanup will be done by drm_connector_unregister() through a call to
4895  * drm_debugfs_connector_remove().
4896  *
4897  * Returns 0 on success, negative error codes on error.
4898  */
4899 int i915_debugfs_connector_add(struct drm_connector *connector)
4900 {
4901         struct dentry *root = connector->debugfs_entry;
4902
4903         /* The connector must have been registered beforehands. */
4904         if (!root)
4905                 return -ENODEV;
4906
4907         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4908             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4909                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4910                                     connector, &i915_dpcd_fops);
4911
4912         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4913                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4914                                     connector, &i915_panel_fops);
4915
4916         return 0;
4917 }