drm/i915: properly init lockdep class
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37         return to_i915(node->minor->dev);
38 }
39
/*
 * Print one module parameter as "i915.<name>=<value>".
 *
 * @type is a stringified C type name generated by the PRINT_PARAM macro in
 * i915_capabilities().  Since @type is always a string literal,
 * __builtin_strcmp() folds to a compile-time constant, exactly one branch
 * survives per call site, and BUILD_BUG() turns any parameter with an
 * unhandled type into a build failure rather than a runtime surprise.
 */
static __always_inline void seq_print_param(struct seq_file *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}
56
/*
 * debugfs "i915_capabilities": dump the device generation, platform name,
 * PCH type, every device-info feature flag, and the live module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	/* One "flag: yes/no" line per device-info feature bit. */
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	/* Hold the params steady (vs sysfs writers) while we print them. */
	kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
	I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
78
/* '*' if the object is still in use by the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
83
84 static char get_pin_flag(struct drm_i915_gem_object *obj)
85 {
86         return obj->pin_global ? 'p' : ' ';
87 }
88
89 static char get_tiling_flag(struct drm_i915_gem_object *obj)
90 {
91         switch (i915_gem_object_get_tiling(obj)) {
92         default:
93         case I915_TILING_NONE: return ' ';
94         case I915_TILING_X: return 'X';
95         case I915_TILING_Y: return 'Y';
96         }
97 }
98
99 static char get_global_flag(struct drm_i915_gem_object *obj)
100 {
101         return obj->userfault_count ? 'g' : ' ';
102 }
103
104 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
105 {
106         return obj->mm.mapping ? 'M' : ' ';
107 }
108
109 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
110 {
111         u64 size = 0;
112         struct i915_vma *vma;
113
114         for_each_ggtt_vma(vma, obj) {
115                 if (drm_mm_node_allocated(&vma->node))
116                         size += vma->node.size;
117         }
118
119         return size;
120 }
121
122 static const char *
123 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
124 {
125         size_t x = 0;
126
127         switch (page_sizes) {
128         case 0:
129                 return "";
130         case I915_GTT_PAGE_SIZE_4K:
131                 return "4K";
132         case I915_GTT_PAGE_SIZE_64K:
133                 return "64K";
134         case I915_GTT_PAGE_SIZE_2M:
135                 return "2M";
136         default:
137                 if (!buf)
138                         return "M";
139
140                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
141                         x += snprintf(buf + x, len - x, "2M, ");
142                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
143                         x += snprintf(buf + x, len - x, "64K, ");
144                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
145                         x += snprintf(buf + x, len - x, "4K, ");
146                 buf[x-2] = '\0';
147
148                 return buf;
149         }
150 }
151
/*
 * Print a one-line summary of a GEM object: status flags, size, cache
 * domains, every allocated VMA (with GGTT view details), fence, the last
 * engine to write it and any frontbuffer tracking bits.
 *
 * Caller must hold struct_mutex: we walk obj->vma_list and query
 * fence/active state that are protected by it.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flags legend: active(*), pinned(p), tiling(X/Y), userfault(g), mapped(M). */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass over the VMAs: just count how many are pinned. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that actually owns GTT space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT bindings may be partial or rotated views. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
239
240 static int obj_rank_by_stolen(const void *A, const void *B)
241 {
242         const struct drm_i915_gem_object *a =
243                 *(const struct drm_i915_gem_object **)A;
244         const struct drm_i915_gem_object *b =
245                 *(const struct drm_i915_gem_object **)B;
246
247         if (a->stolen->start < b->stolen->start)
248                 return -1;
249         if (a->stolen->start > b->stolen->start)
250                 return 1;
251         return 0;
252 }
253
/*
 * debugfs "i915_gem_stolen": list every object backed by stolen memory,
 * sorted by stolen offset, plus totals.
 *
 * The object lists are snapshotted under mm.obj_lock into a bounded array
 * (sized from a racy READ_ONCE of object_count, hence the "count == total"
 * bail-outs), because describe_obj() cannot be called under a spinlock.
 * struct_mutex is held across the walk+print as describe_obj() requires it.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	/* Collect stolen objects from both the bound and unbound lists. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
316
/*
 * Per-client memory accounting accumulated by per_file_stats().
 * All sizes are in bytes.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* client whose ppgtt VMAs are counted */
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* total object size; size with no GTT binding */
	u64 global, shared;	/* size bound in the GGTT; named/dma-buf-exported size */
	u64 active, inactive;	/* bound size split by GPU-busy vs idle */
};
324
/*
 * idr_for_each() callback: fold one object's sizes into the struct
 * file_stats passed via @data.
 *
 * GGTT VMAs always contribute (and are also totalled in stats->global);
 * a ppgtt VMA only counts towards active/inactive when its address space
 * belongs to stats->file_priv, so shared objects are not double-charged
 * to unrelated clients.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Skip ppgtt bindings owned by other clients. */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
361
/*
 * Emit one summary line for @stats, tagged with @name; silent when no
 * objects were counted.  A macro because @stats is a struct passed by
 * value; note it evaluates @stats several times, so only pass a plain
 * local variable, never an expression with side effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
374
375 static void print_batch_pool_stats(struct seq_file *m,
376                                    struct drm_i915_private *dev_priv)
377 {
378         struct drm_i915_gem_object *obj;
379         struct file_stats stats;
380         struct intel_engine_cs *engine;
381         enum intel_engine_id id;
382         int j;
383
384         memset(&stats, 0, sizeof(stats));
385
386         for_each_engine(engine, dev_priv, id) {
387                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
388                         list_for_each_entry(obj,
389                                             &engine->batch_pool.cache_list[j],
390                                             batch_pool_link)
391                                 per_file_stats(0, obj, &stats);
392                 }
393         }
394
395         print_file_stats(m, "[k]batch pool", stats);
396 }
397
398 static int per_file_ctx_stats(int id, void *ptr, void *data)
399 {
400         struct i915_gem_context *ctx = ptr;
401         int n;
402
403         for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
404                 if (ctx->engine[n].state)
405                         per_file_stats(0, ctx->engine[n].state->obj, data);
406                 if (ctx->engine[n].ring)
407                         per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
408         }
409
410         return 0;
411 }
412
413 static void print_context_stats(struct seq_file *m,
414                                 struct drm_i915_private *dev_priv)
415 {
416         struct drm_device *dev = &dev_priv->drm;
417         struct file_stats stats;
418         struct drm_file *file;
419
420         memset(&stats, 0, sizeof(stats));
421
422         mutex_lock(&dev->struct_mutex);
423         if (dev_priv->kernel_context)
424                 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
425
426         list_for_each_entry(file, &dev->filelist, lhead) {
427                 struct drm_i915_file_private *fpriv = file->driver_priv;
428                 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
429         }
430         mutex_unlock(&dev->struct_mutex);
431
432         print_file_stats(m, "[k]contexts", stats);
433 }
434
/*
 * debugfs "i915_gem_objects": global object-memory breakdown (unbound,
 * bound, purgeable, mapped, huge-paged, display-pinned), followed by
 * batch-pool, context and per-client statistics.
 *
 * Locking: struct_mutex across the object walk, mm.obj_lock (spinlock)
 * while iterating the unbound/bound lists, filelist_mutex for the client
 * walk, and RCU around the pid->task comm lookup.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* First pass: objects with no GTT binding. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Backing store using pages larger than the minimum 4K. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Second pass: bound objects; purgeable/mapped/huge keep accumulating. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->base.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* Per-client breakdown: walk open files under filelist_mutex. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct drm_i915_gem_request *request;
		struct task_struct *task;

		/* struct_mutex protects per_file_stats() and the request list. */
		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the pid recorded in the oldest request's context. */
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
572
573 static int i915_gem_gtt_info(struct seq_file *m, void *data)
574 {
575         struct drm_info_node *node = m->private;
576         struct drm_i915_private *dev_priv = node_to_i915(node);
577         struct drm_device *dev = &dev_priv->drm;
578         struct drm_i915_gem_object **objects;
579         struct drm_i915_gem_object *obj;
580         u64 total_obj_size, total_gtt_size;
581         unsigned long nobject, n;
582         int count, ret;
583
584         nobject = READ_ONCE(dev_priv->mm.object_count);
585         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
586         if (!objects)
587                 return -ENOMEM;
588
589         ret = mutex_lock_interruptible(&dev->struct_mutex);
590         if (ret)
591                 return ret;
592
593         count = 0;
594         spin_lock(&dev_priv->mm.obj_lock);
595         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
596                 objects[count++] = obj;
597                 if (count == nobject)
598                         break;
599         }
600         spin_unlock(&dev_priv->mm.obj_lock);
601
602         total_obj_size = total_gtt_size = 0;
603         for (n = 0;  n < count; n++) {
604                 obj = objects[n];
605
606                 seq_puts(m, "   ");
607                 describe_obj(m, obj);
608                 seq_putc(m, '\n');
609                 total_obj_size += obj->base.size;
610                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
611         }
612
613         mutex_unlock(&dev->struct_mutex);
614
615         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
616                    count, total_obj_size, total_gtt_size);
617         kvfree(objects);
618
619         return 0;
620 }
621
622 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
623 {
624         struct drm_i915_private *dev_priv = node_to_i915(m->private);
625         struct drm_device *dev = &dev_priv->drm;
626         struct drm_i915_gem_object *obj;
627         struct intel_engine_cs *engine;
628         enum intel_engine_id id;
629         int total = 0;
630         int ret, j;
631
632         ret = mutex_lock_interruptible(&dev->struct_mutex);
633         if (ret)
634                 return ret;
635
636         for_each_engine(engine, dev_priv, id) {
637                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
638                         int count;
639
640                         count = 0;
641                         list_for_each_entry(obj,
642                                             &engine->batch_pool.cache_list[j],
643                                             batch_pool_link)
644                                 count++;
645                         seq_printf(m, "%s cache[%d]: %d objects\n",
646                                    engine->name, j, count);
647
648                         list_for_each_entry(obj,
649                                             &engine->batch_pool.cache_list[j],
650                                             batch_pool_link) {
651                                 seq_puts(m, "   ");
652                                 describe_obj(m, obj);
653                                 seq_putc(m, '\n');
654                         }
655
656                         total += count;
657                 }
658         }
659
660         seq_printf(m, "total: %d\n", total);
661
662         mutex_unlock(&dev->struct_mutex);
663
664         return 0;
665 }
666
667 static int i915_interrupt_info(struct seq_file *m, void *data)
668 {
669         struct drm_i915_private *dev_priv = node_to_i915(m->private);
670         struct intel_engine_cs *engine;
671         enum intel_engine_id id;
672         int i, pipe;
673
674         intel_runtime_pm_get(dev_priv);
675
676         if (IS_CHERRYVIEW(dev_priv)) {
677                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
678                            I915_READ(GEN8_MASTER_IRQ));
679
680                 seq_printf(m, "Display IER:\t%08x\n",
681                            I915_READ(VLV_IER));
682                 seq_printf(m, "Display IIR:\t%08x\n",
683                            I915_READ(VLV_IIR));
684                 seq_printf(m, "Display IIR_RW:\t%08x\n",
685                            I915_READ(VLV_IIR_RW));
686                 seq_printf(m, "Display IMR:\t%08x\n",
687                            I915_READ(VLV_IMR));
688                 for_each_pipe(dev_priv, pipe) {
689                         enum intel_display_power_domain power_domain;
690
691                         power_domain = POWER_DOMAIN_PIPE(pipe);
692                         if (!intel_display_power_get_if_enabled(dev_priv,
693                                                                 power_domain)) {
694                                 seq_printf(m, "Pipe %c power disabled\n",
695                                            pipe_name(pipe));
696                                 continue;
697                         }
698
699                         seq_printf(m, "Pipe %c stat:\t%08x\n",
700                                    pipe_name(pipe),
701                                    I915_READ(PIPESTAT(pipe)));
702
703                         intel_display_power_put(dev_priv, power_domain);
704                 }
705
706                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
707                 seq_printf(m, "Port hotplug:\t%08x\n",
708                            I915_READ(PORT_HOTPLUG_EN));
709                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
710                            I915_READ(VLV_DPFLIPSTAT));
711                 seq_printf(m, "DPINVGTT:\t%08x\n",
712                            I915_READ(DPINVGTT));
713                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
714
715                 for (i = 0; i < 4; i++) {
716                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
717                                    i, I915_READ(GEN8_GT_IMR(i)));
718                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
719                                    i, I915_READ(GEN8_GT_IIR(i)));
720                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
721                                    i, I915_READ(GEN8_GT_IER(i)));
722                 }
723
724                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
725                            I915_READ(GEN8_PCU_IMR));
726                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
727                            I915_READ(GEN8_PCU_IIR));
728                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
729                            I915_READ(GEN8_PCU_IER));
730         } else if (INTEL_GEN(dev_priv) >= 8) {
731                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
732                            I915_READ(GEN8_MASTER_IRQ));
733
734                 for (i = 0; i < 4; i++) {
735                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
736                                    i, I915_READ(GEN8_GT_IMR(i)));
737                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
738                                    i, I915_READ(GEN8_GT_IIR(i)));
739                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
740                                    i, I915_READ(GEN8_GT_IER(i)));
741                 }
742
743                 for_each_pipe(dev_priv, pipe) {
744                         enum intel_display_power_domain power_domain;
745
746                         power_domain = POWER_DOMAIN_PIPE(pipe);
747                         if (!intel_display_power_get_if_enabled(dev_priv,
748                                                                 power_domain)) {
749                                 seq_printf(m, "Pipe %c power disabled\n",
750                                            pipe_name(pipe));
751                                 continue;
752                         }
753                         seq_printf(m, "Pipe %c IMR:\t%08x\n",
754                                    pipe_name(pipe),
755                                    I915_READ(GEN8_DE_PIPE_IMR(pipe)));
756                         seq_printf(m, "Pipe %c IIR:\t%08x\n",
757                                    pipe_name(pipe),
758                                    I915_READ(GEN8_DE_PIPE_IIR(pipe)));
759                         seq_printf(m, "Pipe %c IER:\t%08x\n",
760                                    pipe_name(pipe),
761                                    I915_READ(GEN8_DE_PIPE_IER(pipe)));
762
763                         intel_display_power_put(dev_priv, power_domain);
764                 }
765
766                 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
767                            I915_READ(GEN8_DE_PORT_IMR));
768                 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
769                            I915_READ(GEN8_DE_PORT_IIR));
770                 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
771                            I915_READ(GEN8_DE_PORT_IER));
772
773                 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
774                            I915_READ(GEN8_DE_MISC_IMR));
775                 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
776                            I915_READ(GEN8_DE_MISC_IIR));
777                 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
778                            I915_READ(GEN8_DE_MISC_IER));
779
780                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
781                            I915_READ(GEN8_PCU_IMR));
782                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
783                            I915_READ(GEN8_PCU_IIR));
784                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
785                            I915_READ(GEN8_PCU_IER));
786         } else if (IS_VALLEYVIEW(dev_priv)) {
787                 seq_printf(m, "Display IER:\t%08x\n",
788                            I915_READ(VLV_IER));
789                 seq_printf(m, "Display IIR:\t%08x\n",
790                            I915_READ(VLV_IIR));
791                 seq_printf(m, "Display IIR_RW:\t%08x\n",
792                            I915_READ(VLV_IIR_RW));
793                 seq_printf(m, "Display IMR:\t%08x\n",
794                            I915_READ(VLV_IMR));
795                 for_each_pipe(dev_priv, pipe) {
796                         enum intel_display_power_domain power_domain;
797
798                         power_domain = POWER_DOMAIN_PIPE(pipe);
799                         if (!intel_display_power_get_if_enabled(dev_priv,
800                                                                 power_domain)) {
801                                 seq_printf(m, "Pipe %c power disabled\n",
802                                            pipe_name(pipe));
803                                 continue;
804                         }
805
806                         seq_printf(m, "Pipe %c stat:\t%08x\n",
807                                    pipe_name(pipe),
808                                    I915_READ(PIPESTAT(pipe)));
809                         intel_display_power_put(dev_priv, power_domain);
810                 }
811
812                 seq_printf(m, "Master IER:\t%08x\n",
813                            I915_READ(VLV_MASTER_IER));
814
815                 seq_printf(m, "Render IER:\t%08x\n",
816                            I915_READ(GTIER));
817                 seq_printf(m, "Render IIR:\t%08x\n",
818                            I915_READ(GTIIR));
819                 seq_printf(m, "Render IMR:\t%08x\n",
820                            I915_READ(GTIMR));
821
822                 seq_printf(m, "PM IER:\t\t%08x\n",
823                            I915_READ(GEN6_PMIER));
824                 seq_printf(m, "PM IIR:\t\t%08x\n",
825                            I915_READ(GEN6_PMIIR));
826                 seq_printf(m, "PM IMR:\t\t%08x\n",
827                            I915_READ(GEN6_PMIMR));
828
829                 seq_printf(m, "Port hotplug:\t%08x\n",
830                            I915_READ(PORT_HOTPLUG_EN));
831                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
832                            I915_READ(VLV_DPFLIPSTAT));
833                 seq_printf(m, "DPINVGTT:\t%08x\n",
834                            I915_READ(DPINVGTT));
835
836         } else if (!HAS_PCH_SPLIT(dev_priv)) {
837                 seq_printf(m, "Interrupt enable:    %08x\n",
838                            I915_READ(IER));
839                 seq_printf(m, "Interrupt identity:  %08x\n",
840                            I915_READ(IIR));
841                 seq_printf(m, "Interrupt mask:      %08x\n",
842                            I915_READ(IMR));
843                 for_each_pipe(dev_priv, pipe)
844                         seq_printf(m, "Pipe %c stat:         %08x\n",
845                                    pipe_name(pipe),
846                                    I915_READ(PIPESTAT(pipe)));
847         } else {
848                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
849                            I915_READ(DEIER));
850                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
851                            I915_READ(DEIIR));
852                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
853                            I915_READ(DEIMR));
854                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
855                            I915_READ(SDEIER));
856                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
857                            I915_READ(SDEIIR));
858                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
859                            I915_READ(SDEIMR));
860                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
861                            I915_READ(GTIER));
862                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
863                            I915_READ(GTIIR));
864                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
865                            I915_READ(GTIMR));
866         }
867         if (INTEL_GEN(dev_priv) >= 6) {
868                 for_each_engine(engine, dev_priv, id) {
869                         seq_printf(m,
870                                    "Graphics Interrupt mask (%s):       %08x\n",
871                                    engine->name, I915_READ_IMR(engine));
872                 }
873         }
874         intel_runtime_pm_put(dev_priv);
875
876         return 0;
877 }
878
879 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
880 {
881         struct drm_i915_private *dev_priv = node_to_i915(m->private);
882         struct drm_device *dev = &dev_priv->drm;
883         int i, ret;
884
885         ret = mutex_lock_interruptible(&dev->struct_mutex);
886         if (ret)
887                 return ret;
888
889         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
890         for (i = 0; i < dev_priv->num_fence_regs; i++) {
891                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
892
893                 seq_printf(m, "Fence %d, pin count = %d, object = ",
894                            i, dev_priv->fence_regs[i].pin_count);
895                 if (!vma)
896                         seq_puts(m, "unused");
897                 else
898                         describe_obj(m, vma->obj);
899                 seq_putc(m, '\n');
900         }
901
902         mutex_unlock(&dev->struct_mutex);
903         return 0;
904 }
905
906 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
907 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
908                               size_t count, loff_t *pos)
909 {
910         struct i915_gpu_state *error = file->private_data;
911         struct drm_i915_error_state_buf str;
912         ssize_t ret;
913         loff_t tmp;
914
915         if (!error)
916                 return 0;
917
918         ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
919         if (ret)
920                 return ret;
921
922         ret = i915_error_state_to_str(&str, error);
923         if (ret)
924                 goto out;
925
926         tmp = 0;
927         ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
928         if (ret < 0)
929                 goto out;
930
931         *pos = str.start + ret;
932 out:
933         i915_error_state_buf_release(&str);
934         return ret;
935 }
936
/* debugfs release(): drop the reference taken on the state at open time. */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
942
943 static int i915_gpu_info_open(struct inode *inode, struct file *file)
944 {
945         struct drm_i915_private *i915 = inode->i_private;
946         struct i915_gpu_state *gpu;
947
948         intel_runtime_pm_get(i915);
949         gpu = i915_capture_gpu_state(i915);
950         intel_runtime_pm_put(i915);
951         if (!gpu)
952                 return -ENOMEM;
953
954         file->private_data = gpu;
955         return 0;
956 }
957
/*
 * File operations for the on-demand i915_gpu_info snapshot: each open()
 * captures a new state, read() serialises it, release() drops it.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
965
966 static ssize_t
967 i915_error_state_write(struct file *filp,
968                        const char __user *ubuf,
969                        size_t cnt,
970                        loff_t *ppos)
971 {
972         struct i915_gpu_state *error = filp->private_data;
973
974         if (!error)
975                 return 0;
976
977         DRM_DEBUG_DRIVER("Resetting error state\n");
978         i915_reset_error_state(error->i915);
979
980         return cnt;
981 }
982
/*
 * Open i915_error_state: take a reference on the first captured error
 * state, if any. private_data may legitimately be NULL (no error yet);
 * gpu_state_read() handles that as EOF.
 */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}
988
/*
 * File operations for i915_error_state: read serialises the captured
 * error (shared with i915_gpu_info), write clears it.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
997 #endif
998
999 static int
1000 i915_next_seqno_set(void *data, u64 val)
1001 {
1002         struct drm_i915_private *dev_priv = data;
1003         struct drm_device *dev = &dev_priv->drm;
1004         int ret;
1005
1006         ret = mutex_lock_interruptible(&dev->struct_mutex);
1007         if (ret)
1008                 return ret;
1009
1010         ret = i915_gem_set_global_seqno(dev, val);
1011         mutex_unlock(&dev->struct_mutex);
1012
1013         return ret;
1014 }
1015
/* Write-only attribute: no getter is provided, only the seqno setter. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
1019
/*
 * Dump GPU frequency / P-state information. The report format depends
 * on the platform generation: ILK uses MEMSWCTL/MEMSTAT, VLV/CHV go
 * through the punit, and gen6+ decodes the RPS registers directly.
 * Holds a runtime-PM wakeref for the duration of the register reads.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		/* Ironlake: requested/current state lives in MEMSWCTL/MEMSTAT. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* pcu_lock serialises access to the punit mailbox. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		/* Actual frequency is reported by the punit, not MMIO. */
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts expose the caps at different offsets. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* The requested-frequency field shifts position per gen. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		/* PM interrupt registers moved into GT bank 2 on gen8+. */
		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		/*
		 * max_freq is reused as scratch while decoding the RPN/RP1/RP0
		 * fields of RP_STATE_CAP; GEN9_LP swaps the RPN/RP0 byte order.
		 */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1226
/*
 * Print a previously-sampled set of INSTDONE registers from @instdone.
 * Older parts only have the single INSTDONE dword; newer generations
 * add slice-common, per-sampler and per-row breakdowns.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	/* Gen2/3: only the single INSTDONE register exists. */
	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	/* Per-slice/subslice registers appear on gen7+. */
	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
1254
/*
 * Report hangcheck status: global reset/wedge flags, the state of the
 * hangcheck timer, and for each engine a comparison of the values the
 * hangcheck last recorded against the live hardware values sampled here.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Global error/reset state, readable without waking the device. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample live engine state while holding a runtime-PM wakeref. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter tree under its irq-safe lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		/* INSTDONE is only tracked for the render engine. */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1349
1350 static int i915_reset_info(struct seq_file *m, void *unused)
1351 {
1352         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1353         struct i915_gpu_error *error = &dev_priv->gpu_error;
1354         struct intel_engine_cs *engine;
1355         enum intel_engine_id id;
1356
1357         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1358
1359         for_each_engine(engine, dev_priv, id) {
1360                 seq_printf(m, "%s = %u\n", engine->name,
1361                            i915_reset_engine_count(error, engine));
1362         }
1363
1364         return 0;
1365 }
1366
/*
 * Ironlake render-standby (DRPC) report: decode MEMMODECTL, RSTDBYCTL
 * and CRSTANDVID into boost/P-state configuration and the current RSX
 * (render standby) state.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	/* FMIN occupies the low bits, so no shift is applied here. */
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	/* Standby is enabled when the SW-exit bit is clear. */
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1423
1424 static int i915_forcewake_domains(struct seq_file *m, void *data)
1425 {
1426         struct drm_i915_private *i915 = node_to_i915(m->private);
1427         struct intel_uncore_forcewake_domain *fw_domain;
1428         unsigned int tmp;
1429
1430         seq_printf(m, "user.bypass_count = %u\n",
1431                    i915->uncore.user_forcewake.count);
1432
1433         for_each_fw_domain(fw_domain, i915, tmp)
1434                 seq_printf(m, "%s.wake_count = %u\n",
1435                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1436                            READ_ONCE(fw_domain->wake_count));
1437
1438         return 0;
1439 }
1440
/*
 * Print one RC6 residency counter: the raw register value alongside its
 * conversion to microseconds.
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
1451
/*
 * Valleyview/Cherryview DRPC report: RC6 enablement, render/media
 * power-well status and per-well RC6 residency, followed by the
 * forcewake domain counts.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	/* RC6 counts as enabled if either timeout or EI mode is selected. */
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Tack the forcewake counts onto the same report. */
	return i915_forcewake_domains(m, NULL);
}
1473
/*
 * Report the render-standby (RC) state machine status for gen6+,
 * including gen9 power-well gating, RC6 residencies and RC6 voltage IDs.
 * Returns the result of dumping the forcewake domains at the end.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
	unsigned forcewake_count;
	int count = 0;

	/* A held forcewake reference keeps the GT out of RC6, so the
	 * state we are about to read would be misleading. */
	forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	/* _FW read skips the mmio tracepoint, so emit it by hand. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs come from the PCU mailbox; pcu_lock serializes
	 * all pcode transactions. */
	mutex_lock(&dev_priv->pcu_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->pcu_lock);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RCn field of GT_CORE_STATUS snapshotted above. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Three 8-bit voltage IDs packed into rc6vids, one per RC6 level. */
	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return i915_forcewake_domains(m, NULL);
}
1568
1569 static int i915_drpc_info(struct seq_file *m, void *unused)
1570 {
1571         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1572         int err;
1573
1574         intel_runtime_pm_get(dev_priv);
1575
1576         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1577                 err = vlv_drpc_info(m);
1578         else if (INTEL_GEN(dev_priv) >= 6)
1579                 err = gen6_drpc_info(m);
1580         else
1581                 err = ironlake_drpc_info(m);
1582
1583         intel_runtime_pm_put(dev_priv);
1584
1585         return err;
1586 }
1587
1588 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1589 {
1590         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1591
1592         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1593                    dev_priv->fb_tracking.busy_bits);
1594
1595         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1596                    dev_priv->fb_tracking.flip_bits);
1597
1598         return 0;
1599 }
1600
/*
 * Report whether framebuffer compression (FBC) is active, why it is
 * disabled if not, and the hardware compressing/compressed status.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_FBC(dev_priv)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	/* Keep the device awake and FBC state stable while reading. */
	intel_runtime_pm_get(dev_priv);
	mutex_lock(&dev_priv->fbc.lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   dev_priv->fbc.no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The status register and compressed-segment mask differ
		 * per generation; pre-ILK only has the two status bits. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&dev_priv->fbc.lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
1642
1643 static int i915_fbc_false_color_get(void *data, u64 *val)
1644 {
1645         struct drm_i915_private *dev_priv = data;
1646
1647         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1648                 return -ENODEV;
1649
1650         *val = dev_priv->fbc.false_color;
1651
1652         return 0;
1653 }
1654
/*
 * debugfs setter for the FBC false-colour flag: records the value and
 * toggles FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL under the fbc lock.
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	/* Read-modify-write so the other control bits are preserved. */
	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}
1675
/* debugfs file ops tying the false-colour get/set pair together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1679
1680 static int i915_ips_status(struct seq_file *m, void *unused)
1681 {
1682         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1683
1684         if (!HAS_IPS(dev_priv)) {
1685                 seq_puts(m, "not supported\n");
1686                 return 0;
1687         }
1688
1689         intel_runtime_pm_get(dev_priv);
1690
1691         seq_printf(m, "Enabled by kernel parameter: %s\n",
1692                    yesno(i915_modparams.enable_ips));
1693
1694         if (INTEL_GEN(dev_priv) >= 8) {
1695                 seq_puts(m, "Currently: unknown\n");
1696         } else {
1697                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1698                         seq_puts(m, "Currently: enabled\n");
1699                 else
1700                         seq_puts(m, "Currently: disabled\n");
1701         }
1702
1703         intel_runtime_pm_put(dev_priv);
1704
1705         return 0;
1706 }
1707
/*
 * Report whether panel self-refresh is currently enabled, reading the
 * platform-appropriate watermark/self-refresh register.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* The order of these checks matters: newest platforms first,
	 * then register layouts shared by several older platforms. */
	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1737
/*
 * Report the Ironlake (gen5) energy-monitor values: GMCH temperature
 * plus chipset and graphics power draw. -ENODEV on other platforms.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	/* The i915_*_val() helpers are called under struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
1764
/*
 * Print the GPU-to-ring/CPU frequency mapping table obtained from the
 * PCU, one row per GPU frequency step within the soft limits.
 * Only meaningful on parts with LLC.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;
	int gpu_freq, ia_freq;
	unsigned int max_gpu_freq, min_gpu_freq;

	if (!HAS_LLC(dev_priv)) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	/* pcu_lock serializes the pcode mailbox transactions below. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = rps->min_freq_softlimit;
		max_gpu_freq = rps->max_freq_softlimit;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is both input (GPU freq) and output (packed
		 * CPU/ring freqs in 100MHz units) of the pcode call. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      IS_CANNONLAKE(dev_priv) ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1815
1816 static int i915_opregion(struct seq_file *m, void *unused)
1817 {
1818         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1819         struct drm_device *dev = &dev_priv->drm;
1820         struct intel_opregion *opregion = &dev_priv->opregion;
1821         int ret;
1822
1823         ret = mutex_lock_interruptible(&dev->struct_mutex);
1824         if (ret)
1825                 goto out;
1826
1827         if (opregion->header)
1828                 seq_write(m, opregion->header, OPREGION_SIZE);
1829
1830         mutex_unlock(&dev->struct_mutex);
1831
1832 out:
1833         return 0;
1834 }
1835
1836 static int i915_vbt(struct seq_file *m, void *unused)
1837 {
1838         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1839
1840         if (opregion->vbt)
1841                 seq_write(m, opregion->vbt, opregion->vbt_size);
1842
1843         return 0;
1844 }
1845
/*
 * List every framebuffer: the fbdev/fbcon one first (when fbdev
 * emulation is built in), then all user framebuffers, with geometry,
 * format and refcount details plus the backing object description.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* struct_mutex protects describe_obj() against the objects
	 * changing underneath us. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	/* fb_lock protects the framebuffer list during iteration. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Skip the fbdev fb; it was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1895
/* One-line summary of a context ringbuffer: free space and head/tail. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
		   ring->space, ring->head, ring->tail);
}
1901
/*
 * List every GEM context: owning process (or kernel/deleted), remap
 * flag, and per-engine context state and ringbuffer summaries.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	/* The context list and per-context state are protected by
	 * struct_mutex at this point in the driver's history. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* Take a task reference so it cannot go away
			 * while we print its comm/pid. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file_priv is poisoned when the owner closed. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if this context still needs an L3 slice remap. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1953
1954 static const char *swizzle_string(unsigned swizzle)
1955 {
1956         switch (swizzle) {
1957         case I915_BIT_6_SWIZZLE_NONE:
1958                 return "none";
1959         case I915_BIT_6_SWIZZLE_9:
1960                 return "bit9";
1961         case I915_BIT_6_SWIZZLE_9_10:
1962                 return "bit9/bit10";
1963         case I915_BIT_6_SWIZZLE_9_11:
1964                 return "bit9/bit11";
1965         case I915_BIT_6_SWIZZLE_9_10_11:
1966                 return "bit9/bit10/bit11";
1967         case I915_BIT_6_SWIZZLE_9_17:
1968                 return "bit9/bit17";
1969         case I915_BIT_6_SWIZZLE_9_10_17:
1970                 return "bit9/bit10/bit17";
1971         case I915_BIT_6_SWIZZLE_UNKNOWN:
1972                 return "unknown";
1973         }
1974
1975         return "bug";
1976 }
1977
/*
 * Report the detected bit-6 swizzle modes for X/Y tiling, plus the raw
 * DRAM-configuration registers that determine them (gen-dependent).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* gen3/4 expose DRAM channel config via DCC/C*DRB3; gen6+ via
	 * the MAD_DIMM and TILECTL/ARB registers. */
	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2024
2025 static int per_file_ctx(int id, void *ptr, void *data)
2026 {
2027         struct i915_gem_context *ctx = ptr;
2028         struct seq_file *m = data;
2029         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2030
2031         if (!ppgtt) {
2032                 seq_printf(m, "  no ppgtt for context %d\n",
2033                            ctx->user_handle);
2034                 return 0;
2035         }
2036
2037         if (i915_gem_context_is_default(ctx))
2038                 seq_puts(m, "  default context:\n");
2039         else
2040                 seq_printf(m, "  context %d:\n", ctx->user_handle);
2041         ppgtt->debug_dump(ppgtt, m);
2042
2043         return 0;
2044 }
2045
/*
 * Dump the four page-directory pointers (PDPs) of the aliasing PPGTT
 * for every engine on gen8+. No-op when no aliasing PPGTT exists.
 */
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			/* Each PDP is split across an upper and a lower
			 * dword register; read UDW first, then LDW. */
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
2067
/*
 * Dump the gen6/7 per-engine PPGTT registers (PP_DIR_BASE etc.), the
 * aliasing PPGTT contents if present, and the ECOCHK register.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* gen6 has a single global GFX_MODE; gen7 has one per ring,
	 * printed inside the engine loop below. */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2100
/*
 * Top-level PPGTT debugfs entry: dump the gen-appropriate global PPGTT
 * state, then walk every open DRM file and dump each of its contexts'
 * PPGTTs via per_file_ctx().
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	/* filelist_mutex stabilises dev->filelist; struct_mutex covers
	 * the GEM/PPGTT state we are about to read. */
	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* per_file_ctx() receives the seq_file as its opaque
		 * data argument (cast via unsigned long). */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2142
2143 static int count_irq_waiters(struct drm_i915_private *i915)
2144 {
2145         struct intel_engine_cs *engine;
2146         enum intel_engine_id id;
2147         int count = 0;
2148
2149         for_each_engine(engine, i915, id)
2150                 count += intel_engine_has_waiter(engine);
2151
2152         return count;
2153 }
2154
2155 static const char *rps_power_to_str(unsigned int power)
2156 {
2157         static const char * const strings[] = {
2158                 [LOW_POWER] = "low power",
2159                 [BETWEEN] = "mixed",
2160                 [HIGH_POWER] = "high power",
2161         };
2162
2163         if (power >= ARRAY_SIZE(strings) || !strings[power])
2164                 return "unknown";
2165
2166         return strings[power];
2167 }
2168
/*
 * Report RPS (render power/frequency scaling) state: current and limit
 * frequencies, per-client boost counts, and — when the GPU is busy —
 * the up/down autotuning counters for the current power window.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Walk every open DRM file and print its boost count; the task
	 * lookup only needs RCU since we never dereference it later. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	/* The autotuning counters are only meaningful while RPS is
	 * enabled and requests are in flight. */
	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Grab forcewake once for the burst of raw reads. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2238
2239 static int i915_llc(struct seq_file *m, void *data)
2240 {
2241         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2242         const bool edram = INTEL_GEN(dev_priv) > 8;
2243
2244         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2245         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2246                    intel_uncore_edram_size(dev_priv)/1024/1024);
2247
2248         return 0;
2249 }
2250
/*
 * i915_huc_load_status_info - debugfs dump of HuC firmware load state.
 *
 * Prints the cached HuC firmware descriptor followed by the live
 * HUC_STATUS2 register. Prints nothing on hardware without HuC.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;

	if (!HAS_HUC_UCODE(dev_priv))
		return 0;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	/* The register read below requires the device to be awake. */
	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}
2268
/*
 * i915_guc_load_status_info - debugfs dump of GuC firmware load state.
 *
 * Prints the cached GuC firmware descriptor, decodes the live
 * GUC_STATUS register (bootrom / uKernel / MIA core fields) and dumps
 * the 16 SOFT_SCRATCH registers. Prints nothing on hardware without GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* The register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2300
2301 static void i915_guc_log_info(struct seq_file *m,
2302                               struct drm_i915_private *dev_priv)
2303 {
2304         struct intel_guc *guc = &dev_priv->guc;
2305
2306         seq_puts(m, "\nGuC logging stats:\n");
2307
2308         seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
2309                    guc->log.flush_count[GUC_ISR_LOG_BUFFER],
2310                    guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
2311
2312         seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
2313                    guc->log.flush_count[GUC_DPC_LOG_BUFFER],
2314                    guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
2315
2316         seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
2317                    guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
2318                    guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
2319
2320         seq_printf(m, "\tTotal flush interrupt count: %u\n",
2321                    guc->log.flush_interrupt_count);
2322
2323         seq_printf(m, "\tCapture miss count: %u\n",
2324                    guc->log.capture_miss_count);
2325 }
2326
2327 static void i915_guc_client_info(struct seq_file *m,
2328                                  struct drm_i915_private *dev_priv,
2329                                  struct intel_guc_client *client)
2330 {
2331         struct intel_engine_cs *engine;
2332         enum intel_engine_id id;
2333         uint64_t tot = 0;
2334
2335         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2336                 client->priority, client->stage_id, client->proc_desc_offset);
2337         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2338                 client->doorbell_id, client->doorbell_offset);
2339
2340         for_each_engine(engine, dev_priv, id) {
2341                 u64 submissions = client->submissions[id];
2342                 tot += submissions;
2343                 seq_printf(m, "\tSubmissions: %llu %s\n",
2344                                 submissions, engine->name);
2345         }
2346         seq_printf(m, "\tTotal: %llu\n", tot);
2347 }
2348
2349 static bool check_guc_submission(struct seq_file *m)
2350 {
2351         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2352         const struct intel_guc *guc = &dev_priv->guc;
2353
2354         if (!guc->execbuf_client) {
2355                 seq_printf(m, "GuC submission %s\n",
2356                            HAS_GUC_SCHED(dev_priv) ?
2357                            "disabled" :
2358                            "not supported");
2359                 return false;
2360         }
2361
2362         return true;
2363 }
2364
2365 static int i915_guc_info(struct seq_file *m, void *data)
2366 {
2367         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2368         const struct intel_guc *guc = &dev_priv->guc;
2369
2370         if (!check_guc_submission(m))
2371                 return 0;
2372
2373         seq_printf(m, "Doorbell map:\n");
2374         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2375         seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
2376
2377         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2378         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2379         seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
2380         i915_guc_client_info(m, dev_priv, guc->preempt_client);
2381
2382         i915_guc_log_info(m, dev_priv);
2383
2384         /* Add more as required ... */
2385
2386         return 0;
2387 }
2388
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor.
 *
 * Walks the shared stage-descriptor pool and, for each descriptor with
 * the ACTIVE attribute set, prints its doorbell/workqueue setup plus
 * the per-engine execlist context state for the engines used by the
 * execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	/* Guarantees client (execbuf_client) is non-NULL below. */
	if (!check_guc_submission(m))
		return 0;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip pool slots that are not currently in use. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine execlist context state for this stage. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2442
2443 static int i915_guc_log_dump(struct seq_file *m, void *data)
2444 {
2445         struct drm_info_node *node = m->private;
2446         struct drm_i915_private *dev_priv = node_to_i915(node);
2447         bool dump_load_err = !!node->info_ent->data;
2448         struct drm_i915_gem_object *obj = NULL;
2449         u32 *log;
2450         int i = 0;
2451
2452         if (dump_load_err)
2453                 obj = dev_priv->guc.load_err_log;
2454         else if (dev_priv->guc.log.vma)
2455                 obj = dev_priv->guc.log.vma->obj;
2456
2457         if (!obj)
2458                 return 0;
2459
2460         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2461         if (IS_ERR(log)) {
2462                 DRM_DEBUG("Failed to pin object\n");
2463                 seq_puts(m, "(log data unaccessible)\n");
2464                 return PTR_ERR(log);
2465         }
2466
2467         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2468                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2469                            *(log + i), *(log + i + 1),
2470                            *(log + i + 2), *(log + i + 3));
2471
2472         seq_putc(m, '\n');
2473
2474         i915_gem_object_unpin_map(obj);
2475
2476         return 0;
2477 }
2478
/*
 * i915_guc_log_control_get - debugfs read: current GuC log verbosity.
 *
 * Returns -EINVAL when no GuC log buffer has been allocated.
 */
static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915_modparams.guc_log_level;

	return 0;
}
2490
/*
 * i915_guc_log_control_set - debugfs write: change GuC log verbosity.
 *
 * Takes struct_mutex (interruptibly) and a runtime-PM wakeref around
 * the GuC request. Returns -EINVAL when no log buffer exists, or the
 * error from the lock/GuC call.
 */
static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	/* Talking to the GuC requires the device to be awake. */
	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
2510
/* debugfs file ops for i915_guc_log_control: signed-decimal get/set. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");
2514
2515 static const char *psr2_live_status(u32 val)
2516 {
2517         static const char * const live_status[] = {
2518                 "IDLE",
2519                 "CAPTURE",
2520                 "CAPTURE_FS",
2521                 "SLEEP",
2522                 "BUFON_FW",
2523                 "ML_UP",
2524                 "SU_STANDBY",
2525                 "FAST_SLEEP",
2526                 "DEEP_SLEEP",
2527                 "BUF_ON",
2528                 "TG_ON"
2529         };
2530
2531         val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2532         if (val < ARRAY_SIZE(live_status))
2533                 return live_status[val];
2534
2535         return "unknown";
2536 }
2537
2538 static int i915_edp_psr_status(struct seq_file *m, void *data)
2539 {
2540         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2541         u32 psrperf = 0;
2542         u32 stat[3];
2543         enum pipe pipe;
2544         bool enabled = false;
2545
2546         if (!HAS_PSR(dev_priv)) {
2547                 seq_puts(m, "PSR not supported\n");
2548                 return 0;
2549         }
2550
2551         intel_runtime_pm_get(dev_priv);
2552
2553         mutex_lock(&dev_priv->psr.lock);
2554         seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2555         seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2556         seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2557         seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2558         seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2559                    dev_priv->psr.busy_frontbuffer_bits);
2560         seq_printf(m, "Re-enable work scheduled: %s\n",
2561                    yesno(work_busy(&dev_priv->psr.work.work)));
2562
2563         if (HAS_DDI(dev_priv)) {
2564                 if (dev_priv->psr.psr2_support)
2565                         enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2566                 else
2567                         enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2568         } else {
2569                 for_each_pipe(dev_priv, pipe) {
2570                         enum transcoder cpu_transcoder =
2571                                 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2572                         enum intel_display_power_domain power_domain;
2573
2574                         power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2575                         if (!intel_display_power_get_if_enabled(dev_priv,
2576                                                                 power_domain))
2577                                 continue;
2578
2579                         stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2580                                 VLV_EDP_PSR_CURR_STATE_MASK;
2581                         if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2582                             (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2583                                 enabled = true;
2584
2585                         intel_display_power_put(dev_priv, power_domain);
2586                 }
2587         }
2588
2589         seq_printf(m, "Main link in standby mode: %s\n",
2590                    yesno(dev_priv->psr.link_standby));
2591
2592         seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2593
2594         if (!HAS_DDI(dev_priv))
2595                 for_each_pipe(dev_priv, pipe) {
2596                         if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2597                             (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2598                                 seq_printf(m, " pipe %c", pipe_name(pipe));
2599                 }
2600         seq_puts(m, "\n");
2601
2602         /*
2603          * VLV/CHV PSR has no kind of performance counter
2604          * SKL+ Perf counter is reset to 0 everytime DC state is entered
2605          */
2606         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2607                 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2608                         EDP_PSR_PERF_CNT_MASK;
2609
2610                 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2611         }
2612         if (dev_priv->psr.psr2_support) {
2613                 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
2614
2615                 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
2616                            psr2, psr2_live_status(psr2));
2617         }
2618         mutex_unlock(&dev_priv->psr.lock);
2619
2620         intel_runtime_pm_put(dev_priv);
2621         return 0;
2622 }
2623
/*
 * i915_sink_crc - read the sink-side CRC for the first active eDP
 * connector and print it as six hex bytes.
 *
 * Uses a drm_modeset_acquire_ctx so lock acquisition can back off and
 * retry on -EDEADLK, and waits for any pending commit on the CRTC so
 * the CRC reflects the latest frame. Returns -ENODEV when no active
 * eDP connector is found.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		/* Connector not driving anything: try the next one.
		 * (Locks taken so far stay tracked in ctx and are
		 * released at out:.)
		 */
		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* Lock contention with another context: back off, retry. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	/* No active eDP connector present. */
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2703
/*
 * i915_energy_uJ - report GPU energy consumption in microjoules.
 *
 * Scales the MCH_SECP_NRG_STTS counter by the RAPL energy-status-units
 * field from MSR_RAPL_POWER_UNIT. Gen6+ only.
 *
 * NOTE(review): output has no trailing newline — presumably existing
 * consumers parse it that way; confirm before changing.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* rdmsrl_safe() returns non-zero if the MSR read faulted. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Extract the energy-status-units field (bits 12:8 of the MSR). */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2730
2731 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2732 {
2733         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2734         struct pci_dev *pdev = dev_priv->drm.pdev;
2735
2736         if (!HAS_RUNTIME_PM(dev_priv))
2737                 seq_puts(m, "Runtime power management not supported\n");
2738
2739         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2740         seq_printf(m, "IRQs disabled: %s\n",
2741                    yesno(!intel_irqs_enabled(dev_priv)));
2742 #ifdef CONFIG_PM
2743         seq_printf(m, "Usage count: %d\n",
2744                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2745 #else
2746         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2747 #endif
2748         seq_printf(m, "PCI device power state: %s [%d]\n",
2749                    pci_power_name(pdev->current_state),
2750                    pdev->current_state);
2751
2752         return 0;
2753 }
2754
2755 static int i915_power_domain_info(struct seq_file *m, void *unused)
2756 {
2757         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2758         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2759         int i;
2760
2761         mutex_lock(&power_domains->lock);
2762
2763         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2764         for (i = 0; i < power_domains->power_well_count; i++) {
2765                 struct i915_power_well *power_well;
2766                 enum intel_display_power_domain power_domain;
2767
2768                 power_well = &power_domains->power_wells[i];
2769                 seq_printf(m, "%-25s %d\n", power_well->name,
2770                            power_well->count);
2771
2772                 for_each_power_domain(power_domain, power_well->domains)
2773                         seq_printf(m, "  %-23s %d\n",
2774                                  intel_display_power_domain_str(power_domain),
2775                                  power_domains->domain_use_count[power_domain]);
2776         }
2777
2778         mutex_unlock(&power_domains->lock);
2779
2780         return 0;
2781 }
2782
/*
 * i915_dmc_info - dump DMC/CSR firmware state.
 *
 * Shows whether the CSR firmware is loaded, its path and version, the
 * DC-state transition counters (on platform/firmware combinations that
 * expose them) and a few CSR registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	/* The register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload there is no version or DC counters to show. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC-state counters exist only on these platform/fw versions. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2826
2827 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2828                                  struct drm_display_mode *mode)
2829 {
2830         int i;
2831
2832         for (i = 0; i < tabs; i++)
2833                 seq_putc(m, '\t');
2834
2835         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2836                    mode->base.id, mode->name,
2837                    mode->vrefresh, mode->clock,
2838                    mode->hdisplay, mode->hsync_start,
2839                    mode->hsync_end, mode->htotal,
2840                    mode->vdisplay, mode->vsync_start,
2841                    mode->vsync_end, mode->vtotal,
2842                    mode->type, mode->flags);
2843 }
2844
2845 static void intel_encoder_info(struct seq_file *m,
2846                                struct intel_crtc *intel_crtc,
2847                                struct intel_encoder *intel_encoder)
2848 {
2849         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2850         struct drm_device *dev = &dev_priv->drm;
2851         struct drm_crtc *crtc = &intel_crtc->base;
2852         struct intel_connector *intel_connector;
2853         struct drm_encoder *encoder;
2854
2855         encoder = &intel_encoder->base;
2856         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2857                    encoder->base.id, encoder->name);
2858         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2859                 struct drm_connector *connector = &intel_connector->base;
2860                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2861                            connector->base.id,
2862                            connector->name,
2863                            drm_get_connector_status_name(connector->status));
2864                 if (connector->status == connector_status_connected) {
2865                         struct drm_display_mode *mode = &crtc->mode;
2866                         seq_printf(m, ", mode:\n");
2867                         intel_seq_print_mode(m, 2, mode);
2868                 } else {
2869                         seq_putc(m, '\n');
2870                 }
2871         }
2872 }
2873
2874 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2875 {
2876         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2877         struct drm_device *dev = &dev_priv->drm;
2878         struct drm_crtc *crtc = &intel_crtc->base;
2879         struct intel_encoder *intel_encoder;
2880         struct drm_plane_state *plane_state = crtc->primary->state;
2881         struct drm_framebuffer *fb = plane_state->fb;
2882
2883         if (fb)
2884                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2885                            fb->base.id, plane_state->src_x >> 16,
2886                            plane_state->src_y >> 16, fb->width, fb->height);
2887         else
2888                 seq_puts(m, "\tprimary plane disabled\n");
2889         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2890                 intel_encoder_info(m, intel_crtc, intel_encoder);
2891 }
2892
2893 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2894 {
2895         struct drm_display_mode *mode = panel->fixed_mode;
2896
2897         seq_printf(m, "\tfixed mode:\n");
2898         intel_seq_print_mode(m, 2, mode);
2899 }
2900
2901 static void intel_dp_info(struct seq_file *m,
2902                           struct intel_connector *intel_connector)
2903 {
2904         struct intel_encoder *intel_encoder = intel_connector->encoder;
2905         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2906
2907         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2908         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2909         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2910                 intel_panel_info(m, &intel_connector->panel);
2911
2912         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2913                                 &intel_dp->aux);
2914 }
2915
2916 static void intel_dp_mst_info(struct seq_file *m,
2917                           struct intel_connector *intel_connector)
2918 {
2919         struct intel_encoder *intel_encoder = intel_connector->encoder;
2920         struct intel_dp_mst_encoder *intel_mst =
2921                 enc_to_mst(&intel_encoder->base);
2922         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2923         struct intel_dp *intel_dp = &intel_dig_port->dp;
2924         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2925                                         intel_connector->port);
2926
2927         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2928 }
2929
2930 static void intel_hdmi_info(struct seq_file *m,
2931                             struct intel_connector *intel_connector)
2932 {
2933         struct intel_encoder *intel_encoder = intel_connector->encoder;
2934         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2935
2936         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2937 }
2938
/* LVDS-specific connector info: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2944
/*
 * intel_connector_info - print one connector's status and details.
 *
 * For connected connectors also dumps display info (name, physical
 * size, subpixel order, CEA revision), then dispatches to the helper
 * matching the connector/encoder type, and finally lists all modes.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* No encoder attached: nothing type-specific to print. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2994
/* Short human-readable tag for a drm_plane_type, used in plane dumps. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3012
/*
 * Format a rotation/reflection bitmask as a human-readable string,
 * e.g. "0 FLIPX (0x00000011)".
 *
 * NOTE(review): the returned pointer refers to a static buffer, so this
 * helper is not reentrant; concurrent debugfs readers could race on it.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3032
3033 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3034 {
3035         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3036         struct drm_device *dev = &dev_priv->drm;
3037         struct intel_plane *intel_plane;
3038
3039         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3040                 struct drm_plane_state *state;
3041                 struct drm_plane *plane = &intel_plane->base;
3042                 struct drm_format_name_buf format_name;
3043
3044                 if (!plane->state) {
3045                         seq_puts(m, "plane->state is NULL!\n");
3046                         continue;
3047                 }
3048
3049                 state = plane->state;
3050
3051                 if (state->fb) {
3052                         drm_get_format_name(state->fb->format->format,
3053                                             &format_name);
3054                 } else {
3055                         sprintf(format_name.str, "N/A");
3056                 }
3057
3058                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3059                            plane->base.id,
3060                            plane_type(intel_plane->base.type),
3061                            state->crtc_x, state->crtc_y,
3062                            state->crtc_w, state->crtc_h,
3063                            (state->src_x >> 16),
3064                            ((state->src_x & 0xffff) * 15625) >> 10,
3065                            (state->src_y >> 16),
3066                            ((state->src_y & 0xffff) * 15625) >> 10,
3067                            (state->src_w >> 16),
3068                            ((state->src_w & 0xffff) * 15625) >> 10,
3069                            (state->src_h >> 16),
3070                            ((state->src_h & 0xffff) * 15625) >> 10,
3071                            format_name.str,
3072                            plane_rotation(state->rotation));
3073         }
3074 }
3075
3076 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3077 {
3078         struct intel_crtc_state *pipe_config;
3079         int num_scalers = intel_crtc->num_scalers;
3080         int i;
3081
3082         pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3083
3084         /* Not all platformas have a scaler */
3085         if (num_scalers) {
3086                 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3087                            num_scalers,
3088                            pipe_config->scaler_state.scaler_users,
3089                            pipe_config->scaler_state.scaler_id);
3090
3091                 for (i = 0; i < num_scalers; i++) {
3092                         struct intel_scaler *sc =
3093                                         &pipe_config->scaler_state.scalers[i];
3094
3095                         seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3096                                    i, yesno(sc->in_use), sc->mode);
3097                 }
3098                 seq_puts(m, "\n");
3099         } else {
3100                 seq_puts(m, "\tNo scalers available on this platform\n");
3101         }
3102 }
3103
/*
 * Dump CRTC and connector state for the whole display pipeline.
 * Takes a runtime PM reference so the state printed from hardware-backed
 * fields is valid, and the relevant modeset locks around each section.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* per-CRTC lock so the state can't change while we print it */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	/* mode_config.mutex protects the connector list and mode lists */
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3164
3165 static int i915_engine_info(struct seq_file *m, void *unused)
3166 {
3167         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3168         struct intel_engine_cs *engine;
3169         enum intel_engine_id id;
3170         struct drm_printer p;
3171
3172         intel_runtime_pm_get(dev_priv);
3173
3174         seq_printf(m, "GT awake? %s\n",
3175                    yesno(dev_priv->gt.awake));
3176         seq_printf(m, "Global active requests: %d\n",
3177                    dev_priv->gt.active_requests);
3178         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3179                    dev_priv->info.cs_timestamp_frequency_khz);
3180
3181         p = drm_seq_file_printer(m);
3182         for_each_engine(engine, dev_priv, id)
3183                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3184
3185         intel_runtime_pm_put(dev_priv);
3186
3187         return 0;
3188 }
3189
3190 static int i915_shrinker_info(struct seq_file *m, void *unused)
3191 {
3192         struct drm_i915_private *i915 = node_to_i915(m->private);
3193
3194         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3195         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3196
3197         return 0;
3198 }
3199
3200 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3201 {
3202         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3203         struct drm_device *dev = &dev_priv->drm;
3204         int i;
3205
3206         drm_modeset_lock_all(dev);
3207         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3208                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3209
3210                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3211                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3212                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3213                 seq_printf(m, " tracked hardware state:\n");
3214                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3215                 seq_printf(m, " dpll_md: 0x%08x\n",
3216                            pll->state.hw_state.dpll_md);
3217                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3218                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3219                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3220         }
3221         drm_modeset_unlock_all(dev);
3222
3223         return 0;
3224 }
3225
/*
 * Dump the workaround (WA) registers programmed at init and verify each
 * one still holds its expected value: a masked compare of a live MMIO
 * read against the value that was written.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* hold a wakeref so the I915_READ()s below hit live hardware */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		/* only the masked bits of the register are meaningful */
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
3265
3266 static int i915_ipc_status_show(struct seq_file *m, void *data)
3267 {
3268         struct drm_i915_private *dev_priv = m->private;
3269
3270         seq_printf(m, "Isochronous Priority Control: %s\n",
3271                         yesno(dev_priv->ipc_enabled));
3272         return 0;
3273 }
3274
3275 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3276 {
3277         struct drm_i915_private *dev_priv = inode->i_private;
3278
3279         if (!HAS_IPC(dev_priv))
3280                 return -ENODEV;
3281
3282         return single_open(file, i915_ipc_status_show, dev_priv);
3283 }
3284
3285 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3286                                      size_t len, loff_t *offp)
3287 {
3288         struct seq_file *m = file->private_data;
3289         struct drm_i915_private *dev_priv = m->private;
3290         int ret;
3291         bool enable;
3292
3293         ret = kstrtobool_from_user(ubuf, len, &enable);
3294         if (ret < 0)
3295                 return ret;
3296
3297         intel_runtime_pm_get(dev_priv);
3298         if (!dev_priv->ipc_enabled && enable)
3299                 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3300         dev_priv->wm.distrust_bios_wm = true;
3301         dev_priv->ipc_enabled = enable;
3302         intel_enable_ipc(dev_priv);
3303         intel_runtime_pm_put(dev_priv);
3304
3305         return len;
3306 }
3307
/* debugfs i915_ipc_status: read current state; write a bool to toggle IPC */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3316
/*
 * Dump the Display Data Buffer (DDB) allocation tracked in the skl
 * watermark state: start/end/size per plane (and cursor) per pipe.
 * The DDB allocation only exists on gen9+ hardware.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* the cursor plane is tracked separately from the others */
		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3354
/*
 * Print the DRRS (Display Refresh Rate Switching) state for one CRTC:
 * the connectors it drives, the VBT-advertised DRRS type, and — when
 * DRRS is active on this CRTC — the current refresh-rate state, read
 * under drrs->mutex. Note the early returns below unlock the mutex
 * explicitly before bailing out.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* list connectors currently driven by this CRTC */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3426
3427 static int i915_drrs_status(struct seq_file *m, void *unused)
3428 {
3429         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3430         struct drm_device *dev = &dev_priv->drm;
3431         struct intel_crtc *intel_crtc;
3432         int active_crtc_cnt = 0;
3433
3434         drm_modeset_lock_all(dev);
3435         for_each_intel_crtc(dev, intel_crtc) {
3436                 if (intel_crtc->base.state->active) {
3437                         active_crtc_cnt++;
3438                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3439
3440                         drrs_status_per_crtc(m, dev, intel_crtc);
3441                 }
3442         }
3443         drm_modeset_unlock_all(dev);
3444
3445         if (!active_crtc_cnt)
3446                 seq_puts(m, "No active crtc found\n");
3447
3448         return 0;
3449 }
3450
3451 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3452 {
3453         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3454         struct drm_device *dev = &dev_priv->drm;
3455         struct intel_encoder *intel_encoder;
3456         struct intel_digital_port *intel_dig_port;
3457         struct drm_connector *connector;
3458         struct drm_connector_list_iter conn_iter;
3459
3460         drm_connector_list_iter_begin(dev, &conn_iter);
3461         drm_for_each_connector_iter(connector, &conn_iter) {
3462                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3463                         continue;
3464
3465                 intel_encoder = intel_attached_encoder(connector);
3466                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3467                         continue;
3468
3469                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3470                 if (!intel_dig_port->dp.can_mst)
3471                         continue;
3472
3473                 seq_printf(m, "MST Source Port %c\n",
3474                            port_name(intel_dig_port->base.port));
3475                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3476         }
3477         drm_connector_list_iter_end(&conn_iter);
3478
3479         return 0;
3480 }
3481
/*
 * Write handler for the DP compliance "test active" knob. Parses an
 * integer from userspace and sets/clears compliance.test_active on
 * every connected (non-MST) DP connector; only the literal value 1
 * activates testing. A parse error aborts the loop and is returned.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* copy and NUL-terminate the user buffer for kstrtoint() below */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3540
3541 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3542 {
3543         struct drm_device *dev = m->private;
3544         struct drm_connector *connector;
3545         struct drm_connector_list_iter conn_iter;
3546         struct intel_dp *intel_dp;
3547
3548         drm_connector_list_iter_begin(dev, &conn_iter);
3549         drm_for_each_connector_iter(connector, &conn_iter) {
3550                 struct intel_encoder *encoder;
3551
3552                 if (connector->connector_type !=
3553                     DRM_MODE_CONNECTOR_DisplayPort)
3554                         continue;
3555
3556                 encoder = to_intel_encoder(connector->encoder);
3557                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3558                         continue;
3559
3560                 if (encoder && connector->status == connector_status_connected) {
3561                         intel_dp = enc_to_intel_dp(&encoder->base);
3562                         if (intel_dp->compliance.test_active)
3563                                 seq_puts(m, "1");
3564                         else
3565                                 seq_puts(m, "0");
3566                 } else
3567                         seq_puts(m, "0");
3568         }
3569         drm_connector_list_iter_end(&conn_iter);
3570
3571         return 0;
3572 }
3573
/* Open hook: the seq_file private data is the drm_device, not i915. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_active_show,
			   &dev_priv->drm);
}
3582
/* debugfs i915_dp_test_active: read state; write to (de)activate testing */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3591
3592 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3593 {
3594         struct drm_device *dev = m->private;
3595         struct drm_connector *connector;
3596         struct drm_connector_list_iter conn_iter;
3597         struct intel_dp *intel_dp;
3598
3599         drm_connector_list_iter_begin(dev, &conn_iter);
3600         drm_for_each_connector_iter(connector, &conn_iter) {
3601                 struct intel_encoder *encoder;
3602
3603                 if (connector->connector_type !=
3604                     DRM_MODE_CONNECTOR_DisplayPort)
3605                         continue;
3606
3607                 encoder = to_intel_encoder(connector->encoder);
3608                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3609                         continue;
3610
3611                 if (encoder && connector->status == connector_status_connected) {
3612                         intel_dp = enc_to_intel_dp(&encoder->base);
3613                         if (intel_dp->compliance.test_type ==
3614                             DP_TEST_LINK_EDID_READ)
3615                                 seq_printf(m, "%lx",
3616                                            intel_dp->compliance.test_data.edid);
3617                         else if (intel_dp->compliance.test_type ==
3618                                  DP_TEST_LINK_VIDEO_PATTERN) {
3619                                 seq_printf(m, "hdisplay: %d\n",
3620                                            intel_dp->compliance.test_data.hdisplay);
3621                                 seq_printf(m, "vdisplay: %d\n",
3622                                            intel_dp->compliance.test_data.vdisplay);
3623                                 seq_printf(m, "bpc: %u\n",
3624                                            intel_dp->compliance.test_data.bpc);
3625                         }
3626                 } else
3627                         seq_puts(m, "0");
3628         }
3629         drm_connector_list_iter_end(&conn_iter);
3630
3631         return 0;
3632 }
/* Open hook: the seq_file private data is the drm_device, not i915. */
static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_data_show,
			   &dev_priv->drm);
}
3641
/* debugfs i915_dp_test_data: read-only DP compliance test data */
static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
3649
3650 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3651 {
3652         struct drm_device *dev = m->private;
3653         struct drm_connector *connector;
3654         struct drm_connector_list_iter conn_iter;
3655         struct intel_dp *intel_dp;
3656
3657         drm_connector_list_iter_begin(dev, &conn_iter);
3658         drm_for_each_connector_iter(connector, &conn_iter) {
3659                 struct intel_encoder *encoder;
3660
3661                 if (connector->connector_type !=
3662                     DRM_MODE_CONNECTOR_DisplayPort)
3663                         continue;
3664
3665                 encoder = to_intel_encoder(connector->encoder);
3666                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3667                         continue;
3668
3669                 if (encoder && connector->status == connector_status_connected) {
3670                         intel_dp = enc_to_intel_dp(&encoder->base);
3671                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3672                 } else
3673                         seq_puts(m, "0");
3674         }
3675         drm_connector_list_iter_end(&conn_iter);
3676
3677         return 0;
3678 }
3679
/* Open hook: the seq_file private data is the drm_device, not i915. */
static int i915_displayport_test_type_open(struct inode *inode,
				       struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	return single_open(file, i915_displayport_test_type_show,
			   &dev_priv->drm);
}
3688
/* debugfs i915_dp_test_type: read-only DP compliance test type */
static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
3696
3697 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3698 {
3699         struct drm_i915_private *dev_priv = m->private;
3700         struct drm_device *dev = &dev_priv->drm;
3701         int level;
3702         int num_levels;
3703
3704         if (IS_CHERRYVIEW(dev_priv))
3705                 num_levels = 3;
3706         else if (IS_VALLEYVIEW(dev_priv))
3707                 num_levels = 1;
3708         else if (IS_G4X(dev_priv))
3709                 num_levels = 3;
3710         else
3711                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3712
3713         drm_modeset_lock_all(dev);
3714
3715         for (level = 0; level < num_levels; level++) {
3716                 unsigned int latency = wm[level];
3717
3718                 /*
3719                  * - WM1+ latency values in 0.5us units
3720                  * - latencies are in us on gen9/vlv/chv
3721                  */
3722                 if (INTEL_GEN(dev_priv) >= 9 ||
3723                     IS_VALLEYVIEW(dev_priv) ||
3724                     IS_CHERRYVIEW(dev_priv) ||
3725                     IS_G4X(dev_priv))
3726                         latency *= 10;
3727                 else if (level > 0)
3728                         latency *= 5;
3729
3730                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3731                            level, wm[level], latency / 10, latency % 10);
3732         }
3733
3734         drm_modeset_unlock_all(dev);
3735 }
3736
3737 static int pri_wm_latency_show(struct seq_file *m, void *data)
3738 {
3739         struct drm_i915_private *dev_priv = m->private;
3740         const uint16_t *latencies;
3741
3742         if (INTEL_GEN(dev_priv) >= 9)
3743                 latencies = dev_priv->wm.skl_latency;
3744         else
3745                 latencies = dev_priv->wm.pri_latency;
3746
3747         wm_latency_show(m, latencies);
3748
3749         return 0;
3750 }
3751
3752 static int spr_wm_latency_show(struct seq_file *m, void *data)
3753 {
3754         struct drm_i915_private *dev_priv = m->private;
3755         const uint16_t *latencies;
3756
3757         if (INTEL_GEN(dev_priv) >= 9)
3758                 latencies = dev_priv->wm.skl_latency;
3759         else
3760                 latencies = dev_priv->wm.spr_latency;
3761
3762         wm_latency_show(m, latencies);
3763
3764         return 0;
3765 }
3766
3767 static int cur_wm_latency_show(struct seq_file *m, void *data)
3768 {
3769         struct drm_i915_private *dev_priv = m->private;
3770         const uint16_t *latencies;
3771
3772         if (INTEL_GEN(dev_priv) >= 9)
3773                 latencies = dev_priv->wm.skl_latency;
3774         else
3775                 latencies = dev_priv->wm.cur_latency;
3776
3777         wm_latency_show(m, latencies);
3778
3779         return 0;
3780 }
3781
3782 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3783 {
3784         struct drm_i915_private *dev_priv = inode->i_private;
3785
3786         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3787                 return -ENODEV;
3788
3789         return single_open(file, pri_wm_latency_show, dev_priv);
3790 }
3791
3792 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3793 {
3794         struct drm_i915_private *dev_priv = inode->i_private;
3795
3796         if (HAS_GMCH_DISPLAY(dev_priv))
3797                 return -ENODEV;
3798
3799         return single_open(file, spr_wm_latency_show, dev_priv);
3800 }
3801
3802 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3803 {
3804         struct drm_i915_private *dev_priv = inode->i_private;
3805
3806         if (HAS_GMCH_DISPLAY(dev_priv))
3807                 return -ENODEV;
3808
3809         return single_open(file, cur_wm_latency_show, dev_priv);
3810 }
3811
/*
 * Parse up to eight space-separated watermark latency values from
 * userspace and publish them into @wm under the modeset locks.
 *
 * Returns the number of bytes consumed, or a negative errno if the
 * write is too long or does not contain exactly the number of values
 * the platform expects.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Each platform exposes a different number of watermark levels. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Leave room for the terminating NUL appended below. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* The user must supply exactly num_levels values. */
	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3856
3857
3858 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3859                                     size_t len, loff_t *offp)
3860 {
3861         struct seq_file *m = file->private_data;
3862         struct drm_i915_private *dev_priv = m->private;
3863         uint16_t *latencies;
3864
3865         if (INTEL_GEN(dev_priv) >= 9)
3866                 latencies = dev_priv->wm.skl_latency;
3867         else
3868                 latencies = dev_priv->wm.pri_latency;
3869
3870         return wm_latency_write(file, ubuf, len, offp, latencies);
3871 }
3872
3873 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3874                                     size_t len, loff_t *offp)
3875 {
3876         struct seq_file *m = file->private_data;
3877         struct drm_i915_private *dev_priv = m->private;
3878         uint16_t *latencies;
3879
3880         if (INTEL_GEN(dev_priv) >= 9)
3881                 latencies = dev_priv->wm.skl_latency;
3882         else
3883                 latencies = dev_priv->wm.spr_latency;
3884
3885         return wm_latency_write(file, ubuf, len, offp, latencies);
3886 }
3887
3888 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3889                                     size_t len, loff_t *offp)
3890 {
3891         struct seq_file *m = file->private_data;
3892         struct drm_i915_private *dev_priv = m->private;
3893         uint16_t *latencies;
3894
3895         if (INTEL_GEN(dev_priv) >= 9)
3896                 latencies = dev_priv->wm.skl_latency;
3897         else
3898                 latencies = dev_priv->wm.cur_latency;
3899
3900         return wm_latency_write(file, ubuf, len, offp, latencies);
3901 }
3902
/*
 * File operations for the pri/spr/cur watermark latency debugfs files;
 * reads go through the seq_file show hooks, writes through the
 * *_wm_latency_write handlers above.
 */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3929
/* Report whether the GPU is terminally wedged (unrecoverable). */
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}

/*
 * Manually raise a GPU error for the engine mask in @val and wait for
 * the reset handoff to complete before returning.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Seed hangcheck state for the selected engines before raising
	 * the error. */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);

	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
3975
/*
 * Helper for the missed/test irq debugfs files: idle the GPU under
 * struct_mutex, store the new fault-injection mask into @irq, then
 * flush the idle worker so the interrupt state is disarmed before
 * returning. Returns 0 or a negative errno from locking/idling.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4005
4006 static int
4007 i915_ring_missed_irq_get(void *data, u64 *val)
4008 {
4009         struct drm_i915_private *dev_priv = data;
4010
4011         *val = dev_priv->gpu_error.missed_irq_rings;
4012         return 0;
4013 }
4014
4015 static int
4016 i915_ring_missed_irq_set(void *data, u64 val)
4017 {
4018         struct drm_i915_private *i915 = data;
4019
4020         return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4021 }
4022
4023 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4024                         i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4025                         "0x%08llx\n");
4026
4027 static int
4028 i915_ring_test_irq_get(void *data, u64 *val)
4029 {
4030         struct drm_i915_private *dev_priv = data;
4031
4032         *val = dev_priv->gpu_error.test_irq_rings;
4033
4034         return 0;
4035 }
4036
4037 static int
4038 i915_ring_test_irq_set(void *data, u64 val)
4039 {
4040         struct drm_i915_private *i915 = data;
4041
4042         val &= INTEL_INFO(i915)->ring_mask;
4043         DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4044
4045         return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4046 }
4047
4048 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4049                         i915_ring_test_irq_get, i915_ring_test_irq_set,
4050                         "0x%08llx\n");
4051
/*
 * Flags accepted by the i915_drop_caches debugfs file; reading the
 * file reports DROP_ALL, writing a mask performs the requested drops.
 */
#define DROP_UNBOUND    BIT(0)
#define DROP_BOUND      BIT(1)
#define DROP_RETIRE     BIT(2)
#define DROP_ACTIVE     BIT(3)
#define DROP_FREED      BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE       BIT(6)
#define DROP_ALL (DROP_UNBOUND  | \
		  DROP_BOUND    | \
		  DROP_RETIRE   | \
		  DROP_ACTIVE   | \
		  DROP_FREED    | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
/* Advertise the full set of supported flags to userspace. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4073
/*
 * Perform the cache drops requested by the DROP_* mask in @val:
 * idle/retire under struct_mutex, shrink under the fs_reclaim
 * annotation, then optionally drain the idle worker and freed-object
 * list. Returns 0 or the first error from locking/idling.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_gem_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	/* Shrinking is annotated as reclaim for lockdep's benefit. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED) {
		synchronize_rcu();
		i915_gem_drain_freed_objects(dev_priv);
	}

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4127
/*
 * Report the current RPS max frequency softlimit, converted to a
 * user-visible frequency via intel_gpu_freq().
 */
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
	return 0;
}

/*
 * Set a new RPS max softlimit from @val. The value must lie within the
 * hardware min/max and must not fall below the current min softlimit.
 */
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	/* RPS frequency control requires gen6+. */
	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->max_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
4183
/*
 * Report the current RPS min frequency softlimit, converted to a
 * user-visible frequency via intel_gpu_freq().
 */
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
	return 0;
}

/*
 * Set a new RPS min softlimit from @val. The value must lie within the
 * hardware min/max and must not exceed the current max softlimit.
 */
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 hw_max, hw_min;
	int ret;

	/* RPS frequency control requires gen6+. */
	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = rps->max_freq;
	hw_min = rps->min_freq;

	if (val < hw_min ||
	    val > hw_max || val > rps->max_freq_softlimit) {
		mutex_unlock(&dev_priv->pcu_lock);
		return -EINVAL;
	}

	rps->min_freq_softlimit = val;

	if (intel_set_rps(dev_priv, val))
		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");

	mutex_unlock(&dev_priv->pcu_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
4240
/*
 * Read the cache-sharing policy field out of GEN6_MBCUNIT_SNPCR.
 * Only implemented for gen6/gen7, where that register exists.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Hold a wakeref around the register read. */
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

/*
 * Write a new cache-sharing policy into GEN6_MBCUNIT_SNPCR via a
 * read-modify-write. Values above 3 do not fit the SNPCR field.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4289
4290 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4291                                           struct sseu_dev_info *sseu)
4292 {
4293         int ss_max = 2;
4294         int ss;
4295         u32 sig1[ss_max], sig2[ss_max];
4296
4297         sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4298         sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4299         sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4300         sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4301
4302         for (ss = 0; ss < ss_max; ss++) {
4303                 unsigned int eu_cnt;
4304
4305                 if (sig1[ss] & CHV_SS_PG_ENABLE)
4306                         /* skip disabled subslice */
4307                         continue;
4308
4309                 sseu->slice_mask = BIT(0);
4310                 sseu->subslice_mask |= BIT(ss);
4311                 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4312                          ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4313                          ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4314                          ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4315                 sseu->eu_total += eu_cnt;
4316                 sseu->eu_per_subslice = max_t(unsigned int,
4317                                               sseu->eu_per_subslice, eu_cnt);
4318         }
4319 }
4320
4321 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4322                                      struct sseu_dev_info *sseu)
4323 {
4324         const struct intel_device_info *info = INTEL_INFO(dev_priv);
4325         int s_max = 6, ss_max = 4;
4326         int s, ss;
4327         u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];
4328
4329         for (s = 0; s < s_max; s++) {
4330                 /*
4331                  * FIXME: Valid SS Mask respects the spec and read
4332                  * only valid bits for those registers, excluding reserverd
4333                  * although this seems wrong because it would leave many
4334                  * subslices without ACK.
4335                  */
4336                 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4337                         GEN10_PGCTL_VALID_SS_MASK(s);
4338                 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4339                 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4340         }
4341
4342         eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4343                      GEN9_PGCTL_SSA_EU19_ACK |
4344                      GEN9_PGCTL_SSA_EU210_ACK |
4345                      GEN9_PGCTL_SSA_EU311_ACK;
4346         eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4347                      GEN9_PGCTL_SSB_EU19_ACK |
4348                      GEN9_PGCTL_SSB_EU210_ACK |
4349                      GEN9_PGCTL_SSB_EU311_ACK;
4350
4351         for (s = 0; s < s_max; s++) {
4352                 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4353                         /* skip disabled slice */
4354                         continue;
4355
4356                 sseu->slice_mask |= BIT(s);
4357                 sseu->subslice_mask = info->sseu.subslice_mask;
4358
4359                 for (ss = 0; ss < ss_max; ss++) {
4360                         unsigned int eu_cnt;
4361
4362                         if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4363                                 /* skip disabled subslice */
4364                                 continue;
4365
4366                         eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4367                                                eu_mask[ss % 2]);
4368                         sseu->eu_total += eu_cnt;
4369                         sseu->eu_per_subslice = max_t(unsigned int,
4370                                                       sseu->eu_per_subslice,
4371                                                       eu_cnt);
4372                 }
4373         }
4374 }
4375
4376 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4377                                     struct sseu_dev_info *sseu)
4378 {
4379         int s_max = 3, ss_max = 4;
4380         int s, ss;
4381         u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4382
4383         /* BXT has a single slice and at most 3 subslices. */
4384         if (IS_GEN9_LP(dev_priv)) {
4385                 s_max = 1;
4386                 ss_max = 3;
4387         }
4388
4389         for (s = 0; s < s_max; s++) {
4390                 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4391                 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4392                 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4393         }
4394
4395         eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4396                      GEN9_PGCTL_SSA_EU19_ACK |
4397                      GEN9_PGCTL_SSA_EU210_ACK |
4398                      GEN9_PGCTL_SSA_EU311_ACK;
4399         eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4400                      GEN9_PGCTL_SSB_EU19_ACK |
4401                      GEN9_PGCTL_SSB_EU210_ACK |
4402                      GEN9_PGCTL_SSB_EU311_ACK;
4403
4404         for (s = 0; s < s_max; s++) {
4405                 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4406                         /* skip disabled slice */
4407                         continue;
4408
4409                 sseu->slice_mask |= BIT(s);
4410
4411                 if (IS_GEN9_BC(dev_priv))
4412                         sseu->subslice_mask =
4413                                 INTEL_INFO(dev_priv)->sseu.subslice_mask;
4414
4415                 for (ss = 0; ss < ss_max; ss++) {
4416                         unsigned int eu_cnt;
4417
4418                         if (IS_GEN9_LP(dev_priv)) {
4419                                 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4420                                         /* skip disabled subslice */
4421                                         continue;
4422
4423                                 sseu->subslice_mask |= BIT(ss);
4424                         }
4425
4426                         eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4427                                                eu_mask[ss%2]);
4428                         sseu->eu_total += eu_cnt;
4429                         sseu->eu_per_subslice = max_t(unsigned int,
4430                                                       sseu->eu_per_subslice,
4431                                                       eu_cnt);
4432                 }
4433         }
4434 }
4435
4436 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4437                                          struct sseu_dev_info *sseu)
4438 {
4439         u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4440         int s;
4441
4442         sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4443
4444         if (sseu->slice_mask) {
4445                 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
4446                 sseu->eu_per_subslice =
4447                                 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4448                 sseu->eu_total = sseu->eu_per_subslice *
4449                                  sseu_subslice_total(sseu);
4450
4451                 /* subtract fused off EU(s) from enabled slice(s) */
4452                 for (s = 0; s < fls(sseu->slice_mask); s++) {
4453                         u8 subslice_7eu =
4454                                 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4455
4456                         sseu->eu_total -= hweight8(subslice_7eu);
4457                 }
4458         }
4459 }
4460
4461 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4462                                  const struct sseu_dev_info *sseu)
4463 {
4464         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4465         const char *type = is_available_info ? "Available" : "Enabled";
4466
4467         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4468                    sseu->slice_mask);
4469         seq_printf(m, "  %s Slice Total: %u\n", type,
4470                    hweight8(sseu->slice_mask));
4471         seq_printf(m, "  %s Subslice Total: %u\n", type,
4472                    sseu_subslice_total(sseu));
4473         seq_printf(m, "  %s Subslice Mask: %04x\n", type,
4474                    sseu->subslice_mask);
4475         seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
4476                    hweight8(sseu->subslice_mask));
4477         seq_printf(m, "  %s EU Total: %u\n", type,
4478                    sseu->eu_total);
4479         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4480                    sseu->eu_per_subslice);
4481
4482         if (!is_available_info)
4483                 return;
4484
4485         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4486         if (HAS_POOLED_EU(dev_priv))
4487                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4488
4489         seq_printf(m, "  Has Slice Power Gating: %s\n",
4490                    yesno(sseu->has_slice_pg));
4491         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4492                    yesno(sseu->has_subslice_pg));
4493         seq_printf(m, "  Has EU Power Gating: %s\n",
4494                    yesno(sseu->has_eu_pg));
4495 }
4496
/*
 * Print both the static SSEU configuration from the device info and
 * the current runtime status read back from hardware.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	/* SSEU status is only implemented for gen8+. */
	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));

	/* Hold a wakeref while reading the power-gating registers. */
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4529
4530 static int i915_forcewake_open(struct inode *inode, struct file *file)
4531 {
4532         struct drm_i915_private *i915 = inode->i_private;
4533
4534         if (INTEL_GEN(i915) < 6)
4535                 return 0;
4536
4537         intel_runtime_pm_get(i915);
4538         intel_uncore_forcewake_user_get(i915);
4539
4540         return 0;
4541 }
4542
4543 static int i915_forcewake_release(struct inode *inode, struct file *file)
4544 {
4545         struct drm_i915_private *i915 = inode->i_private;
4546
4547         if (INTEL_GEN(i915) < 6)
4548                 return 0;
4549
4550         intel_uncore_forcewake_user_put(i915);
4551         intel_runtime_pm_put(i915);
4552
4553         return 0;
4554 }
4555
/*
 * i915_forcewake_user: runtime pm + user forcewake are grabbed on open
 * and released on close; the file has no read/write methods.
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4561
4562 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4563 {
4564         struct drm_i915_private *dev_priv = m->private;
4565         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4566
4567         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4568         seq_printf(m, "Detected: %s\n",
4569                    yesno(delayed_work_pending(&hotplug->reenable_work)));
4570
4571         return 0;
4572 }
4573
/*
 * Set a new HPD storm detection threshold ("reset" restores the
 * default, 0 disables detection) and clear the accumulated per-pin
 * storm statistics. Returns the number of bytes consumed or a
 * negative errno on malformed input.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Leave room for the terminating NUL appended below. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	/* Threshold and stats are protected by the irq spinlock. */
	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4622
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the drm_i915_private pointer. */
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

/* debugfs plumbing for the read/write i915_hpd_storm_ctl file. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4636
/*
 * Read-only debugfs entries; one entry (i915_guc_load_err_log_dump)
 * passes (void *)1 as private data to share i915_guc_log_dump.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4687
/*
 * Writable debugfs files, each with its own file_operations; created one
 * by one (mode S_IRUGO | S_IWUSR) by i915_debugfs_register() below.
 */
static const struct i915_debugfs_files {
	const char *name;			/* debugfs entry name */
	const struct file_operations *fops;	/* per-file ops */
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	/* error capture files only exist when error capture is compiled in */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops}
};
4716
4717 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4718 {
4719         struct drm_minor *minor = dev_priv->drm.primary;
4720         struct dentry *ent;
4721         int ret, i;
4722
4723         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4724                                   minor->debugfs_root, to_i915(minor->dev),
4725                                   &i915_forcewake_fops);
4726         if (!ent)
4727                 return -ENOMEM;
4728
4729         ret = intel_pipe_crc_create(minor);
4730         if (ret)
4731                 return ret;
4732
4733         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4734                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4735                                           S_IRUGO | S_IWUSR,
4736                                           minor->debugfs_root,
4737                                           to_i915(minor->dev),
4738                                           i915_debugfs_files[i].fops);
4739                 if (!ent)
4740                         return -ENOMEM;
4741         }
4742
4743         return drm_debugfs_create_files(i915_debugfs_list,
4744                                         I915_DEBUGFS_ENTRIES,
4745                                         minor->debugfs_root, minor);
4746 }
4747
/* A contiguous range of DPCD registers to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4758
/* DPCD register ranges dumped by the per-connector i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4771
4772 static int i915_dpcd_show(struct seq_file *m, void *data)
4773 {
4774         struct drm_connector *connector = m->private;
4775         struct intel_dp *intel_dp =
4776                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4777         uint8_t buf[16];
4778         ssize_t err;
4779         int i;
4780
4781         if (connector->status != connector_status_connected)
4782                 return -ENODEV;
4783
4784         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4785                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4786                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4787
4788                 if (b->edp &&
4789                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4790                         continue;
4791
4792                 /* low tech for now */
4793                 if (WARN_ON(size > sizeof(buf)))
4794                         continue;
4795
4796                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4797                 if (err <= 0) {
4798                         DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4799                                   size, b->offset, err);
4800                         continue;
4801                 }
4802
4803                 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4804         }
4805
4806         return 0;
4807 }
4808
/* debugfs ->open: bind i915_dpcd_show() to the seq_file; i_private is the connector. */
static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}
4813
4814 static const struct file_operations i915_dpcd_fops = {
4815         .owner = THIS_MODULE,
4816         .open = i915_dpcd_open,
4817         .read = seq_read,
4818         .llseek = seq_lseek,
4819         .release = single_release,
4820 };
4821
4822 static int i915_panel_show(struct seq_file *m, void *data)
4823 {
4824         struct drm_connector *connector = m->private;
4825         struct intel_dp *intel_dp =
4826                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4827
4828         if (connector->status != connector_status_connected)
4829                 return -ENODEV;
4830
4831         seq_printf(m, "Panel power up delay: %d\n",
4832                    intel_dp->panel_power_up_delay);
4833         seq_printf(m, "Panel power down delay: %d\n",
4834                    intel_dp->panel_power_down_delay);
4835         seq_printf(m, "Backlight on delay: %d\n",
4836                    intel_dp->backlight_on_delay);
4837         seq_printf(m, "Backlight off delay: %d\n",
4838                    intel_dp->backlight_off_delay);
4839
4840         return 0;
4841 }
4842
/* debugfs ->open: bind i915_panel_show() to the seq_file; i_private is the connector. */
static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}
4847
4848 static const struct file_operations i915_panel_fops = {
4849         .owner = THIS_MODULE,
4850         .open = i915_panel_open,
4851         .read = seq_read,
4852         .llseek = seq_lseek,
4853         .release = single_release,
4854 };
4855
4856 /**
4857  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4858  * @connector: pointer to a registered drm_connector
4859  *
4860  * Cleanup will be done by drm_connector_unregister() through a call to
4861  * drm_debugfs_connector_remove().
4862  *
4863  * Returns 0 on success, negative error codes on error.
4864  */
4865 int i915_debugfs_connector_add(struct drm_connector *connector)
4866 {
4867         struct dentry *root = connector->debugfs_entry;
4868
4869         /* The connector must have been registered beforehands. */
4870         if (!root)
4871                 return -ENODEV;
4872
4873         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4874             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4875                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4876                                     connector, &i915_dpcd_fops);
4877
4878         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4879                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4880                                     connector, &i915_panel_fops);
4881
4882         return 0;
4883 }