Merge tag 'csky-for-linus-4.21' of git://github.com/c-sky/csky-linux
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37         return to_i915(node->minor->dev);
38 }
39
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42         struct drm_i915_private *dev_priv = node_to_i915(m->private);
43         const struct intel_device_info *info = INTEL_INFO(dev_priv);
44         struct drm_printer p = drm_seq_file_printer(m);
45
46         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49
50         intel_device_info_dump_flags(info, &p);
51         intel_device_info_dump_runtime(info, &p);
52         intel_driver_caps_print(&dev_priv->caps, &p);
53
54         kernel_param_lock(THIS_MODULE);
55         i915_params_dump(&i915_modparams, &p);
56         kernel_param_unlock(THIS_MODULE);
57
58         return 0;
59 }
60
/* '*' when the object is still busy on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68         return obj->pin_global ? 'p' : ' ';
69 }
70
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73         switch (i915_gem_object_get_tiling(obj)) {
74         default:
75         case I915_TILING_NONE: return ' ';
76         case I915_TILING_X: return 'X';
77         case I915_TILING_Y: return 'Y';
78         }
79 }
80
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83         return obj->userfault_count ? 'g' : ' ';
84 }
85
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88         return obj->mm.mapping ? 'M' : ' ';
89 }
90
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93         u64 size = 0;
94         struct i915_vma *vma;
95
96         for_each_ggtt_vma(vma, obj) {
97                 if (drm_mm_node_allocated(&vma->node))
98                         size += vma->node.size;
99         }
100
101         return size;
102 }
103
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107         size_t x = 0;
108
109         switch (page_sizes) {
110         case 0:
111                 return "";
112         case I915_GTT_PAGE_SIZE_4K:
113                 return "4K";
114         case I915_GTT_PAGE_SIZE_64K:
115                 return "64K";
116         case I915_GTT_PAGE_SIZE_2M:
117                 return "2M";
118         default:
119                 if (!buf)
120                         return "M";
121
122                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123                         x += snprintf(buf + x, len - x, "2M, ");
124                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125                         x += snprintf(buf + x, len - x, "64K, ");
126                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127                         x += snprintf(buf + x, len - x, "4K, ");
128                 buf[x-2] = '\0';
129
130                 return buf;
131         }
132 }
133
/*
 * describe_obj - print a one-line summary of a GEM object plus detail for
 * each of its vmas: status flags, size, read/write domains, cache level,
 * every allocated GTT binding (with GGTT view and fence info), stolen
 * offset, last write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below) while we walk vma_list.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flag order: active, pinned-global, tiling, userfault, vmapped. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count how many of this object's vmas are currently pinned. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Describe each vma that actually occupies GTT address space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224         const struct drm_i915_gem_object *a =
225                 *(const struct drm_i915_gem_object **)A;
226         const struct drm_i915_gem_object *b =
227                 *(const struct drm_i915_gem_object **)B;
228
229         if (a->stolen->start < b->stolen->start)
230                 return -1;
231         if (a->stolen->start > b->stolen->start)
232                 return 1;
233         return 0;
234 }
235
/*
 * i915_gem_stolen_list_info - debugfs listing of all GEM objects backed
 * by stolen memory, sorted by their offset within the stolen region.
 *
 * Object pointers are snapshotted under mm.obj_lock into an array sized
 * from the object count sampled beforehand, then described outside the
 * spinlock (describe_obj may sleep) while holding struct_mutex.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Sample the count unlocked; the lists may change under us, so the
	 * capture loops below are bounded by this snapshot. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects occupy no GTT space, so only their size is summed. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Aggregated per-client GEM usage, filled in by per_file_stats() for
 * each object in a client's handle table.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner, used to filter ppGTT vmas */
	unsigned long count;	/* number of objects seen */
	u64 total, unbound;	/* total size; size with no GTT binding */
	u64 global, shared;	/* size in global GTT; named/dma-buf exported size */
	u64 active, inactive;	/* bound size, split by GPU activity */
};
306
/*
 * per_file_stats - idr_for_each() callback accumulating one object's
 * sizes into a struct file_stats; @ptr is the object, @data the stats.
 *
 * A vma contributes to active/inactive only if it lives in the global
 * GTT or in a ppGTT owned by stats->file_priv, so shared objects are
 * attributed to each client's own address-space usage.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Skip vmas bound into other clients' ppGTTs. */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * print_file_stats - emit one summary line for @stats (a struct
 * file_stats passed by value), tagged with @name; prints nothing when
 * no objects were counted.  Note this is a macro, so @stats is
 * evaluated once per field.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
356
357 static void print_batch_pool_stats(struct seq_file *m,
358                                    struct drm_i915_private *dev_priv)
359 {
360         struct drm_i915_gem_object *obj;
361         struct file_stats stats;
362         struct intel_engine_cs *engine;
363         enum intel_engine_id id;
364         int j;
365
366         memset(&stats, 0, sizeof(stats));
367
368         for_each_engine(engine, dev_priv, id) {
369                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370                         list_for_each_entry(obj,
371                                             &engine->batch_pool.cache_list[j],
372                                             batch_pool_link)
373                                 per_file_stats(0, obj, &stats);
374                 }
375         }
376
377         print_file_stats(m, "[k]batch pool", stats);
378 }
379
380 static int per_file_ctx_stats(int idx, void *ptr, void *data)
381 {
382         struct i915_gem_context *ctx = ptr;
383         struct intel_engine_cs *engine;
384         enum intel_engine_id id;
385
386         for_each_engine(engine, ctx->i915, id) {
387                 struct intel_context *ce = to_intel_context(ctx, engine);
388
389                 if (ce->state)
390                         per_file_stats(0, ce->state->obj, data);
391                 if (ce->ring)
392                         per_file_stats(0, ce->ring->vma->obj, data);
393         }
394
395         return 0;
396 }
397
398 static void print_context_stats(struct seq_file *m,
399                                 struct drm_i915_private *dev_priv)
400 {
401         struct drm_device *dev = &dev_priv->drm;
402         struct file_stats stats;
403         struct drm_file *file;
404
405         memset(&stats, 0, sizeof(stats));
406
407         mutex_lock(&dev->struct_mutex);
408         if (dev_priv->kernel_context)
409                 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410
411         list_for_each_entry(file, &dev->filelist, lhead) {
412                 struct drm_i915_file_private *fpriv = file->driver_priv;
413                 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414         }
415         mutex_unlock(&dev->struct_mutex);
416
417         print_file_stats(m, "[k]contexts", stats);
418 }
419
/*
 * i915_gem_object_info - debugfs overview of GEM memory usage: global
 * totals by category (unbound/bound/purgeable/mapped/huge/display),
 * GGTT size, kernel batch-pool and context usage, then a per-client
 * breakdown attributed to the owning task.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* First pass: objects with no GTT binding. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Backing pages larger than the base page size count as huge. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Second pass: bound objects, additionally tracking display pins. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* filelist_mutex protects the walk over dev->filelist. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the pid of the context that last submitted work. */
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560         struct drm_info_node *node = m->private;
561         struct drm_i915_private *dev_priv = node_to_i915(node);
562         struct drm_device *dev = &dev_priv->drm;
563         struct drm_i915_gem_object **objects;
564         struct drm_i915_gem_object *obj;
565         u64 total_obj_size, total_gtt_size;
566         unsigned long nobject, n;
567         int count, ret;
568
569         nobject = READ_ONCE(dev_priv->mm.object_count);
570         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571         if (!objects)
572                 return -ENOMEM;
573
574         ret = mutex_lock_interruptible(&dev->struct_mutex);
575         if (ret)
576                 return ret;
577
578         count = 0;
579         spin_lock(&dev_priv->mm.obj_lock);
580         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581                 objects[count++] = obj;
582                 if (count == nobject)
583                         break;
584         }
585         spin_unlock(&dev_priv->mm.obj_lock);
586
587         total_obj_size = total_gtt_size = 0;
588         for (n = 0;  n < count; n++) {
589                 obj = objects[n];
590
591                 seq_puts(m, "   ");
592                 describe_obj(m, obj);
593                 seq_putc(m, '\n');
594                 total_obj_size += obj->base.size;
595                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596         }
597
598         mutex_unlock(&dev->struct_mutex);
599
600         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601                    count, total_obj_size, total_gtt_size);
602         kvfree(objects);
603
604         return 0;
605 }
606
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609         struct drm_i915_private *dev_priv = node_to_i915(m->private);
610         struct drm_device *dev = &dev_priv->drm;
611         struct drm_i915_gem_object *obj;
612         struct intel_engine_cs *engine;
613         enum intel_engine_id id;
614         int total = 0;
615         int ret, j;
616
617         ret = mutex_lock_interruptible(&dev->struct_mutex);
618         if (ret)
619                 return ret;
620
621         for_each_engine(engine, dev_priv, id) {
622                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623                         int count;
624
625                         count = 0;
626                         list_for_each_entry(obj,
627                                             &engine->batch_pool.cache_list[j],
628                                             batch_pool_link)
629                                 count++;
630                         seq_printf(m, "%s cache[%d]: %d objects\n",
631                                    engine->name, j, count);
632
633                         list_for_each_entry(obj,
634                                             &engine->batch_pool.cache_list[j],
635                                             batch_pool_link) {
636                                 seq_puts(m, "   ");
637                                 describe_obj(m, obj);
638                                 seq_putc(m, '\n');
639                         }
640
641                         total += count;
642                 }
643         }
644
645         seq_printf(m, "total: %d\n", total);
646
647         mutex_unlock(&dev->struct_mutex);
648
649         return 0;
650 }
651
/*
 * gen8_display_interrupt_info - dump the gen8+ display interrupt
 * registers: per-pipe IMR/IIR/IER (skipping powered-down pipes), then
 * the port, misc and PCU interrupt banks.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		/* Reading registers of a powered-down pipe would fault. */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704         struct drm_i915_private *dev_priv = node_to_i915(m->private);
705         struct intel_engine_cs *engine;
706         enum intel_engine_id id;
707         int i, pipe;
708
709         intel_runtime_pm_get(dev_priv);
710
711         if (IS_CHERRYVIEW(dev_priv)) {
712                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713                            I915_READ(GEN8_MASTER_IRQ));
714
715                 seq_printf(m, "Display IER:\t%08x\n",
716                            I915_READ(VLV_IER));
717                 seq_printf(m, "Display IIR:\t%08x\n",
718                            I915_READ(VLV_IIR));
719                 seq_printf(m, "Display IIR_RW:\t%08x\n",
720                            I915_READ(VLV_IIR_RW));
721                 seq_printf(m, "Display IMR:\t%08x\n",
722                            I915_READ(VLV_IMR));
723                 for_each_pipe(dev_priv, pipe) {
724                         enum intel_display_power_domain power_domain;
725
726                         power_domain = POWER_DOMAIN_PIPE(pipe);
727                         if (!intel_display_power_get_if_enabled(dev_priv,
728                                                                 power_domain)) {
729                                 seq_printf(m, "Pipe %c power disabled\n",
730                                            pipe_name(pipe));
731                                 continue;
732                         }
733
734                         seq_printf(m, "Pipe %c stat:\t%08x\n",
735                                    pipe_name(pipe),
736                                    I915_READ(PIPESTAT(pipe)));
737
738                         intel_display_power_put(dev_priv, power_domain);
739                 }
740
741                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742                 seq_printf(m, "Port hotplug:\t%08x\n",
743                            I915_READ(PORT_HOTPLUG_EN));
744                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745                            I915_READ(VLV_DPFLIPSTAT));
746                 seq_printf(m, "DPINVGTT:\t%08x\n",
747                            I915_READ(DPINVGTT));
748                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749
750                 for (i = 0; i < 4; i++) {
751                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752                                    i, I915_READ(GEN8_GT_IMR(i)));
753                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754                                    i, I915_READ(GEN8_GT_IIR(i)));
755                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756                                    i, I915_READ(GEN8_GT_IER(i)));
757                 }
758
759                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760                            I915_READ(GEN8_PCU_IMR));
761                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762                            I915_READ(GEN8_PCU_IIR));
763                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764                            I915_READ(GEN8_PCU_IER));
765         } else if (INTEL_GEN(dev_priv) >= 11) {
766                 seq_printf(m, "Master Interrupt Control:  %08x\n",
767                            I915_READ(GEN11_GFX_MSTR_IRQ));
768
769                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783                            I915_READ(GEN11_DISPLAY_INT_CTL));
784
785                 gen8_display_interrupt_info(m);
786         } else if (INTEL_GEN(dev_priv) >= 8) {
787                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788                            I915_READ(GEN8_MASTER_IRQ));
789
790                 for (i = 0; i < 4; i++) {
791                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792                                    i, I915_READ(GEN8_GT_IMR(i)));
793                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794                                    i, I915_READ(GEN8_GT_IIR(i)));
795                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796                                    i, I915_READ(GEN8_GT_IER(i)));
797                 }
798
799                 gen8_display_interrupt_info(m);
800         } else if (IS_VALLEYVIEW(dev_priv)) {
801                 seq_printf(m, "Display IER:\t%08x\n",
802                            I915_READ(VLV_IER));
803                 seq_printf(m, "Display IIR:\t%08x\n",
804                            I915_READ(VLV_IIR));
805                 seq_printf(m, "Display IIR_RW:\t%08x\n",
806                            I915_READ(VLV_IIR_RW));
807                 seq_printf(m, "Display IMR:\t%08x\n",
808                            I915_READ(VLV_IMR));
809                 for_each_pipe(dev_priv, pipe) {
810                         enum intel_display_power_domain power_domain;
811
812                         power_domain = POWER_DOMAIN_PIPE(pipe);
813                         if (!intel_display_power_get_if_enabled(dev_priv,
814                                                                 power_domain)) {
815                                 seq_printf(m, "Pipe %c power disabled\n",
816                                            pipe_name(pipe));
817                                 continue;
818                         }
819
820                         seq_printf(m, "Pipe %c stat:\t%08x\n",
821                                    pipe_name(pipe),
822                                    I915_READ(PIPESTAT(pipe)));
823                         intel_display_power_put(dev_priv, power_domain);
824                 }
825
826                 seq_printf(m, "Master IER:\t%08x\n",
827                            I915_READ(VLV_MASTER_IER));
828
829                 seq_printf(m, "Render IER:\t%08x\n",
830                            I915_READ(GTIER));
831                 seq_printf(m, "Render IIR:\t%08x\n",
832                            I915_READ(GTIIR));
833                 seq_printf(m, "Render IMR:\t%08x\n",
834                            I915_READ(GTIMR));
835
836                 seq_printf(m, "PM IER:\t\t%08x\n",
837                            I915_READ(GEN6_PMIER));
838                 seq_printf(m, "PM IIR:\t\t%08x\n",
839                            I915_READ(GEN6_PMIIR));
840                 seq_printf(m, "PM IMR:\t\t%08x\n",
841                            I915_READ(GEN6_PMIMR));
842
843                 seq_printf(m, "Port hotplug:\t%08x\n",
844                            I915_READ(PORT_HOTPLUG_EN));
845                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846                            I915_READ(VLV_DPFLIPSTAT));
847                 seq_printf(m, "DPINVGTT:\t%08x\n",
848                            I915_READ(DPINVGTT));
849
850         } else if (!HAS_PCH_SPLIT(dev_priv)) {
851                 seq_printf(m, "Interrupt enable:    %08x\n",
852                            I915_READ(IER));
853                 seq_printf(m, "Interrupt identity:  %08x\n",
854                            I915_READ(IIR));
855                 seq_printf(m, "Interrupt mask:      %08x\n",
856                            I915_READ(IMR));
857                 for_each_pipe(dev_priv, pipe)
858                         seq_printf(m, "Pipe %c stat:         %08x\n",
859                                    pipe_name(pipe),
860                                    I915_READ(PIPESTAT(pipe)));
861         } else {
862                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
863                            I915_READ(DEIER));
864                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
865                            I915_READ(DEIIR));
866                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
867                            I915_READ(DEIMR));
868                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
869                            I915_READ(SDEIER));
870                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
871                            I915_READ(SDEIIR));
872                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
873                            I915_READ(SDEIMR));
874                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
875                            I915_READ(GTIER));
876                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
877                            I915_READ(GTIIR));
878                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
879                            I915_READ(GTIMR));
880         }
881
882         if (INTEL_GEN(dev_priv) >= 11) {
883                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894                            I915_READ(GEN11_GUC_SG_INTR_MASK));
895                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902         } else if (INTEL_GEN(dev_priv) >= 6) {
903                 for_each_engine(engine, dev_priv, id) {
904                         seq_printf(m,
905                                    "Graphics Interrupt mask (%s):       %08x\n",
906                                    engine->name, I915_READ_IMR(engine));
907                 }
908         }
909
910         intel_runtime_pm_put(dev_priv);
911
912         return 0;
913 }
914
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917         struct drm_i915_private *dev_priv = node_to_i915(m->private);
918         struct drm_device *dev = &dev_priv->drm;
919         int i, ret;
920
921         ret = mutex_lock_interruptible(&dev->struct_mutex);
922         if (ret)
923                 return ret;
924
925         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926         for (i = 0; i < dev_priv->num_fence_regs; i++) {
927                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928
929                 seq_printf(m, "Fence %d, pin count = %d, object = ",
930                            i, dev_priv->fence_regs[i].pin_count);
931                 if (!vma)
932                         seq_puts(m, "unused");
933                 else
934                         describe_obj(m, vma->obj);
935                 seq_putc(m, '\n');
936         }
937
938         mutex_unlock(&dev->struct_mutex);
939         return 0;
940 }
941
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944                               size_t count, loff_t *pos)
945 {
946         struct i915_gpu_state *error;
947         ssize_t ret;
948         void *buf;
949
950         error = file->private_data;
951         if (!error)
952                 return 0;
953
954         /* Bounce buffer required because of kernfs __user API convenience. */
955         buf = kmalloc(count, GFP_KERNEL);
956         if (!buf)
957                 return -ENOMEM;
958
959         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
960         if (ret <= 0)
961                 goto out;
962
963         if (!copy_to_user(ubuf, buf, ret))
964                 *pos += ret;
965         else
966                 ret = -EFAULT;
967
968 out:
969         kfree(buf);
970         return ret;
971 }
972
/* Drop the error-state reference taken by the corresponding open(). */
static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_state_put(file->private_data);
        return 0;
}
978
/*
 * Open handler for i915_gpu_info: capture a fresh snapshot of the
 * current GPU state and stash it as the file's private data for
 * gpu_state_read() to format on demand.
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_state *gpu;

        /* Hold a runtime-PM wakeref only for the duration of the capture. */
        intel_runtime_pm_get(i915);
        gpu = i915_capture_gpu_state(i915);
        intel_runtime_pm_put(i915);
        if (!gpu)
                return -ENOMEM;

        file->private_data = gpu;
        return 0;
}
993
/* Read-only debugfs file: live GPU state snapshot taken at open(). */
static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
1001
/*
 * Write handler for i915_error_state: writing anything to the file
 * discards the captured error state.  The written bytes themselves are
 * ignored; we report the full count as consumed.
 */
static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        /* Nothing captured at open() time, nothing to reset. */
        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}
1018
/*
 * Open handler for i915_error_state: stash the first captured error
 * state (may be NULL if none) for gpu_state_read(); the reference is
 * dropped by gpu_state_release().
 */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
        file->private_data = i915_first_error_state(inode->i_private);
        return 0;
}
1024
/*
 * Debugfs file exposing the last captured error state; read to dump it,
 * write anything to clear it.
 */
static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
1033 #endif
1034
/*
 * Debugfs setter for i915_next_seqno: force the global GEM seqno to
 * @val.  Takes struct_mutex (interruptibly) around
 * i915_gem_set_global_seqno() and holds a runtime-PM wakeref across
 * the update.
 */
static int
i915_next_seqno_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        intel_runtime_pm_get(dev_priv);
        ret = i915_gem_set_global_seqno(dev, val);
        intel_runtime_pm_put(dev_priv);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/* Write-only attribute (NULL getter): echo 0x<seqno> > i915_next_seqno. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
                        NULL, i915_next_seqno_set,
                        "0x%llx\n");
1058
/*
 * Report the GPU frequency / P-state configuration using whichever
 * register interface the running platform provides: ILK MEMSWCTL,
 * VLV/CHV punit mailbox, or the GEN6+ RPS register block.  Also dumps
 * the current/max CD clock and max dot clock at the end.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;

        /* Keep the device awake while poking frequency registers. */
        intel_runtime_pm_get(dev_priv);

        if (IS_GEN5(dev_priv)) {
                /* Ironlake: requested/current state lives in MEMSWCTL/MEMSTAT. */
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                /* punit accesses are serialised by pcu_lock. */
                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                /* Broxton-class LP parts keep the caps in different registers. */
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                /* The requested-frequency field sits at a gen-dependent offset. */
                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                /* The PM interrupt registers moved around between generations. */
                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                /*
                 * RP state cap fields are in hardware units; the RPN/RP1/RP0
                 * field positions differ on GEN9-LP, and GEN9_BC/GEN10+ need
                 * scaling before conversion to MHz via intel_gpu_freq().
                 */
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv);
        return ret;
}
1280
/*
 * Pretty-print an intel_instdone snapshot: the base INSTDONE register
 * always, SC_INSTDONE from gen4 onwards, and the per-slice/subslice
 * sampler/row registers from gen7 onwards.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
                               struct seq_file *m,
                               struct intel_instdone *instdone)
{
        int slice;
        int subslice;

        seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
                   instdone->instdone);

        /* Older gens only have the single INSTDONE register. */
        if (INTEL_GEN(dev_priv) <= 3)
                return;

        seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
                   instdone->slice_common);

        /* Per-slice/subslice breakdown only exists on gen7+. */
        if (INTEL_GEN(dev_priv) <= 6)
                return;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->sampler[slice][subslice]);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->row[slice][subslice]);
}
1308
/*
 * Dump the hangcheck state: global reset/wedge flags, whether the
 * hangcheck timer/work is armed, and per-engine progress (seqno,
 * ACTHD, waiters, last hangcheck verdict, and INSTDONE for the render
 * engine).
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
        struct intel_instdone instdone;
        enum intel_engine_id id;

        if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
                seq_puts(m, "Wedged\n");
        if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: struct_mutex backoff\n");
        if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
                seq_puts(m, "Reset in progress: reset handoff to waiter\n");
        if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
                seq_puts(m, "Waiter holding struct mutex\n");
        if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
                seq_puts(m, "struct_mutex blocked for reset\n");

        if (!i915_modparams.enable_hangcheck) {
                seq_puts(m, "Hangcheck disabled\n");
                return 0;
        }

        /* Sample hardware state under a runtime-PM wakeref. */
        intel_runtime_pm_get(dev_priv);

        for_each_engine(engine, dev_priv, id) {
                acthd[id] = intel_engine_get_active_head(engine);
                seqno[id] = intel_engine_get_seqno(engine);
        }

        intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

        intel_runtime_pm_put(dev_priv);

        if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
                seq_printf(m, "Hangcheck active, timer fires in %dms\n",
                           jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
                                            jiffies))
        else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
                seq_puts(m, "Hangcheck active, work pending\n");
        else
                seq_puts(m, "Hangcheck inactive\n");

        seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

        for_each_engine(engine, dev_priv, id) {
                struct intel_breadcrumbs *b = &engine->breadcrumbs;
                struct rb_node *rb;

                seq_printf(m, "%s:\n", engine->name);
                seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
                           engine->hangcheck.seqno, seqno[id],
                           intel_engine_last_submit(engine));
                seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)),
                           yesno(engine->hangcheck.stalled),
                           yesno(engine->hangcheck.wedged));

                /* Walk the breadcrumb waiter tree under its irq-safe lock. */
                spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                        struct intel_wait *w = rb_entry(rb, typeof(*w), node);

                        seq_printf(m, "\t%s [%d] waiting for %x\n",
                                   w->tsk->comm, w->tsk->pid, w->seqno);
                }
                spin_unlock_irq(&b->rb_lock);

                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
                seq_printf(m, "\taction = %s(%d) %d ms ago\n",
                           hangcheck_action_to_str(engine->hangcheck.action),
                           engine->hangcheck.action,
                           jiffies_to_msecs(jiffies -
                                            engine->hangcheck.action_timestamp));

                /* INSTDONE is only captured for the render engine. */
                if (engine->id == RCS) {
                        seq_puts(m, "\tinstdone read =\n");

                        i915_instdone_info(dev_priv, m, &instdone);

                        seq_puts(m, "\tinstdone accu =\n");

                        i915_instdone_info(dev_priv, m,
                                           &engine->hangcheck.instdone);
                }
        }

        return 0;
}
1403
1404 static int i915_reset_info(struct seq_file *m, void *unused)
1405 {
1406         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407         struct i915_gpu_error *error = &dev_priv->gpu_error;
1408         struct intel_engine_cs *engine;
1409         enum intel_engine_id id;
1410
1411         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
1413         for_each_engine(engine, dev_priv, id) {
1414                 seq_printf(m, "%s = %u\n", engine->name,
1415                            i915_reset_engine_count(error, engine));
1416         }
1417
1418         return 0;
1419 }
1420
/*
 * Dump the Ironlake DRPC (render power-saving) configuration decoded
 * from MEMMODECTL/RSTDBYCTL/CRSTANDVID, including the current render
 * standby (RSx) state.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
        seq_printf(m, "Gated voltage change: %s\n",
                   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   yesno(!(rstdbyctl & RCX_SW_EXIT)));
        seq_puts(m, "Current RS state: ");
        /* Decode the render-standby status field into a human name. */
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}
1477
1478 static int i915_forcewake_domains(struct seq_file *m, void *data)
1479 {
1480         struct drm_i915_private *i915 = node_to_i915(m->private);
1481         struct intel_uncore_forcewake_domain *fw_domain;
1482         unsigned int tmp;
1483
1484         seq_printf(m, "user.bypass_count = %u\n",
1485                    i915->uncore.user_forcewake.count);
1486
1487         for_each_fw_domain(fw_domain, i915, tmp)
1488                 seq_printf(m, "%s.wake_count = %u\n",
1489                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1490                            READ_ONCE(fw_domain->wake_count));
1491
1492         return 0;
1493 }
1494
1495 static void print_rc6_res(struct seq_file *m,
1496                           const char *title,
1497                           const i915_reg_t reg)
1498 {
1499         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
1501         seq_printf(m, "%s %u (%llu us)\n",
1502                    title, I915_READ(reg),
1503                    intel_rc6_residency_us(dev_priv, reg));
1504 }
1505
/*
 * Dump VLV/CHV RC6 enablement, render/media power-well state and RC6
 * residency counters; ends with the forcewake domain summary.
 * Caller (i915_drpc_info) holds a runtime-pm wakeref.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	/* RC6 counts as enabled if either timeout (TO) or EI mode is set. */
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Append the forcewake domain state to the report. */
	return i915_forcewake_domains(m, NULL);
}
1527
/*
 * Dump gen6+ RC (render C-state) status: which RC levels are enabled,
 * the current RC state, gen9 power-well gating, RC6 residency counters
 * and (gen6/7 only) the RC6 voltage IDs fetched from the PCU.
 * Caller (i915_drpc_info) holds a runtime-pm wakeref.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/*
	 * Raw read without the forcewake bookkeeping; the tracepoint is
	 * emitted by hand so the register trace stays complete.
	 */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 VIDs come via the PCU mailbox; pcu_lock serialises it. */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the current RC level from GT_CORE_STATUS. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* Each byte of rc6vids encodes one RC6 level's voltage ID. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain state to the report. */
	return i915_forcewake_domains(m, NULL);
}
1615
/*
 * debugfs i915_drpc_info: dispatch to the platform-specific DRPC dump,
 * holding a runtime-pm wakeref across the register reads.
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int err;

	intel_runtime_pm_get(dev_priv);

	/* VLV/CHV have their own layout, so test them before generic gen6+. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = vlv_drpc_info(m);
	else if (INTEL_GEN(dev_priv) >= 6)
		err = gen6_drpc_info(m);
	else
		err = ironlake_drpc_info(m);

	intel_runtime_pm_put(dev_priv);

	return err;
}
1634
1635 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636 {
1637         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638
1639         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640                    dev_priv->fb_tracking.busy_bits);
1641
1642         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643                    dev_priv->fb_tracking.flip_bits);
1644
1645         return 0;
1646 }
1647
1648 static int i915_fbc_status(struct seq_file *m, void *unused)
1649 {
1650         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651         struct intel_fbc *fbc = &dev_priv->fbc;
1652
1653         if (!HAS_FBC(dev_priv))
1654                 return -ENODEV;
1655
1656         intel_runtime_pm_get(dev_priv);
1657         mutex_lock(&fbc->lock);
1658
1659         if (intel_fbc_is_active(dev_priv))
1660                 seq_puts(m, "FBC enabled\n");
1661         else
1662                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
1664         if (intel_fbc_is_active(dev_priv)) {
1665                 u32 mask;
1666
1667                 if (INTEL_GEN(dev_priv) >= 8)
1668                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669                 else if (INTEL_GEN(dev_priv) >= 7)
1670                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671                 else if (INTEL_GEN(dev_priv) >= 5)
1672                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673                 else if (IS_G4X(dev_priv))
1674                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675                 else
1676                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677                                                         FBC_STAT_COMPRESSED);
1678
1679                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1680         }
1681
1682         mutex_unlock(&fbc->lock);
1683         intel_runtime_pm_put(dev_priv);
1684
1685         return 0;
1686 }
1687
1688 static int i915_fbc_false_color_get(void *data, u64 *val)
1689 {
1690         struct drm_i915_private *dev_priv = data;
1691
1692         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693                 return -ENODEV;
1694
1695         *val = dev_priv->fbc.false_color;
1696
1697         return 0;
1698 }
1699
1700 static int i915_fbc_false_color_set(void *data, u64 val)
1701 {
1702         struct drm_i915_private *dev_priv = data;
1703         u32 reg;
1704
1705         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706                 return -ENODEV;
1707
1708         mutex_lock(&dev_priv->fbc.lock);
1709
1710         reg = I915_READ(ILK_DPFC_CONTROL);
1711         dev_priv->fbc.false_color = val;
1712
1713         I915_WRITE(ILK_DPFC_CONTROL, val ?
1714                    (reg | FBC_CTL_FALSE_COLOR) :
1715                    (reg & ~FBC_CTL_FALSE_COLOR));
1716
1717         mutex_unlock(&dev_priv->fbc.lock);
1718         return 0;
1719 }
1720
/*
 * debugfs fops for the fbc_false_color attribute: wires the get/set
 * helpers above into a simple u64 file formatted as "%llu\n".
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1724
1725 static int i915_ips_status(struct seq_file *m, void *unused)
1726 {
1727         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728
1729         if (!HAS_IPS(dev_priv))
1730                 return -ENODEV;
1731
1732         intel_runtime_pm_get(dev_priv);
1733
1734         seq_printf(m, "Enabled by kernel parameter: %s\n",
1735                    yesno(i915_modparams.enable_ips));
1736
1737         if (INTEL_GEN(dev_priv) >= 8) {
1738                 seq_puts(m, "Currently: unknown\n");
1739         } else {
1740                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741                         seq_puts(m, "Currently: enabled\n");
1742                 else
1743                         seq_puts(m, "Currently: disabled\n");
1744         }
1745
1746         intel_runtime_pm_put(dev_priv);
1747
1748         return 0;
1749 }
1750
/*
 * debugfs i915_sr_status: report whether panel self-refresh is enabled.
 * The enable bit lives in a different register on each platform family;
 * on gen9+ there is no single global bit, so it reads as disabled here.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	/* Hold rpm and a display power reference while reading the regs. */
	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1780
1781 static int i915_emon_status(struct seq_file *m, void *unused)
1782 {
1783         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784         struct drm_device *dev = &dev_priv->drm;
1785         unsigned long temp, chipset, gfx;
1786         int ret;
1787
1788         if (!IS_GEN5(dev_priv))
1789                 return -ENODEV;
1790
1791         intel_runtime_pm_get(dev_priv);
1792
1793         ret = mutex_lock_interruptible(&dev->struct_mutex);
1794         if (ret)
1795                 return ret;
1796
1797         temp = i915_mch_val(dev_priv);
1798         chipset = i915_chipset_val(dev_priv);
1799         gfx = i915_gfx_val(dev_priv);
1800         mutex_unlock(&dev->struct_mutex);
1801
1802         seq_printf(m, "GMCH temp: %ld\n", temp);
1803         seq_printf(m, "Chipset power: %ld\n", chipset);
1804         seq_printf(m, "GFX power: %ld\n", gfx);
1805         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1806
1807         intel_runtime_pm_put(dev_priv);
1808
1809         return 0;
1810 }
1811
/*
 * debugfs i915_ring_freq_table: print the GPU-to-CPU/ring frequency
 * mapping maintained by the PCU (LLC platforms only). Each row is
 * fetched via the GEN6_PCODE_READ_MIN_FREQ_TABLE mailbox command.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* pcu_lock serialises access to the PCU mailbox. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is both mailbox input (GT freq) and output. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* Low byte: effective CPU freq; high byte: ring freq (x100 MHz). */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1859
1860 static int i915_opregion(struct seq_file *m, void *unused)
1861 {
1862         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1863         struct drm_device *dev = &dev_priv->drm;
1864         struct intel_opregion *opregion = &dev_priv->opregion;
1865         int ret;
1866
1867         ret = mutex_lock_interruptible(&dev->struct_mutex);
1868         if (ret)
1869                 goto out;
1870
1871         if (opregion->header)
1872                 seq_write(m, opregion->header, OPREGION_SIZE);
1873
1874         mutex_unlock(&dev->struct_mutex);
1875
1876 out:
1877         return 0;
1878 }
1879
1880 static int i915_vbt(struct seq_file *m, void *unused)
1881 {
1882         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1883
1884         if (opregion->vbt)
1885                 seq_write(m, opregion->vbt, opregion->vbt_size);
1886
1887         return 0;
1888 }
1889
/*
 * debugfs i915_gem_framebuffer_info: describe every framebuffer known
 * to the device — the fbdev/fbcon framebuffer first (when fbdev
 * emulation is built in), then all other framebuffers on the
 * mode_config list, each with its backing GEM object.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* struct_mutex is held across describe_obj() of each backing object. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* The fbdev framebuffer was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1939
/* Append a one-line summary of a context ringbuffer's bookkeeping. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1945
/*
 * debugfs i915_context_status: for every GEM context, print its hw_id
 * and pin count, the owning process (or "(deleted)"/"(kernel)"), the
 * slice-remap flag, and the per-engine context state and ringbuffer.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* Only contexts currently holding a hw_id are on this list. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* An ERR_PTR file_priv is treated as a closed owner. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = slice remapping pending, 'r' = not. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2001
2002 static const char *swizzle_string(unsigned swizzle)
2003 {
2004         switch (swizzle) {
2005         case I915_BIT_6_SWIZZLE_NONE:
2006                 return "none";
2007         case I915_BIT_6_SWIZZLE_9:
2008                 return "bit9";
2009         case I915_BIT_6_SWIZZLE_9_10:
2010                 return "bit9/bit10";
2011         case I915_BIT_6_SWIZZLE_9_11:
2012                 return "bit9/bit11";
2013         case I915_BIT_6_SWIZZLE_9_10_11:
2014                 return "bit9/bit10/bit11";
2015         case I915_BIT_6_SWIZZLE_9_17:
2016                 return "bit9/bit17";
2017         case I915_BIT_6_SWIZZLE_9_10_17:
2018                 return "bit9/bit10/bit17";
2019         case I915_BIT_6_SWIZZLE_UNKNOWN:
2020                 return "unknown";
2021         }
2022
2023         return "bug";
2024 }
2025
/*
 * debugfs i915_swizzle_info: report the detected bit-6 swizzle modes for
 * X/Y tiling plus the raw DRAM/arbiter registers they were derived from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* Raw DRAM configuration registers, per generation. */
	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2072
2073 static int per_file_ctx(int id, void *ptr, void *data)
2074 {
2075         struct i915_gem_context *ctx = ptr;
2076         struct seq_file *m = data;
2077         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2078
2079         if (!ppgtt) {
2080                 seq_printf(m, "  no ppgtt for context %d\n",
2081                            ctx->user_handle);
2082                 return 0;
2083         }
2084
2085         if (i915_gem_context_is_default(ctx))
2086                 seq_puts(m, "  default context:\n");
2087         else
2088                 seq_printf(m, "  context %d:\n", ctx->user_handle);
2089         ppgtt->debug_dump(ppgtt, m);
2090
2091         return 0;
2092 }
2093
2094 static void gen8_ppgtt_info(struct seq_file *m,
2095                             struct drm_i915_private *dev_priv)
2096 {
2097         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2098         struct intel_engine_cs *engine;
2099         enum intel_engine_id id;
2100         int i;
2101
2102         if (!ppgtt)
2103                 return;
2104
2105         for_each_engine(engine, dev_priv, id) {
2106                 seq_printf(m, "%s\n", engine->name);
2107                 for (i = 0; i < 4; i++) {
2108                         u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2109                         pdp <<= 32;
2110                         pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2111                         seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2112                 }
2113         }
2114 }
2115
/*
 * Dump gen6/7 per-engine PPGTT registers (page-directory base, read
 * pointer, DCLV) plus the aliasing PPGTT, if one exists.
 * Called from i915_ppgtt_info() under struct_mutex with an rpm wakeref.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			/* On gen7, GFX_MODE is read from a per-ring register. */
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2148
/*
 * debugfs i915_ppgtt_info: dump the global PPGTT registers for the
 * current generation, then every open DRM client's contexts and their
 * PPGTTs. Lock order: filelist_mutex -> struct_mutex; a runtime-pm
 * wakeref is held across the register reads.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* per_file_ctx() receives the seq_file via the opaque cookie. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2190
2191 static int count_irq_waiters(struct drm_i915_private *i915)
2192 {
2193         struct intel_engine_cs *engine;
2194         enum intel_engine_id id;
2195         int count = 0;
2196
2197         for_each_engine(engine, i915, id)
2198                 count += intel_engine_has_waiter(engine);
2199
2200         return count;
2201 }
2202
2203 static const char *rps_power_to_str(unsigned int power)
2204 {
2205         static const char * const strings[] = {
2206                 [LOW_POWER] = "low power",
2207                 [BETWEEN] = "mixed",
2208                 [HIGH_POWER] = "high power",
2209         };
2210
2211         if (power >= ARRAY_SIZE(strings) || !strings[power])
2212                 return "unknown";
2213
2214         return strings[power];
2215 }
2216
/*
 * i915_rps_boost_info - debugfs dump of RPS (Render P-state) boost state.
 *
 * Prints the requested vs. actual GPU frequency, the soft/hard frequency
 * limits, per-client boost counts and, when RPS is active, the hardware
 * autotuning up/down evaluation-interval statistics.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	struct drm_file *file;

	/*
	 * Only read the actual frequency from hardware if the device is
	 * already awake; otherwise report the last requested frequency
	 * instead of waking the device just for this query.
	 */
	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report the current frequency via the PUNIT */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
		intel_runtime_pm_put(dev_priv);
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost statistics, walked under the filelist lock */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* RCU protects the pid -> task_struct lookup */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/*
		 * Sample the up/down evaluation-interval counters with
		 * forcewake held so the raw (FW) register reads are coherent.
		 */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2303
2304 static int i915_llc(struct seq_file *m, void *data)
2305 {
2306         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2307         const bool edram = INTEL_GEN(dev_priv) > 8;
2308
2309         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2310         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2311                    intel_uncore_edram_size(dev_priv)/1024/1024);
2312
2313         return 0;
2314 }
2315
2316 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2317 {
2318         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2319         struct drm_printer p;
2320
2321         if (!HAS_HUC(dev_priv))
2322                 return -ENODEV;
2323
2324         p = drm_seq_file_printer(m);
2325         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2326
2327         intel_runtime_pm_get(dev_priv);
2328         seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2329         intel_runtime_pm_put(dev_priv);
2330
2331         return 0;
2332 }
2333
2334 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2335 {
2336         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2337         struct drm_printer p;
2338         u32 tmp, i;
2339
2340         if (!HAS_GUC(dev_priv))
2341                 return -ENODEV;
2342
2343         p = drm_seq_file_printer(m);
2344         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2345
2346         intel_runtime_pm_get(dev_priv);
2347
2348         tmp = I915_READ(GUC_STATUS);
2349
2350         seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2351         seq_printf(m, "\tBootrom status = 0x%x\n",
2352                 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2353         seq_printf(m, "\tuKernel status = 0x%x\n",
2354                 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2355         seq_printf(m, "\tMIA Core status = 0x%x\n",
2356                 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2357         seq_puts(m, "\nScratch registers:\n");
2358         for (i = 0; i < 16; i++)
2359                 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2360
2361         intel_runtime_pm_put(dev_priv);
2362
2363         return 0;
2364 }
2365
2366 static const char *
2367 stringify_guc_log_type(enum guc_log_buffer_type type)
2368 {
2369         switch (type) {
2370         case GUC_ISR_LOG_BUFFER:
2371                 return "ISR";
2372         case GUC_DPC_LOG_BUFFER:
2373                 return "DPC";
2374         case GUC_CRASH_DUMP_LOG_BUFFER:
2375                 return "CRASH";
2376         default:
2377                 MISSING_CASE(type);
2378         }
2379
2380         return "";
2381 }
2382
2383 static void i915_guc_log_info(struct seq_file *m,
2384                               struct drm_i915_private *dev_priv)
2385 {
2386         struct intel_guc_log *log = &dev_priv->guc.log;
2387         enum guc_log_buffer_type type;
2388
2389         if (!intel_guc_log_relay_enabled(log)) {
2390                 seq_puts(m, "GuC log relay disabled\n");
2391                 return;
2392         }
2393
2394         seq_puts(m, "GuC logging stats:\n");
2395
2396         seq_printf(m, "\tRelay full count: %u\n",
2397                    log->relay.full_count);
2398
2399         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2400                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2401                            stringify_guc_log_type(type),
2402                            log->stats[type].flush,
2403                            log->stats[type].sampled_overflow);
2404         }
2405 }
2406
2407 static void i915_guc_client_info(struct seq_file *m,
2408                                  struct drm_i915_private *dev_priv,
2409                                  struct intel_guc_client *client)
2410 {
2411         struct intel_engine_cs *engine;
2412         enum intel_engine_id id;
2413         uint64_t tot = 0;
2414
2415         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2416                 client->priority, client->stage_id, client->proc_desc_offset);
2417         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2418                 client->doorbell_id, client->doorbell_offset);
2419
2420         for_each_engine(engine, dev_priv, id) {
2421                 u64 submissions = client->submissions[id];
2422                 tot += submissions;
2423                 seq_printf(m, "\tSubmissions: %llu %s\n",
2424                                 submissions, engine->name);
2425         }
2426         seq_printf(m, "\tTotal: %llu\n", tot);
2427 }
2428
2429 static int i915_guc_info(struct seq_file *m, void *data)
2430 {
2431         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2432         const struct intel_guc *guc = &dev_priv->guc;
2433
2434         if (!USES_GUC(dev_priv))
2435                 return -ENODEV;
2436
2437         i915_guc_log_info(m, dev_priv);
2438
2439         if (!USES_GUC_SUBMISSION(dev_priv))
2440                 return 0;
2441
2442         GEM_BUG_ON(!guc->execbuf_client);
2443
2444         seq_printf(m, "\nDoorbell map:\n");
2445         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2446         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2447
2448         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2449         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2450         if (guc->preempt_client) {
2451                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2452                            guc->preempt_client);
2453                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2454         }
2455
2456         /* Add more as required ... */
2457
2458         return 0;
2459 }
2460
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor.
 *
 * Walks all GUC_MAX_STAGE_DESCRIPTORS entries of the stage descriptor
 * pool, skips inactive ones, and prints each descriptor's fields followed
 * by the per-engine execlist context (LRC) state for the engines in the
 * execbuf client's engine mask.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only active descriptors are worth printing */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine LRC state, restricted to the client's engines */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2514
/*
 * i915_guc_log_dump - hexdump the GuC log (or load-error log) buffer.
 *
 * node->info_ent->data selects the load-error log instead of the regular
 * GuC log vma.  The backing object is pinned with a WC mapping and dumped
 * four u32 words per line.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log allocated is not an error; just print nothing */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/*
	 * Four words per line.
	 * NOTE(review): reads i+1..i+3 each iteration, so this assumes
	 * obj->base.size is a multiple of 16 bytes — confirm (holds for
	 * page-sized log objects).
	 */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2553
2554 static int i915_guc_log_level_get(void *data, u64 *val)
2555 {
2556         struct drm_i915_private *dev_priv = data;
2557
2558         if (!USES_GUC(dev_priv))
2559                 return -ENODEV;
2560
2561         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2562
2563         return 0;
2564 }
2565
2566 static int i915_guc_log_level_set(void *data, u64 val)
2567 {
2568         struct drm_i915_private *dev_priv = data;
2569
2570         if (!USES_GUC(dev_priv))
2571                 return -ENODEV;
2572
2573         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2574 }
2575
2576 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2577                         i915_guc_log_level_get, i915_guc_log_level_set,
2578                         "%lld\n");
2579
2580 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2581 {
2582         struct drm_i915_private *dev_priv = inode->i_private;
2583
2584         if (!USES_GUC(dev_priv))
2585                 return -ENODEV;
2586
2587         file->private_data = &dev_priv->guc.log;
2588
2589         return intel_guc_log_relay_open(&dev_priv->guc.log);
2590 }
2591
2592 static ssize_t
2593 i915_guc_log_relay_write(struct file *filp,
2594                          const char __user *ubuf,
2595                          size_t cnt,
2596                          loff_t *ppos)
2597 {
2598         struct intel_guc_log *log = filp->private_data;
2599
2600         intel_guc_log_relay_flush(log);
2601
2602         return cnt;
2603 }
2604
2605 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2606 {
2607         struct drm_i915_private *dev_priv = inode->i_private;
2608
2609         intel_guc_log_relay_close(&dev_priv->guc.log);
2610
2611         return 0;
2612 }
2613
/* File operations backing the guc_log_relay debugfs file. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2620
2621 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2622 {
2623         u8 val;
2624         static const char * const sink_status[] = {
2625                 "inactive",
2626                 "transition to active, capture and display",
2627                 "active, display from RFB",
2628                 "active, capture and display on sink device timings",
2629                 "transition to inactive, capture and display, timing re-sync",
2630                 "reserved",
2631                 "reserved",
2632                 "sink internal error",
2633         };
2634         struct drm_connector *connector = m->private;
2635         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2636         struct intel_dp *intel_dp =
2637                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2638         int ret;
2639
2640         if (!CAN_PSR(dev_priv)) {
2641                 seq_puts(m, "PSR Unsupported\n");
2642                 return -ENODEV;
2643         }
2644
2645         if (connector->status != connector_status_connected)
2646                 return -ENODEV;
2647
2648         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2649
2650         if (ret == 1) {
2651                 const char *str = "unknown";
2652
2653                 val &= DP_PSR_SINK_STATE_MASK;
2654                 if (val < ARRAY_SIZE(sink_status))
2655                         str = sink_status[val];
2656                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2657         } else {
2658                 return ret;
2659         }
2660
2661         return 0;
2662 }
2663 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2664
2665 static void
2666 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2667 {
2668         u32 val, psr_status;
2669
2670         if (dev_priv->psr.psr2_enabled) {
2671                 static const char * const live_status[] = {
2672                         "IDLE",
2673                         "CAPTURE",
2674                         "CAPTURE_FS",
2675                         "SLEEP",
2676                         "BUFON_FW",
2677                         "ML_UP",
2678                         "SU_STANDBY",
2679                         "FAST_SLEEP",
2680                         "DEEP_SLEEP",
2681                         "BUF_ON",
2682                         "TG_ON"
2683                 };
2684                 psr_status = I915_READ(EDP_PSR2_STATUS);
2685                 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2686                         EDP_PSR2_STATUS_STATE_SHIFT;
2687                 if (val < ARRAY_SIZE(live_status)) {
2688                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2689                                    psr_status, live_status[val]);
2690                         return;
2691                 }
2692         } else {
2693                 static const char * const live_status[] = {
2694                         "IDLE",
2695                         "SRDONACK",
2696                         "SRDENT",
2697                         "BUFOFF",
2698                         "BUFON",
2699                         "AUXACK",
2700                         "SRDOFFACK",
2701                         "SRDENT_ON",
2702                 };
2703                 psr_status = I915_READ(EDP_PSR_STATUS);
2704                 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2705                         EDP_PSR_STATUS_STATE_SHIFT;
2706                 if (val < ARRAY_SIZE(live_status)) {
2707                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2708                                    psr_status, live_status[val]);
2709                         return;
2710                 }
2711         }
2712
2713         seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2714 }
2715
/*
 * i915_edp_psr_status - debugfs summary of eDP PSR state.
 *
 * Reports sink support, the driver's PSR1/PSR2 state, the hardware
 * enable bit, link-standby mode, the HSW/BDW performance counter and
 * the decoded source PSR status.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	/* The register reads below require the device to be awake */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* PSR1 and PSR2 have separate control registers */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* IRQ debug timestamps are only recorded in I915_PSR_DEBUG_IRQ mode */
	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2773
/*
 * i915_edp_psr_debug_set - debugfs setter for the PSR debug mode.
 *
 * Applying a new mode may need modeset locks, so they are taken via a
 * drm_modeset_acquire_ctx and the operation is retried after backoff
 * whenever -EDEADLK is returned (the standard DRM deadlock-avoidance
 * dance).
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Drop contended locks, wait for the holder, then retry */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2805
2806 static int
2807 i915_edp_psr_debug_get(void *data, u64 *val)
2808 {
2809         struct drm_i915_private *dev_priv = data;
2810
2811         if (!CAN_PSR(dev_priv))
2812                 return -ENODEV;
2813
2814         *val = READ_ONCE(dev_priv->psr.debug);
2815         return 0;
2816 }
2817
2818 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2819                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2820                         "%llu\n");
2821
/*
 * i915_energy_uJ - report the energy consumed by the GPU in microjoules.
 *
 * Reads the RAPL energy-unit exponent from MSR_RAPL_POWER_UNIT and scales
 * the MCH_SECP_NRG_STTS counter by it.  Gen6+ only.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 of the MSR hold the energy status units exponent */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	/* NOTE(review): no trailing newline — confirm consumers expect that */
	seq_printf(m, "%llu", power);

	return 0;
}
2848
2849 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2850 {
2851         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2852         struct pci_dev *pdev = dev_priv->drm.pdev;
2853
2854         if (!HAS_RUNTIME_PM(dev_priv))
2855                 seq_puts(m, "Runtime power management not supported\n");
2856
2857         seq_printf(m, "GPU idle: %s (epoch %u)\n",
2858                    yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2859         seq_printf(m, "IRQs disabled: %s\n",
2860                    yesno(!intel_irqs_enabled(dev_priv)));
2861 #ifdef CONFIG_PM
2862         seq_printf(m, "Usage count: %d\n",
2863                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2864 #else
2865         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2866 #endif
2867         seq_printf(m, "PCI device power state: %s [%d]\n",
2868                    pci_power_name(pdev->current_state),
2869                    pdev->current_state);
2870
2871         return 0;
2872 }
2873
2874 static int i915_power_domain_info(struct seq_file *m, void *unused)
2875 {
2876         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2877         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2878         int i;
2879
2880         mutex_lock(&power_domains->lock);
2881
2882         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2883         for (i = 0; i < power_domains->power_well_count; i++) {
2884                 struct i915_power_well *power_well;
2885                 enum intel_display_power_domain power_domain;
2886
2887                 power_well = &power_domains->power_wells[i];
2888                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2889                            power_well->count);
2890
2891                 for_each_power_domain(power_domain, power_well->desc->domains)
2892                         seq_printf(m, "  %-23s %d\n",
2893                                  intel_display_power_domain_str(power_domain),
2894                                  power_domains->domain_use_count[power_domain]);
2895         }
2896
2897         mutex_unlock(&power_domains->lock);
2898
2899         return 0;
2900 }
2901
/*
 * i915_dmc_info - debugfs dump of DMC/CSR firmware state.
 *
 * Shows whether the firmware is loaded, its path and version, the
 * DC-state entry counters (where supported) and a few raw CSR registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload there is no version/counter info to print */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The counter registers below are only known up to gen11 */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2942
2943 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2944                                  struct drm_display_mode *mode)
2945 {
2946         int i;
2947
2948         for (i = 0; i < tabs; i++)
2949                 seq_putc(m, '\t');
2950
2951         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2952                    mode->base.id, mode->name,
2953                    mode->vrefresh, mode->clock,
2954                    mode->hdisplay, mode->hsync_start,
2955                    mode->hsync_end, mode->htotal,
2956                    mode->vdisplay, mode->vsync_start,
2957                    mode->vsync_end, mode->vtotal,
2958                    mode->type, mode->flags);
2959 }
2960
2961 static void intel_encoder_info(struct seq_file *m,
2962                                struct intel_crtc *intel_crtc,
2963                                struct intel_encoder *intel_encoder)
2964 {
2965         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2966         struct drm_device *dev = &dev_priv->drm;
2967         struct drm_crtc *crtc = &intel_crtc->base;
2968         struct intel_connector *intel_connector;
2969         struct drm_encoder *encoder;
2970
2971         encoder = &intel_encoder->base;
2972         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2973                    encoder->base.id, encoder->name);
2974         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2975                 struct drm_connector *connector = &intel_connector->base;
2976                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2977                            connector->base.id,
2978                            connector->name,
2979                            drm_get_connector_status_name(connector->status));
2980                 if (connector->status == connector_status_connected) {
2981                         struct drm_display_mode *mode = &crtc->mode;
2982                         seq_printf(m, ", mode:\n");
2983                         intel_seq_print_mode(m, 2, mode);
2984                 } else {
2985                         seq_putc(m, '\n');
2986                 }
2987         }
2988 }
2989
2990 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2991 {
2992         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2993         struct drm_device *dev = &dev_priv->drm;
2994         struct drm_crtc *crtc = &intel_crtc->base;
2995         struct intel_encoder *intel_encoder;
2996         struct drm_plane_state *plane_state = crtc->primary->state;
2997         struct drm_framebuffer *fb = plane_state->fb;
2998
2999         if (fb)
3000                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
3001                            fb->base.id, plane_state->src_x >> 16,
3002                            plane_state->src_y >> 16, fb->width, fb->height);
3003         else
3004                 seq_puts(m, "\tprimary plane disabled\n");
3005         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3006                 intel_encoder_info(m, intel_crtc, intel_encoder);
3007 }
3008
3009 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3010 {
3011         struct drm_display_mode *mode = panel->fixed_mode;
3012
3013         seq_printf(m, "\tfixed mode:\n");
3014         intel_seq_print_mode(m, 2, mode);
3015 }
3016
3017 static void intel_dp_info(struct seq_file *m,
3018                           struct intel_connector *intel_connector)
3019 {
3020         struct intel_encoder *intel_encoder = intel_connector->encoder;
3021         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3022
3023         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3024         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3025         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3026                 intel_panel_info(m, &intel_connector->panel);
3027
3028         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3029                                 &intel_dp->aux);
3030 }
3031
/*
 * Dump DP-MST connector state. The MST encoder has no intel_dp of its
 * own; audio capability is queried from the primary digital port's DP
 * via the MST topology manager for this connector's port.
 */
static void intel_dp_mst_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
					intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
3045
/* Dump HDMI-specific connector state (currently just audio support). */
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}
3054
/* LVDS connectors drive a fixed-mode panel; just print that mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3060
3061 static void intel_connector_info(struct seq_file *m,
3062                                  struct drm_connector *connector)
3063 {
3064         struct intel_connector *intel_connector = to_intel_connector(connector);
3065         struct intel_encoder *intel_encoder = intel_connector->encoder;
3066         struct drm_display_mode *mode;
3067
3068         seq_printf(m, "connector %d: type %s, status: %s\n",
3069                    connector->base.id, connector->name,
3070                    drm_get_connector_status_name(connector->status));
3071
3072         if (connector->status == connector_status_disconnected)
3073                 return;
3074
3075         seq_printf(m, "\tname: %s\n", connector->display_info.name);
3076         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3077                    connector->display_info.width_mm,
3078                    connector->display_info.height_mm);
3079         seq_printf(m, "\tsubpixel order: %s\n",
3080                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3081         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
3082
3083         if (!intel_encoder)
3084                 return;
3085
3086         switch (connector->connector_type) {
3087         case DRM_MODE_CONNECTOR_DisplayPort:
3088         case DRM_MODE_CONNECTOR_eDP:
3089                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3090                         intel_dp_mst_info(m, intel_connector);
3091                 else
3092                         intel_dp_info(m, intel_connector);
3093                 break;
3094         case DRM_MODE_CONNECTOR_LVDS:
3095                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3096                         intel_lvds_info(m, intel_connector);
3097                 break;
3098         case DRM_MODE_CONNECTOR_HDMIA:
3099                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3100                     intel_encoder->type == INTEL_OUTPUT_DDI)
3101                         intel_hdmi_info(m, intel_connector);
3102                 break;
3103         default:
3104                 break;
3105         }
3106
3107         seq_printf(m, "\tmodes:\n");
3108         list_for_each_entry(mode, &connector->modes, head)
3109                 intel_seq_print_mode(m, 2, mode);
3110 }
3111
/*
 * Map a DRM plane type to the short tag used in the plane dump.
 * Returns "unknown" for any value outside the enum.
 */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3129
/*
 * Format a plane rotation bitmask as a human readable string, e.g.
 * "90 (0x00000002)".
 *
 * NOTE(review): returns a pointer to a single static buffer, so the
 * result is only valid until the next call and concurrent debugfs
 * readers would race on it - confirm callers are serialized.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3149
3150 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3151 {
3152         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3153         struct drm_device *dev = &dev_priv->drm;
3154         struct intel_plane *intel_plane;
3155
3156         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3157                 struct drm_plane_state *state;
3158                 struct drm_plane *plane = &intel_plane->base;
3159                 struct drm_format_name_buf format_name;
3160
3161                 if (!plane->state) {
3162                         seq_puts(m, "plane->state is NULL!\n");
3163                         continue;
3164                 }
3165
3166                 state = plane->state;
3167
3168                 if (state->fb) {
3169                         drm_get_format_name(state->fb->format->format,
3170                                             &format_name);
3171                 } else {
3172                         sprintf(format_name.str, "N/A");
3173                 }
3174
3175                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3176                            plane->base.id,
3177                            plane_type(intel_plane->base.type),
3178                            state->crtc_x, state->crtc_y,
3179                            state->crtc_w, state->crtc_h,
3180                            (state->src_x >> 16),
3181                            ((state->src_x & 0xffff) * 15625) >> 10,
3182                            (state->src_y >> 16),
3183                            ((state->src_y & 0xffff) * 15625) >> 10,
3184                            (state->src_w >> 16),
3185                            ((state->src_w & 0xffff) * 15625) >> 10,
3186                            (state->src_h >> 16),
3187                            ((state->src_h & 0xffff) * 15625) >> 10,
3188                            format_name.str,
3189                            plane_rotation(state->rotation));
3190         }
3191 }
3192
/*
 * Dump the pipe scaler state for @intel_crtc: number of scalers, the
 * scaler_users claim bitmask, the assigned scaler id, and per-scaler
 * in-use/mode information.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3220
3221 static int i915_display_info(struct seq_file *m, void *unused)
3222 {
3223         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3224         struct drm_device *dev = &dev_priv->drm;
3225         struct intel_crtc *crtc;
3226         struct drm_connector *connector;
3227         struct drm_connector_list_iter conn_iter;
3228
3229         intel_runtime_pm_get(dev_priv);
3230         seq_printf(m, "CRTC info\n");
3231         seq_printf(m, "---------\n");
3232         for_each_intel_crtc(dev, crtc) {
3233                 struct intel_crtc_state *pipe_config;
3234
3235                 drm_modeset_lock(&crtc->base.mutex, NULL);
3236                 pipe_config = to_intel_crtc_state(crtc->base.state);
3237
3238                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3239                            crtc->base.base.id, pipe_name(crtc->pipe),
3240                            yesno(pipe_config->base.active),
3241                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3242                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3243
3244                 if (pipe_config->base.active) {
3245                         struct intel_plane *cursor =
3246                                 to_intel_plane(crtc->base.cursor);
3247
3248                         intel_crtc_info(m, crtc);
3249
3250                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3251                                    yesno(cursor->base.state->visible),
3252                                    cursor->base.state->crtc_x,
3253                                    cursor->base.state->crtc_y,
3254                                    cursor->base.state->crtc_w,
3255                                    cursor->base.state->crtc_h,
3256                                    cursor->cursor.base);
3257                         intel_scaler_info(m, crtc);
3258                         intel_plane_info(m, crtc);
3259                 }
3260
3261                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3262                            yesno(!crtc->cpu_fifo_underrun_disabled),
3263                            yesno(!crtc->pch_fifo_underrun_disabled));
3264                 drm_modeset_unlock(&crtc->base.mutex);
3265         }
3266
3267         seq_printf(m, "\n");
3268         seq_printf(m, "Connector info\n");
3269         seq_printf(m, "--------------\n");
3270         mutex_lock(&dev->mode_config.mutex);
3271         drm_connector_list_iter_begin(dev, &conn_iter);
3272         drm_for_each_connector_iter(connector, &conn_iter)
3273                 intel_connector_info(m, connector);
3274         drm_connector_list_iter_end(&conn_iter);
3275         mutex_unlock(&dev->mode_config.mutex);
3276
3277         intel_runtime_pm_put(dev_priv);
3278
3279         return 0;
3280 }
3281
/*
 * debugfs: print global GT bookkeeping (awake state, active request
 * count, CS timestamp frequency) followed by a full state dump for
 * every engine.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	/* Keep the device awake for the duration of the dump */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3306
3307 static int i915_rcs_topology(struct seq_file *m, void *unused)
3308 {
3309         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3310         struct drm_printer p = drm_seq_file_printer(m);
3311
3312         intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3313
3314         return 0;
3315 }
3316
3317 static int i915_shrinker_info(struct seq_file *m, void *unused)
3318 {
3319         struct drm_i915_private *i915 = node_to_i915(m->private);
3320
3321         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3322         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3323
3324         return 0;
3325 }
3326
3327 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3328 {
3329         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3330         struct drm_device *dev = &dev_priv->drm;
3331         int i;
3332
3333         drm_modeset_lock_all(dev);
3334         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3335                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3336
3337                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3338                            pll->info->id);
3339                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3340                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3341                 seq_printf(m, " tracked hardware state:\n");
3342                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3343                 seq_printf(m, " dpll_md: 0x%08x\n",
3344                            pll->state.hw_state.dpll_md);
3345                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3346                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3347                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3348                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3349                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3350                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3351                            pll->state.hw_state.mg_refclkin_ctl);
3352                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3353                            pll->state.hw_state.mg_clktop2_coreclkctl1);
3354                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3355                            pll->state.hw_state.mg_clktop2_hsclkctl);
3356                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
3357                            pll->state.hw_state.mg_pll_div0);
3358                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
3359                            pll->state.hw_state.mg_pll_div1);
3360                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
3361                            pll->state.hw_state.mg_pll_lf);
3362                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3363                            pll->state.hw_state.mg_pll_frac_lock);
3364                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3365                            pll->state.hw_state.mg_pll_ssc);
3366                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
3367                            pll->state.hw_state.mg_pll_bias);
3368                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3369                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
3370         }
3371         drm_modeset_unlock_all(dev);
3372
3373         return 0;
3374 }
3375
/*
 * debugfs: list the context workarounds recorded for the render
 * engine - one line per register with its value and mask.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;

	seq_printf(m, "Workarounds applied: %u\n", wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);

	return 0;
}
3390
/* debugfs read: report whether IPC (Isochronous Priority Control) is on. */
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
			yesno(dev_priv->ipc_enabled));
	return 0;
}
3399
/*
 * debugfs open: reject platforms without IPC with -ENODEV, otherwise
 * hook up the single_open() show routine.
 */
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}
3409
/*
 * debugfs write: parse a boolean from userspace and enable/disable IPC
 * accordingly. Also forces watermark recomputation on the next commit
 * by setting distrust_bios_wm.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a wakeref while reprogramming the hardware */
	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* Watermarks need recomputing for the new IPC setting */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3432
/* Read/write fops for the i915_ipc_status debugfs entry. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3441
/*
 * debugfs: dump the display data buffer (DDB) allocation per plane
 * (plus the cursor) for every pipe. Gen9+ only; earlier hardware has
 * no DDB to report, so return -ENODEV there.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	/* Keep crtc->base.state stable while we walk it */
	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* The cursor has its own dedicated DDB entry */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3480
/*
 * Report the DRRS (dynamic refresh rate switching) status for one
 * CRTC: the connectors driven by it, the DRRS type advertised by the
 * VBT, and - when the current CRTC state has DRRS - whether idleness
 * DRRS is active and which refresh rate (high/low) is in use.
 *
 * drrs->mutex guards drrs->dp and the refresh-rate bookkeeping; note
 * it is dropped on every return path below.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently assigned to this CRTC */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* DRRS capability as declared by the VBT */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		/* Report the currently selected refresh rate */
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3555
3556 static int i915_drrs_status(struct seq_file *m, void *unused)
3557 {
3558         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3559         struct drm_device *dev = &dev_priv->drm;
3560         struct intel_crtc *intel_crtc;
3561         int active_crtc_cnt = 0;
3562
3563         drm_modeset_lock_all(dev);
3564         for_each_intel_crtc(dev, intel_crtc) {
3565                 if (intel_crtc->base.state->active) {
3566                         active_crtc_cnt++;
3567                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3568
3569                         drrs_status_per_crtc(m, dev, intel_crtc);
3570                 }
3571         }
3572         drm_modeset_unlock_all(dev);
3573
3574         if (!active_crtc_cnt)
3575                 seq_puts(m, "No active crtc found\n");
3576
3577         return 0;
3578 }
3579
/*
 * debugfs: for every MST-capable DisplayPort source port, dump the MST
 * topology managed by its topology manager. MST *branch* encoders are
 * skipped - only the primary (non-MST) encoder owns the manager.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3610
/*
 * debugfs write: arm or disarm DP compliance test handling. The value
 * written is parsed as a decimal integer; only an exact 1 arms
 * test_active on each connected non-MST DisplayPort connector, any
 * other value disarms it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* NUL-terminated copy of the user buffer for kstrtoint() */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3669
3670 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3671 {
3672         struct drm_i915_private *dev_priv = m->private;
3673         struct drm_device *dev = &dev_priv->drm;
3674         struct drm_connector *connector;
3675         struct drm_connector_list_iter conn_iter;
3676         struct intel_dp *intel_dp;
3677
3678         drm_connector_list_iter_begin(dev, &conn_iter);
3679         drm_for_each_connector_iter(connector, &conn_iter) {
3680                 struct intel_encoder *encoder;
3681
3682                 if (connector->connector_type !=
3683                     DRM_MODE_CONNECTOR_DisplayPort)
3684                         continue;
3685
3686                 encoder = to_intel_encoder(connector->encoder);
3687                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3688                         continue;
3689
3690                 if (encoder && connector->status == connector_status_connected) {
3691                         intel_dp = enc_to_intel_dp(&encoder->base);
3692                         if (intel_dp->compliance.test_active)
3693                                 seq_puts(m, "1");
3694                         else
3695                                 seq_puts(m, "0");
3696                 } else
3697                         seq_puts(m, "0");
3698         }
3699         drm_connector_list_iter_end(&conn_iter);
3700
3701         return 0;
3702 }
3703
/* debugfs open: wire the show routine through single_open(). */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}
3710
/* Read/write fops for the DP compliance test_active debugfs entry. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3719
/*
 * debugfs read: print the pending DP compliance test data per
 * connected non-MST DisplayPort connector - the EDID checksum for
 * EDID-read tests, or the requested hdisplay/vdisplay/bpc for video
 * pattern tests. Connectors with no usable encoder report "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3763
/*
 * debugfs read: print the pending DP compliance test type (hex) per
 * connected non-MST DisplayPort connector; "0" when no encoder is
 * attached or the connector is not connected.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3795
/*
 * Print one line per watermark level: the raw latency value and its
 * conversion to microseconds.  Holds the modeset locks so the latency
 * table cannot change underneath us while printing.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* The number of valid watermark levels is platform specific */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* latency is now in tenths of a microsecond */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3835
3836 static int pri_wm_latency_show(struct seq_file *m, void *data)
3837 {
3838         struct drm_i915_private *dev_priv = m->private;
3839         const uint16_t *latencies;
3840
3841         if (INTEL_GEN(dev_priv) >= 9)
3842                 latencies = dev_priv->wm.skl_latency;
3843         else
3844                 latencies = dev_priv->wm.pri_latency;
3845
3846         wm_latency_show(m, latencies);
3847
3848         return 0;
3849 }
3850
3851 static int spr_wm_latency_show(struct seq_file *m, void *data)
3852 {
3853         struct drm_i915_private *dev_priv = m->private;
3854         const uint16_t *latencies;
3855
3856         if (INTEL_GEN(dev_priv) >= 9)
3857                 latencies = dev_priv->wm.skl_latency;
3858         else
3859                 latencies = dev_priv->wm.spr_latency;
3860
3861         wm_latency_show(m, latencies);
3862
3863         return 0;
3864 }
3865
3866 static int cur_wm_latency_show(struct seq_file *m, void *data)
3867 {
3868         struct drm_i915_private *dev_priv = m->private;
3869         const uint16_t *latencies;
3870
3871         if (INTEL_GEN(dev_priv) >= 9)
3872                 latencies = dev_priv->wm.skl_latency;
3873         else
3874                 latencies = dev_priv->wm.cur_latency;
3875
3876         wm_latency_show(m, latencies);
3877
3878         return 0;
3879 }
3880
/* Open hook: primary WM latencies exist on gen5+ and G4X only. */
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}
3890
/* Open hook: no sprite WM latency table on GMCH display platforms. */
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}
3900
/* Open hook: no cursor WM latency table on GMCH display platforms. */
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
3910
/*
 * Parse a whitespace-separated list of up to 8 watermark latency values
 * from userspace and store them into @wm under the modeset locks.
 * The user must supply exactly num_levels values; otherwise -EINVAL.
 * Returns the number of bytes consumed on success.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must match the per-platform level count in wm_latency_show() */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reserve one byte for the NUL terminator */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Require exactly num_levels values to be supplied */
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3955
3956
3957 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3958                                     size_t len, loff_t *offp)
3959 {
3960         struct seq_file *m = file->private_data;
3961         struct drm_i915_private *dev_priv = m->private;
3962         uint16_t *latencies;
3963
3964         if (INTEL_GEN(dev_priv) >= 9)
3965                 latencies = dev_priv->wm.skl_latency;
3966         else
3967                 latencies = dev_priv->wm.pri_latency;
3968
3969         return wm_latency_write(file, ubuf, len, offp, latencies);
3970 }
3971
3972 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3973                                     size_t len, loff_t *offp)
3974 {
3975         struct seq_file *m = file->private_data;
3976         struct drm_i915_private *dev_priv = m->private;
3977         uint16_t *latencies;
3978
3979         if (INTEL_GEN(dev_priv) >= 9)
3980                 latencies = dev_priv->wm.skl_latency;
3981         else
3982                 latencies = dev_priv->wm.spr_latency;
3983
3984         return wm_latency_write(file, ubuf, len, offp, latencies);
3985 }
3986
3987 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3988                                     size_t len, loff_t *offp)
3989 {
3990         struct seq_file *m = file->private_data;
3991         struct drm_i915_private *dev_priv = m->private;
3992         uint16_t *latencies;
3993
3994         if (INTEL_GEN(dev_priv) >= 9)
3995                 latencies = dev_priv->wm.skl_latency;
3996         else
3997                 latencies = dev_priv->wm.cur_latency;
3998
3999         return wm_latency_write(file, ubuf, len, offp, latencies);
4000 }
4001
/* debugfs fops for reading/writing the per-plane WM latency tables */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4028
/* Report whether the GPU is terminally wedged (unrecoverable). */
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = i915_terminally_wedged(&dev_priv->gpu_error);

	return 0;
}
4038
/*
 * Manually mark a mask of engines (@val) as hung and trigger error
 * handling/reset, then wait for the reset handoff to complete.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Make hangcheck see the selected engines as stalled right now */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Block until the reset has been handed off to the reset worker */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
4075
/*
 * Common helper for the missed/test irq debugfs writers: wait for the
 * GPU to idle, store @val into the selected fault-injection mask, then
 * flush the idle worker so any armed irq is disarmed.
 * Returns 0 on success or a negative errno.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4106
/* Read back the mask of rings with missed-interrupt fault injection. */
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

/* Set the missed-interrupt ring mask once the GPU is idle. */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
4127
/* Read back the mask of rings with test-interrupt masking enabled. */
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

/*
 * Set the test-interrupt ring mask; unsupported with GuC submission
 * and on gen11+ where per-engine irq masking no longer exists.
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* GuC keeps the user interrupt permanently enabled for submission */
	if (USES_GUC_SUBMISSION(i915))
		return -ENODEV;

	/*
	 * From icl, we can no longer individually mask interrupt generation
	 * from each engine.
	 */
	if (INTEL_GEN(i915) >= 11)
		return -ENODEV;

	/* Clamp to engines that actually exist on this device */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4163
/* Flag bits accepted by the i915_drop_caches debugfs interface */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/* Reading i915_drop_caches reports the mask of all supported flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4189
/*
 * Drop the caches selected by the DROP_* bits in @val: optionally
 * reset active engines, wait for idle, retire requests, shrink
 * bound/unbound objects, drain idle workers and free pending objects.
 * Used by test harnesses to get the driver into a known state.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	intel_runtime_pm_get(i915);

	/* Wedge first so in-flight work is discarded before we wait */
	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		/* Only rewind the global seqno once the GPU is idle */
		if (ret == 0 && val & DROP_RESET_SEQNO)
			ret = i915_gem_set_global_seqno(&i915->drm, 1);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* If we wedged above, trigger a full reset to recover the device */
	if (val & DROP_RESET_ACTIVE &&
	    i915_terminally_wedged(&i915->gpu_error)) {
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
		wait_on_bit(&i915->gpu_error.flags,
			    I915_RESET_HANDOFF,
			    TASK_UNINTERRUPTIBLE);
	}

	/* Run the shrinker under an artificial reclaim context */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	/* Keep flushing workers until the GT has actually parked */
	if (val & DROP_IDLE) {
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
4264
/*
 * Read the current uncore cache-sharing policy (0-3) from the
 * MBCUNIT_SNPCR register.  Only meaningful on gen6/gen7.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Hold a runtime pm wakeref around the register read */
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
4284
/*
 * Set the uncore cache-sharing policy (0-3) by read-modify-writing the
 * MBCUNIT_SNPCR register.  Only supported on gen6/gen7.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* The policy field is only two bits wide */
	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
4313
/*
 * Populate @sseu with the runtime slice/subslice/EU power status on
 * Cherryview by decoding the per-subslice power gating signal registers.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each EU-pair PG bit that is clear contributes 2 EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4346
/*
 * Populate @sseu with the runtime slice/subslice/EU power status on
 * gen10 by decoding the slice/EU power gating ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the four EU pairs of subslice A and B respectively */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit represents an enabled EU pair */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4402
/*
 * Populate @sseu with the runtime slice/subslice/EU power status on
 * gen9 by decoding the slice/EU power gating ACK registers.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the four EU pairs of subslice A and B respectively */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/* gen9 big core: subslices within a slice can't be gated */
		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each set ACK bit represents an enabled EU pair */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4458
/*
 * Populate @sseu with the runtime slice status on Broadwell, deriving
 * subslice/EU counts from the static device info for enabled slices.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4486
/*
 * Print an SSEU summary to @m.  @is_available_info selects between the
 * static device capabilities ("Available") and the runtime power state
 * ("Enabled"); capability flags are printed only in the former case.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* Capability flags only make sense for the static device info */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4523
/*
 * debugfs: dump both the static SSEU capabilities and the current
 * runtime SSEU power state.  gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Carry over the static limits before probing runtime state */
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4560
/*
 * Opening i915_forcewake_user holds a runtime pm wakeref and a user
 * forcewake reference for the lifetime of the file descriptor; both
 * are dropped on release.  A no-op before gen6 (no forcewake).
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	/* Release in reverse order of acquisition */
	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4592
/*
 * Report the HPD storm detection threshold and whether a storm is
 * currently being handled (the reenable work is still pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4611
/*
 * Set the HPD storm detection threshold from userspace.  Accepts a
 * decimal threshold (0 disables detection) or the string "reset" to
 * restore the default.  Also clears the per-pin storm statistics and
 * re-enables hotplug if we were mid-storm.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Reserve one byte for the NUL terminator */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4660
/* seq_file boilerplate: route reads through i915_hpd_storm_ctl_show(). */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4665
4666 static const struct file_operations i915_hpd_storm_ctl_fops = {
4667         .owner = THIS_MODULE,
4668         .open = i915_hpd_storm_ctl_open,
4669         .read = seq_read,
4670         .llseek = seq_lseek,
4671         .release = single_release,
4672         .write = i915_hpd_storm_ctl_write
4673 };
4674
4675 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4676 {
4677         struct drm_i915_private *dev_priv = m->private;
4678
4679         seq_printf(m, "Enabled: %s\n",
4680                    yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4681
4682         return 0;
4683 }
4684
/* seq_file boilerplate: route reads through i915_hpd_short_storm_ctl_show(). */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4691
4692 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4693                                               const char __user *ubuf,
4694                                               size_t len, loff_t *offp)
4695 {
4696         struct seq_file *m = file->private_data;
4697         struct drm_i915_private *dev_priv = m->private;
4698         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4699         char *newline;
4700         char tmp[16];
4701         int i;
4702         bool new_state;
4703
4704         if (len >= sizeof(tmp))
4705                 return -EINVAL;
4706
4707         if (copy_from_user(tmp, ubuf, len))
4708                 return -EFAULT;
4709
4710         tmp[len] = '\0';
4711
4712         /* Strip newline, if any */
4713         newline = strchr(tmp, '\n');
4714         if (newline)
4715                 *newline = '\0';
4716
4717         /* Reset to the "default" state for this system */
4718         if (strcmp(tmp, "reset") == 0)
4719                 new_state = !HAS_DP_MST(dev_priv);
4720         else if (kstrtobool(tmp, &new_state) != 0)
4721                 return -EINVAL;
4722
4723         DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4724                       new_state ? "En" : "Dis");
4725
4726         spin_lock_irq(&dev_priv->irq_lock);
4727         hotplug->hpd_short_storm_enabled = new_state;
4728         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4729         for_each_hpd_pin(i)
4730                 hotplug->stats[i].count = 0;
4731         spin_unlock_irq(&dev_priv->irq_lock);
4732
4733         /* Re-enable hpd immediately if we were in an irq storm */
4734         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4735
4736         return len;
4737 }
4738
/* i915_hpd_short_storm_ctl: read the enable state, write to change it. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4747
/*
 * Manually toggle DRRS on every active crtc that supports it: val != 0
 * enables, val == 0 disables.  Gen7+ only.  Returns 0 on success or a
 * negative error code (e.g. -EINTR if interrupted while waiting for a
 * modeset lock or an in-flight commit).
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		/* Hold the crtc lock so its state cannot change under us. */
		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Skip crtcs that are off or have no DRRS support. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any pending commit to actually reach the HW. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this crtc. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			/* DRRS is only toggled on eDP outputs. */
			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		/* Drop the crtc lock taken above before moving on/bailing. */
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4815
/* Write-only knob: writing a non-zero value enables DRRS, zero disables. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4817
/*
 * Re-arm FIFO underrun reporting on all active pipes (and in FBC) after it
 * has been disabled by a previous underrun.  Writing any "true" boolean
 * triggers the reset; "false" is accepted and does nothing.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A "false" write is a no-op, but still consumes the input. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Lock the crtc so its state cannot change under us. */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for any in-flight commit to fully complete. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		/* Bail only after the lock has been dropped. */
		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4871
/* i915_fifo_underrun_reset: write-only trigger, no read side. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4878
4879 static const struct drm_info_list i915_debugfs_list[] = {
4880         {"i915_capabilities", i915_capabilities, 0},
4881         {"i915_gem_objects", i915_gem_object_info, 0},
4882         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4883         {"i915_gem_stolen", i915_gem_stolen_list_info },
4884         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4885         {"i915_gem_interrupt", i915_interrupt_info, 0},
4886         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4887         {"i915_guc_info", i915_guc_info, 0},
4888         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4889         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4890         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4891         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4892         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4893         {"i915_frequency_info", i915_frequency_info, 0},
4894         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4895         {"i915_reset_info", i915_reset_info, 0},
4896         {"i915_drpc_info", i915_drpc_info, 0},
4897         {"i915_emon_status", i915_emon_status, 0},
4898         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4899         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4900         {"i915_fbc_status", i915_fbc_status, 0},
4901         {"i915_ips_status", i915_ips_status, 0},
4902         {"i915_sr_status", i915_sr_status, 0},
4903         {"i915_opregion", i915_opregion, 0},
4904         {"i915_vbt", i915_vbt, 0},
4905         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4906         {"i915_context_status", i915_context_status, 0},
4907         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4908         {"i915_swizzle_info", i915_swizzle_info, 0},
4909         {"i915_ppgtt_info", i915_ppgtt_info, 0},
4910         {"i915_llc", i915_llc, 0},
4911         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4912         {"i915_energy_uJ", i915_energy_uJ, 0},
4913         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4914         {"i915_power_domain_info", i915_power_domain_info, 0},
4915         {"i915_dmc_info", i915_dmc_info, 0},
4916         {"i915_display_info", i915_display_info, 0},
4917         {"i915_engine_info", i915_engine_info, 0},
4918         {"i915_rcs_topology", i915_rcs_topology, 0},
4919         {"i915_shrinker_info", i915_shrinker_info, 0},
4920         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4921         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4922         {"i915_wa_registers", i915_wa_registers, 0},
4923         {"i915_ddb_info", i915_ddb_info, 0},
4924         {"i915_sseu_status", i915_sseu_status, 0},
4925         {"i915_drrs_status", i915_drrs_status, 0},
4926         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4927 };
4928 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4929
4930 static const struct i915_debugfs_files {
4931         const char *name;
4932         const struct file_operations *fops;
4933 } i915_debugfs_files[] = {
4934         {"i915_wedged", &i915_wedged_fops},
4935         {"i915_cache_sharing", &i915_cache_sharing_fops},
4936         {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4937         {"i915_ring_test_irq", &i915_ring_test_irq_fops},
4938         {"i915_gem_drop_caches", &i915_drop_caches_fops},
4939 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4940         {"i915_error_state", &i915_error_state_fops},
4941         {"i915_gpu_info", &i915_gpu_info_fops},
4942 #endif
4943         {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4944         {"i915_next_seqno", &i915_next_seqno_fops},
4945         {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4946         {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4947         {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4948         {"i915_fbc_false_color", &i915_fbc_false_color_fops},
4949         {"i915_dp_test_data", &i915_displayport_test_data_fops},
4950         {"i915_dp_test_type", &i915_displayport_test_type_fops},
4951         {"i915_dp_test_active", &i915_displayport_test_active_fops},
4952         {"i915_guc_log_level", &i915_guc_log_level_fops},
4953         {"i915_guc_log_relay", &i915_guc_log_relay_fops},
4954         {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4955         {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4956         {"i915_ipc_status", &i915_ipc_status_fops},
4957         {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4958         {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4959 };
4960
4961 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4962 {
4963         struct drm_minor *minor = dev_priv->drm.primary;
4964         struct dentry *ent;
4965         int i;
4966
4967         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4968                                   minor->debugfs_root, to_i915(minor->dev),
4969                                   &i915_forcewake_fops);
4970         if (!ent)
4971                 return -ENOMEM;
4972
4973         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4974                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4975                                           S_IRUGO | S_IWUSR,
4976                                           minor->debugfs_root,
4977                                           to_i915(minor->dev),
4978                                           i915_debugfs_files[i].fops);
4979                 if (!ent)
4980                         return -ENOMEM;
4981         }
4982
4983         return drm_debugfs_create_files(i915_debugfs_list,
4984                                         I915_DEBUGFS_ENTRIES,
4985                                         minor->debugfs_root, minor);
4986 }
4987
/* Describes one contiguous range of DPCD registers dumped by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4998
/* The DPCD register ranges dumped by the i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
5011
/*
 * Dump the DPCD register ranges in i915_dpcd_debug[] over the AUX channel
 * for a connected DP/eDP connector.  eDP-only ranges are skipped on
 * non-eDP connectors.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		/* .end wins over .size; a bare .offset means a single byte. */
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			/* err is the byte count actually read; dump that many */
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
5046
5047 static int i915_panel_show(struct seq_file *m, void *data)
5048 {
5049         struct drm_connector *connector = m->private;
5050         struct intel_dp *intel_dp =
5051                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5052
5053         if (connector->status != connector_status_connected)
5054                 return -ENODEV;
5055
5056         seq_printf(m, "Panel power up delay: %d\n",
5057                    intel_dp->panel_power_up_delay);
5058         seq_printf(m, "Panel power down delay: %d\n",
5059                    intel_dp->panel_power_down_delay);
5060         seq_printf(m, "Backlight on delay: %d\n",
5061                    intel_dp->backlight_on_delay);
5062         seq_printf(m, "Backlight off delay: %d\n",
5063                    intel_dp->backlight_off_delay);
5064
5065         return 0;
5066 }
5067 DEFINE_SHOW_ATTRIBUTE(i915_panel);
5068
5069 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
5070 {
5071         struct drm_connector *connector = m->private;
5072         struct intel_connector *intel_connector = to_intel_connector(connector);
5073
5074         if (connector->status != connector_status_connected)
5075                 return -ENODEV;
5076
5077         /* HDCP is supported by connector */
5078         if (!intel_connector->hdcp.shim)
5079                 return -EINVAL;
5080
5081         seq_printf(m, "%s:%d HDCP version: ", connector->name,
5082                    connector->base.id);
5083         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
5084                    "None" : "HDCP1.4");
5085         seq_puts(m, "\n");
5086
5087         return 0;
5088 }
5089 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5090
5091 /**
5092  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5093  * @connector: pointer to a registered drm_connector
5094  *
5095  * Cleanup will be done by drm_connector_unregister() through a call to
5096  * drm_debugfs_connector_remove().
5097  *
5098  * Returns 0 on success, negative error codes on error.
5099  */
5100 int i915_debugfs_connector_add(struct drm_connector *connector)
5101 {
5102         struct dentry *root = connector->debugfs_entry;
5103
5104         /* The connector must have been registered beforehands. */
5105         if (!root)
5106                 return -ENODEV;
5107
5108         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5109             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5110                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5111                                     connector, &i915_dpcd_fops);
5112
5113         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
5114                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5115                                     connector, &i915_panel_fops);
5116                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5117                                     connector, &i915_psr_sink_status_fops);
5118         }
5119
5120         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5121             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5122             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5123                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5124                                     connector, &i915_hdcp_sink_capability_fops);
5125         }
5126
5127         return 0;
5128 }