drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"

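/*
 * The default log buffer sizes scale with the debug level of the kernel
 * config: full GuC debug builds get the largest buffers, GEM debug builds an
 * intermediate size, and regular builds the minimum.
 */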
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_4M
#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_4M
#else
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_8K
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_64K
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_2M
#endif

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);

struct guc_log_section {
        u32 max;
        u32 flag;
        u32 default_val;
        const char *name;
};

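/*
 * Convert a log section size parameter, given in units of 1MB, to a byte
 * count. A negative param selects the section's compile-time default; e.g.
 * param == 2 yields 2 * SZ_1M. A param of SZ_4K (4096MB) or more would not
 * fit in a 32-bit byte count, so it is rejected in favour of the default.
 */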
static s32 scale_log_param(struct intel_guc_log *log, const struct guc_log_section *section,
                           s32 param)
{
        /* -1 means default */
        if (param < 0)
                return section->default_val;

        /* Check for 32-bit overflow */
        if (param >= SZ_4K) {
                drm_err(&guc_to_gt(log_to_guc(log))->i915->drm, "Size too large for GuC %s log: %dMB!",
                        section->name, param);
                return section->default_val;
        }

        /* Param units are 1MB */
        return param * SZ_1M;
}

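/*
 * Compute the byte size of each log section, then translate those sizes into
 * the units/count encoding the GuC API expects: 1MB units (with the section's
 * "alloc units" flag set) when the size is a whole number of MBs, 4KB units
 * otherwise.
 */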
static void _guc_log_init_sizes(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
        static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
                {
                        GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
                        GUC_LOG_LOG_ALLOC_UNITS,
                        GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
                        "crash dump"
                },
                {
                        GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
                        GUC_LOG_LOG_ALLOC_UNITS,
                        GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
                        "debug",
                },
                {
                        GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
                        GUC_LOG_CAPTURE_ALLOC_UNITS,
                        GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
                        "capture",
                }
        };
        s32 params[GUC_LOG_SECTIONS_LIMIT] = {
                GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE / SZ_1M,
                GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE / SZ_1M,
                GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE / SZ_1M,
        };
        int i;

        for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
                log->sizes[i].bytes = scale_log_param(log, sections + i, params[i]);

        /* If debug size >= 1MB then bump default crash size to keep the same units */
        if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
            GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
                log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;

        /* Prepare the GuC API structure fields: */
        for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
                /* Convert to correct units */
                if ((log->sizes[i].bytes % SZ_1M) == 0) {
                        log->sizes[i].units = SZ_1M;
                        log->sizes[i].flag = sections[i].flag;
                } else {
                        log->sizes[i].units = SZ_4K;
                        log->sizes[i].flag = 0;
                }

                if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
                        drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
                                sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
                log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;

                if (!log->sizes[i].count) {
                        drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
                } else {
                        /* The GuC API encodes the size as (count + 1) units */
                        log->sizes[i].count--;
                }

                /* Clip to field size */
                if (log->sizes[i].count > sections[i].max) {
                        drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
                                sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
                        log->sizes[i].count = sections[i].max;
                }
        }

        if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
                drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
                        log->sizes[GUC_LOG_SECTIONS_CRASH].units,
                        log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
                log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
                log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
        }

        log->sizes_initialised = true;
}

static void guc_log_init_sizes(struct intel_guc_log *log)
{
        if (log->sizes_initialised)
                return;

        _guc_log_init_sizes(log);
}

static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
{
        guc_log_init_sizes(log);

        return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
}

static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
{
        guc_log_init_sizes(log);

        return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
}

u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
{
        guc_log_init_sizes(log);

        return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
}

static u32 intel_guc_log_size(struct intel_guc_log *log)
{
        /*
         *  GuC Log buffer Layout:
         *
         *  NB: Ordering must follow "enum guc_log_buffer_type".
         *
         *  +===============================+ 00B
         *  |      Debug state header       |
         *  +-------------------------------+ 32B
         *  |    Crash dump state header    |
         *  +-------------------------------+ 64B
         *  |     Capture state header      |
         *  +-------------------------------+ 96B
         *  |                               |
         *  +===============================+ PAGE_SIZE (4KB)
         *  |          Debug logs           |
         *  +===============================+ + DEBUG_SIZE
         *  |        Crash Dump logs        |
         *  +===============================+ + CRASH_SIZE
         *  |         Capture logs          |
         *  +===============================+ + CAPTURE_SIZE
         */
        return PAGE_SIZE +
                intel_guc_log_section_size_crash(log) +
                intel_guc_log_section_size_debug(log) +
                intel_guc_log_section_size_capture(log);
}

/**
 * DOC: GuC firmware log
 *
 * The firmware log is enabled by setting i915.guc_log_level to a positive
 * level. Log data can be dumped by reading the debugfs file
 * i915_guc_log_dump. Reading i915_guc_load_status prints the firmware
 * loading status and scratch register values.
 */

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
                GUC_DEBUG_LOG_BUFFER
        };

        return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}

static int guc_action_flush_log(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
                0
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_control_log(struct intel_guc *guc, bool enable,
                                  bool default_logging, u32 verbosity)
{
        u32 action[] = {
                INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
                (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
                (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
                (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
        };

        GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
                                 void *subbuf,
                                 void *prev_subbuf,
                                 size_t prev_padding)
{
        /*
         * Use no-overwrite mode by default, where relay will stop accepting
         * new data if there are no empty sub buffers left. There is no strict
         * synchronization enforced by relay between Consumer and Producer, so
         * in overwrite mode there is a possibility of getting
         * inconsistent/garbled data: the Producer could be writing onto the
         * same sub buffer from which Consumer is reading. This can't be
         * avoided unless Consumer is fast enough and can always run in tandem
         * with Producer.
         */
        if (relay_buf_full(buf))
                return 0;

        return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
                                               struct dentry *parent,
                                               umode_t mode,
                                               struct rchan_buf *buf,
                                               int *is_global)
{
        struct dentry *buf_file;

        /*
         * This is to enable the use of a single buffer for the relay channel
         * and correspondingly have a single file exposed to User, through
         * which it can collect the logs in order without any post-processing.
         * 'is_global' must be set even if parent is NULL, for early logging.
         */
        *is_global = 1;

        if (!parent)
                return NULL;

        buf_file = debugfs_create_file(filename, mode,
                                       parent, buf, &relay_file_operations);
        if (IS_ERR(buf_file))
                return NULL;

        return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

/* relay channel callbacks */
static const struct rchan_callbacks relay_callbacks = {
        .subbuf_start = subbuf_start_callback,
        .create_buf_file = create_buf_file_callback,
        .remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
        /*
         * Make sure the updates made in the sub buffer are visible when
         * Consumer sees the following update to offset inside the sub buffer.
         */
        smp_wmb();

        /* All data has been written, so now move the offset of the sub buffer. */
        relay_reserve(log->relay.channel, log->vma->obj->base.size -
                                          intel_guc_log_section_size_capture(log));

        /* Switch to the next sub buffer */
        relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
        /*
         * Just get the base address of a new sub buffer and copy data into it
         * ourselves. NULL will be returned in no-overwrite mode, if all sub
         * buffers are full. Could have used relay_write() to copy the data
         * indirectly, but that would have been a bit convoluted, as we need
         * to write to only certain locations inside a sub buffer, which
         * cannot be done without using relay_reserve() along with
         * relay_write(). So it's better to use relay_reserve() alone.
         */
        return relay_reserve(log->relay.channel, 0);
}

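/*
 * Fold a newly sampled buffer-full count into the running overflow
 * statistics. buffer_full_cnt is only 4 bits wide in the shared state, so an
 * apparent decrease means the counter wrapped; e.g. comparing 2 against a
 * previous value of 15, the unsigned subtraction plus 16 below yields the
 * true delta of 3 (modulo 2^32).
 */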
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
                                      enum guc_log_buffer_type type,
                                      unsigned int full_cnt)
{
        unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
        bool overflow = false;

        if (full_cnt != prev_full_cnt) {
                overflow = true;

                log->stats[type].overflow = full_cnt;
                log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

                if (full_cnt < prev_full_cnt) {
                        /* buffer_full_cnt is a 4 bit counter */
                        log->stats[type].sampled_overflow += 16;
                }

                dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
                                       "GuC log buffer overflow\n");
        }

        return overflow;
}

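/* Byte size of a single log section, selected by buffer type. */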
unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
                                           enum guc_log_buffer_type type)
{
        switch (type) {
        case GUC_DEBUG_LOG_BUFFER:
                return intel_guc_log_section_size_debug(log);
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return intel_guc_log_section_size_crash(log);
        case GUC_CAPTURE_LOG_BUFFER:
                return intel_guc_log_section_size_capture(log);
        default:
                MISSING_CASE(type);
        }

        return 0;
}

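/*
 * Byte offset of a log section within the shared buffer: one page of state
 * headers, followed by the sections that precede @type in enum order.
 */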
size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
                                       enum guc_log_buffer_type type)
{
        enum guc_log_buffer_type i;
        size_t offset = PAGE_SIZE; /* for the log_buffer_states */

        for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
                if (i == type)
                        break;
                offset += intel_guc_get_log_buffer_size(log, i);
        }

        return offset;
}

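/*
 * Snapshot the debug and crash-dump sections into a fresh relay sub buffer.
 * For each section we take a local copy of its state header (the shared
 * buffer is mapped uncached, so read it only once), advance the shared read
 * pointer to the sampled write pointer, and copy just the bytes written
 * since the last flush, falling back to a full-section copy on overflow or
 * if the offsets look corrupt.
 */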
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
        unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
        struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
        struct guc_log_buffer_state log_buf_state_local;
        enum guc_log_buffer_type type;
        void *src_data, *dst_data;
        bool new_overflow;

        mutex_lock(&log->relay.lock);

        if (WARN_ON(!intel_guc_log_relay_created(log)))
                goto out_unlock;

        /* Get the pointer to shared GuC log buffer */
        src_data = log->buf_addr;
        log_buf_state = src_data;

        /* Get the pointer to local buffer to store the logs */
        log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

        if (unlikely(!log_buf_snapshot_state)) {
                /*
                 * Use a rate-limited message to avoid a deluge; the logs
                 * might be getting consumed by User at a slow rate.
                 */
                DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
                log->relay.full_count++;

                goto out_unlock;
        }

        /* Actual logs are present from the 2nd page */
        src_data += PAGE_SIZE;
        dst_data += PAGE_SIZE;

        /* For relay logging, we exclude error state capture */
        for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
                /*
                 * Make a copy of the state structure, inside the GuC log
                 * buffer (which is mapped uncached), on the stack, to avoid
                 * reading from it multiple times.
                 */
                memcpy(&log_buf_state_local, log_buf_state,
                       sizeof(struct guc_log_buffer_state));
                buffer_size = intel_guc_get_log_buffer_size(log, type);
                read_offset = log_buf_state_local.read_ptr;
                write_offset = log_buf_state_local.sampled_write_ptr;
                full_cnt = log_buf_state_local.buffer_full_cnt;

                /* Bookkeeping stuff */
                log->stats[type].flush += log_buf_state_local.flush_to_file;
                new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);

                /* Update the state of shared log buffer */
                log_buf_state->read_ptr = write_offset;
                log_buf_state->flush_to_file = 0;
                log_buf_state++;

                /* First copy the state structure into the snapshot buffer */
                memcpy(log_buf_snapshot_state, &log_buf_state_local,
                       sizeof(struct guc_log_buffer_state));

                /*
                 * The write pointer could have been updated by the GuC
                 * firmware after it sent the flush interrupt to the host;
                 * for consistency, set the snapshot's write pointer to the
                 * same sampled_write_ptr value.
                 */
                log_buf_snapshot_state->write_ptr = write_offset;
                log_buf_snapshot_state++;

                /* Now copy the actual logs. */
                if (unlikely(new_overflow)) {
                        /* copy the whole buffer in case of overflow */
                        read_offset = 0;
                        write_offset = buffer_size;
                } else if (unlikely((read_offset > buffer_size) ||
                                    (write_offset > buffer_size))) {
                        DRM_ERROR("invalid log buffer state\n");
                        /* copy whole buffer as offsets are unreliable */
                        read_offset = 0;
                        write_offset = buffer_size;
                }

                /* Just copy the newly written data */
                if (read_offset > write_offset) {
                        i915_memcpy_from_wc(dst_data, src_data, write_offset);
                        bytes_to_copy = buffer_size - read_offset;
                } else {
                        bytes_to_copy = write_offset - read_offset;
                }
                i915_memcpy_from_wc(dst_data + read_offset,
                                    src_data + read_offset, bytes_to_copy);

                src_data += buffer_size;
                dst_data += buffer_size;
        }

        guc_move_to_next_buf(log);

out_unlock:
        mutex_unlock(&log->relay.lock);
}


static void copy_debug_logs_work(struct work_struct *work)
{
        struct intel_guc_log *log =
                container_of(work, struct intel_guc_log, relay.flush_work);

        guc_log_copy_debuglogs_for_relay(log);
}

static int guc_log_relay_map(struct intel_guc_log *log)
{
        lockdep_assert_held(&log->relay.lock);

        if (!log->vma || !log->buf_addr)
                return -ENODEV;

        /*
         * The WC vmalloc mapping of the log buffer pages was done at GuC log
         * init time, but let's keep a reference for bookkeeping.
         */
        i915_gem_object_get(log->vma->obj);
        log->relay.buf_in_use = true;

        return 0;
}

static void guc_log_relay_unmap(struct intel_guc_log *log)
{
        lockdep_assert_held(&log->relay.lock);

        i915_gem_object_put(log->vma->obj);
        log->relay.buf_in_use = false;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
        mutex_init(&log->relay.lock);
        INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
        log->relay.started = false;
}

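/*
 * Open a relay channel ("guc_log" under the drm minor's debugfs root) whose
 * sub buffers each mirror the shared log buffer minus the capture section.
 */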
static int guc_log_relay_create(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
        struct rchan *guc_log_relay_chan;
        size_t n_subbufs, subbuf_size;
        int ret;

        lockdep_assert_held(&log->relay.lock);
        GEM_BUG_ON(!log->vma);

        /*
         * Keep the size of the sub buffers the same as the shared log
         * buffer, minus the error-state-capture logs, which GuC log events
         * exclude.
         */
        subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);

        /*
         * Store up to 8 snapshots, which is large enough to buffer sufficient
         * boot time logs and gives User enough leeway, in terms of latency,
         * for consuming the logs from relay. It also doesn't take up too
         * much memory.
         */
        n_subbufs = 8;

        guc_log_relay_chan = relay_open("guc_log",
                                        dev_priv->drm.primary->debugfs_root,
                                        subbuf_size, n_subbufs,
                                        &relay_callbacks, dev_priv);
        if (!guc_log_relay_chan) {
                DRM_ERROR("Couldn't create relay chan for GuC logging\n");

                ret = -ENOMEM;
                return ret;
        }

        GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
        log->relay.channel = guc_log_relay_chan;

        return 0;
}


static void guc_log_relay_destroy(struct intel_guc_log *log)
{
        lockdep_assert_held(&log->relay.lock);

        relay_close(log->relay.channel);
        log->relay.channel = NULL;
}

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
        intel_wakeref_t wakeref;

        _guc_log_copy_debuglogs_for_relay(log);

        /*
         * Generally the device is expected to be active at this time, so the
         * get/put should be really quick.
         */
        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
                guc_action_flush_log_complete(guc);
}

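/*
 * Resolve the effective log level: a negative i915.guc_log_level selects a
 * config-dependent default, an out-of-range value falls back to a
 * config-dependent level with a warning, and anything else is used as given.
 */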
static u32 __get_default_log_level(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

        /* A negative value means "use platform/config default" */
        if (i915->params.guc_log_level < 0) {
                return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
                        IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
                        GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
        }

        if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "guc_log_level", i915->params.guc_log_level,
                         "verbosity too high");
                return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
                        IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
                        GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
        }

        GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
        GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
        return i915->params.guc_log_level;
}

int intel_guc_log_create(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct i915_vma *vma;
        void *vaddr;
        u32 guc_log_size;
        int ret;

        GEM_BUG_ON(log->vma);

        guc_log_size = intel_guc_log_size(log);

        vma = intel_guc_allocate_vma(guc, guc_log_size);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }

        log->vma = vma;
        /*
         * Create a WC (uncached for read) vmalloc mapping up front, for
         * immediate access to the data from memory during critical events
         * such as error capture.
         */
        vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                i915_vma_unpin_and_release(&log->vma, 0);
                goto err;
        }
        log->buf_addr = vaddr;

        log->level = __get_default_log_level(log);
        DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
                         log->level, str_enabled_disabled(log->level),
                         str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
                         GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

        return 0;

err:
        DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
        return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
        log->buf_addr = NULL;
        i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}

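/*
 * Program a new log level into the GuC via the log-control H2G action,
 * taking a runtime pm wakeref for the send and struct_mutex to serialize
 * concurrent level changes.
 */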
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
        intel_wakeref_t wakeref;
        int ret = 0;

        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
        GEM_BUG_ON(!log->vma);

        /*
         * GuC recognizes log levels from 0 to max; we use 0 to indicate
         * that logging should be disabled.
         */
        if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
                return -EINVAL;

        mutex_lock(&dev_priv->drm.struct_mutex);

        if (log->level == level)
                goto out_unlock;

        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
                ret = guc_action_control_log(guc,
                                             GUC_LOG_LEVEL_IS_VERBOSE(level),
                                             GUC_LOG_LEVEL_IS_ENABLED(level),
                                             GUC_LOG_LEVEL_TO_VERBOSITY(level));
        if (ret) {
                DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
                goto out_unlock;
        }

        log->level = level;

out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
        return log->buf_addr;
}

int intel_guc_log_relay_open(struct intel_guc_log *log)
{
        int ret;

        if (!log->vma)
                return -ENODEV;

        mutex_lock(&log->relay.lock);

        if (intel_guc_log_relay_created(log)) {
                ret = -EEXIST;
                goto out_unlock;
        }

        /*
         * We require SSE 4.1 for fast reads from the GuC log buffer, and it
         * should be present on the chipsets supporting GuC-based submission.
         */
        if (!i915_has_memcpy_from_wc()) {
                ret = -ENXIO;
                goto out_unlock;
        }

        ret = guc_log_relay_create(log);
        if (ret)
                goto out_unlock;

        ret = guc_log_relay_map(log);
        if (ret)
                goto out_relay;

        mutex_unlock(&log->relay.lock);

        return 0;

out_relay:
        guc_log_relay_destroy(log);
out_unlock:
        mutex_unlock(&log->relay.lock);

        return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
        if (log->relay.started)
                return -EEXIST;

        /*
         * When GuC is logging without us relaying to userspace, we're ignoring
         * the flush notification. This means that we need to unconditionally
         * flush on relay enabling, since GuC only notifies us once.
         */
        queue_work(system_highpri_wq, &log->relay.flush_work);

        log->relay.started = true;

        return 0;
}

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        intel_wakeref_t wakeref;

        if (!log->relay.started)
                return;

        /*
         * Before initiating the forceful flush, wait for any pending/ongoing
         * flush to complete; otherwise the forceful flush may not actually
         * happen.
         */
        flush_work(&log->relay.flush_work);

        with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
                guc_action_flush_log(guc);

        /* GuC would have updated the log buffer by now, so copy it */
        guc_log_copy_debuglogs_for_relay(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so no
 * possibility of race with start/flush since relay_write cannot race
 * relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

        if (!log->relay.started)
                return;

        intel_synchronize_irq(i915);

        flush_work(&log->relay.flush_work);

        log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
        guc_log_relay_stop(log);

        mutex_lock(&log->relay.lock);
        GEM_BUG_ON(!intel_guc_log_relay_created(log));
        guc_log_relay_unmap(log);
        guc_log_relay_destroy(log);
        mutex_unlock(&log->relay.lock);
}

void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
        if (log->relay.started)
                queue_work(system_highpri_wq, &log->relay.flush_work);
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
        switch (type) {
        case GUC_DEBUG_LOG_BUFFER:
                return "DEBUG";
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return "CRASH";
        case GUC_CAPTURE_LOG_BUFFER:
                return "CAPTURE";
        default:
                MISSING_CASE(type);
        }

        return "";
}

/**
 * intel_guc_log_info - dump information about GuC log relay
 * @log: the GuC log
 * @p: the &drm_printer
 *
 * Pretty printer for GuC log info
 */
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
        enum guc_log_buffer_type type;

        if (!intel_guc_log_relay_created(log)) {
                drm_puts(p, "GuC log relay not created\n");
                return;
        }

        drm_puts(p, "GuC logging stats:\n");

        drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);

        for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
                           stringify_guc_log_type(type),
                           log->stats[type].flush,
                           log->stats[type].sampled_overflow);
        }
}

/**
 * intel_guc_log_dump - dump the contents of the GuC log
 * @log: the GuC log
 * @p: the &drm_printer
 * @dump_load_err: dump the log saved on GuC load error
 *
 * Pretty printer for the GuC log
 */
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
                       bool dump_load_err)
{
        struct intel_guc *guc = log_to_guc(log);
        struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
        struct drm_i915_gem_object *obj = NULL;
        void *map;
        u32 *page;
        int i, j;

        if (!intel_guc_is_supported(guc))
                return -ENODEV;

        if (dump_load_err)
                obj = uc->load_err_log;
        else if (guc->log.vma)
                obj = guc->log.vma->obj;

        if (!obj)
                return 0;

        page = (u32 *)__get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        intel_guc_dump_time_info(guc, p);

        map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
        if (IS_ERR(map)) {
                DRM_DEBUG("Failed to pin object\n");
                drm_puts(p, "(log data inaccessible)\n");
                free_page((unsigned long)page);
                return PTR_ERR(map);
        }

        for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
                if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
                        memcpy(page, map + i, PAGE_SIZE);

                for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
                        drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
                                   *(page + j + 0), *(page + j + 1),
                                   *(page + j + 2), *(page + j + 3));
        }

        drm_puts(p, "\n");

        i915_gem_object_unpin_map(obj);
        free_page((unsigned long)page);

        return 0;
}