/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#include <drm/drm_mm.h>

#include "intel_device_info.h"
#include "intel_ringbuffer.h"
#include "intel_uc_fw.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_params.h"
#include "i915_scheduler.h"

struct drm_i915_private;
struct intel_overlay_error_state;
struct intel_display_error_state;

struct i915_gpu_state {
        struct kref ref;
        ktime_t time;
        ktime_t boottime;
        ktime_t uptime;

        struct drm_i915_private *i915;

        char error_msg[128];
        bool simulated;
        bool awake;
        bool wakelock;
        bool suspended;
        int iommu;
        u32 reset_count;
        u32 suspend_count;
        struct intel_device_info device_info;
        struct intel_driver_caps driver_caps;
        struct i915_params params;

        struct i915_error_uc {
                struct intel_uc_fw guc_fw;
                struct intel_uc_fw huc_fw;
                struct drm_i915_error_object *guc_log;
        } uc;

        /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
        u32 gtier[4], ngtier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
        u32 fault_data0; /* gen8, gen9 */
        u32 fault_data1; /* gen8, gen9 */
        u32 done_reg;
        u32 gac_eco;
        u32 gam_ecochk;
        u32 gab_ctl;
        u32 gfx_mode;

        u32 nfence;
        u64 fence[I915_MAX_NUM_FENCES];
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;

        struct drm_i915_error_engine {
                int engine_id;
                /* Software tracked state */
                bool idle;
                bool waiting;
                int num_waiters;
                unsigned long hangcheck_timestamp;
                bool hangcheck_stalled;
                enum intel_engine_hangcheck_action hangcheck_action;
                struct i915_address_space *vm;
                int num_requests;
                u32 reset_count;

                /* position of active request inside the ring */
                u32 rq_head, rq_post, rq_tail;

                /* our own tracking of ring head and tail */
                u32 cpu_ring_head;
                u32 cpu_ring_tail;

                u32 last_seqno;

                /* Register state */
                u32 start;
                u32 tail;
                u32 head;
                u32 ctl;
                u32 mode;
                u32 hws;
                u32 ipeir;
                u32 ipehr;
                u32 bbstate;
                u32 instpm;
                u32 instps;
                u32 seqno;
                u64 bbaddr;
                u64 acthd;
                u32 fault_reg;
                u64 faddr;
                u32 rc_psmi; /* sleep state */
                u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
                struct intel_instdone instdone;

                struct drm_i915_error_context {
                        char comm[TASK_COMM_LEN];
                        pid_t pid;
                        u32 handle;
                        u32 hw_id;
                        int ban_score;
                        int active;
                        int guilty;
                        bool bannable;
                        struct i915_sched_attr sched_attr;
                } context;

                struct drm_i915_error_object {
                        u64 gtt_offset;
                        u64 gtt_size;
                        int page_count;
                        int unused;
                        u32 *pages[0];
                } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

                struct drm_i915_error_object **user_bo;
                long user_bo_count;

                struct drm_i915_error_object *wa_ctx;
                struct drm_i915_error_object *default_state;

                struct drm_i915_error_request {
                        long jiffies;
                        pid_t pid;
                        u32 context;
                        int ban_score;
                        u32 seqno;
                        u32 head;
                        u32 tail;
                        struct i915_sched_attr sched_attr;
                } *requests, execlist[EXECLIST_MAX_PORTS];
                unsigned int num_ports;

                struct drm_i915_error_waiter {
                        char comm[TASK_COMM_LEN];
                        pid_t pid;
                        u32 seqno;
                } *waiters;

                struct {
                        u32 gfx_mode;
                        union {
                                u64 pdp[4];
                                u32 pp_dir_base;
                        };
                } vm_info;
        } engine[I915_NUM_ENGINES];

        struct drm_i915_error_buffer {
                u32 size;
                u32 name;
                u32 rseqno[I915_NUM_ENGINES], wseqno;
                u64 gtt_offset;
                u32 read_domains;
                u32 write_domain;
                s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
                u32 tiling:2;
                u32 dirty:1;
                u32 purgeable:1;
                u32 userptr:1;
                s32 engine:4;
                u32 cache_level:3;
        } *active_bo[I915_NUM_ENGINES], *pinned_bo;
        u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
        struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

struct i915_gpu_error {
        /* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

        struct delayed_work hangcheck_work;
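
        /*
         * Illustrative sketch (not part of this header): the hangcheck
         * worker is typically (re)armed with the period defined above
         * via the standard delayed-work API, e.g.:
         *
         *      schedule_delayed_work(&i915->gpu_error.hangcheck_work,
         *                            DRM_I915_HANGCHECK_JIFFIES);
         *
         * where `i915` is a hypothetical struct drm_i915_private
         * pointer; the real call sites live in the hangcheck code.
         */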

        /* For reset and error_state handling. */
        spinlock_t lock;
        /* Protected by the above dev->gpu_error.lock. */
        struct i915_gpu_state *first_error;

        atomic_t pending_fb_pin;

        unsigned long missed_irq_rings;

        /**
         * State variable controlling the reset flow and count
         *
         * This is a counter which gets incremented when a reset is
         * triggered.
         *
         * Before the reset commences, the I915_RESET_BACKOFF bit is set
         * meaning that any waiters holding onto the struct_mutex should
         * relinquish the lock immediately in order for the reset to start.
         *
         * If the reset is not completed successfully, the I915_WEDGED bit is
         * set meaning that the hardware is terminally sour and there is no
         * recovery. All waiters on the reset_queue will be woken when
         * that happens.
         *
         * This counter is used by the wait_seqno code to notice that a reset
         * event happened and that it needs to restart the entire ioctl (since
         * most likely the seqno it waited for won't ever signal anytime soon).
         *
         * This is important for lock-free wait paths, where no contended lock
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
         */
        unsigned long reset_count;
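
        /*
         * Illustrative sketch (assumed usage, not a definition in this
         * header): a lock-free waiter samples the counter before
         * sleeping and bails out if a reset happened in the meantime:
         *
         *      unsigned long reset = READ_ONCE(error->reset_count);
         *      ...wait for the seqno...
         *      if (READ_ONCE(error->reset_count) != reset)
         *              return -EAGAIN; (restart the ioctl)
         *
         * `error` is a hypothetical struct i915_gpu_error pointer.
         */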

        /**
         * flags: Control various stages of the GPU reset
         *
         * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
         * other users acquiring the struct_mutex. To do this we set the
         * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
         * and then check for that bit before acquiring the struct_mutex (in
         * i915_mutex_lock_interruptible()). I915_RESET_BACKOFF serves a
         * secondary role in preventing two concurrent global reset attempts.
         *
         * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
         * struct_mutex. We try to acquire the struct_mutex in the reset worker,
         * but it may be held by some long running waiter (that we cannot
         * interrupt without causing trouble). Once we are ready to do the GPU
         * reset, we set the I915_RESET_HANDOFF bit and wake up any waiters. If
         * they already hold the struct_mutex and want to participate they can
         * inspect the bit and do the reset directly, otherwise the worker
         * waits for the struct_mutex.
         *
         * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
         * acquire the struct_mutex to reset an engine, we need an explicit
         * flag to prevent two concurrent reset attempts on the same engine.
         * As the number of engines continues to grow, the flags are allocated
         * from the most significant bits.
         *
         * #I915_WEDGED - If a reset fails and we can no longer use the GPU,
         * we set the #I915_WEDGED bit. Prior to command submission, e.g.
         * i915_request_alloc(), this bit is checked and the sequence
         * aborted (with -EIO reported to userspace) if set.
         */
        unsigned long flags;
#define I915_RESET_BACKOFF      0
#define I915_RESET_HANDOFF      1
#define I915_RESET_MODESET      2
#define I915_WEDGED             (BITS_PER_LONG - 1)
#define I915_RESET_ENGINE       (I915_WEDGED - I915_NUM_ENGINES)
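
        /*
         * Illustrative sketch (assumed usage): the bits above are meant
         * for the standard bitops, e.g.:
         *
         *      if (test_bit(I915_WEDGED, &error->flags))
         *              return -EIO;
         *
         *      test_bit(I915_RESET_ENGINE + engine->id, &error->flags);
         *
         * `error` and `engine` are hypothetical struct i915_gpu_error
         * and struct intel_engine_cs pointers.
         */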

        /** Number of times an engine has been reset */
        u32 reset_engine_count[I915_NUM_ENGINES];

        /** Set of stalled engines with guilty requests, in the current reset */
        u32 stalled_mask;

        /** Reason for the current *global* reset */
        const char *reason;

        /**
         * Waitqueue to signal when a hang is detected. Used by waiters
         * to release the struct_mutex for the reset to proceed.
         */
        wait_queue_head_t wait_queue;

        /**
         * Waitqueue to signal when the reset has completed. Used by clients
         * that wait for dev_priv->mm.wedged to settle.
         */
        wait_queue_head_t reset_queue;

        /* For missed irq/seqno simulation. */
        unsigned long test_irq_rings;
};

struct drm_i915_error_state_buf {
        struct drm_i915_private *i915;
        unsigned int bytes;
        unsigned int size;
        int err;
        u8 *buf;
        loff_t start;
        loff_t pos;
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
                            const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
                              struct drm_i915_private *i915,
                              size_t count, loff_t pos);

static inline void
i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
{
        kfree(eb->buf);
}
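
/*
 * Illustrative usage sketch for the buffer helpers above (assuming the
 * usual 0-on-success convention for i915_error_state_buf_init(); `gpu`,
 * `count` and `pos` are hypothetical locals):
 *
 *      struct drm_i915_error_state_buf eb;
 *
 *      if (i915_error_state_buf_init(&eb, i915, count, pos) == 0) {
 *              i915_error_state_to_str(&eb, gpu);
 *              ...copy eb.buf out to the reader...
 *              i915_error_state_buf_release(&eb);
 *      }
 */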

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
                              u32 engine_mask,
                              const char *error_msg);

static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
        kref_get(&gpu->ref);
        return gpu;
}

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
        if (gpu)
                kref_put(&gpu->ref, __i915_gpu_state_free);
}

struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
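
/*
 * Illustrative sketch: the capture is reference counted, so a reader
 * takes (or inherits) a reference and drops it with i915_gpu_state_put()
 * when done. Assuming i915_first_error_state() hands back its own
 * reference, as the get/put helpers suggest:
 *
 *      struct i915_gpu_state *gpu;
 *
 *      gpu = i915_first_error_state(i915);
 *      if (gpu) {
 *              ...print or inspect gpu...
 *              i915_gpu_state_put(gpu);
 *      }
 */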

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
                                            u32 engine_mask,
                                            const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
        return NULL;
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */

#endif /* _I915_GPU_ERROR_H_ */