drivers/gpu/drm/i915/gt/intel_reset.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2008-2018 Intel Corporation
4  */
5
6 #include <linux/sched/mm.h>
7 #include <linux/stop_machine.h>
8
9 #include "display/intel_display_types.h"
10 #include "display/intel_overlay.h"
11
12 #include "gem/i915_gem_context.h"
13
14 #include "i915_drv.h"
15 #include "i915_gpu_error.h"
16 #include "i915_irq.h"
17 #include "intel_breadcrumbs.h"
18 #include "intel_engine_pm.h"
19 #include "intel_gt.h"
20 #include "intel_gt_pm.h"
21 #include "intel_gt_requests.h"
22 #include "intel_reset.h"
23
24 #include "uc/intel_guc.h"
25 #include "uc/intel_guc_submission.h"
26
27 #define RESET_MAX_RETRIES 3
28
29 /* XXX How to handle concurrent GGTT updates using tiling registers? */
30 #define RESET_UNDER_STOP_MACHINE 0
31
32 static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
33 {
34         intel_uncore_rmw_fw(uncore, reg, 0, set);
35 }
36
37 static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
38 {
39         intel_uncore_rmw_fw(uncore, reg, clr, 0);
40 }
41
42 static void skip_context(struct i915_request *rq)
43 {
44         struct intel_context *hung_ctx = rq->context;
45
46         list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
47                 if (!i915_request_is_active(rq))
48                         return;
49
50                 if (rq->context == hung_ctx) {
51                         i915_request_set_error_once(rq, -EIO);
52                         __i915_request_skip(rq);
53                 }
54         }
55 }
56
57 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
58 {
59         struct drm_i915_file_private *file_priv = ctx->file_priv;
60         unsigned long prev_hang;
61         unsigned int score;
62
63         if (IS_ERR_OR_NULL(file_priv))
64                 return;
65
66         score = 0;
67         if (banned)
68                 score = I915_CLIENT_SCORE_CONTEXT_BAN;
69
70         prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
71         if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
72                 score += I915_CLIENT_SCORE_HANG_FAST;
73
74         if (score) {
75                 atomic_add(score, &file_priv->ban_score);
76
77                 drm_dbg(&ctx->i915->drm,
78                         "client %s: gained %u ban score, now %u\n",
79                         ctx->name, score,
80                         atomic_read(&file_priv->ban_score));
81         }
82 }
83
84 static bool mark_guilty(struct i915_request *rq)
85 {
86         struct i915_gem_context *ctx;
87         unsigned long prev_hang;
88         bool banned;
89         int i;
90
91         if (intel_context_is_closed(rq->context)) {
92                 intel_context_set_banned(rq->context);
93                 return true;
94         }
95
96         rcu_read_lock();
97         ctx = rcu_dereference(rq->context->gem_context);
98         if (ctx && !kref_get_unless_zero(&ctx->ref))
99                 ctx = NULL;
100         rcu_read_unlock();
101         if (!ctx)
102                 return intel_context_is_banned(rq->context);
103
104         atomic_inc(&ctx->guilty_count);
105
106         /* Cool contexts are too cool to be banned! (Used for reset testing.) */
107         if (!i915_gem_context_is_bannable(ctx)) {
108                 banned = false;
109                 goto out;
110         }
111
112         drm_notice(&ctx->i915->drm,
113                    "%s context reset due to GPU hang\n",
114                    ctx->name);
115
116         /* Record the timestamp for the last N hangs */
117         prev_hang = ctx->hang_timestamp[0];
118         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
119                 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
120         ctx->hang_timestamp[i] = jiffies;
121
122         /* If we have hung N+1 times in rapid succession, we ban the context! */
123         banned = !i915_gem_context_is_recoverable(ctx);
124         if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
125                 banned = true;
126         if (banned) {
127                 drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
128                         ctx->name, atomic_read(&ctx->guilty_count));
129                 intel_context_set_banned(rq->context);
130         }
131
132         client_mark_guilty(ctx, banned);
133
134 out:
135         i915_gem_context_put(ctx);
136         return banned;
137 }
138
139 static void mark_innocent(struct i915_request *rq)
140 {
141         struct i915_gem_context *ctx;
142
143         rcu_read_lock();
144         ctx = rcu_dereference(rq->context->gem_context);
145         if (ctx)
146                 atomic_inc(&ctx->active_count);
147         rcu_read_unlock();
148 }
149
150 void __i915_request_reset(struct i915_request *rq, bool guilty)
151 {
152         RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
153         GEM_BUG_ON(__i915_request_is_complete(rq));
154
155         rcu_read_lock(); /* protect the GEM context */
156         if (guilty) {
157                 i915_request_set_error_once(rq, -EIO);
158                 __i915_request_skip(rq);
159                 if (mark_guilty(rq))
160                         skip_context(rq);
161         } else {
162                 i915_request_set_error_once(rq, -EAGAIN);
163                 mark_innocent(rq);
164         }
165         rcu_read_unlock();
166 }
167
168 static bool i915_in_reset(struct pci_dev *pdev)
169 {
170         u8 gdrst;
171
172         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
173         return gdrst & GRDOM_RESET_STATUS;
174 }
175
176 static int i915_do_reset(struct intel_gt *gt,
177                          intel_engine_mask_t engine_mask,
178                          unsigned int retry)
179 {
180         struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
181         int err;
182
183         /* Assert reset for at least 20 usec, and wait for acknowledgement. */
184         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
185         udelay(50);
186         err = wait_for_atomic(i915_in_reset(pdev), 50);
187
188         /* Clear the reset request. */
189         pci_write_config_byte(pdev, I915_GDRST, 0);
190         udelay(50);
191         if (!err)
192                 err = wait_for_atomic(!i915_in_reset(pdev), 50);
193
194         return err;
195 }
196
197 static bool g4x_reset_complete(struct pci_dev *pdev)
198 {
199         u8 gdrst;
200
201         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
202         return (gdrst & GRDOM_RESET_ENABLE) == 0;
203 }
204
205 static int g33_do_reset(struct intel_gt *gt,
206                         intel_engine_mask_t engine_mask,
207                         unsigned int retry)
208 {
209         struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
210
211         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
212         return wait_for_atomic(g4x_reset_complete(pdev), 50);
213 }
214
215 static int g4x_do_reset(struct intel_gt *gt,
216                         intel_engine_mask_t engine_mask,
217                         unsigned int retry)
218 {
219         struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
220         struct intel_uncore *uncore = gt->uncore;
221         int ret;
222
223         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
224         rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
225         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
226
227         pci_write_config_byte(pdev, I915_GDRST,
228                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
229         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
230         if (ret) {
231                 GT_TRACE(gt, "Wait for media reset failed\n");
232                 goto out;
233         }
234
235         pci_write_config_byte(pdev, I915_GDRST,
236                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
237         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
238         if (ret) {
239                 GT_TRACE(gt, "Wait for render reset failed\n");
240                 goto out;
241         }
242
243 out:
244         pci_write_config_byte(pdev, I915_GDRST, 0);
245
246         rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
247         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
248
249         return ret;
250 }
251
252 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
253                         unsigned int retry)
254 {
255         struct intel_uncore *uncore = gt->uncore;
256         int ret;
257
258         intel_uncore_write_fw(uncore, ILK_GDSR,
259                               ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
260         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
261                                            ILK_GRDOM_RESET_ENABLE, 0,
262                                            5000, 0,
263                                            NULL);
264         if (ret) {
265                 GT_TRACE(gt, "Wait for render reset failed\n");
266                 goto out;
267         }
268
269         intel_uncore_write_fw(uncore, ILK_GDSR,
270                               ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
271         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
272                                            ILK_GRDOM_RESET_ENABLE, 0,
273                                            5000, 0,
274                                            NULL);
275         if (ret) {
276                 GT_TRACE(gt, "Wait for media reset failed\n");
277                 goto out;
278         }
279
280 out:
281         intel_uncore_write_fw(uncore, ILK_GDSR, 0);
282         intel_uncore_posting_read_fw(uncore, ILK_GDSR);
283         return ret;
284 }
285
286 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
287 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
288 {
289         struct intel_uncore *uncore = gt->uncore;
290         int err;
291
292         /*
293          * GEN6_GDRST is not in the gt power well, no need to check
294          * for fifo space for the write or forcewake the chip for
295          * the read
296          */
297         intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
298
299         /* Wait for the device to ack the reset requests */
300         err = __intel_wait_for_register_fw(uncore,
301                                            GEN6_GDRST, hw_domain_mask, 0,
302                                            500, 0,
303                                            NULL);
304         if (err)
305                 GT_TRACE(gt,
306                          "Wait for 0x%08x engines reset failed\n",
307                          hw_domain_mask);
308
309         return err;
310 }
311
312 static int gen6_reset_engines(struct intel_gt *gt,
313                               intel_engine_mask_t engine_mask,
314                               unsigned int retry)
315 {
316         static const u32 hw_engine_mask[] = {
317                 [RCS0]  = GEN6_GRDOM_RENDER,
318                 [BCS0]  = GEN6_GRDOM_BLT,
319                 [VCS0]  = GEN6_GRDOM_MEDIA,
320                 [VCS1]  = GEN8_GRDOM_MEDIA2,
321                 [VECS0] = GEN6_GRDOM_VECS,
322         };
323         struct intel_engine_cs *engine;
324         u32 hw_mask;
325
326         if (engine_mask == ALL_ENGINES) {
327                 hw_mask = GEN6_GRDOM_FULL;
328         } else {
329                 intel_engine_mask_t tmp;
330
331                 hw_mask = 0;
332                 for_each_engine_masked(engine, gt, engine_mask, tmp) {
333                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
334                         hw_mask |= hw_engine_mask[engine->id];
335                 }
336         }
337
338         return gen6_hw_domain_reset(gt, hw_mask);
339 }
340
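/*
 * The SFC units are shared between VCS/VECS pairs; map a VCS engine to the
 * VECS engine that uses the same SFC (VCS0/1 -> VECS0, VCS2/3 -> VECS1, ...),
 * as needed for the Wa_14010733141 handling below.
 */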
341 static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
342 {
343         int vecs_id;
344
345         GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
346
347         vecs_id = _VECS((engine->instance) / 2);
348
349         return engine->gt->engine[vecs_id];
350 }
351
352 struct sfc_lock_data {
353         i915_reg_t lock_reg;
354         i915_reg_t ack_reg;
355         i915_reg_t usage_reg;
356         u32 lock_bit;
357         u32 ack_bit;
358         u32 usage_bit;
359         u32 reset_bit;
360 };
361
362 static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
363                                      struct sfc_lock_data *sfc_lock)
364 {
365         switch (engine->class) {
366         default:
367                 MISSING_CASE(engine->class);
368                 fallthrough;
369         case VIDEO_DECODE_CLASS:
370                 sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine);
371                 sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
372
373                 sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
374                 sfc_lock->ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;
375
376                 sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine);
377                 sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
378                 sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
379
380                 break;
381         case VIDEO_ENHANCEMENT_CLASS:
382                 sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine);
383                 sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
384
385                 sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine);
386                 sfc_lock->ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;
387
388                 sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine);
389                 sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
390                 sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
391
392                 break;
393         }
394 }
395
396 static int gen11_lock_sfc(struct intel_engine_cs *engine,
397                           u32 *reset_mask,
398                           u32 *unlock_mask)
399 {
400         struct intel_uncore *uncore = engine->uncore;
401         u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
402         struct sfc_lock_data sfc_lock;
403         bool lock_obtained, lock_to_other = false;
404         int ret;
405
406         switch (engine->class) {
407         case VIDEO_DECODE_CLASS:
408                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
409                         return 0;
410
411                 fallthrough;
412         case VIDEO_ENHANCEMENT_CLASS:
413                 get_sfc_forced_lock_data(engine, &sfc_lock);
414
415                 break;
416         default:
417                 return 0;
418         }
419
420         if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
421                 struct intel_engine_cs *paired_vecs;
422
423                 if (engine->class != VIDEO_DECODE_CLASS ||
424                     GRAPHICS_VER(engine->i915) != 12)
425                         return 0;
426
427                 /*
428                  * Wa_14010733141
429                  *
430                  * If the VCS-MFX isn't using the SFC, we also need to check
431                  * whether VCS-HCP is using it.  If so, we need to issue a *VE*
432                  * forced lock on the VE engine that shares the same SFC.
433                  */
434                 if (!(intel_uncore_read_fw(uncore,
435                                            GEN12_HCP_SFC_LOCK_STATUS(engine)) &
436                       GEN12_HCP_SFC_USAGE_BIT))
437                         return 0;
438
439                 paired_vecs = find_sfc_paired_vecs_engine(engine);
440                 get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
441                 lock_to_other = true;
442                 *unlock_mask |= paired_vecs->mask;
443         } else {
444                 *unlock_mask |= engine->mask;
445         }
446
447         /*
448          * If the engine is using an SFC, tell the engine that a software reset
449          * is going to happen. The engine will then try to force lock the SFC.
450          * If SFC ends up being locked to the engine we want to reset, we have
451          * to reset it as well (we will unlock it once the reset sequence is
452          * completed).
453          */
454         rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
455
456         ret = __intel_wait_for_register_fw(uncore,
457                                            sfc_lock.ack_reg,
458                                            sfc_lock.ack_bit,
459                                            sfc_lock.ack_bit,
460                                            1000, 0, NULL);
461
462         /*
463          * Was the SFC released while we were trying to lock it?
464          *
465          * We should reset both the engine and the SFC if:
466          *  - We were locking the SFC to this engine and the lock succeeded
467          *       OR
468          *  - We were locking the SFC to a different engine (Wa_14010733141)
469          *    but the SFC was released before the lock was obtained.
470          *
471          * Otherwise we need only reset the engine by itself and we can
472          * leave the SFC alone.
473          */
474         lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
475                         sfc_lock.usage_bit) != 0;
476         if (lock_obtained == lock_to_other)
477                 return 0;
478
479         if (ret) {
480                 ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
481                 return ret;
482         }
483
484         *reset_mask |= sfc_lock.reset_bit;
485         return 0;
486 }
487
488 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
489 {
490         struct intel_uncore *uncore = engine->uncore;
491         u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
492         struct sfc_lock_data sfc_lock = {};
493
494         if (engine->class != VIDEO_DECODE_CLASS &&
495             engine->class != VIDEO_ENHANCEMENT_CLASS)
496                 return;
497
498         if (engine->class == VIDEO_DECODE_CLASS &&
499             (BIT(engine->instance) & vdbox_sfc_access) == 0)
500                 return;
501
502         get_sfc_forced_lock_data(engine, &sfc_lock);
503
504         rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
505 }
506
507 static int gen11_reset_engines(struct intel_gt *gt,
508                                intel_engine_mask_t engine_mask,
509                                unsigned int retry)
510 {
511         static const u32 hw_engine_mask[] = {
512                 [RCS0]  = GEN11_GRDOM_RENDER,
513                 [BCS0]  = GEN11_GRDOM_BLT,
514                 [VCS0]  = GEN11_GRDOM_MEDIA,
515                 [VCS1]  = GEN11_GRDOM_MEDIA2,
516                 [VCS2]  = GEN11_GRDOM_MEDIA3,
517                 [VCS3]  = GEN11_GRDOM_MEDIA4,
518                 [VECS0] = GEN11_GRDOM_VECS,
519                 [VECS1] = GEN11_GRDOM_VECS2,
520         };
521         struct intel_engine_cs *engine;
522         intel_engine_mask_t tmp;
523         u32 reset_mask, unlock_mask = 0;
524         int ret;
525
526         if (engine_mask == ALL_ENGINES) {
527                 reset_mask = GEN11_GRDOM_FULL;
528         } else {
529                 reset_mask = 0;
530                 for_each_engine_masked(engine, gt, engine_mask, tmp) {
531                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
532                         reset_mask |= hw_engine_mask[engine->id];
533                         ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
534                         if (ret)
535                                 goto sfc_unlock;
536                 }
537         }
538
539         ret = gen6_hw_domain_reset(gt, reset_mask);
540
541 sfc_unlock:
542         /*
543          * We unlock the SFC based on the lock status and not the result of
544          * gen11_lock_sfc to make sure that we clean up properly if
545          * something went wrong during the lock (e.g. the lock was acquired
546          * after the timeout expired).
547          *
548          * Due to Wa_14010733141, we may have locked an SFC to an engine that
549          * wasn't being reset.  So instead of calling gen11_unlock_sfc() on
550          * engine_mask, we call it on the mask of engines that our
551          * gen11_lock_sfc() calls told us actually had locks attempted.
552          */
553         for_each_engine_masked(engine, gt, unlock_mask, tmp)
554                 gen11_unlock_sfc(engine);
555
556         return ret;
557 }
558
559 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
560 {
561         struct intel_uncore *uncore = engine->uncore;
562         const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
563         u32 request, mask, ack;
564         int ret;
565
566         if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
567                 return -ETIMEDOUT;
568
569         ack = intel_uncore_read_fw(uncore, reg);
570         if (ack & RESET_CTL_CAT_ERROR) {
571                 /*
572                  * For catastrophic errors, ready-for-reset sequence
573                  * needs to be bypassed: HAS#396813
574                  */
575                 request = RESET_CTL_CAT_ERROR;
576                 mask = RESET_CTL_CAT_ERROR;
577
578                 /* Catastrophic errors need to be cleared by HW */
579                 ack = 0;
580         } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
581                 request = RESET_CTL_REQUEST_RESET;
582                 mask = RESET_CTL_READY_TO_RESET;
583                 ack = RESET_CTL_READY_TO_RESET;
584         } else {
585                 return 0;
586         }
587
588         intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
589         ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
590                                            700, 0, NULL);
591         if (ret)
592                 drm_err(&engine->i915->drm,
593                         "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
594                         engine->name, request,
595                         intel_uncore_read_fw(uncore, reg));
596
597         return ret;
598 }
599
600 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
601 {
602         intel_uncore_write_fw(engine->uncore,
603                               RING_RESET_CTL(engine->mmio_base),
604                               _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
605 }
606
607 static int gen8_reset_engines(struct intel_gt *gt,
608                               intel_engine_mask_t engine_mask,
609                               unsigned int retry)
610 {
611         struct intel_engine_cs *engine;
612         const bool reset_non_ready = retry >= 1;
613         intel_engine_mask_t tmp;
614         int ret;
615
616         for_each_engine_masked(engine, gt, engine_mask, tmp) {
617                 ret = gen8_engine_reset_prepare(engine);
618                 if (ret && !reset_non_ready)
619                         goto skip_reset;
620
621                 /*
622                  * If this is not the first failed attempt to prepare,
623                  * we decide to proceed anyway.
624                  *
625                  * By doing so we risk context corruption and with
626                  * some gens (kbl), possible system hang if reset
627                  * happens during active bb execution.
628                  *
629                  * We would rather risk context corruption than a failed
630                  * reset that leaves the driver/GPU wedged. The active bb
631                  * execution case should be covered by the stop_engines()
632                  * we perform before the reset.
633                  */
634         }
635
636         if (GRAPHICS_VER(gt->i915) >= 11)
637                 ret = gen11_reset_engines(gt, engine_mask, retry);
638         else
639                 ret = gen6_reset_engines(gt, engine_mask, retry);
640
641 skip_reset:
642         for_each_engine_masked(engine, gt, engine_mask, tmp)
643                 gen8_engine_reset_cancel(engine);
644
645         return ret;
646 }
647
648 static int mock_reset(struct intel_gt *gt,
649                       intel_engine_mask_t mask,
650                       unsigned int retry)
651 {
652         return 0;
653 }
654
655 typedef int (*reset_func)(struct intel_gt *,
656                           intel_engine_mask_t engine_mask,
657                           unsigned int retry);
658
659 static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
660 {
661         struct drm_i915_private *i915 = gt->i915;
662
663         if (is_mock_gt(gt))
664                 return mock_reset;
665         else if (GRAPHICS_VER(i915) >= 8)
666                 return gen8_reset_engines;
667         else if (GRAPHICS_VER(i915) >= 6)
668                 return gen6_reset_engines;
669         else if (GRAPHICS_VER(i915) >= 5)
670                 return ilk_do_reset;
671         else if (IS_G4X(i915))
672                 return g4x_do_reset;
673         else if (IS_G33(i915) || IS_PINEVIEW(i915))
674                 return g33_do_reset;
675         else if (GRAPHICS_VER(i915) >= 3)
676                 return i915_do_reset;
677         else
678                 return NULL;
679 }
680
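/*
 * Perform the platform-specific hardware reset of the engines selected by
 * engine_mask. Forcewake is held across the attempt so that a sleeping power
 * well cannot drop the reset request; a full reset (ALL_ENGINES) is retried
 * up to RESET_MAX_RETRIES times if the hardware does not acknowledge it.
 */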
681 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
682 {
683         const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
684         reset_func reset;
685         int ret = -ETIMEDOUT;
686         int retry;
687
688         reset = intel_get_gpu_reset(gt);
689         if (!reset)
690                 return -ENODEV;
691
692         /*
693          * If the power well sleeps during the reset, the reset
694          * request may be dropped and never completes (causing -EIO).
695          */
696         intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
697         for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
698                 GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
699                 preempt_disable();
700                 ret = reset(gt, engine_mask, retry);
701                 preempt_enable();
702         }
703         intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
704
705         return ret;
706 }
707
708 bool intel_has_gpu_reset(const struct intel_gt *gt)
709 {
710         if (!gt->i915->params.reset)
711                 return false;
712
713         return intel_get_gpu_reset(gt);
714 }
715
716 bool intel_has_reset_engine(const struct intel_gt *gt)
717 {
718         if (gt->i915->params.reset < 2)
719                 return false;
720
721         return INTEL_INFO(gt->i915)->has_reset_engine;
722 }
723
724 int intel_reset_guc(struct intel_gt *gt)
725 {
726         u32 guc_domain =
727                 GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
728         int ret;
729
730         GEM_BUG_ON(!HAS_GT_UC(gt->i915));
731
732         intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
733         ret = gen6_hw_domain_reset(gt, guc_domain);
734         intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
735
736         return ret;
737 }
738
739 /*
740  * Ensure the irq handler finishes and is not run again before the
741  * engine is reset.
742  */
743 static void reset_prepare_engine(struct intel_engine_cs *engine)
744 {
745         /*
746          * During the reset sequence, we must prevent the engine from
747          * entering RC6. As the context state is undefined until we restart
748          * the engine, if it does enter RC6 during the reset, the state
749          * written to the powercontext is undefined and so we may lose
750          * GPU state upon resume, i.e. fail to restart after a reset.
751          */
752         intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
753         if (engine->reset.prepare)
754                 engine->reset.prepare(engine);
755 }
756
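/*
 * Zap the CPU mmaps of every vma that holds a GGTT fence register and has an
 * outstanding userfault, so that userspace must take a fresh fault before
 * touching the mapping again.
 */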
757 static void revoke_mmaps(struct intel_gt *gt)
758 {
759         int i;
760
761         for (i = 0; i < gt->ggtt->num_fences; i++) {
762                 struct drm_vma_offset_node *node;
763                 struct i915_vma *vma;
764                 u64 vma_offset;
765
766                 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
767                 if (!vma)
768                         continue;
769
770                 if (!i915_vma_has_userfault(vma))
771                         continue;
772
773                 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
774
775                 if (!vma->mmo)
776                         continue;
777
778                 node = &vma->mmo->vma_node;
779                 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
780
781                 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
782                                     drm_vma_node_offset_addr(node) + vma_offset,
783                                     vma->size,
784                                     1);
785         }
786 }
787
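/*
 * Quiesce the GT before the reset: hold forcewake on every engine, call each
 * backend's reset.prepare() hook and notify the uC, recording which engines
 * were awake so that reset_finish() can release their power references.
 */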
788 static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
789 {
790         struct intel_engine_cs *engine;
791         intel_engine_mask_t awake = 0;
792         enum intel_engine_id id;
793
794         for_each_engine(engine, gt, id) {
795                 if (intel_engine_pm_get_if_awake(engine))
796                         awake |= engine->mask;
797                 reset_prepare_engine(engine);
798         }
799
800         intel_uc_reset_prepare(&gt->uc);
801
802         return awake;
803 }
804
805 static void gt_revoke(struct intel_gt *gt)
806 {
807         revoke_mmaps(gt);
808 }
809
810 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
811 {
812         struct intel_engine_cs *engine;
813         enum intel_engine_id id;
814         int err;
815
816         /*
817          * Everything depends on having the GTT running, so we need to start
818          * there.
819          */
820         err = i915_ggtt_enable_hw(gt->i915);
821         if (err)
822                 return err;
823
824         local_bh_disable();
825         for_each_engine(engine, gt, id)
826                 __intel_engine_reset(engine, stalled_mask & engine->mask);
827         local_bh_enable();
828
829         intel_ggtt_restore_fences(gt->ggtt);
830
831         return err;
832 }
833
834 static void reset_finish_engine(struct intel_engine_cs *engine)
835 {
836         if (engine->reset.finish)
837                 engine->reset.finish(engine);
838         intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
839
840         intel_engine_signal_breadcrumbs(engine);
841 }
842
843 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
844 {
845         struct intel_engine_cs *engine;
846         enum intel_engine_id id;
847
848         for_each_engine(engine, gt, id) {
849                 reset_finish_engine(engine);
850                 if (awake & engine->mask)
851                         intel_engine_pm_put(engine);
852         }
853 }
854
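/*
 * Replacement for engine->submit_request once the GT is wedged: the request
 * is immediately marked with -EIO and "submitted" only so that its
 * breadcrumbs can be signalled; nothing reaches the hardware.
 */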
855 static void nop_submit_request(struct i915_request *request)
856 {
857         RQ_TRACE(request, "-EIO\n");
858
859         request = i915_request_mark_eio(request);
860         if (request) {
861                 i915_request_submit(request);
862                 intel_engine_signal_breadcrumbs(request->engine);
863
864                 i915_request_put(request);
865         }
866 }
867
868 static void __intel_gt_set_wedged(struct intel_gt *gt)
869 {
870         struct intel_engine_cs *engine;
871         intel_engine_mask_t awake;
872         enum intel_engine_id id;
873
874         if (test_bit(I915_WEDGED, &gt->reset.flags))
875                 return;
876
877         GT_TRACE(gt, "start\n");
878
879         /*
880          * First, stop submission to hw, but do not yet complete requests by
881          * rolling the global seqno forward (since this would complete requests
882          * for which we haven't set the fence error to EIO yet).
883          */
884         awake = reset_prepare(gt);
885
886         /* Even if the GPU reset fails, it should still stop the engines */
887         if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
888                 __intel_gt_reset(gt, ALL_ENGINES);
889
890         for_each_engine(engine, gt, id)
891                 engine->submit_request = nop_submit_request;
892
893         /*
894          * Make sure no request can slip through without getting completed by
895          * either this call here to intel_engine_write_global_seqno, or the one
896          * in nop_submit_request.
897          */
898         synchronize_rcu_expedited();
899         set_bit(I915_WEDGED, &gt->reset.flags);
900
901         /* Mark all executing requests as skipped */
902         local_bh_disable();
903         for_each_engine(engine, gt, id)
904                 if (engine->reset.cancel)
905                         engine->reset.cancel(engine);
906         local_bh_enable();
907
908         reset_finish(gt, awake);
909
910         GT_TRACE(gt, "end\n");
911 }
912
913 void intel_gt_set_wedged(struct intel_gt *gt)
914 {
915         intel_wakeref_t wakeref;
916
917         if (test_bit(I915_WEDGED, &gt->reset.flags))
918                 return;
919
920         wakeref = intel_runtime_pm_get(gt->uncore->rpm);
921         mutex_lock(&gt->reset.mutex);
922
923         if (GEM_SHOW_DEBUG()) {
924                 struct drm_printer p = drm_debug_printer(__func__);
925                 struct intel_engine_cs *engine;
926                 enum intel_engine_id id;
927
928                 drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
929                 for_each_engine(engine, gt, id) {
930                         if (intel_engine_is_idle(engine))
931                                 continue;
932
933                         intel_engine_dump(engine, &p, "%s\n", engine->name);
934                 }
935         }
936
937         __intel_gt_set_wedged(gt);
938
939         mutex_unlock(&gt->reset.mutex);
940         intel_runtime_pm_put(gt->uncore->rpm, wakeref);
941 }
942
943 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
944 {
945         struct intel_gt_timelines *timelines = &gt->timelines;
946         struct intel_timeline *tl;
947         bool ok;
948
949         if (!test_bit(I915_WEDGED, &gt->reset.flags))
950                 return true;
951
952         /* Never fully initialised, recovery impossible */
953         if (intel_gt_has_unrecoverable_error(gt))
954                 return false;
955
956         GT_TRACE(gt, "start\n");
957
958         /*
959          * Before unwedging, make sure that all pending operations
960          * are flushed and errored out - we may have requests waiting upon
961          * third party fences. We marked all inflight requests as EIO, and
962          * every execbuf since has returned EIO; for consistency we want all
963          * the currently pending requests to also be marked as EIO, which
964          * is done inside our nop_submit_request - and so we must wait.
965          *
966          * No more can be submitted until we reset the wedged bit.
967          */
968         spin_lock(&timelines->lock);
969         list_for_each_entry(tl, &timelines->active_list, link) {
970                 struct dma_fence *fence;
971
972                 fence = i915_active_fence_get(&tl->last_request);
973                 if (!fence)
974                         continue;
975
976                 spin_unlock(&timelines->lock);
977
978                 /*
979                  * All internal dependencies (i915_requests) will have
980                  * been flushed by the set-wedge, but we may be stuck waiting
981                  * for external fences. These should all be capped to 10s
982                  * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
983                  * in the worst case.
984                  */
985                 dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
986                 dma_fence_put(fence);
987
988                 /* Restart iteration after dropping lock */
989                 spin_lock(&timelines->lock);
990                 tl = list_entry(&timelines->active_list, typeof(*tl), link);
991         }
992         spin_unlock(&timelines->lock);
993
994         /* We must reset pending GPU events before restoring our submission */
995         ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
996         if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
997                 ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
998         if (!ok) {
999                 /*
1000                  * Warn CI about the unrecoverable wedged condition.
1001                  * Time for a reboot.
1002                  */
1003                 add_taint_for_CI(gt->i915, TAINT_WARN);
1004                 return false;
1005         }
1006
1007         /*
1008          * Undo nop_submit_request. We prevent all new i915 requests from
1009          * being queued (by disallowing execbuf whilst wedged) so having
1010          * waited for all active requests above, we know the system is idle
1011          * and do not have to worry about a thread being inside
1012          * engine->submit_request() as we swap over. So unlike installing
1013          * the nop_submit_request on reset, we can do this from normal
1014          * context and do not require stop_machine().
1015          */
1016         intel_engines_reset_default_submission(gt);
1017
1018         GT_TRACE(gt, "end\n");
1019
1020         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
1021         clear_bit(I915_WEDGED, &gt->reset.flags);
1022
1023         return true;
1024 }
1025
1026 bool intel_gt_unset_wedged(struct intel_gt *gt)
1027 {
1028         bool result;
1029
1030         mutex_lock(&gt->reset.mutex);
1031         result = __intel_gt_unset_wedged(gt);
1032         mutex_unlock(&gt->reset.mutex);
1033
1034         return result;
1035 }
1036
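/*
 * Attempt the full-GT hardware reset, retrying a few times with an increasing
 * back-off before cleaning up per-engine state via gt_reset().
 */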
1037 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
1038 {
1039         int err, i;
1040
1041         err = __intel_gt_reset(gt, ALL_ENGINES);
1042         for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
1043                 msleep(10 * (i + 1));
1044                 err = __intel_gt_reset(gt, ALL_ENGINES);
1045         }
1046         if (err)
1047                 return err;
1048
1049         return gt_reset(gt, stalled_mask);
1050 }
1051
1052 static int resume(struct intel_gt *gt)
1053 {
1054         struct intel_engine_cs *engine;
1055         enum intel_engine_id id;
1056         int ret;
1057
1058         for_each_engine(engine, gt, id) {
1059                 ret = intel_engine_resume(engine);
1060                 if (ret)
1061                         return ret;
1062         }
1063
1064         return 0;
1065 }
1066
1067 /**
1068  * intel_gt_reset - reset chip after a hang
1069  * @gt: #intel_gt to reset
1070  * @stalled_mask: mask of the stalled engines with the guilty requests
1071  * @reason: user error message for why we are resetting
1072  *
1073  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
1074  * on failure.
1075  *
1076  * Procedure is fairly simple:
1077  *   - reset the chip using the reset reg
1078  *   - re-init context state
1079  *   - re-init hardware status page
1080  *   - re-init ring buffer
1081  *   - re-init interrupt state
1082  *   - re-init display
1083  */
1084 void intel_gt_reset(struct intel_gt *gt,
1085                     intel_engine_mask_t stalled_mask,
1086                     const char *reason)
1087 {
1088         intel_engine_mask_t awake;
1089         int ret;
1090
1091         GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
1092
1093         might_sleep();
1094         GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1095
1096         /*
1097          * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
1098          * critical section like gpu reset.
1099          */
1100         gt_revoke(gt);
1101
1102         mutex_lock(&gt->reset.mutex);
1103
1104         /* Clear any previous failed attempts at recovery. Time to try again. */
1105         if (!__intel_gt_unset_wedged(gt))
1106                 goto unlock;
1107
1108         if (reason)
1109                 drm_notice(&gt->i915->drm,
1110                            "Resetting chip for %s\n", reason);
1111         atomic_inc(&gt->i915->gpu_error.reset_count);
1112
1113         awake = reset_prepare(gt);
1114
1115         if (!intel_has_gpu_reset(gt)) {
1116                 if (gt->i915->params.reset)
1117                         drm_err(&gt->i915->drm, "GPU reset not supported\n");
1118                 else
1119                         drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
1120                 goto error;
1121         }
1122
1123         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1124                 intel_runtime_pm_disable_interrupts(gt->i915);
1125
1126         if (do_reset(gt, stalled_mask)) {
1127                 drm_err(&gt->i915->drm, "Failed to reset chip\n");
1128                 goto taint;
1129         }
1130
1131         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1132                 intel_runtime_pm_enable_interrupts(gt->i915);
1133
1134         intel_overlay_reset(gt->i915);
1135
1136         /*
1137          * Next we need to restore the context, but we don't use those
1138          * yet either...
1139          *
1140          * Ring buffer needs to be re-initialized in the KMS case, or if X
1141          * was running at the time of the reset (i.e. we weren't VT
1142          * switched away).
1143          */
1144         ret = intel_gt_init_hw(gt);
1145         if (ret) {
1146                 drm_err(&gt->i915->drm,
1147                         "Failed to initialise HW following reset (%d)\n",
1148                         ret);
1149                 goto taint;
1150         }
1151
1152         ret = resume(gt);
1153         if (ret)
1154                 goto taint;
1155
1156 finish:
1157         reset_finish(gt, awake);
1158 unlock:
1159         mutex_unlock(&gt->reset.mutex);
1160         return;
1161
1162 taint:
1163         /*
1164          * History tells us that if we cannot reset the GPU now, we
1165          * never will. This then impacts everything that is run
1166          * subsequently. On failing the reset, we mark the driver
1167          * as wedged, preventing further execution on the GPU.
1168          * We also want to go one step further and add a taint to the
1169          * kernel so that any subsequent faults can be traced back to
1170          * this failure. This is important for CI, where if the
1171          * GPU/driver fails we would like to reboot and restart testing
1172          * rather than continue on into oblivion. For everyone else,
1173          * the system should still plod along, but they have been warned!
1174          */
1175         add_taint_for_CI(gt->i915, TAINT_WARN);
1176 error:
1177         __intel_gt_set_wedged(gt);
1178         goto finish;
1179 }
1180
1181 static int intel_gt_reset_engine(struct intel_engine_cs *engine)
1182 {
1183         return __intel_gt_reset(engine->gt, engine->mask);
1184 }
1185
1186 int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
1187 {
1188         struct intel_gt *gt = engine->gt;
1189         int ret;
1190
1191         ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
1192         GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1193
1194         if (!intel_engine_pm_get_if_awake(engine))
1195                 return 0;
1196
1197         reset_prepare_engine(engine);
1198
1199         if (msg)
1200                 drm_notice(&engine->i915->drm,
1201                            "Resetting %s for %s\n", engine->name, msg);
1202         atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
1203
1204         if (intel_engine_uses_guc(engine))
1205                 ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
1206         else
1207                 ret = intel_gt_reset_engine(engine);
1208         if (ret) {
1209                 /* If we fail here, we expect to fallback to a global reset */
1210                 ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
1211                 goto out;
1212         }
1213
1214         /*
1215          * The request that caused the hang is stuck on elsp; we know the
1216          * active request and can drop it, adjusting the head to skip the
1217          * offending request so that the remaining queued requests resume.
1218          */
1219         __intel_engine_reset(engine, true);
1220
1221         /*
1222          * The engine and its registers (and workarounds in case of render)
1223          * have been reset to their default values. Follow the init_ring
1224          * process to program RING_MODE, HWSP and re-enable submission.
1225          */
1226         ret = intel_engine_resume(engine);
1227
1228 out:
1229         intel_engine_cancel_stop_cs(engine);
1230         reset_finish_engine(engine);
1231         intel_engine_pm_put_async(engine);
1232         return ret;
1233 }
1234
1235 /**
1236  * intel_engine_reset - reset GPU engine to recover from a hang
1237  * @engine: engine to reset
1238  * @msg: reason for GPU reset; or NULL for no drm_notice()
1239  *
1240  * Reset a specific GPU engine. Useful if a hang is detected.
1241  * Returns zero on successful reset or otherwise an error code.
1242  *
1243  * Procedure is:
1244  *  - identify the request that caused the hang and drop it
1245  *  - reset engine (which will force the engine to idle)
1246  *  - re-init/configure engine
1247  */
1248 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1249 {
1250         int err;
1251
1252         local_bh_disable();
1253         err = __intel_engine_reset_bh(engine, msg);
1254         local_bh_enable();
1255
1256         return err;
1257 }
1258
1259 static void intel_gt_reset_global(struct intel_gt *gt,
1260                                   u32 engine_mask,
1261                                   const char *reason)
1262 {
1263         struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1264         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1265         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1266         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1267         struct intel_wedge_me w;
1268
1269         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1270
1271         GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
1272         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1273
1274         /* Use a watchdog to ensure that our reset completes */
1275         intel_wedge_on_timeout(&w, gt, 5 * HZ) {
1276                 intel_display_prepare_reset(gt->i915);
1277
1278                 /* Flush everyone using a resource about to be clobbered */
1279                 synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1280
1281                 intel_gt_reset(gt, engine_mask, reason);
1282
1283                 intel_display_finish_reset(gt->i915);
1284         }
1285
1286         if (!test_bit(I915_WEDGED, &gt->reset.flags))
1287                 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1288 }
1289
1290 /**
1291  * intel_gt_handle_error - handle a gpu error
1292  * @gt: the intel_gt
1293  * @engine_mask: mask representing engines that are hung
1294  * @flags: control flags
1295  * @fmt: Error message format string
1296  *
1297  * Do some basic checking of register state at error time and
1298  * dump it to the syslog.  Also call i915_capture_error_state() to make
1299  * sure we get a record and make it available in debugfs.  Fire a uevent
1300  * so userspace knows something bad happened (should trigger collection
1301  * of a ring dump etc.).
1302  */
1303 void intel_gt_handle_error(struct intel_gt *gt,
1304                            intel_engine_mask_t engine_mask,
1305                            unsigned long flags,
1306                            const char *fmt, ...)
1307 {
1308         struct intel_engine_cs *engine;
1309         intel_wakeref_t wakeref;
1310         intel_engine_mask_t tmp;
1311         char error_msg[80];
1312         char *msg = NULL;
1313
1314         if (fmt) {
1315                 va_list args;
1316
1317                 va_start(args, fmt);
1318                 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1319                 va_end(args);
1320
1321                 msg = error_msg;
1322         }
1323
1324         /*
1325          * In most cases it's guaranteed that we get here with an RPM
1326          * reference held, for example because there is a pending GPU
1327          * request that won't finish until the reset is done. This
1328          * isn't the case at least when we get here by doing a
1329          * simulated reset via debugfs, so get an RPM reference.
1330          */
1331         wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1332
1333         engine_mask &= gt->info.engine_mask;
1334
1335         if (flags & I915_ERROR_CAPTURE) {
1336                 i915_capture_error_state(gt, engine_mask);
1337                 intel_gt_clear_error_registers(gt, engine_mask);
1338         }
1339
1340         /*
1341          * Try engine reset when available. We fall back to full reset if
1342          * single reset fails.
1343          */
1344         if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1345                 local_bh_disable();
1346                 for_each_engine_masked(engine, gt, engine_mask, tmp) {
1347                         BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1348                         if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1349                                              &gt->reset.flags))
1350                                 continue;
1351
1352                         if (__intel_engine_reset_bh(engine, msg) == 0)
1353                                 engine_mask &= ~engine->mask;
1354
1355                         clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1356                                               &gt->reset.flags);
1357                 }
1358                 local_bh_enable();
1359         }
1360
1361         if (!engine_mask)
1362                 goto out;
1363
1364         /* Full reset needs the mutex, stop any other user trying to do so. */
1365         if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1366                 wait_event(gt->reset.queue,
1367                            !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1368                 goto out; /* piggy-back on the other reset */
1369         }
1370
1371         /* Make sure intel_gt_reset_trylock() sees the I915_RESET_BACKOFF */
1372         synchronize_rcu_expedited();
1373
1374         /* Prevent any other reset-engine attempt. */
1375         for_each_engine(engine, gt, tmp) {
1376                 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1377                                         &gt->reset.flags))
1378                         wait_on_bit(&gt->reset.flags,
1379                                     I915_RESET_ENGINE + engine->id,
1380                                     TASK_UNINTERRUPTIBLE);
1381         }
1382
1383         intel_gt_reset_global(gt, engine_mask, msg);
1384
1385         for_each_engine(engine, gt, tmp)
1386                 clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1387                                  &gt->reset.flags);
1388         clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1389         smp_mb__after_atomic();
1390         wake_up_all(&gt->reset.queue);
1391
1392 out:
1393         intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1394 }
1395
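/*
 * Enter the reset-backoff SRCU read-side critical section, first waiting for
 * any reset in progress (I915_RESET_BACKOFF) to complete. The cookie stored
 * in *srcu must later be passed to intel_gt_reset_unlock().
 */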
1396 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1397 {
1398         might_lock(&gt->reset.backoff_srcu);
1399         might_sleep();
1400
1401         rcu_read_lock();
1402         while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1403                 rcu_read_unlock();
1404
1405                 if (wait_event_interruptible(gt->reset.queue,
1406                                              !test_bit(I915_RESET_BACKOFF,
1407                                                        &gt->reset.flags)))
1408                         return -EINTR;
1409
1410                 rcu_read_lock();
1411         }
1412         *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1413         rcu_read_unlock();
1414
1415         return 0;
1416 }
1417
1418 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1419 __releases(&gt->reset.backoff_srcu)
1420 {
1421         srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1422 }
1423
1424 int intel_gt_terminally_wedged(struct intel_gt *gt)
1425 {
1426         might_sleep();
1427
1428         if (!intel_gt_is_wedged(gt))
1429                 return 0;
1430
1431         if (intel_gt_has_unrecoverable_error(gt))
1432                 return -EIO;
1433
1434         /* Reset still in progress? Maybe we will recover? */
1435         if (wait_event_interruptible(gt->reset.queue,
1436                                      !test_bit(I915_RESET_BACKOFF,
1437                                                &gt->reset.flags)))
1438                 return -EINTR;
1439
1440         return intel_gt_is_wedged(gt) ? -EIO : 0;
1441 }
1442
1443 void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1444 {
1445         BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1446                      I915_WEDGED_ON_INIT);
1447         intel_gt_set_wedged(gt);
1448         set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1449
1450         /* Wedged on init is non-recoverable */
1451         add_taint_for_CI(gt->i915, TAINT_WARN);
1452 }
1453
1454 void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1455 {
1456         intel_gt_set_wedged(gt);
1457         set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1458         intel_gt_retire_requests(gt); /* cleanup any wedged requests */
1459 }
1460
1461 void intel_gt_init_reset(struct intel_gt *gt)
1462 {
1463         init_waitqueue_head(&gt->reset.queue);
1464         mutex_init(&gt->reset.mutex);
1465         init_srcu_struct(&gt->reset.backoff_srcu);
1466
1467         /*
1468          * While undesirable to wait inside the shrinker, complain anyway.
1469          *
1470          * If we have to wait during shrinking, we guarantee forward progress
1471          * by forcing the reset. Therefore during the reset we must not
1472          * re-enter the shrinker. By declaring that we take the reset mutex
1473          * within the shrinker, we forbid ourselves from performing any
1474          * fs-reclaim or taking related locks during reset.
1475          */
1476         i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
1477
1478         /* no GPU until we are ready! */
1479         __set_bit(I915_WEDGED, &gt->reset.flags);
1480 }
1481
1482 void intel_gt_fini_reset(struct intel_gt *gt)
1483 {
1484         cleanup_srcu_struct(&gt->reset.backoff_srcu);
1485 }
1486
1487 static void intel_wedge_me(struct work_struct *work)
1488 {
1489         struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1490
1491         drm_err(&w->gt->i915->drm,
1492                 "%s timed out, cancelling all in-flight rendering.\n",
1493                 w->name);
1494         intel_gt_set_wedged(w->gt);
1495 }
1496
1497 void __intel_init_wedge(struct intel_wedge_me *w,
1498                         struct intel_gt *gt,
1499                         long timeout,
1500                         const char *name)
1501 {
1502         w->gt = gt;
1503         w->name = name;
1504
1505         INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1506         schedule_delayed_work(&w->work, timeout);
1507 }
1508
1509 void __intel_fini_wedge(struct intel_wedge_me *w)
1510 {
1511         cancel_delayed_work_sync(&w->work);
1512         destroy_delayed_work_on_stack(&w->work);
1513         w->gt = NULL;
1514 }
1515
1516 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1517 #include "selftest_reset.c"
1518 #include "selftest_hangcheck.c"
1519 #endif