/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
#include "intel_rps.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}
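/*
 * Per-engine command-streamer interrupt handling: latch any CS error,
 * note context-switch and user interrupts, and kick breadcrumb signaling
 * and the execlists tasklet as required.
 */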
static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
		u32 eir;

		eir = ENGINE_READ(engine, RING_EIR);
		ENGINE_TRACE(engine, "CS error: %x\n", eir);

		/* Disable the error interrupt until after the reset */
		ENGINE_WRITE(engine, RING_EMR, ~0u);
		ENGINE_WRITE(engine, RING_EIR, eir);
		WRITE_ONCE(engine->execlists.error_interrupt, eir);
		tasklet = true;
	}

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_signal_breadcrumbs(engine);
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}
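/*
 * Gen11 uses a banked, indirect identity scheme: write the bit of interest
 * to the bank's selector register, spin on the shared identity register
 * until the hardware flags the payload valid (bounded to roughly 100us
 * below), then ack it by writing GEN11_INTR_DATA_VALID back.
 */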
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}
static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}
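/*
 * The identity dword returned by gen11_gt_engine_identity() encodes the
 * engine class, engine instance and a 16-bit interrupt payload; decode it
 * and dispatch to the engine handler or the "other" (GuC / GT PM) handler.
 */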
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}
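/*
 * Walk every pending bit in this bank's GT_INTR_DW, resolve each one via
 * the selector/identity handshake and dispatch it, and only then clear the
 * bank so the shared identity registers are not released early.
 */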
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}
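/*
 * Clear a single pending bit in a GT_INTR_DW bank. The selector/shared
 * identity registers must be serviced first (see the BSpec note below),
 * otherwise the DW bit cannot be cleared. Returns true if the bit was
 * pending and has been reset.
 */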
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}
void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Restore masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
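/*
 * Note on the masks below: dmask carries the interrupt bits in both 16-bit
 * halves of a register, smask only in the upper half; the register names
 * suggest each enable/mask register is shared by two engines, one per half.
 */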
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}
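/*
 * Gen8 groups its GT interrupts into four 32-bit IIR banks, two sources per
 * bank (one per 16-bit half); read, dispatch and ack each bank that the
 * master control register reports as pending.
 */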
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
				       iir >> GEN8_RCS_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
				       iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
				       iir >> GEN8_VCS0_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
				       iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
				       iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}
void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
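/*
 * GTIMR is a mask register: a set bit disables the corresponding interrupt.
 * The cached gt_imr is therefore written with the enabled bits cleared and
 * everything else in interrupt_mask left masked.
 */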
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}
void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}
void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}
void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}
void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(gt->i915, 5))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (INTEL_GEN(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt->i915, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}