/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx            0x1000
#define INTERRUPT_INST_TLB_MISS_8xx       0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx       0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx      0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx      0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx     0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx     0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603       0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603  0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
		/* Can avoid a test-and-clear because NMIs do not call this */
		clear_thread_local_flags(_TLF_NAPPING);
		regs->nip = (unsigned long)power4_idle_nap_return;
	}
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
		mtspr(SPRN_DBSR, -1);
		mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
	}
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	} else {
		kuap_save_and_lock(regs);
	}
#endif

#ifdef CONFIG_PPC64
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != INTERRUPT_PROGRAM)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

	booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	/*
	 * Adjust at exit so the main handler sees the true NIA. This must
	 * come before irq_exit() because irq_exit can enable interrupts, and
	 * if another interrupt is taken before nap_adjust_return has run
	 * here, then that interrupt would return directly to idle nap return.
	 */
	nap_adjust_return(regs);

	irq_exit();
	interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
	u8 irq_soft_mask;
	u8 irq_happened;
	u8 ftrace_enabled;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (TRAP(regs) == INTERRUPT_DECREMENTER)
			return false;
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}
	if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
		if (TRAP(regs) == INTERRUPT_PERFMON)
			return false;
	}

	return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
	    regs->nip < (unsigned long)__end_interrupts) {
		// Kernel code running below __end_interrupts is
		// implicitly soft-masked.
		regs->softe = IRQS_ALL_DISABLED;
	}

	/* Don't do any per-CPU operations until interrupt state is fixed */

	if (nmi_disables_ftrace(regs)) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
	    !firmware_has_feature(FW_FEATURE_LPAR) ||
	    radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

	/*
	 * nmi does not call nap_adjust_return because nmi should not create
	 * new work to do (must use irq_work for that).
	 */

#ifdef CONFIG_PPC64
	if (nmi_disables_ftrace(regs))
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either; although it may be possible in a pinch, the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func(regs);						\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
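
/*
 * Usage sketch, for illustration only ("do_example_raw" is a hypothetical
 * name, not a handler in this tree). The braces appended at the use site
 * become the body of ____do_example_raw(), and the returned long is handed
 * back to the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *	{
 *		return 0;
 *	}
 */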

/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func(regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
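
/*
 * Usage sketch, for illustration only (hypothetical handler and helper
 * names). The generated wrapper brackets the appended body with
 * interrupt_enter_prepare()/interrupt_exit_prepare():
 *
 *	DEFINE_INTERRUPT_HANDLER(do_example_fault)
 *	{
 *		do_example_handling(regs);	// hypothetical helper
 *	}
 */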

/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func(regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
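
/*
 * Usage sketch, for illustration only (hypothetical name). A _RET handler
 * body behaves like the synchronous one, but its long return value is
 * passed back through the wrapper to the asm caller:
 *
 *	DEFINE_INTERRUPT_HANDLER_RET(do_example_ret)
 *	{
 *		return 0;
 *	}
 */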

/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func(regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
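
/*
 * Usage sketch, for illustration only (hypothetical names). Async handlers
 * additionally run inside irq_enter()/irq_exit(), via
 * interrupt_async_enter_prepare()/interrupt_async_exit_prepare():
 *
 *	DEFINE_INTERRUPT_HANDLER_ASYNC(do_example_irq)
 *	{
 *		do_example_handling(regs);	// hypothetical helper
 *	}
 */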

/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as a function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func(regs);						\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
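
/*
 * Usage sketch, for illustration only (hypothetical name). The NMI wrapper
 * saves and restores the PACA irq state around the body via
 * interrupt_nmi_enter_prepare()/interrupt_nmi_exit_prepare():
 *
 *	DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *	{
 *		return 0;
 *	}
 */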

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */