// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>
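
/*
 * Whether the exit back to firmware should be made with an HVC or SMC call;
 * set from the probed conduit in sdei_arch_get_entry_point() below.
 */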
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * SDEI stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_SDEI_NORMAL;
	}

	return true;
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_SDEI_CRITICAL;
	}

	return true;
}
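
/*
 * Used by the stack unwinder to tell whether sp points into one of this
 * CPU's SDEI stacks, and if so, which one and what its bounds are.
 */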
bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (init_sdei_stacks())
			return 0;
	}

	sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
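
	/*
	 * With KPTI, the event may be taken while the kernel is unmapped, so
	 * the entry point registered with firmware must live in the
	 * always-mapped trampoline text: hand back the trampoline alias of
	 * the asm entry point rather than its regular kernel address.
	 */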
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	}
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	return (unsigned long)__sdei_asm_handler;
}

/*
 * __sdei_handler() returns one of:
 *
 * SDEI_EV_HANDLED -  success, return to the interrupted context.
 * SDEI_EV_FAILED  -  failure, return this error code to firmware.
 * virtual-address -  success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);
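
	/*
	 * The asm entry code passed its arguments to us in x0..x3, clobbering
	 * the interrupted context's values; with the kernel unmapped at EL0
	 * the entry trampoline clobbers x4 as well. Ask firmware for the
	 * original values so that regs describes the complete context.
	 */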
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, set PAN. UAO will be
	 * cleared by sdei_event_handler()'s set_fs(USER_DS) call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;
	else
		return vbar + 0x480;
}

asmlinkage __kprobes notrace unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;
	bool do_nmi_exit = false;

	/*
	 * nmi_enter() deals with printk() re-entrance and use of RCU when
	 * RCU believed this CPU was idle. Because critical events can
	 * interrupt normal events, we may already be in_nmi().
	 */
	if (!in_nmi()) {
		nmi_enter();
		do_nmi_exit = true;
	}

	ret = _sdei_handler(regs, arg);

	if (do_nmi_exit)
		nmi_exit();

	return ret;
}