// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
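
/*
 * Emit the audit entry record using the argument-register layout of
 * whichever syscall ABI (64-bit or ia32) is in use.
 */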
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));
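
	/* Snapshot the entry-work flags once for the checks below. */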
	work = READ_ONCE(ti->flags);

	if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = tracehook_report_syscall_entry(regs);
		if (ret || (work & _TIF_SYSCALL_EMU))
			return -1L;
	}

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}
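
		/* Let the seccomp filter(s) pass judgment on the syscall. */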
		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);
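
	/*
	 * ret is zero here (any skip request returned early above), so
	 * this hands back whatever syscall nr is now in orig_ax, which
	 * ptrace may have rewritten.
	 */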
	return ret ?: regs->orig_ax;
}
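
/*
 * Work flags that must all be clear, with IRQs off, before we can
 * actually return to user mode.
 */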
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;
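
	/* Verify no syscall left a KERNEL_DS address limit behind. */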
	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	/* Reload ti->flags; we may have rescheduled above. */
	cached_flags = READ_ONCE(ti->flags);
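
	/*
	 * Reload the user FPU state into registers if it is not already
	 * there; user mode must always run with its own FPU context.
	 */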
	fpregs_assert_state_consistent();
	if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
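
	/* Flush CPU buffers (VERW) where needed to mitigate MDS data leaks. */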
	mds_user_clear_cpu_buffers();
}
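
/*
 * Flags that require one-time "slow path" work when a syscall exits.
 */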
#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/* With CONFIG_DEBUG_RSEQ, catch syscalls issued inside an rseq critical section. */
	rseq_syscall(regs);

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
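/* 64-bit syscall dispatch, called from the entry_SYSCALL_64 asm path. */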
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
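		/* Clamp nr under speculation to block Spectre-v1 table indexing. */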
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;
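
	/*
	 * Flag the task as being in a compat syscall so that
	 * in_compat_syscall() and the syscall-restart logic do the
	 * 32-bit thing.
	 */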
#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */
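
	/* The vDSO's int $0x80 landing pad doubles as our return target. */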
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			    (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif