/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  CPU hotplug support - ashok.raj@intel.com
 *
 *  This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04lx DS: %04x ES: %04x CR0: %016lx\n",
	       regs->cs, ds, es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n",
	       cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

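/*
 * SHOW_REGS_SHORT stops after the general-purpose registers above;
 * SHOW_REGS_USER additionally reports the user FS/GS base MSRs; the
 * remaining mode (SHOW_REGS_ALL) also dumps the segment, control and
 * debug registers read directly off the CPU.
 */
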
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

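/*
 * The contract between save_base_legacy() and load_seg_legacy() below:
 * whenever the saved selector is zero, thread.fsbase/gsbase must hold the
 * base userspace should observe, and on !X86_BUG_NULL_SEG CPUs a saved
 * base of zero must mean the hardware base really is zero.
 */
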
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and current->thread.gsbase
 * may not match the corresponding CPU registers (see save_base_legacy()). KVM
 * wants an efficient way to save and restore FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif

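/*
 * Illustrative sketch only (the real caller lives in KVM, not here): a
 * hypervisor would call save_fsgs_for_kvm() before clobbering the user
 * FS/GS state, e.g.
 *
 *	save_fsgs_for_kvm();
 *	wrmsrl(MSR_GS_BASE, guest_gsbase);	// guest_gsbase: hypothetical
 *
 * so that current->thread.{fs,gs}base are trustworthy when the user
 * values are restored later.
 */
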
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel hardware.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}

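/*
 * x86_fsgsbase_load() above is the context-switch consumer of the state
 * recorded by save_fsgs(): it runs from __switch_to() below, after the
 * next task's GDT TLS entries have been loaded.
 */
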
static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;
		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU. This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}

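/*
 * Worked example: a selector is (index << 3) | TI | RPL, with TI in bit 2
 * (SEGMENT_TI_MASK) and the RPL in bits 1:0. A user TLS selector for slot
 * n is therefore ((GDT_ENTRY_TLS_MIN + n) << 3) | 3: the TI bit is clear,
 * so idx lands in the GDT branch above; an LDT selector has TI set and
 * idx indexes ldt->entries instead.
 */
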
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}

void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}

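/*
 * The *_write_task() helpers above only update the saved thread state,
 * hence the WARN_ON_ONCE(task == current): for the current task the base
 * must also be written to the CPU, which is what do_arch_prctl_64() below
 * does. Remote writes like these normally come from ptrace acting on a
 * stopped task.
 */
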
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

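/*
 * For reference (the code lives in entry_64.S, not here): ret_from_fork
 * tests the saved %rbx. A nonzero value marks a kernel thread, and the
 * function stashed in frame->bx is called with frame->r12 as its argument;
 * a zero %rbx (set above for user tasks) sends the child out through the
 * user pt_regs copied from the parent.
 */
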
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

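/*
 * start_thread() above and compat_start_thread() below are called by the
 * binfmt loaders at execve() time to point the first user-mode return at
 * the new program's entry point and stack.
 */
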
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;

		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status. The x86 mmap() code relies on
	 * the syscall bitness so set x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

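/*
 * Summary of the three personalities: TIF_ADDR32 selects the 32-bit
 * address-space limit, while TIF_IA32/TIF_X32 and TS_COMPAT select the
 * syscall ABI. x32 combines 32-bit pointers with the 64-bit syscall ABI,
 * which is why __set_personality_x32() clears TS_COMPAT above.
 */
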
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);
			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;
		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs.
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);
			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

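/*
 * Userspace view (illustrative sketch, not part of this file): a 64-bit
 * process drives the FS/GS cases above via the arch_prctl(2) syscall:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, tls_block);  // tls_block: caller's buffer
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);      // base now equals tls_block
 *
 * The compat entry point above only reaches do_arch_prctl_common(), so
 * 32-bit tasks manage their TLS through set_thread_area() instead.
 */
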
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}