/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
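
/*
 * Scratch slot used by the 64-bit SYSCALL entry path to stash the user
 * RSP while it switches to the kernel stack (see entry_64.S).
 */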
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
		(void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
		regs->sp, regs->flags);
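	/*
	 * orig_ax holds the syscall number on syscall entry and -1 when
	 * the kernel was entered some other way (interrupt, exception),
	 * so ORIG_RAX only carries real information in the former case.
	 */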
	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));
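
	/*
	 * The segment registers hold only the selectors; the 64-bit FS/GS
	 * bases live in MSRs.  MSR_KERNEL_GS_BASE is the currently inactive
	 * GS base that SWAPGS swaps in on return to user mode; it is printed
	 * below as knlGS.
	 */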
	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;
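
	/*
	 * The child inherits the parent's segment state.  A nonzero
	 * selector means the base will be (re)loaded from the descriptor
	 * tables, so the cached base is zeroed; only with a zero selector
	 * is the saved fsbase/gsbase authoritative (this mirrors the
	 * invariant documented in __switch_to() below).
	 */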
	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
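
/*
 * x32 tasks execute in 64-bit mode (hence __USER_CS) but with 32-bit
 * pointers; only plain 32-bit compat tasks get __USER32_CS.
 */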
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here.  Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned prev_fsindex, prev_gsindex;
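
	/*
	 * switch_fpu_prepare() starts the FPU context switch for the
	 * outgoing task; the matching switch_fpu_finish() below activates
	 * the incoming task's FPU state once segments and TLS have been
	 * switched.
	 */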
	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, prev_fsindex);
	savesegment(gs, prev_gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must be
	 * done before fpu__restore(), so the TS bit is up to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl.  The bases
	 * don't necessarily match the selectors, as user code can do
	 * any number of things to cause them to be inconsistent.
	 *
	 * We don't promise to preserve the bases if the selectors are
	 * nonzero.  We also don't promise to preserve the base if the
	 * selector is zero and the base doesn't match whatever was
	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
	 * FSGSBASE instructions are enabled, we'll need to offer
	 * stronger guarantees.)
	 *
	 * As an invariant,
	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
	 * impossible.
	 */
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 */
				loadsegment(fs, __USER_DS);
				loadsegment(fs, 0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_FS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_fsindex)
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;

	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 *
				 * This contains a pointless SWAPGS pair.
				 * Fixing it would involve an explicit check
				 * for Xen or a new pvop.
				 */
				load_gs_index(__USER_DS);
				load_gs_index(0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_GS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_gsindex)
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1.  This changes current_thread_info(). */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;

		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup.  Should have two bits.
	   But 64-bit processes have always behaved this way,
	   so it's not too bad.  The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* in_compat_syscall() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current->thread.status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current->thread.status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
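
/*
 * Checkpoint/restore helper for the ARCH_MAP_VDSO_* arch_prctl calls:
 * map a given vDSO image at a caller-chosen address.  On success the
 * image size is returned, presumably so the caller (e.g. CRIU) knows
 * how much address space the mapping consumed.
 */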
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
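
/*
 * Illustrative userspace usage (hypothetical snippet, not part of this
 * file).  arch_prctl() has no dedicated libc wrapper on many systems,
 * so it is typically reached through syscall(2):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);	// read FS base
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, base);	// set GS base
 */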

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}