/*
 * Suspend and hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/suspend.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/i387.h>	/* kernel_fpu_begin()/kernel_fpu_end() */
#include <asm/xcr.h>	/* xsetbv(), XCR_XFEATURE_ENABLED_MASK */
#include <asm/suspend.h>
static void fix_processor_context(void);
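/*
 * A single instance suffices here: the hibernation core takes the nonboot
 * CPUs offline before an image is created, so only the boot CPU's state
 * needs to be saved and restored.
 */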
struct saved_context saved_context;
/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the registers' contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operation of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by
 *	this function.  In other words, if kernel A is hibernated and a
 *	different kernel B is used for loading the hibernation image into
 *	memory, then kernel A's __save_processor_state() must save all
 *	registers needed by kernel A, so that it can operate correctly after
 *	the resume regardless of what kernel B does in the meantime.
 */
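/*
 * Rough calling sequence (a sketch; the exact call chain lives in the
 * hibernation core under kernel/power/): save_processor_state() runs just
 * before the snapshot image is created, and restore_processor_state() runs
 * right after the image has been put back in place on resume.
 */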
static void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
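	/*
	 * Only the 16-bit selectors can be read out of the segment registers;
	 * on x86-64 the FS/GS base addresses are 64 bits wide and live in
	 * MSRs, so they are saved separately below.
	 */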
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
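	/*
	 * Save the fixed-range MTRRs too, so that the memory-type setup can
	 * be put back even if the boot kernel reprogrammed it.
	 */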
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}
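/*
 * Arch hook called by the hibernation core; interrupts are expected to be
 * off by the time this runs.
 */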
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}
/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt: structure to load the registers' contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
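	/*
	 * Restore the control registers first, in roughly the reverse of the
	 * save order.  EFER goes before CR3 so that, for instance, the NX
	 * setting is back in effect before page tables that may use NX bits
	 * are reinstated.
	 */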
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * Now restore the descriptor tables to their proper values;
	 * ltr is done in fix_processor_context().
	 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
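	/*
	 * Loading a selector also reloads the register's hidden base, so the
	 * 64-bit FS/GS bases have to be rewritten via their MSRs afterwards.
	 * %gs is loaded through load_gs_index(), which wraps the selector
	 * load in swapgs so the kernel's active GS base is not clobbered.
	 */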
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	/*
	 * restore XCR0 for xsave capable CPUs; pcntxt_mask is the xstate
	 * feature mask computed at boot
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

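	/*
	 * TR and the LDT cannot be brought back by merely reloading the saved
	 * GDT image; fix_processor_context() reloads them explicitly.
	 */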
	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
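/*
 * fix_processor_context - reload the pieces of CPU context that cannot be
 * restored simply by writing back saved values: TR (via ltr), the LDT, the
 * SYSCALL MSRs and, if they were in use, the debug registers.
 */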
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; should not be necessary. But... This
	 * is necessary, because 386 hardware has concept of busy TSS or some
	 * similar stupidity.
	 */
	set_tss_desc(cpu, t);

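	/*
	 * ltr faults on a descriptor whose type marks the TSS busy, and the
	 * saved GDT entry is busy because TR was loaded before suspend; force
	 * the type back to 9 ("available 64-bit TSS") so that load_TR_desc()
	 * below can reload TR.
	 */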
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers; DR7 is nonzero only if
	 * breakpoints were armed before suspend
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}