/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/suspend.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#ifdef CONFIG_X86_32
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else	/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif
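/*
 * Note: the general-purpose registers are deliberately absent here.  On
 * 32-bit, the low-level swsusp_arch_suspend() assembly spills them into
 * the saved_context_e* variables above, which is why those need external
 * linkage; the 64-bit assembly writes into saved_context itself, which
 * is why the structure is not static in that configuration.
 */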
/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the
 *	one saved in the hibernation image), then its contents must be saved
 *	by this function.  In other words, if kernel A is hibernated and a
 *	different kernel B is used for loading the hibernation image into
 *	memory, then kernel A's __save_processor_state() must save all
 *	registers needed by kernel A, so that it can operate correctly after
 *	the resume regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/* descriptor tables */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else	/* CONFIG_X86_64 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/* segment registers */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else	/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/* control registers */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else	/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
}
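/*
 * Note: read_cr4_safe() is used on 32-bit because CR4 only exists from
 * the Pentium onwards; on older CPUs it reads as 0, and the restore path
 * correspondingly writes CR4 back only when the saved value is non-zero.
 * CR8 (the task-priority register) is only accessible in 64-bit mode.
 */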
/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
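/*
 * Caller sketch, for orientation only (the authoritative sequences live
 * in kernel/power/hibernate.c, arch/x86/kernel/apm_32.c and the ACPI
 * wakeup code): each suspend path brackets its point of no return with
 * this pair, roughly
 *
 *	save_processor_state();
 *	error = swsusp_arch_suspend();		(or a BIOS/firmware call)
 *	restore_processor_state();
 *
 * so that everything captured above is put back before ordinary kernel
 * code runs again.
 */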
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.  This pairs with the
	 * kernel_fpu_begin() in __save_processor_state().
	 */
	kernel_fpu_end();
}
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has the concept of a busy TSS
				 * or some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
#ifdef CONFIG_X86_32
		set_debugreg(current->thread.debugreg0, 0);
		set_debugreg(current->thread.debugreg1, 1);
		set_debugreg(current->thread.debugreg2, 2);
		set_debugreg(current->thread.debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(current->thread.debugreg6, 6);
		set_debugreg(current->thread.debugreg7, 7);
#else	/* CONFIG_X86_64 */
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
#endif
	}
}
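/*
 * Why the TSS handling above is needed: ltr faults if the referenced TSS
 * descriptor is marked busy, and it became busy when it was first loaded.
 * Rewriting the descriptor (set_tss_desc(), plus forcing the type field
 * back to 9, "available TSS", on 64-bit) makes it loadable again before
 * load_TR_desc() issues ltr.
 */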
/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt: structure to load the registers contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/* control registers */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else	/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else	/* CONFIG_X86_64 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/* segment registers */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/* sysenter MSRs */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else	/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave capable cpus
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();

#ifdef CONFIG_X86_32
	mcheck_init(&boot_cpu_data);
#endif
}
/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif