arch/x86/kernel/machine_kexec_64.c
/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>

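/*
 * Map the 2 MiB-aligned region containing @addr into the identity map
 * rooted at @pgd, allocating intermediate PUD/PMD table pages from the
 * image's control pages as needed.  The mapping itself is a single
 * executable 2 MiB large page, so no PTE level is populated.
 */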
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
                                unsigned long addr)
{
        pud_t *pud;
        pmd_t *pmd;
        struct page *page;
        int result = -ENOMEM;

        addr &= PMD_MASK;
        pgd += pgd_index(addr);
        if (!pgd_present(*pgd)) {
                page = kimage_alloc_control_pages(image, 0);
                if (!page)
                        goto out;
                pud = (pud_t *)page_address(page);
                memset(pud, 0, PAGE_SIZE);
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud)) {
                page = kimage_alloc_control_pages(image, 0);
                if (!page)
                        goto out;
                pmd = (pmd_t *)page_address(page);
                memset(pmd, 0, PAGE_SIZE);
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
        result = 0;
out:
        return result;
}

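/*
 * Fill one page of PMD entries with executable 2 MiB identity mappings,
 * covering one PUD_SIZE (1 GiB) region starting at @addr.
 */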
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
        unsigned long end_addr;

        addr &= PAGE_MASK;
        end_addr = addr + PUD_SIZE;
        while (addr < end_addr) {
                set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
                addr += PMD_SIZE;
        }
}

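/*
 * Populate one page of PUD entries: for every 1 GiB chunk up to
 * min(@last_addr, @addr + PGDIR_SIZE), allocate a PMD page from the
 * control pages and identity-map it; clear whatever entries remain.
 */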
static int init_level3_page(struct kimage *image, pud_t *level3p,
                                unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + PGDIR_SIZE;
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pmd_t *level2p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level2p = (pmd_t *)page_address(page);
                init_level2_page(level2p, addr);
                set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
                addr += PUD_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pud_clear(level3p++);
                addr += PUD_SIZE;
        }
out:
        return result;
}

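/*
 * Build the top (PGD) level of the identity map, one PUD page per
 * populated PGDIR_SIZE region below @last_addr, clearing the unused
 * PGD entries.  Returns -ENOMEM if a control page cannot be allocated.
 */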
static int init_level4_page(struct kimage *image, pgd_t *level4p,
                                unsigned long addr, unsigned long last_addr)
{
        unsigned long end_addr;
        int result;

        result = 0;
        addr &= PAGE_MASK;
        end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
        while ((addr < last_addr) && (addr < end_addr)) {
                struct page *page;
                pud_t *level3p;

                page = kimage_alloc_control_pages(image, 0);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                level3p = (pud_t *)page_address(page);
                result = init_level3_page(image, level3p, addr, last_addr);
                if (result)
                        goto out;
                set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
                pgd_clear(level4p++);
                addr += PGDIR_SIZE;
        }
out:
        return result;
}

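/*
 * Free the table pages allocated by init_transition_pgtable().  Any
 * pointer that was never allocated is still zero, and free_page()
 * ignores a zero address, so this is safe after a partial failure.
 */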
static void free_transition_pgtable(struct kimage *image)
{
        free_page((unsigned long)image->arch.pud);
        free_page((unsigned long)image->arch.pmd);
        free_page((unsigned long)image->arch.pte);
}

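/*
 * Map relocate_kernel's kernel virtual address to the physical copy of
 * the control code (the second control page), so execution can continue
 * at the same virtual address when the kexec page tables are switched
 * in.  The intermediate tables come from GFP_KERNEL allocations and are
 * remembered in image->arch so machine_kexec_cleanup() can free them.
 */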
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr, paddr;
        int result = -ENOMEM;

        vaddr = (unsigned long)relocate_kernel;
        paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
        pgd += pgd_index(vaddr);
        if (!pgd_present(*pgd)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        goto err;
                image->arch.pud = pud;
                set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
        }
        pud = pud_offset(pgd, vaddr);
        if (!pud_present(*pud)) {
                pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
                if (!pmd)
                        goto err;
                image->arch.pmd = pmd;
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        }
        pmd = pmd_offset(pud, vaddr);
        if (!pmd_present(*pmd)) {
                pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!pte)
                        goto err;
                image->arch.pte = pte;
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        }
        pte = pte_offset_kernel(pmd, vaddr);
        set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
        return 0;
err:
        free_transition_pgtable(image);
        return result;
}

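/*
 * Build the complete kexec page table at @start_pgtable: an identity
 * mapping of all memory up to max_pfn, one extra 2 MiB mapping for
 * image->start (which may lie above max_pfn), and the transition
 * mapping for relocate_kernel.
 */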
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
        pgd_t *level4p;
        int result;
        level4p = (pgd_t *)__va(start_pgtable);
        result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
        if (result)
                return result;
        /*
         * image->start may be outside 0 ~ max_pfn, for example when
         * jumping back to the original kernel from a kexec'ed kernel.
         */
        result = init_one_level2_page(image, level4p, image->start);
        if (result)
                return result;
        return init_transition_pgtable(image, level4p);
}

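/*
 * Load a new IDT.  Called below with a zero limit, which leaves no
 * usable vectors; any exception taken after that point would end in a
 * triple fault and reset the machine.
 */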
static void set_idt(void *newidt, u16 limit)
{
        struct desc_ptr curidt;

        /* x86-64 supports unaligned loads & stores */
        curidt.size    = limit;
        curidt.address = (unsigned long)newidt;

        __asm__ __volatile__ (
                "lidtq %0\n"
                : : "m" (curidt)
                );
}

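/*
 * Load a new GDT.  As with set_idt(), the caller below passes a zero
 * limit to invalidate every descriptor before entering the relocation
 * trampoline.
 */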
static void set_gdt(void *newgdt, u16 limit)
{
        struct desc_ptr curgdt;

        /* x86-64 supports unaligned loads & stores */
        curgdt.size    = limit;
        curgdt.address = (unsigned long)newgdt;

        __asm__ __volatile__ (
                "lgdtq %0\n"
                : : "m" (curgdt)
                );
}

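/*
 * Reload all data segment registers from __KERNEL_DS.  This refreshes
 * the hidden, cached part of each segment register, so the GDT itself
 * is no longer referenced and can be invalidated afterwards.
 */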
static void load_segments(void)
{
        __asm__ __volatile__ (
                "\tmovl %0,%%ds\n"
                "\tmovl %0,%%es\n"
                "\tmovl %0,%%ss\n"
                "\tmovl %0,%%fs\n"
                "\tmovl %0,%%gs\n"
                : : "a" (__KERNEL_DS) : "memory"
                );
}

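/*
 * Called at image-load time: build the identity-mapped page table out
 * of the control pages, so that machine_kexec() itself never needs to
 * allocate memory.
 */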
int machine_kexec_prepare(struct kimage *image)
{
        unsigned long start_pgtable;
        int result;

        /* Calculate the offsets */
        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

        /* Set up the identity-mapped 64-bit page table */
        result = init_pgtable(image, start_pgtable);
        if (result)
                return result;

        return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
        free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
        unsigned long page_list[PAGES_NR];
        void *control_page;
        int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                save_processor_state();
#endif

        save_ftrace_enabled = __ftrace_enabled_save();

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
        hw_breakpoint_disable();

        if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
                /*
                 * We need to put APICs in legacy mode so that we can
                 * get timer interrupts in the second kernel.  The
                 * kexec/kdump paths already call disable_IO_APIC() in
                 * one form or another; the kexec jump path needs one
                 * too.
                 */
                disable_IO_APIC();
#endif
        }

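        /*
         * Stage the relocation code: copy relocate_kernel() into the
         * second control page and record the addresses the trampoline
         * needs -- the control page, the kexec page table and, for a
         * normal (non-crash) kexec, the swap page used while shuffling
         * the destination pages into place.
         */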
        control_page = page_address(image->control_code_page) + PAGE_SIZE;
        memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
        page_list[PA_TABLE_PAGE] =
          (unsigned long)__pa(page_address(image->control_code_page));

        if (image->type == KEXEC_TYPE_DEFAULT)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
                                                << PAGE_SHIFT);

        /*
         * The segment registers are funny things, they have both a
         * visible and an invisible part.  Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory.  At no other time is the descriptor
         * table in memory accessed.
         *
         * I take advantage of this here by force-loading the segments
         * before I zap the gdt with an invalid value.
         */
        load_segments();
        /*
         * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
        set_gdt(phys_to_virt(0), 0);
        set_idt(phys_to_virt(0), 0);

        /*
         * Now call it: relocate_kernel() shuffles the image into place
         * and jumps to image->start.  It only returns when
         * preserve_context is set, handing back the entry point for
         * jumping back into this kernel.
         */
        image->start = relocate_kernel((unsigned long)image->head,
                                       (unsigned long)page_list,
                                       image->start,
                                       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
                restore_processor_state();
#endif

        __ftrace_enabled_restore(save_ftrace_enabled);
}

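/*
 * Record the symbols that dump-analysis tools (makedumpfile, crash)
 * need in order to translate addresses in a vmcore: the kernel's
 * physical load offset, the top-level page table and, with NUMA, the
 * node_data array.
 */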
void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(phys_base);
        VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
        VMCOREINFO_SYMBOL(node_data);
        VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}