arch/mips/kernel/machine_kexec.c
/*
 * machine_kexec.c for kexec
 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/cacheflush.h>
#include <asm/page.h>

extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

static unsigned long reboot_code_buffer;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);

atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif

void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;

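/*
 * Dump the kimage layout (type, entry point, head pointer and each
 * segment's physical range) at pr_debug level, so a failing load can be
 * inspected with dynamic debug enabled.
 */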
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("    segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}

#ifdef CONFIG_UHI_BOOT

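/*
 * UHI (Unified Hosting Interface) handoff: the new kernel expects a0 == -2
 * with a1 holding the virtual address of a flat device tree.  The prepare
 * hook below scans the loaded segments for something that looks like an
 * FDT and fills in kexec_args[] accordingly.
 */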
static int uhi_machine_kexec_prepare(struct kimage *kimage)
{
	int i;

	/*
	 * If no DTB is passed to the new kernel, the kexec tool creates a
	 * flat device tree itself, carrying the modified command line for
	 * the new kernel.
	 */
	for (i = 0; i < kimage->nr_segments; i++) {
		struct fdt_header fdt;

		if (kimage->segment[i].memsz <= sizeof(fdt))
			continue;

		if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
			continue;

		if (fdt_check_header(&fdt))
			continue;

		kexec_args[0] = -2;
		kexec_args[1] = (unsigned long)
			phys_to_virt((unsigned long)kimage->segment[i].mem);
		break;
	}

	return 0;
}

int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;

#else

int (*_machine_kexec_prepare)(struct kimage *) = NULL;

#endif /* CONFIG_UHI_BOOT */

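/*
 * Arch hook called by the generic kexec code at image load time.  With
 * CONFIG_SMP the platform must provide kexec_nonboot_cpu_func(), since
 * secondary CPUs have to be parked in the relocated code before the jump
 * to the new kernel.
 */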
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
	if (!kexec_nonboot_cpu_func())
		return -EINVAL;
#endif

	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);

	return 0;
}

void
machine_kexec_cleanup(struct kimage *kimage)
{
}

#ifdef CONFIG_SMP
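/*
 * Runs on every secondary CPU via smp_call_function(): mark the CPU
 * offline, then spin with interrupts disabled until the boot CPU sets
 * kexec_ready_to_reboot, at which point we divert into the relocated
 * wait code and never return.
 */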
static void kexec_shutdown_secondary(void *param)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();

	/* NOTREACHED */
}
#endif

void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();

#ifdef CONFIG_SMP
	smp_call_function(kexec_shutdown_secondary, NULL, 0);

	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}

void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

#ifdef CONFIG_SMP
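/*
 * Park a secondary CPU: flush the icache over the relocated stub and jump
 * into the copied kexec_smp_wait loop, where the CPU spins until the new
 * kernel takes over.  Platforms that support SMP kexec can use this as
 * their kexec_nonboot_cpu smp op.
 */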
void kexec_nonboot_cpu_jump(void)
{
	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
				 reboot_code_buffer + relocate_new_kernel_size);

	relocated_kexec_smp_wait(NULL);
}
#endif

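/*
 * Common reboot path taken by every CPU: secondary CPUs divert into the
 * relocated wait code, while the boot CPU falls through and jumps to the
 * relocated relocate_new_kernel stub.
 */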
void kexec_reboot(void)
{
	void (*do_kexec)(void) __noreturn;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

#ifdef CONFIG_SMP
	if (smp_processor_id() > 0) {
		/*
		 * Secondary CPUs jump into the relocated wait code here
		 * instead of idling in cpu_relax() or wait; this is what
		 * makes an SMP kexec reboot possible. Kdump usually does
		 * not need an SMP-capable new kernel, but a regular kexec
		 * may.
		 */
		kexec_nonboot_cpu();

		/* NOTREACHED */
	}
#endif

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec() CPU.
	 */
	local_flush_icache_range(reboot_code_buffer,
				 reboot_code_buffer + relocate_new_kernel_size);

	do_kexec = (void *)reboot_code_buffer;
	do_kexec();
}

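/*
 * Final arch hook of the kexec path: copy the relocate_new_kernel stub
 * into the control page, rewrite the kexec page list so the stub can walk
 * it through unmapped segments, release any parked secondary CPUs into
 * the relocated wait code, then jump to the stub with interrupts off.
 */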
void
machine_kexec(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
	  (unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
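	/*
	 * Page list entries are tagged in their low bits (see
	 * include/linux/kexec.h): IND_DESTINATION and IND_SOURCE carry page
	 * addresses, IND_INDIRECTION points at the next table of entries,
	 * and IND_DONE terminates the list.
	 */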
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline BEFORE disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be interrupted from here on. */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	/* Make reboot code buffer available to the boot CPU. */
	__flush_cache_all();
#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the relocated kexec_smp_wait loop. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	kexec_reboot();
}