/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

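/*
 * With CONFIG_MIPS_ELF_APPENDED_DTB, reserve a 1 MiB region inside the
 * kernel image (section .appended_dtb) into which a device tree blob can
 * be appended after the build; platform code can then use it as the FDT
 * when the bootloader does not pass one.
 */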
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

static void *detect_magic __initdata = detect_memory_region;

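/*
 * add_memory_region() records a physical memory range in boot_mem_map,
 * merging it with any overlapping or adjacent entry of the same type.
 * @start: physical start address of the region
 * @size:  size of the region in bytes
 * @type:  one of the BOOT_MEM_* region types, e.g. BOOT_MEM_RAM
 */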
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
        int x = boot_mem_map.nr_map;
        int i;

        /*
         * If the region reaches the top of the physical address space, adjust
         * the size slightly so that (start + size) doesn't overflow
         */
        if (start + size - 1 == PHYS_ADDR_MAX)
                --size;

        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
                return;
        }

        /*
         * Try to merge with existing entry, if any.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct boot_mem_map_entry *entry = boot_mem_map.map + i;
                unsigned long top;

                if (entry->type != type)
                        continue;

                if (start + size < entry->addr)
                        continue;                       /* no overlap */

                if (entry->addr + entry->size < start)
                        continue;                       /* no overlap */

                top = max(entry->addr + entry->size, start + size);
                entry->addr = min(entry->addr, start);
                entry->size = top - entry->addr;

                return;
        }

        if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
                pr_err("Ooops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}

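/*
 * Probe the amount of RAM at @start by looking for address aliasing:
 * detect_magic holds a distinctive value inside the kernel image, and on
 * boards whose memory decoder wraps around, reading the same location
 * @size bytes higher returns identical contents once @size reaches the
 * real memory size. The probed size starts at @sz_min and doubles each
 * step up to @sz_max, so both bounds should be powers of two.
 */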
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
        void *dm = &detect_magic;
        phys_addr_t size;

        for (size = sz_min; size < sz_max; size <<= 1) {
                if (!memcmp(dm, dm + size, sizeof(detect_magic)))
                        break;
        }

        pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
                ((unsigned long long) size) / SZ_1M,
                (unsigned long long) start,
                ((unsigned long long) sz_min) / SZ_1M,
                ((unsigned long long) sz_max) / SZ_1M);

        add_memory_region(start, size, BOOT_MEM_RAM);
}

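/*
 * memory_region_available() checks that [@start, @start + @size) lies
 * entirely inside a BOOT_MEM_RAM entry of boot_mem_map and does not
 * intersect any BOOT_MEM_RESERVED entry.
 */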
static bool __init __maybe_unused memory_region_available(phys_addr_t start,
                                                          phys_addr_t size)
{
        int i;
        bool in_ram = false, free = true;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                phys_addr_t start_, end_;

                start_ = boot_mem_map.map[i].addr;
                end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        if (start >= start_ && start + size <= end_)
                                in_ram = true;
                        break;
                case BOOT_MEM_RESERVED:
                        if ((start >= start_ && start < end_) ||
                            (start < start_ && start + size >= start_))
                                free = false;
                        break;
                default:
                        continue;
                }
        }

        return in_ram && free;
}

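/* Dump boot_mem_map to the kernel log, one "memory:" line per entry. */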
static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk(KERN_CONT "(usable)\n");
                        break;
                case BOOT_MEM_INIT_RAM:
                        printk(KERN_CONT "(usable after init)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk(KERN_CONT "(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
                default:
                        printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
        unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
        /* Guess if the sign extension was forgotten by the bootloader */
        if (start < XKPHYS)
                start = (int)start;
#endif
        initrd_start = start;
        initrd_end += start;
        return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
        initrd_end += memparse(p, &p);
        return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free PFN after the initrd */
static unsigned long __init init_initrd(void)
{
        unsigned long end;

        /*
         * Board specific code or command line parser should have
         * already set up initrd_start and initrd_end. In these cases
         * perform sanity checks and use them if all looks good.
         */
        if (!initrd_start || initrd_end <= initrd_start)
                goto disable;

        if (initrd_start & ~PAGE_MASK) {
                pr_err("initrd start must be page aligned\n");
                goto disable;
        }
        if (initrd_start < PAGE_OFFSET) {
                pr_err("initrd start < PAGE_OFFSET\n");
                goto disable;
        }

        /*
         * Sanitize initrd addresses. For example, firmware can't guess
         * whether it needs to pass them as 64-bit values when the kernel
         * has been built purely 32-bit. We also need to switch from
         * KSEG0 to XKPHYS addresses now, so that the code can safely
         * use __pa().
         */
        end = __pa(initrd_end);
        initrd_end = (unsigned long)__va(end);
        initrd_start = (unsigned long)__va(__pa(initrd_start));

        ROOT_DEV = Root_RAM0;
        return PFN_UP(end);
disable:
        initrd_start = 0;
        initrd_end = 0;
        return 0;
}

/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped.  Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
        u64 buf;

        /* Check for CPIO signature */
        if (!memcmp((void *)initrd_start, "070701", 6))
                return;

        /* Check for compressed initrd */
        if (decompress_method((unsigned char *)initrd_start, 8, NULL))
                return;

        /* Try again with a byte swapped header */
        buf = swab64p((u64 *)initrd_start);
        if (!memcmp(&buf, "070701", 6) ||
            decompress_method((unsigned char *)(&buf), 8, NULL)) {
                unsigned long i;

                pr_info("Byteswapped initrd detected\n");
                for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
                        swab64s((u64 *)i);
        }
#endif
}

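/*
 * Validate the initrd once the memory map is final: make sure it is
 * non-empty and lies entirely below max_low_pfn, byte-swap it if
 * necessary, and reserve its pages with the bootmem allocator so they
 * are not handed out as free memory.
 */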
static void __init finalize_initrd(void)
{
        unsigned long size = initrd_end - initrd_start;

        if (size == 0) {
                printk(KERN_INFO "Initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                printk(KERN_ERR "Initrd extends beyond end of memory");
                goto disable;
        }

        maybe_bswap_initrd();

        reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
                initrd_start, size);
        return;
disable:
        printk(KERN_CONT " - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
        return 0;
}

#define finalize_initrd()       do {} while (0)

#endif /* CONFIG_BLK_DEV_INITRD */

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
        init_initrd();
        finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 && !(CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

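/*
 * Size of the legacy bootmem bitmap for @pages page frames: one bit per
 * page, rounded up to a whole number of longs.
 */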
static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return ALIGN(bytes, sizeof(long));
}

static void __init bootmem_init(void)
{
        unsigned long reserved_end;
        unsigned long mapstart = ~0UL;
        unsigned long bootmap_size;
        phys_addr_t ramstart = PHYS_ADDR_MAX;
        bool bootmap_valid = false;
        int i;

        /*
         * Sanity check any INITRD first. We don't take it into account
         * for bootmem setup initially; we rely on the end of the kernel
         * code as our memory range starting point. Once bootmem is
         * initialized we will reserve the area used for the initrd.
         */
        init_initrd();
        reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

        /*
         * max_low_pfn is not a number of pages. The number of pages
         * of the system is given by 'max_low_pfn - min_low_pfn'.
         */
        min_low_pfn = ~0UL;
        max_low_pfn = 0;

        /*
         * Find the highest page frame number we have available
         * and the lowest used RAM address
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                ramstart = min(ramstart, boot_mem_map.map[i].addr);

#ifndef CONFIG_HIGHMEM
                /*
                 * Skip highmem here so we get an accurate max_low_pfn if low
                 * memory stops short of high memory.
                 * If the region overlaps HIGHMEM_START, end is clipped so
                 * max_pfn excludes the highmem portion.
                 */
                if (start >= PFN_DOWN(HIGHMEM_START))
                        continue;
                if (end > PFN_DOWN(HIGHMEM_START))
                        end = PFN_DOWN(HIGHMEM_START);
#endif

                if (end > max_low_pfn)
                        max_low_pfn = end;
                if (start < min_low_pfn)
                        min_low_pfn = start;
                if (end <= reserved_end)
                        continue;
#ifdef CONFIG_BLK_DEV_INITRD
                /* Skip zones before initrd and initrd itself */
                if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
                        continue;
#endif
                if (start >= mapstart)
                        continue;
                mapstart = max(reserved_end, start);
        }

        /*
         * Reserve any memory between the start of RAM and PHYS_OFFSET
         */
        if (ramstart > PHYS_OFFSET)
                add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
                                  BOOT_MEM_RESERVED);

        if (min_low_pfn >= max_low_pfn)
                panic("Incorrect memory mapping !!!");
        if (min_low_pfn > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                        (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                        min_low_pfn - ARCH_PFN_OFFSET);
        } else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
                pr_info("%lu free pages won't be used\n",
                        ARCH_PFN_OFFSET - min_low_pfn);
        }
        min_low_pfn = ARCH_PFN_OFFSET;

        /*
         * Determine low and high memory ranges
         */
        max_pfn = max_low_pfn;
        if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
                highstart_pfn = PFN_DOWN(HIGHMEM_START);
                highend_pfn = max_low_pfn;
#endif
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * mapstart should be after initrd_end
         */
        if (initrd_end)
                mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

        /*
         * Check that mapstart doesn't overlap with any of the memory
         * regions that have been reserved through e.g. the DTB.
         */
        bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);

        bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
                                                bootmap_size);
        for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
                unsigned long mapstart_addr;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RESERVED:
                        mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
                                                boot_mem_map.map[i].size);
                        if (PHYS_PFN(mapstart_addr) < mapstart)
                                break;

                        bootmap_valid = memory_region_available(mapstart_addr,
                                                                bootmap_size);
                        if (bootmap_valid)
                                mapstart = PHYS_PFN(mapstart_addr);
                        break;
                default:
                        break;
                }
        }

        if (!bootmap_valid)
                panic("No memory area to place a bootmap bitmap");

        /*
         * Initialize the boot-time allocator with low memory only.
         */
        if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
                                         min_low_pfn, max_low_pfn))
                panic("Unexpected memory size required for bootmap");

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                if (start <= min_low_pfn)
                        start = min_low_pfn;
                if (start >= end)
                        continue;

#ifndef CONFIG_HIGHMEM
                if (end > max_low_pfn)
                        end = max_low_pfn;

                /*
                 * ... finally, is the area going away?
                 */
                if (end <= start)
                        continue;
#endif

                memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
        }

        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end, size;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end   = PFN_DOWN(boot_mem_map.map[i].addr
                                    + boot_mem_map.map[i].size);

                /*
                 * Reserve usable memory.
                 */
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        break;
                case BOOT_MEM_INIT_RAM:
                        memory_present(0, start, end);
                        continue;
                default:
                        /* Not usable memory */
                        if (start > min_low_pfn && end < max_low_pfn)
                                reserve_bootmem(boot_mem_map.map[i].addr,
                                                boot_mem_map.map[i].size,
                                                BOOTMEM_DEFAULT);
                        continue;
                }

                /*
                 * We round the start address of usable memory up and the
                 * end address down.
                 */
                if (start >= max_low_pfn)
                        continue;
                if (start < reserved_end)
                        start = reserved_end;
                if (end > max_low_pfn)
                        end = max_low_pfn;

                /*
                 * ... finally, is the area going away?
                 */
                if (end <= start)
                        continue;
                size = end - start;

                /* Register lowmem ranges */
                free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
                memory_present(0, start, end);
        }

        /*
         * Reserve the bootmap memory.
         */
        reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
        /*
         * The kernel reserves all memory below its _end symbol as bootmem,
         * but the kernel may now be at a much higher address. The memory
         * between the original and new locations may be returned to the system.
         */
        if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
                unsigned long offset;
                extern void show_kernel_relocation(const char *level);

                offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
                free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
                /*
                 * This information is necessary when debugging the kernel,
                 * but is a security vulnerability otherwise!
                 */
                show_kernel_relocation(KERN_INFO);
#endif
        }
#endif

        /*
         * Reserve initrd memory if needed.
         */
        finalize_initrd();
}

#endif  /* CONFIG_SGI_IP27 || (CONFIG_CPU_LOONGSON3 && CONFIG_NUMA) */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record
 *    detected memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *       This was rather impractical because it meant plat_mem_setup had to
 * get by without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was just renamed to plat_mem_setup and a second
 * platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;

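/*
 * Parse "mem=<size>[@<start>]" from the command line. The first "mem="
 * option discards the firmware-provided memory map; each option then
 * adds one BOOT_MEM_RAM region (at physical address 0 when no
 * "@<start>" is given).
 */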
static int __init early_parse_mem(char *p)
{
        phys_addr_t start, size;

        /*
         * If a user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                boot_mem_map.nr_map = 0;
                usermem = 1;
        }
        start = 0;
        size = memparse(p, &p);
        if (*p == '@')
                start = memparse(p + 1, &p);

        add_memory_region(start, size, BOOT_MEM_RAM);

        return 0;
}
early_param("mem", early_parse_mem);

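/*
 * Parse "memmap=<size>@<start>" (add RAM) and "memmap=<size>$<start>"
 * (reserve a range). The x86-only "exactmap" and "memmap=nn#ss" forms
 * are rejected.
 */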
static int __init early_parse_memmap(char *p)
{
        char *oldp;
        u64 start_at, mem_size;

        if (!p)
                return -EINVAL;

        if (!strncmp(p, "exactmap", 8)) {
                pr_err("\"memmap=exactmap\" invalid on MIPS\n");
                return 0;
        }

        oldp = p;
        mem_size = memparse(p, &p);
        if (p == oldp)
                return -EINVAL;

        if (*p == '@') {
                start_at = memparse(p+1, &p);
                add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
        } else if (*p == '#') {
                pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
                return -EINVAL;
        } else if (*p == '$') {
                start_at = memparse(p+1, &p);
                add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
        } else {
                pr_err("\"memmap\" invalid format!\n");
                return -EINVAL;
        }

        if (*p == '\0') {
                usermem = 1;
                return 0;
        } else
                return -EINVAL;
}
early_param("memmap", early_parse_memmap);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
        int i;

        setup_elfcorehdr = memparse(p, &p);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start = boot_mem_map.map[i].addr;
                unsigned long end = (boot_mem_map.map[i].addr +
                                     boot_mem_map.map[i].size);
                if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                        /*
                         * Reserve from the elf core header to the end of
                         * the memory segment; that should all be kdump
                         * reserved memory.
                         */
                        setup_elfcorehdr_size = end - setup_elfcorehdr;
                        break;
                }
        }
        /*
         * If we don't find it in the memory map, then we shouldn't
         * have to worry about it, as the new kernel won't use it.
         */
        return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

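/*
 * Add the range [@mem, @end) to boot_mem_map as @type, unless @mem
 * already lies inside an existing entry, in which case the range is
 * assumed to be covered.
 */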
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
        phys_addr_t size;
        int i;

        size = end - mem;
        if (!size)
                return;

        /* Make sure it is in the boot_mem_map */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                if (mem >= boot_mem_map.map[i].addr &&
                    mem < (boot_mem_map.map[i].addr +
                           boot_mem_map.map[i].size))
                        return;
        }
        add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

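/*
 * Parse "crashkernel=<size>@<offset>" from the boot command line and,
 * if the requested range is free RAM, record it in crashk_res for the
 * kdump kernel.
 */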
static void __init mips_parse_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
                return;

        if (!memory_region_available(crash_base, crash_size)) {
                pr_warn("Invalid memory region reserved for crash kernel\n");
                return;
        }

        crashk_res.start = crash_base;
        crashk_res.end   = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
        int ret;

        if (crashk_res.start == crashk_res.end)
                return;

        ret = request_resource(res, &crashk_res);
        if (!ret)
                pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
                        (unsigned long)((crashk_res.end -
                                         crashk_res.start + 1) >> 20),
                        (unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

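/*
 * These mirror the Kconfig options that choose where the kernel command
 * line comes from: the bootloader (arcs_cmdline), a DTB, or the built-in
 * CONFIG_CMDLINE; the *_EXTEND variants append the bootloader arguments
 * rather than replacing them.
 */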
#define USE_PROM_CMDLINE        IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE         IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM        IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM        \
        IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)

static void __init arch_mem_init(char **cmdline_p)
{
        struct memblock_region *reg;
        extern void plat_mem_setup(void);

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
            (USE_DTB_CMDLINE && !boot_command_line[0]))
                strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

        if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }

#if defined(CONFIG_CMDLINE_BOOL)
        if (builtin_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }

        if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif

        /* call board setup routine */
        plat_mem_setup();

        /*
         * Make sure all kernel memory is in the maps. The "UP" and
         * "DOWN" are opposite for initdata, since if it crosses over
         * into another memory section you don't want that section to
         * be freed when the initdata is freed.
         */
        arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
                         PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
                         BOOT_MEM_RAM);
        arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
                         PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
                         BOOT_MEM_INIT_RAM);

        pr_info("Determined physical RAM map:\n");
        print_memory_map();

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_early_param();

        if (usermem) {
                pr_info("User-defined physical RAM map:\n");
                print_memory_map();
        }

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        bootmem_init();
#ifdef CONFIG_PROC_VMCORE
        if (setup_elfcorehdr && setup_elfcorehdr_size) {
                printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
                       setup_elfcorehdr, setup_elfcorehdr_size);
                reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
                                BOOTMEM_DEFAULT);
        }
#endif

        mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start,
                                crashk_res.end - crashk_res.start + 1,
                                BOOTMEM_DEFAULT);
#endif
        device_tree_init();
        sparse_init();
        plat_swiotlb_setup();

        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
        /* Tell bootmem about CMA-reserved memblock regions */
        for_each_memblock(reserved, reg)
                if (reg->size != 0)
                        reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

        reserve_bootmem_region(__pa_symbol(&__nosave_begin),
                        __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

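/*
 * Publish the boot memory map through the resource tree (/proc/iomem),
 * clipping regions at HIGHMEM_START and nesting the kernel code, data,
 * bss and crashkernel resources inside whichever RAM region contains
 * them.
 */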
static void __init resource_init(void)
{
        int i;

        if (UNCAC_BASE != IO_BASE)
                return;

        code_resource.start = __pa_symbol(&_text);
        code_resource.end = __pa_symbol(&_etext) - 1;
        data_resource.start = __pa_symbol(&_etext);
        data_resource.end = __pa_symbol(&_edata) - 1;
        bss_resource.start = __pa_symbol(&__bss_start);
        bss_resource.end = __pa_symbol(&__bss_stop) - 1;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= HIGHMEM_START)
                        continue;
                if (end >= HIGHMEM_START)
                        end = HIGHMEM_START - 1;

                res = alloc_bootmem(sizeof(struct resource));

                res->start = start;
                res->end = end;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        res->flags |= IORESOURCE_SYSRAM;
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }

                request_resource(&iomem_resource, res);

                /*
                 *  We don't know which RAM region contains kernel data,
                 *  so we try it repeatedly and let the resource manager
                 *  test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
                request_resource(res, &bss_resource);
                request_crashkernel(res);
        }
}

#ifdef CONFIG_SMP
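/*
 * Clamp the set of possible CPUs to nr_cpu_ids: anything the platform
 * marked possible beyond that limit is cleared so per-CPU resources are
 * not allocated for CPUs that can never come online.
 */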
static void __init prefill_possible_map(void)
{
        int i, possible = num_possible_cpus();

        if (possible > nr_cpu_ids)
                possible = nr_cpu_ids;

        for (i = 0; i < possible; i++)
                set_cpu_possible(i, true);
        for (; i < NR_CPUS; i++)
                set_cpu_possible(i, false);

        nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

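/*
 * setup_arch() is the architecture entry point called from
 * start_kernel(): it probes the CPU, runs the platform's prom_init(),
 * sets up early consoles, builds the memory map via arch_mem_init(),
 * and brings up the SMP, cache and paging infrastructure.
 */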
void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        mips_cm_probe();
        prom_init();

        setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
        setup_early_printk();
#endif
        cpu_report();
        check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        arch_mem_init(cmdline_p);

        resource_init();
        plat_smp_setup();
        prefill_possible_map();

        cpu_cache_init();
        paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
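/*
 * Create the "mips" directory under debugfs (/sys/kernel/debug/mips);
 * other MIPS debug code hangs its files off mips_debugfs_dir.
 */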
static int __init debugfs_mips(void)
{
        struct dentry *d;

        d = debugfs_create_dir("mips", NULL);
        if (!d)
                return -ENOMEM;
        mips_debugfs_dir = d;
        return 0;
}
arch_initcall(debugfs_mips);
#endif /* CONFIG_DEBUG_FS */