/* arch/x86/kernel/setup.c */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
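/*
 * num_processors counts the CPUs found during MP table / ACPI
 * enumeration; disabled_cpus counts those the firmware marked
 * disabled.
 */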
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
                per_cpu(x86_bios_cpu_apicid, cpu) =
                                                x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                                                x86_cpu_to_node_map_init[cpu];
#endif
        }

        /* indicate the early static arrays will soon be gone */
        x86_cpu_to_apicid_early_ptr = NULL;
        x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = NULL;
#endif
}

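/*
 * Array of cpumasks, one per possible CPU, where entry i has only
 * bit i set; pre-built here so a constant per-CPU mask can be handed
 * out without constructing one on the stack each time.
 */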
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
        int i;

        /* alloc_bootmem zeroes memory */
        cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
        for (i = 0; i < nr_cpu_ids; i++)
                cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        int i, highest_cpu = 0;
        unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
        prefill_possible_map();
#endif

        /* Copy section for each CPU (we discard the original) */
        size = PERCPU_ENOUGH_ROOM;
        printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
                          size);

        for_each_possible_cpu(i) {
                char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = alloc_bootmem_pages(size);
#else
                int node = early_cpu_to_node(i);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = alloc_bootmem_pages(size);
                        printk(KERN_INFO
                               "cpu %d has no node or node-local memory\n", i);
                } else
                        ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
                if (!ptr)
                        panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
                cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
                __per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

                highest_cpu = i;
        }

        nr_cpu_ids = highest_cpu + 1;
        printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);

        /* Setup percpu data maps */
        setup_per_cpu_maps();

        /* Setup cpumask_of_cpu map */
        setup_cpumask_of_cpu();
}

#endif

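/*
 * boot_params.hdr.setup_data is the physical address of a singly
 * linked list of struct setup_data blobs handed over by the boot
 * loader (boot protocol 2.09 and later).  Walk the list; no type is
 * consumed here yet, each blob is simply freed unless
 * CONFIG_DEBUG_BOOT_PARAMS keeps it around.
 */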
void __init parse_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data;

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_ioremap(pa_data, PAGE_SIZE);
                switch (data->type) {
                default:
                        break;
                }
#ifndef CONFIG_DEBUG_BOOT_PARAMS
                free_early(pa_data, pa_data+sizeof(*data)+data->len);
#endif
                pa_data = data->next;
                early_iounmap(data, PAGE_SIZE);
        }
}