#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

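/*
 * During early boot, before the first percpu chunk is allocated,
 * percpu accesses must still resolve somewhere sensible.  On x86_64
 * the percpu symbols are zero-based, so the boot-time offset points
 * at the initial percpu section (__per_cpu_load) in the kernel image;
 * on x86_32 the symbols keep their link-time addresses and the
 * boot-time offset is simply 0.
 */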
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

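/*
 * per_cpu_offset(cpu) indexes this array; in effect, per_cpu(var, cpu)
 * evaluates to roughly *(&var + __per_cpu_offset[cpu]).  Every slot
 * starts out as the boot offset so that early per-CPU accesses hit the
 * initial percpu area until setup_per_cpu_areas() fills in the real
 * offsets.
 */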
unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code must be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; no need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
                         cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}

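/*
 * Distance callback for the embedding first chunk allocator; CPUs on
 * the same NUMA node are "local" so the allocator can group their
 * percpu units together.
 */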
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
#else
        return LOCAL_DISTANCE;
#endif
}

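/*
 * PTE populate callback for the page-based first chunk allocator;
 * instantiates the page table entry for @addr in the kernel page
 * tables so that pcpu_page_first_chunk() can map percpu pages there.
 */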
static void __init pcpup_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

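/*
 * On 32-bit, percpu data is reached through the %fs segment, so give
 * each CPU a 4GB read/write data segment in its GDT whose base is that
 * CPU's percpu offset.  64-bit sets the GS base via MSR instead, so
 * this is a no-op there.
 */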
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
                                              0xFFFFF);

        write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

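/*
 * Allocate and initialize the first percpu chunk, point each CPU's
 * percpu offset at its unit within it, and migrate the data that
 * early boot kept in static arrays over to the new percpu areas.
 */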
void __init setup_per_cpu_areas(void)
{
        unsigned int cpu;
        unsigned long delta;
        int rc;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate the percpu area.  The embedding allocator is our
         * favorite; however, on NUMA configurations it can result in a
         * very sparse unit mapping, and the vmalloc area isn't spacious
         * enough on 32bit.  Use the page allocator in that case.
         */
#ifdef CONFIG_X86_32
        if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
                pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
        rc = -EINVAL;
        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                const size_t dyn_size = PERCPU_MODULE_RESERVE +
                        PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
                size_t atom_size;

                /*
                 * On 64bit, use PMD_SIZE for atom_size so that embedded
                 * percpu areas are aligned to PMD.  This, in the future,
                 * can also allow using PMD mappings in the vmalloc area.
                 * Use PAGE_SIZE on 32bit as the vmalloc space is highly
                 * contended and large vmalloc area allocations can
                 * easily fail.
                 */
#ifdef CONFIG_X86_64
                atom_size = PMD_SIZE;
#else
                atom_size = PAGE_SIZE;
#endif
                rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                            dyn_size, atom_size,
                                            pcpu_cpu_distance,
                                            pcpu_fc_alloc, pcpu_fc_free);
                if (rc < 0)
                        pr_warning("%s allocator failed (%d), falling back to page size\n",
                                   pcpu_fc_names[pcpu_chosen_fc], rc);
        }
        if (rc < 0)
                rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                           pcpu_fc_alloc, pcpu_fc_free,
                                           pcpup_populate_pte);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        /* alrighty, percpu areas up and running */
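        /*
         * delta is the distance from the linked percpu section to the
         * base of the newly allocated first chunk; adding each CPU's
         * unit offset yields the value that, added to a percpu symbol
         * address, reaches that CPU's copy of the variable.
         */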
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
                per_cpu(x86_cpu_to_acpiid, cpu) =
                        early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
                per_cpu(x86_cpu_to_logical_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
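                /*
                 * Point irq_stack_ptr at the top of this CPU's IRQ
                 * stack (the stack grows down from there).
                 */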
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE;
#endif
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
                /*
                 * Ensure that the boot cpu numa_node is correct when
                 * the boot cpu is on a node without installed memory.
                 * Also, when MEMORY_HOTPLUG is defined, cpu_up() calls
                 * cpu_to_node() for APs before per_cpu(numa_node) is
                 * set up later by c_init (i.e. intel_init()/amd_init()).
                 * So set them all (boot cpu and all APs).
                 */
                set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
                /*
                 * Up to this point, the boot CPU has been using the
                 * .init.data area.  Reload any changed state for the
                 * boot CPU.
                 */
                if (!cpu)
                        switch_to_new_gdt(cpu);
        }

        /* indicate that the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
        early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
        early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup the node-to-cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();

#ifdef CONFIG_X86_32
        /*
         * Sync back the kernel address range again.  We already did
         * this in setup_arch(), but percpu data also needs to be
         * available in the smpboot asm, and we can't reliably pick up
         * percpu mappings using vmalloc_fault(), because exception
         * dispatch needs percpu data.
         */
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);

        /*
         * Sync back the low identity map too.  It is used, for
         * example, in the 32-bit EFI stub.
         */
        clone_pgd_range(initial_page_table,
                        swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif
}