#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
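/*
 * Illustrative sketch (hedged, not part of this file's build): on x86,
 * raw_smp_processor_id() is expected to reduce to a single
 * this_cpu_read() of the cpu_number variable defined above, i.e. one
 * segment-prefixed load rather than a table lookup.  The helper below
 * is hypothetical and compiled out via #if 0; it only documents what
 * this variable is for.
 */
#if 0
static int example_report_cpu(void)
{
	/* Equivalent, modulo preemption rules, to smp_processor_id(). */
	int cpu = this_cpu_read(cpu_number);

	pr_info("running on cpu%d\n", cpu);
	return cpu;
}
#endif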
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
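/*
 * Illustrative sketch (hedged): conceptually, per_cpu_ptr() adds
 * __per_cpu_offset[cpu] to the address of a variable's .data..percpu
 * image; the real macro goes through SHIFT_PERCPU_PTR() and sparse
 * annotations.  example_manual_per_cpu_ptr() is a hypothetical helper,
 * compiled out via #if 0.
 */
#if 0
static void *example_manual_per_cpu_ptr(void *pcpu_addr, int cpu)
{
	/* The same arithmetic that per_cpu_offset(cpu) feeds into. */
	return (void *)((unsigned long)pcpu_addr + __per_cpu_offset[cpu]);
}
#endif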
/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; no need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
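/*
 * Illustrative sketch (hedged): a module's static per-CPU variable,
 * as below, is served from the PERCPU_MODULE_RESERVE region of the
 * first chunk on x86_64, so code can reach it with a 32bit relocation
 * from the percpu segment base.  Hypothetical module code, compiled
 * out via #if 0.
 */
#if 0
/* In some module: allocated from the reserved first-chunk area. */
static DEFINE_PER_CPU(unsigned long, example_mod_counter);

static void example_mod_hit(void)
{
	this_cpu_inc(example_mod_counter);
}
#endif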
#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;
		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
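/*
 * Illustrative sketch (hedged): on 32-bit, the descriptor written above
 * gives %fs a base of per_cpu_offset(cpu), so a per-CPU access becomes
 * a single %fs-relative load.  The actual segment reload happens in
 * load_percpu_segment() via switch_to_new_gdt(); this hypothetical
 * fragment only shows the idea and is compiled out via #if 0.
 */
#if 0
static void example_load_percpu_segment_32(void)
{
	/* Point %fs at this CPU's percpu area via GDT_ENTRY_PERCPU. */
	loadsegment(fs, __KERNEL_PERCPU);
}
#endif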
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif

	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;
		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif

		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE;
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also, cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is
		 * set up later by c_init (i.e. intel_init()/amd_init()).
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}
	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
#ifdef CONFIG_X86_32
	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * Sync back the low identity map too.  It is used for example
	 * in the 32-bit EFI stub.  Note: it must be copied from the
	 * start of swapper_pg_dir, not from KERNEL_PGD_BOUNDARY.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif
}
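/*
 * Illustrative sketch (hedged): invariants that should hold once
 * setup_per_cpu_areas() has run.  example_check_percpu_invariants()
 * is a hypothetical debugging aid, compiled out via #if 0.
 */
#if 0
static void __init example_check_percpu_invariants(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		/* Each CPU's cached offset matches the global table. */
		WARN_ON(per_cpu(this_cpu_off, cpu) != per_cpu_offset(cpu));
		/* cpu_number holds each CPU's own index. */
		WARN_ON(per_cpu(cpu_number, cpu) != cpu);
	}
}
#endif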