From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 17:25:23 +0000 (-0300)
Subject: x86: use specialized routine for setup per-cpu area
X-Git-Tag: v2.6.26-rc1~1154^2~374
X-Git-Url: http://git.samba.org/samba.git/?p=sfrench%2Fcifs-2.6.git;a=commitdiff_plain;h=4fe29a85642544503cf81e9cf251ef0f4e65b162

x86: use specialized routine for setup per-cpu area

We use the same routine as x86_64, now moved to setup.c with just a few
ifdefs inside.  Note that this routine uses prefill_possible_map(), which
has the very nice side effect of allowing hotplug of CPUs that are marked
as present but disabled by the ACPI BIOS.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dbdd3142215c..fd27048087b8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -117,7 +117,7 @@ config ARCH_HAS_CPU_RELAX
 	def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-	def_bool X86_64
+	def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
 
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index c436e747f502..5d33509fd1c1 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -18,7 +18,7 @@ CFLAGS_tsc_64.o := $(nostackp)
 obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
 obj-y += traps_$(BITS).o irq_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o
-obj-y += setup_$(BITS).o i8259_$(BITS).o
+obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o
 obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
new file mode 100644
index 000000000000..1179aa06cdbf
--- /dev/null
+++ b/arch/x86/kernel/setup.c
@@ -0,0 +1,103 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/percpu.h>
+#include <asm/smp.h>
+#include <asm/percpu.h>
+#include <asm/sections.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/topology.h>
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+/*
+ * Copy data used in early init routines from the initial arrays to the
+ * per cpu data areas.  These arrays then become expendable and the
+ * *_early_ptr's are zeroed indicating that the static arrays are gone.
+ */
+static void __init setup_per_cpu_maps(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+#ifdef CONFIG_SMP
+		if (per_cpu_offset(cpu)) {
+#endif
+			per_cpu(x86_cpu_to_apicid, cpu) =
+					x86_cpu_to_apicid_init[cpu];
+			per_cpu(x86_bios_cpu_apicid, cpu) =
+					x86_bios_cpu_apicid_init[cpu];
+#ifdef CONFIG_NUMA
+			per_cpu(x86_cpu_to_node_map, cpu) =
+					x86_cpu_to_node_map_init[cpu];
+#endif
+#ifdef CONFIG_SMP
+		} else
+			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
+					cpu);
+#endif
+	}
+
+	/* indicate the early static arrays will soon be gone */
+	x86_cpu_to_apicid_early_ptr = NULL;
+	x86_bios_cpu_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
+	x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * Great future not-so-futuristic plan: make i386 and x86_64 do it
+ * the same way
+ */
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+#endif
+
+/*
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
+ */
+void __init setup_per_cpu_areas(void)
+{
+	int i;
+	unsigned long size;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
+
+	/* Copy section for each CPU (we discard the original) */
+	size = PERCPU_ENOUGH_ROOM;
+
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
+			  size);
+	for_each_cpu_mask(i, cpu_possible_map) {
+		char *ptr;
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+		ptr = alloc_bootmem_pages(size);
+#else
+		int node = early_cpu_to_node(i);
+		if (!node_online(node) || !NODE_DATA(node))
+			ptr = alloc_bootmem_pages(size);
+		else
+			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+#endif
+		if (!ptr)
+			panic("Cannot allocate cpu data for CPU %d\n", i);
+#ifdef CONFIG_X86_64
+		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
+#else
+		__per_cpu_offset[i] = ptr - __per_cpu_start;
+#endif
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	}
+
+	/* setup percpu data maps early */
+	setup_per_cpu_maps();
+}
+
+#endif
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index e24c45677094..6b4e3262e8cb 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -85,83 +85,6 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-		if (per_cpu_offset(cpu)) {
-#endif
-			per_cpu(x86_cpu_to_apicid, cpu) =
-					x86_cpu_to_apicid_init[cpu];
-			per_cpu(x86_bios_cpu_apicid, cpu) =
-					x86_bios_cpu_apicid_init[cpu];
-#ifdef CONFIG_NUMA
-			per_cpu(x86_cpu_to_node_map, cpu) =
-					x86_cpu_to_node_map_init[cpu];
-#endif
-#ifdef CONFIG_SMP
-		}
-		else
-			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-					cpu);
-#endif
-	}
-
-	/* indicate the early static arrays will soon be gone */
-	x86_cpu_to_apicid_early_ptr = NULL;
-	x86_bios_cpu_apicid_early_ptr = NULL;
-#ifdef CONFIG_NUMA
-	x86_cpu_to_node_map_early_ptr = NULL;
-#endif
-}
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
-void __init setup_per_cpu_areas(void)
-{
-	int i;
-	unsigned long size;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
-
-	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
-
-	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
-	for_each_cpu_mask (i, cpu_possible_map) {
-		char *ptr;
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
-#else
-		int node = early_cpu_to_node(i);
-
-		if (!node_online(node) || !NODE_DATA(node))
-			ptr = alloc_bootmem_pages(size);
-		else
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-#endif
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
-		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-	}
-
-	/* setup percpu data maps early */
-	setup_per_cpu_maps();
-}
-
 void pda_init(int cpu)
 {
 	struct x8664_pda *pda = cpu_pda(cpu);
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 92a5df6190b5..bf5c9e9f26c1 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -665,6 +665,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		unmap_cpu_to_logical_apicid(cpu);
 		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
 		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
+		cpu_clear(cpu, cpu_possible_map);
 		cpucount--;
 	} else {
 		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
@@ -743,6 +744,7 @@ EXPORT_SYMBOL(xquad_portio);
 
 static void __init disable_smp(void)
 {
+	cpu_possible_map = cpumask_of_cpu(0);
 	smpboot_clear_io_apic_irqs();
 	phys_cpu_present_map = physid_mask_of_physid(0);
 	map_cpu_to_logical_apicid();
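
The mechanism the new setup.c unifies is easy to lose among the ifdefs, so
here is the idea in miniature: every per-cpu variable lives once in a
template section (__per_cpu_start..__per_cpu_end), each possible CPU gets a
private copy of that section, and the byte offset between template and copy
is what a per_cpu() access adds at runtime.  The userspace sketch below
models exactly that and nothing more; per_cpu_model, template_counter and
cpu_offset are invented names for the example (not kernel API), and
malloc() stands in for alloc_bootmem_pages().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for a DEFINE_PER_CPU variable in the template section. */
static int template_counter = 42;

/* Stand-in for __per_cpu_offset[] (i386) / cpu_pda(i)->data_offset (x86_64). */
static long cpu_offset[NR_CPUS];

/* Model of per_cpu(var, cpu): the template address plus the CPU's offset. */
#define per_cpu_model(var, cpu) \
	(*(__typeof__(&(var)))((char *)&(var) + cpu_offset[(cpu)]))

int main(void)
{
	const char *start = (const char *)&template_counter; /* __per_cpu_start */
	size_t size = sizeof(template_counter);	/* PERCPU_ENOUGH_ROOM analogue */
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		char *ptr = malloc(size);	/* alloc_bootmem_pages() analogue */
		if (!ptr)
			abort();		/* the kernel panic()s instead */
		cpu_offset[cpu] = ptr - start;	/* record the per-CPU offset */
		memcpy(ptr, start, size);	/* copy the template section */
	}

	/* Each CPU now updates its own copy, never the shared template. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_model(template_counter, cpu) += cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: counter = %d\n",
		       cpu, per_cpu_model(template_counter, cpu));
	return 0;
}

The kernel routine differs mainly in where the copy comes from (node-local
bootmem when the CPU's node is online, plain bootmem otherwise) and where
the offset is stored, which is precisely the CONFIG_X86_64/CONFIG_X86_32
ifdef the patch adds inside setup_per_cpu_areas().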
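
setup_per_cpu_maps() embodies a second pattern worth isolating: data needed
before the per-cpu areas exist lives in static init arrays reached through
*_early_ptr pointers, and once the per-cpu copies are filled in, the early
pointers are NULLed so the static arrays become expendable.  A sketch of
that handoff, again with invented names (apicid_init, read_apicid, handoff)
rather than the kernel's:

#include <stdio.h>

#define NR_CPUS 4

/* Boot-time static table (the x86_cpu_to_apicid_init[] analogue). */
static unsigned short apicid_init[NR_CPUS] = { 0, 1, 4, 5 }; /* made-up IDs */

/* Early pointer: non-NULL means the per-cpu copies are not ready yet. */
static unsigned short *apicid_early_ptr = apicid_init;

/* Final home (stands in for the per_cpu(x86_cpu_to_apicid, cpu) slots). */
static unsigned short apicid_percpu[NR_CPUS];

static unsigned short read_apicid(int cpu)
{
	if (apicid_early_ptr)			/* before the handoff */
		return apicid_early_ptr[cpu];
	return apicid_percpu[cpu];		/* after the handoff */
}

/* The setup_per_cpu_maps() analogue: copy, then retire the early view. */
static void handoff(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		apicid_percpu[cpu] = apicid_init[cpu];
	apicid_early_ptr = NULL;	/* static array is now expendable */
}

int main(void)
{
	printf("early: cpu 2 -> apicid %d\n", read_apicid(2));
	handoff();
	printf("later: cpu 2 -> apicid %d\n", read_apicid(2));
	return 0;
}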