x86: remove duplicated code with pcpu_need_numa()
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 01161077a49c32cb6b2adac120c91176105875f4..3a97a4cf187245462f3890f08313544762642f28 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+#include <asm/cpu.h>
+#include <asm/stackprotector.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
-unsigned int max_physical_apicid;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
 #endif
 
-/* map cpu index to physical APIC ID */
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define        X86_64_NUMA     1
-
-/* map cpu index to node index */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/* which logical CPUs are on which nodes */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/* setup node_to_cpumask_map */
-static void __init setup_node_to_cpumask_map(void);
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
 
+#ifdef CONFIG_X86_64
+#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
 #else
-static inline void setup_node_to_cpumask_map(void) { }
+#define BOOT_PERCPU_OFFSET 0
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               per_cpu(x86_cpu_to_apicid, cpu) =
-                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
-               per_cpu(x86_bios_cpu_apicid, cpu) =
-                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-               per_cpu(x86_cpu_to_node_map, cpu) =
-                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-       }
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-       /* indicate the early static arrays will soon be gone */
-       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
-#endif
-}
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+};
+EXPORT_SYMBOL(__per_cpu_offset);
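
How the offsets are consumed (illustrative sketch, not part of the patch):
the generic per_cpu accessors of this era boil down to adding the target
cpu's offset to a percpu variable's link-time address, roughly:

	/* simplified sketch; the real accessors go through
	 * SHIFT_PERCPU_PTR()/RELOC_HIDE() to hide the cast from gcc */
	static inline void *per_cpu_addr(void *pcpu_var, unsigned int cpu)
	{
		return (char *)pcpu_var + __per_cpu_offset[cpu];
	}

Until setup_per_cpu_areas() rewrites the table above, every entry is
BOOT_PERCPU_OFFSET, which resolves accesses into the initial percpu image
loaded with the kernel.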
 
-#ifdef CONFIG_X86_32
 /*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
+ * On x86_64, symbols referenced from code should be reachable using
+ * 32-bit relocations.  Reserve space for static percpu variables in
+ * modules so that they are always served from the first chunk, which
+ * is located at the percpu segment base.  On x86_32, anything can
+ * address anywhere, so there is no need to reserve space in the
+ * first chunk.
  */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
-
-#else /* CONFIG_SMP && CONFIG_X86_64 */
+#ifdef CONFIG_X86_64
+#define PERCPU_FIRST_CHUNK_RESERVE     PERCPU_MODULE_RESERVE
+#else
+#define PERCPU_FIRST_CHUNK_RESERVE     0
+#endif
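
Why the reserve matters (annotation): an x86_64 module is loaded within
32-bit reach of the kernel and refers to its static percpu variables
through 32-bit relocations, so those variables must stay near the percpu
segment base.  A hypothetical module variable such as

	/* hypothetical module code: on x86_64 this lives in the
	 * PERCPU_FIRST_CHUNK_RESERVE part of the first chunk */
	static DEFINE_PER_CPU(unsigned long, my_module_counter);

is served from the reserved region of the first chunk rather than from a
later chunk whose address could be out of 32-bit reach.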
 
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
+/**
+ * pcpu_need_numa - determine whether percpu allocation should consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA.  This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
  */
-static void __init setup_cpu_pda_map(void)
+static bool __init pcpu_need_numa(void)
 {
-       char *pda;
-       struct x8664_pda **new_cpu_pda;
-       unsigned long size;
-       int cpu;
-
-       size = roundup(sizeof(struct x8664_pda), cache_line_size());
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       pg_data_t *last = NULL;
+       unsigned int cpu;
 
-       /* allocate cpu_pda array and pointer table */
-       {
-               unsigned long tsize = nr_cpu_ids * sizeof(void *);
-               unsigned long asize = size * (nr_cpu_ids - 1);
-
-               tsize = roundup(tsize, cache_line_size());
-               new_cpu_pda = alloc_bootmem(tsize + asize);
-               pda = (char *)new_cpu_pda + tsize;
-       }
-
-       /* initialize pointer table to static pda's */
        for_each_possible_cpu(cpu) {
-               if (cpu == 0) {
-                       /* leave boot cpu pda in place */
-                       new_cpu_pda[0] = cpu_pda(0);
-                       continue;
-               }
-               new_cpu_pda[cpu] = (struct x8664_pda *)pda;
-               new_cpu_pda[cpu]->in_bootmem = 1;
-               pda += size;
-       }
-
-       /* point to new pointer table */
-       _cpu_pda = new_cpu_pda;
-}
-
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_64
-
-/* correctly size the local cpu masks */
-static void __init setup_cpu_local_masks(void)
-{
-       alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-       alloc_bootmem_cpumask_var(&cpu_callin_mask);
-       alloc_bootmem_cpumask_var(&cpu_callout_mask);
-       alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
+               int node = early_cpu_to_node(cpu);
 
-#else /* CONFIG_X86_32 */
+               if (node_online(node) && NODE_DATA(node) &&
+                   last && last != NODE_DATA(node))
+                       return true;
 
-static inline void setup_cpu_local_masks(void)
-{
+               last = NODE_DATA(node);
+       }
+#endif
+       return false;
 }
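
Worked example (annotation): on a two-node box with cpu0 on node 0 and
cpu1 on node 1, the first iteration records last = NODE_DATA(0), the cpu1
iteration sees NODE_DATA(1) != last, and the function returns true.  On a
single-node box every iteration sees the same pg_data_t, so the loop falls
through and false is returned.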
 
-#endif /* CONFIG_X86_32 */
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size of allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
  */
-void __init setup_per_cpu_areas(void)
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+                                       unsigned long align)
 {
-       ssize_t size, old_size;
-       char *ptr;
-       int cpu;
-       unsigned long align = 1;
-
-       /* Setup cpu_pda map */
-       setup_cpu_pda_map();
-
-       /* Copy section for each CPU (we discard the original) */
-       old_size = PERCPU_ENOUGH_ROOM;
-       align = max_t(unsigned long, PAGE_SIZE, align);
-       size = roundup(old_size, align);
-
-       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
-               NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
-
-       pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
-
-       for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-               ptr = __alloc_bootmem(size, align,
-                                __pa(MAX_DMA_ADDRESS));
+       const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       int node = early_cpu_to_node(cpu);
+       void *ptr;
+
+       if (!node_online(node) || !NODE_DATA(node)) {
+               ptr = __alloc_bootmem_nopanic(size, align, goal);
+               pr_info("cpu %d has no node %d or node-local memory\n",
+                       cpu, node);
+               pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+                        cpu, size, __pa(ptr));
+       } else {
+               ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+                                                  size, align, goal);
+               pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+                        "%016lx\n", cpu, size, node, __pa(ptr));
+       }
+       return ptr;
 #else
-               int node = early_cpu_to_node(cpu);
-               if (!node_online(node) || !NODE_DATA(node)) {
-                       ptr = __alloc_bootmem(size, align,
-                                        __pa(MAX_DMA_ADDRESS));
-                       pr_info("cpu %d has no node %d or node-local memory\n",
-                               cpu, node);
-                       pr_debug("per cpu data for cpu%d at %016lx\n",
-                                cpu, __pa(ptr));
-               } else {
-                       ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
-                                                       __pa(MAX_DMA_ADDRESS));
-                       pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-                               cpu, node, __pa(ptr));
-               }
+       return __alloc_bootmem_nopanic(size, align, goal);
 #endif
-               per_cpu_offset(cpu) = ptr - __per_cpu_start;
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-       }
-
-       /* Setup percpu data maps */
-       setup_per_cpu_maps();
-
-       /* Setup node to cpumask map */
-       setup_node_to_cpumask_map();
-
-       /* Setup cpu initialized, callin, callout masks */
-       setup_cpu_local_masks();
 }
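
Usage sketch (illustrative, not part of the patch): both first-chunk
allocators below funnel their memory through this helper, along the lines
of:

	/* one NUMA-local page per cpu; a NULL return must be unwound
	 * with free_bootmem(), as the real callers below do */
	for_each_possible_cpu(cpu) {
		void *p = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
		if (!p)
			goto enomem;
	}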
 
-#endif
-
-#ifdef X86_64_NUMA
-
 /*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
+ * Remap allocator
+ *
+ * This allocator uses a PMD page as its unit.  A PMD page is
+ * allocated for each cpu and each is remapped into the vmalloc area
+ * using a PMD mapping.  As a PMD page is quite large, only part of
+ * it is used for the first chunk; the unused part is returned to the
+ * bootmem allocator.
  *
- * Note: node_to_cpumask() is not valid until after this is done.
+ * So, the PMD pages are mapped twice: once in the linear physical
+ * mapping and once in the vmalloc area for the first percpu chunk.
+ * The double mapping adds pressure for one more PMD TLB entry, but
+ * that is still much better than using only 4k mappings while
+ * remaining NUMA friendly.
  */
-static void __init setup_node_to_cpumask_map(void)
-{
-       unsigned int node, num = 0;
-       cpumask_t *map;
-
-       /* setup nr_node_ids if not done yet */
-       if (nr_node_ids == MAX_NUMNODES) {
-               for_each_node_mask(node, node_possible_map)
-                       num = node;
-               nr_node_ids = num + 1;
-       }
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
 
-       /* allocate the map */
-       map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+{
+       size_t off = (size_t)pageno << PAGE_SHIFT;
 
-       pr_debug("Node to cpumask map at %p for %d nodes\n",
-                map, nr_node_ids);
+       if (off >= pcpur_size)
+               return NULL;
 
-       /* node_to_cpumask() will now work */
-       node_to_cpumask_map = map;
+       return virt_to_page(pcpur_ptrs[cpu] + off);
 }
 
-void __cpuinit numa_set_node(int cpu, int node)
+static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
-       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-       if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-               cpu_pda(cpu)->nodenumber = node;
+       static struct vm_struct vm;
+       size_t ptrs_size, dyn_size;
+       unsigned int cpu;
+       ssize_t ret;
+
+       /*
+        * If large pages aren't supported, there's no benefit in doing
+        * this.  Also, on non-NUMA, embedding is better.
+        */
+       if (!cpu_has_pse || !pcpu_need_numa())
+               return -EINVAL;
+
+       /*
+        * Currently supports only a single PMD page per cpu.
+        * Supporting multiple pages won't be too difficult if it ever
+        * becomes necessary.
+        */
+       pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                              PERCPU_DYNAMIC_RESERVE);
+       if (pcpur_size > PMD_SIZE) {
+               pr_warning("PERCPU: static data is larger than large page, "
+                          "can't use large page\n");
+               return -EINVAL;
+       }
+       dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
-       if (cpu_to_node_map)
-               cpu_to_node_map[cpu] = node;
+       /* allocate pointer array and alloc large pages */
+       ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+       pcpur_ptrs = alloc_bootmem(ptrs_size);
 
-       else if (per_cpu_offset(cpu))
-               per_cpu(x86_cpu_to_node_map, cpu) = node;
+       for_each_possible_cpu(cpu) {
+               pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+               if (!pcpur_ptrs[cpu])
+                       goto enomem;
+
+               /*
+                * Only use pcpur_size bytes and give back the rest.
+                *
+                * Ingo: The up-rounded 2MB bootmem allocation is needed
+                * to make sure the partial 2MB page is still fully RAM;
+                * it's not well-specified to have a PAT-incompatible
+                * area (unmapped RAM, device memory, etc.) in that hole.
+                */
+               free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+                            PMD_SIZE - pcpur_size);
+
+               memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+       }
 
-       else
-               pr_debug("Setting node for non-present cpu %d\n", cpu);
-}
+       /* allocate address and map */
+       vm.flags = VM_ALLOC;
+       vm.size = num_possible_cpus() * PMD_SIZE;
+       vm_area_register_early(&vm, PMD_SIZE);
 
-void __cpuinit numa_clear_node(int cpu)
-{
-       numa_set_node(cpu, NUMA_NO_NODE);
-}
+       for_each_possible_cpu(cpu) {
+               pmd_t *pmd;
 
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+               pmd = populate_extra_pmd((unsigned long)vm.addr
+                                        + cpu * PMD_SIZE);
+               set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+                                    PAGE_KERNEL_LARGE));
+       }
 
-void __cpuinit numa_add_cpu(int cpu)
-{
-       cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+       /* we're ready, commit */
+       pr_info("PERCPU: Remapped at %p with large pages, static data "
+               "%zu bytes\n", vm.addr, static_size);
+
+       ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+                                    PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
+                                    PMD_SIZE, vm.addr, NULL);
+       goto out_free_ar;
+
+enomem:
+       for_each_possible_cpu(cpu)
+               if (pcpur_ptrs[cpu])
+                       free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
+       ret = -ENOMEM;
+out_free_ar:
+       free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+       return ret;
 }
-
-void __cpuinit numa_remove_cpu(int cpu)
+#else
+static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
-       cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+       return -EINVAL;
 }
-
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+#endif
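
Sizing example (annotation; the reserve values are assumptions for
illustration only): with 2MB PMD pages, PERCPU_MODULE_RESERVE = 8k and
PERCPU_DYNAMIC_RESERVE = 20k, a 44k static area gives

	pcpur_size = PFN_ALIGN(44k + 8k + 20k) = 72k
	dyn_size   = 72k - 44k - 8k            = 20k

so each cpu uses 72k of its 2MB page and gives the remaining 1976k back to
the bootmem allocator.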
 
 /*
- * --------- debug versions of the numa functions ---------
+ * Embedding allocator
+ *
+ * The first chunk is sized to just contain the static area plus the
+ * module and dynamic reserves, and is embedded in the linear
+ * physical mapping so that it can use PMD mappings without
+ * additional TLB pressure.
  */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
-       int node = cpu_to_node(cpu);
-       cpumask_t *mask;
-       char buf[64];
-
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_ERR "node_to_cpumask_map NULL\n");
-               dump_stack();
-               return;
-       }
-
-       mask = &node_to_cpumask_map[node];
-       if (enable)
-               cpu_set(cpu, *mask);
-       else
-               cpu_clear(cpu, *mask);
-
-       cpulist_scnprintf(buf, sizeof(buf), mask);
-       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+       size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
+
+       /*
+        * If large pages aren't supported, there's no benefit in doing
+        * this.  Also, embedding allocation doesn't play well with
+        * NUMA.
+        */
+       if (!cpu_has_pse || pcpu_need_numa())
+               return -EINVAL;
+
+       return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+                                     reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
 }
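
Note (annotation): PERCPU_FIRST_CHUNK_RESERVE is PERCPU_MODULE_RESERVE on
x86_64 and 0 on x86_32, so the dyn_size passed here (reserve -
PERCPU_FIRST_CHUNK_RESERVE) works out to PERCPU_DYNAMIC_RESERVE on 64-bit,
with the module reserve accounted as the reserved part of the first chunk,
and to the sum of both reserves on 32-bit.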
 
-void __cpuinit numa_add_cpu(int cpu)
-{
-       numa_set_cpumask(cpu, 1);
-}
+/*
+ * 4k page allocator
+ *
+ * This is the basic fallback allocator.  The static percpu area is
+ * allocated page-by-page and most of the initialization is done by
+ * the generic setup function.
+ */
+static struct page **pcpu4k_pages __initdata;
+static int pcpu4k_nr_static_pages __initdata;
 
-void __cpuinit numa_remove_cpu(int cpu)
+static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
 {
-       numa_set_cpumask(cpu, 0);
+       if (pageno < pcpu4k_nr_static_pages)
+               return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
+       return NULL;
 }
 
-int cpu_to_node(int cpu)
+static void __init pcpu4k_populate_pte(unsigned long addr)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-               printk(KERN_WARNING
-                       "cpu_to_node(%d): usage too early!\n", cpu);
-               dump_stack();
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
+       populate_extra_pte(addr);
 }
-EXPORT_SYMBOL(cpu_to_node);
 
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
+static ssize_t __init setup_pcpu_4k(size_t static_size)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       if (!per_cpu_offset(cpu)) {
-               printk(KERN_WARNING
-                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-               dump_stack();
-               return NUMA_NO_NODE;
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
+       size_t pages_size;
+       unsigned int cpu;
+       int i, j;
+       ssize_t ret;
+
+       pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+       /* unaligned allocations can't be freed, round up to page size */
+       pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+                              * sizeof(pcpu4k_pages[0]));
+       pcpu4k_pages = alloc_bootmem(pages_size);
+
+       /* allocate and copy */
+       j = 0;
+       for_each_possible_cpu(cpu)
+               for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+                       void *ptr;
+
+                       ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+                       if (!ptr)
+                               goto enomem;
+
+                       memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+                       pcpu4k_pages[j++] = virt_to_page(ptr);
+               }
 
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
+       /* we're ready, commit */
+       pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+               pcpu4k_nr_static_pages, static_size);
+
+       ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
+                                    PERCPU_FIRST_CHUNK_RESERVE, -1,
+                                    -1, NULL, pcpu4k_populate_pte);
+       goto out_free_ar;
+
+enomem:
+       while (--j >= 0)
+               free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+       ret = -ENOMEM;
+out_free_ar:
+       free_bootmem(__pa(pcpu4k_pages), pages_size);
+       return ret;
+}
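
Indexing note (annotation): pcpu4k_pages is a flat, cpu-major array, so
the page backing (cpu, pageno) sits at index
cpu * pcpu4k_nr_static_pages + pageno, which is exactly what
pcpu4k_get_page() computes; page numbers past the static area return NULL,
leaving anything beyond it to the generic percpu code.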
 
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
+static inline void setup_percpu_segment(int cpu)
 {
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_WARNING
-                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
-                       node);
-               dump_stack();
-               return (const cpumask_t *)&cpu_online_map;
-       }
-       if (node >= nr_node_ids) {
-               printk(KERN_WARNING
-                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-                       node, nr_node_ids);
-               dump_stack();
-               return &cpu_mask_none;
-       }
-       return &node_to_cpumask_map[node];
+#ifdef CONFIG_X86_32
+       struct desc_struct gdt;
+
+       pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+                       0x2 | DESCTYPE_S, 0x8);
+       gdt.s = 1;
+       write_gdt_entry(get_cpu_gdt_table(cpu),
+                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+#endif
 }
-EXPORT_SYMBOL(cpumask_of_node);
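
Descriptor note (annotation): the entry packed above is a 4GB read/write
data segment (limit 0xFFFFF with 4k granularity, type 0x2, S bit set)
whose base is per_cpu_offset(cpu); once a cpu loads it into %fs (done
elsewhere, e.g. via switch_to_new_gdt() with the __KERNEL_PERCPU
selector), %fs-relative percpu accesses resolve into that cpu's area.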
 
 /*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning.
  */
-cpumask_t node_to_cpumask(int node)
+void __init setup_per_cpu_areas(void)
 {
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_WARNING
-                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-               dump_stack();
-               return cpu_online_map;
-       }
-       if (node >= nr_node_ids) {
-               printk(KERN_WARNING
-                       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-                       node, nr_node_ids);
-               dump_stack();
-               return cpu_mask_none;
-       }
-       return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
+       size_t static_size = __per_cpu_end - __per_cpu_start;
+       unsigned int cpu;
+       unsigned long delta;
+       size_t pcpu_unit_size;
+       ssize_t ret;
 
-/*
- * --------- end of debug versions of the numa functions ---------
- */
+       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+               NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+       /*
+        * Allocate percpu area.  If PSE is supported, try to make use
+        * of large page mappings.  Please read the comments on top
+        * of each allocator for details.
+        */
+       ret = setup_pcpu_remap(static_size);
+       if (ret < 0)
+               ret = setup_pcpu_embed(static_size);
+       if (ret < 0)
+               ret = setup_pcpu_4k(static_size);
+       if (ret < 0)
+               panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+                     static_size, ret);
+
+       pcpu_unit_size = ret;
+
+       /* alrighty, percpu areas up and running */
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
+               per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+               setup_percpu_segment(cpu);
+               setup_stack_canary_segment(cpu);
+               /*
+                * Copy data used in early init routines from the
+                * initial arrays to the per cpu data areas.  These
+                * arrays then become expendable and the *_early_ptr's
+                * are zeroed indicating that the static arrays are
+                * gone.
+                */
+#ifdef CONFIG_X86_LOCAL_APIC
+               per_cpu(x86_cpu_to_apicid, cpu) =
+                       early_per_cpu_map(x86_cpu_to_apicid, cpu);
+               per_cpu(x86_bios_cpu_apicid, cpu) =
+                       early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#endif
+#ifdef CONFIG_X86_64
+               per_cpu(irq_stack_ptr, cpu) =
+                       per_cpu(irq_stack_union.irq_stack, cpu) +
+                       IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+               per_cpu(x86_cpu_to_node_map, cpu) =
+                       early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#endif
+               /*
+                * Up to this point, the boot CPU has been using the
+                * .data.init area.  Reload any changed state for the
+                * boot CPU.
+                */
+               if (cpu == boot_cpu_id)
+                       switch_to_new_gdt(cpu);
+       }
 
-#endif /* X86_64_NUMA */
+       /* indicate the early static arrays will soon be gone */
+#ifdef CONFIG_X86_LOCAL_APIC
+       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
+       /* Setup node to cpumask map */
+       setup_node_to_cpumask_map();
+
+       /* Setup cpu initialized, callin, callout masks */
+       setup_cpu_local_masks();
+}
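
Offset fixup sketch (illustrative, not part of the patch): after the loop
above, a percpu access reduces to

	/* &per_cpu_var(x) is a link-time address inside the
	 * [__per_cpu_start, __per_cpu_end) image; adding the cpu's
	 * offset relocates it into that cpu's unit of the first chunk */
	void *p = (char *)&per_cpu_var(x) + per_cpu_offset(cpu);

where per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size puts cpu 0's unit
at pcpu_base_addr and each further cpu one unit beyond it.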