Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 14 Dec 2009 20:36:46 +0000 (12:36 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 14 Dec 2009 20:36:46 +0000 (12:36 -0800)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mce: Clean up thermal init by introducing intel_thermal_supported()
  x86, mce: Thermal monitoring depends on APIC being enabled
  x86: Gart: fix breakage due to IOMMU initialization cleanup
  x86: Move swiotlb initialization before dma32_free_bootmem
  x86: Fix build warning in arch/x86/mm/mmio-mod.c
  x86: Remove usedac in feature-removal-schedule.txt
  x86: Fix duplicated UV BAU interrupt vector
  nvram: Fix write beyond end condition; prove to gcc copy is safe
  mm: Adjust do_pages_stat() so gcc can see copy_from_user() is safe
  x86: Limit the number of processor bootup messages
  x86: Remove enabling x2apic message for every CPU
  doc: Add documentation for bootloader_{type,version}
  x86, msr: Add support for non-contiguous cpumasks
  x86: Use find_e820() instead of hard coded trampoline address
  x86, AMD: Fix stale cpuid4_info shared_map data in shared_cpu_map cpumasks

Trivial percpu-naming-introduced conflicts in arch/x86/kernel/cpu/intel_cacheinfo.c

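The renames visible below (the local orig_ist becoming oist, and the per-CPU variables gaining an ici_ prefix) are what resolves those conflicts: after the per_cpu__ symbol-prefix removal, a per-CPU variable occupies the ordinary C identifier namespace, so a local variable with the same name shadows it inside the per_cpu() expansion. A minimal sketch of the collision, using a hypothetical function name and simplified declarations:

    /* DEFINE_PER_CPU(type, name) now emits a real symbol called "name",
     * and per_cpu(name, cpu) expands to an expression that uses that
     * identifier directly. */
    DEFINE_PER_CPU(struct orig_ist, orig_ist);

    void example_cpu_init(int cpu)
    {
            struct orig_ist *orig_ist;          /* local shadows the per-CPU symbol */

            orig_ist = &per_cpu(orig_ist, cpu); /* "orig_ist" here resolves to the
                                                   local, not the per-CPU variable */
    }

Renaming either side, the local to oist or the per-CPU variable to an ici_-prefixed name, keeps the two uses apart, which is exactly what the two files below do.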
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_cacheinfo.c

index 20399b7b0c3f1a4e3a3c9d5e66a92d952d93f028,0ee9a3254eecad2cfbc0449cf49f85c36a5e67d1..4868e4a951eeec310c10d06428d60c49e2fe79b5
@@@ -427,6 -427,7 +427,7 @@@ void __cpuinit detect_ht(struct cpuinfo
  #ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
+       static bool printed;
  
        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        smp_num_siblings = (ebx & 0xff0000) >> 16;
  
        if (smp_num_siblings == 1) {
-               printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
+               printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
                goto out;
        }
  
                                       ((1 << core_bits) - 1);
  
  out:
-       if ((c->x86_max_cores * smp_num_siblings) > 1) {
+       if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
+               printed = 1;
        }
  #endif
  }
@@@ -1093,7 -1095,7 +1095,7 @@@ static void clear_all_debug_regs(void
  
  void __cpuinit cpu_init(void)
  {
 -      struct orig_ist *orig_ist;
 +      struct orig_ist *oist;
        struct task_struct *me;
        struct tss_struct *t;
        unsigned long v;
  
        cpu = stack_smp_processor_id();
        t = &per_cpu(init_tss, cpu);
 -      orig_ist = &per_cpu(orig_ist, cpu);
 +      oist = &per_cpu(orig_ist, cpu);
  
  #ifdef CONFIG_NUMA
        if (cpu != 0 && percpu_read(node_number) == 0 &&
        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                panic("CPU#%d already initialized!\n", cpu);
  
-       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+       pr_debug("Initializing CPU#%d\n", cpu);
  
        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
  
        /*
         * set up and load the per-CPU TSS
         */
 -      if (!orig_ist->ist[0]) {
 +      if (!oist->ist[0]) {
                char *estacks = per_cpu(exception_stacks, cpu);
  
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
 -                      orig_ist->ist[v] = t->x86_tss.ist[v] =
 +                      oist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }
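In the detect_ht() hunk above, the per-CPU "Hyper-Threading is disabled" message is switched to printk_once() so it fires only for the first CPU, and the processor/core ID lines are gated by a function-local static flag in the same spirit. The once-only pattern boils down to a static flag per expansion; a simplified sketch, close to how the kernel defines the macro at this point:

    #define printk_once(fmt, ...)                   \
    ({                                              \
            static bool __print_once;               \
                                                    \
            if (!__print_once) {                    \
                    __print_once = true;            \
                    printk(fmt, ##__VA_ARGS__);     \
            }                                       \
    })

Because the flag is static, every CPU after the first one to reach the call site skips the printk(), which is how the series limits bootup messages without losing them entirely.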
index 0c06bca2a1dcc1dc68003aa0bdde3935b254967d,63ada177b40c459376749315c3e28419d8c2df5a..fc6c8ef92dcc5f0bd9c846b2597f0463e50209af
@@@ -499,26 -499,27 +499,27 @@@ unsigned int __cpuinit init_intel_cache
  #ifdef CONFIG_SYSFS
  
  /* pointer to _cpuid4_info array (for each cache leaf) */
 -static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 -#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
 +static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 +#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
  
  #ifdef CONFIG_SMP
  static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
  {
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
-       int index_msb, i;
+       int index_msb, i, sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
  
        if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
-               struct cpuinfo_x86 *d;
-               for_each_online_cpu(i) {
+               for_each_cpu(i, c->llc_shared_map) {
 -                      if (!per_cpu(cpuid4_info, i))
 +                      if (!per_cpu(ici_cpuid4_info, i))
                                continue;
-                       d = &cpu_data(i);
                        this_leaf = CPUID4_INFO_IDX(i, index);
-                       cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
-                                    d->llc_shared_map);
+                       for_each_cpu(sibling, c->llc_shared_map) {
+                               if (!cpu_online(sibling))
+                                       continue;
+                               set_bit(sibling, this_leaf->shared_cpu_map);
+                       }
                }
                return;
        }
                            c->apicid >> index_msb) {
                                cpumask_set_cpu(i,
                                        to_cpumask(this_leaf->shared_cpu_map));
 -                              if (i != cpu && per_cpu(cpuid4_info, i))  {
 +                              if (i != cpu && per_cpu(ici_cpuid4_info, i))  {
                                        sibling_leaf =
                                                CPUID4_INFO_IDX(i, index);
                                        cpumask_set_cpu(cpu, to_cpumask(
@@@ -574,8 -575,8 +575,8 @@@ static void __cpuinit free_cache_attrib
        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);
  
 -      kfree(per_cpu(cpuid4_info, cpu));
 -      per_cpu(cpuid4_info, cpu) = NULL;
 +      kfree(per_cpu(ici_cpuid4_info, cpu));
 +      per_cpu(ici_cpuid4_info, cpu) = NULL;
  }
  
  static int
@@@ -614,15 -615,15 +615,15 @@@ static int __cpuinit detect_cache_attri
        if (num_cache_leaves == 0)
                return -ENOENT;
  
 -      per_cpu(cpuid4_info, cpu) = kzalloc(
 +      per_cpu(ici_cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
 -      if (per_cpu(cpuid4_info, cpu) == NULL)
 +      if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return -ENOMEM;
  
        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
        if (retval) {
 -              kfree(per_cpu(cpuid4_info, cpu));
 -              per_cpu(cpuid4_info, cpu) = NULL;
 +              kfree(per_cpu(ici_cpuid4_info, cpu));
 +              per_cpu(ici_cpuid4_info, cpu) = NULL;
        }
  
        return retval;
  extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
  
  /* pointer to kobject for cpuX/cache */
 -static DEFINE_PER_CPU(struct kobject *, cache_kobject);
 +static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
  
  struct _index_kobject {
        struct kobject kobj;
  };
  
  /* pointer to array of kobjects for cpuX/cache/indexY */
 -static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 -#define INDEX_KOBJECT_PTR(x, y)               (&((per_cpu(index_kobject, x))[y]))
 +static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
 +#define INDEX_KOBJECT_PTR(x, y)               (&((per_cpu(ici_index_kobject, x))[y]))
  
  #define show_one_plus(file_name, object, val)                         \
  static ssize_t show_##file_name                                               \
@@@ -863,10 -864,10 +864,10 @@@ static struct kobj_type ktype_percpu_en
  
  static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
  {
 -      kfree(per_cpu(cache_kobject, cpu));
 -      kfree(per_cpu(index_kobject, cpu));
 -      per_cpu(cache_kobject, cpu) = NULL;
 -      per_cpu(index_kobject, cpu) = NULL;
 +      kfree(per_cpu(ici_cache_kobject, cpu));
 +      kfree(per_cpu(ici_index_kobject, cpu));
 +      per_cpu(ici_cache_kobject, cpu) = NULL;
 +      per_cpu(ici_index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
  }
  
@@@ -882,14 -883,14 +883,14 @@@ static int __cpuinit cpuid4_cache_sysfs
                return err;
  
        /* Allocate all required memory */
 -      per_cpu(cache_kobject, cpu) =
 +      per_cpu(ici_cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
 -      if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
 +      if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                goto err_out;
  
 -      per_cpu(index_kobject, cpu) = kzalloc(
 +      per_cpu(ici_index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
 -      if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 +      if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                goto err_out;
  
        return 0;
@@@ -913,7 -914,7 +914,7 @@@ static int __cpuinit cache_add_dev(stru
        if (unlikely(retval < 0))
                return retval;
  
 -      retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
 +      retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &sys_dev->kobj, "%s", "cache");
        if (retval < 0) {
                this_object->index = i;
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
 -                                            per_cpu(cache_kobject, cpu),
 +                                            per_cpu(ici_cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++)
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
 -                      kobject_put(per_cpu(cache_kobject, cpu));
 +                      kobject_put(per_cpu(ici_cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
        }
        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
  
 -      kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 +      kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
        return 0;
  }
  
@@@ -949,7 -950,7 +950,7 @@@ static void __cpuinit cache_remove_dev(
        unsigned int cpu = sys_dev->id;
        unsigned long i;
  
 -      if (per_cpu(cpuid4_info, cpu) == NULL)
 +      if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return;
        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
  
        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
 -      kobject_put(per_cpu(cache_kobject, cpu));
 +      kobject_put(per_cpu(ici_cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
  }