Merge branch 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 18 Dec 2009 01:00:20 +0000 (17:00 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 18 Dec 2009 01:00:20 +0000 (17:00 -0800)
* 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  cpumask: rename tsk_cpumask to tsk_cpus_allowed
  cpumask: don't recommend set_cpus_allowed hack in Documentation/cpu-hotplug.txt
  cpumask: avoid dereferencing struct cpumask
  cpumask: convert drivers/idle/i7300_idle.c to cpumask_var_t
  cpumask: use modern cpumask style in drivers/scsi/fcoe/fcoe.c
  cpumask: avoid deprecated function in mm/slab.c
  cpumask: use cpu_online in kernel/perf_event.c

Documentation/cpu-hotplug.txt
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
drivers/idle/i7300_idle.c
drivers/scsi/fcoe/fcoe.c
include/linux/sched.h
kernel/perf_event.c
kernel/time/timer_list.c
mm/slab.c
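
The common thread in these patches is the move away from on-stack or static
cpumask_t and the old cpu_set()/cpus_weight() helpers toward dynamically
allocated cpumask_var_t and the cpumask_*() accessors, visible in the
i7300_idle and slab hunks below.  A minimal sketch of that style, using
hypothetical names (my_mask, my_init, my_exit) that appear in none of the
hunks:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>		/* GFP_KERNEL */
	#include <linux/module.h>

	static cpumask_var_t my_mask;	/* allocated at init instead of a full cpumask_t in .bss */

	static int __init my_init(void)
	{
		/* zalloc_cpumask_var() hands back a zeroed mask and can fail */
		if (!zalloc_cpumask_var(&my_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(my_mask, cpu_online_mask);
		pr_info("tracking %d cpu(s)\n", cpumask_weight(my_mask));
		return 0;
	}

	static void __exit my_exit(void)
	{
		free_cpumask_var(my_mask);	/* pairs with the allocation above */
	}

	module_init(my_init);
	module_exit(my_exit);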

index 4d4a644b505eeb42cb5acae0b69f162c3f053a4d..a99d7031cdf978f321dce744a3a1f22d0bf7206e 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -315,41 +315,26 @@ A: The following are what is required for CPU hotplug infrastructure to work
 
 Q: I need to ensure that a particular cpu is not removed when there is some
    work specific to this cpu is in progress.
-A: First switch the current thread context to preferred cpu
+A: There are two ways.  If your code can be run in interrupt context, use
+   smp_call_function_single(), otherwise use work_on_cpu().  Note that
+   work_on_cpu() is slow, and can fail due to out of memory:
 
        int my_func_on_cpu(int cpu)
        {
-               cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
-               int curr_cpu, err = 0;
-
-               saved_mask = current->cpus_allowed;
-               cpu_set(cpu, new_mask);
-               err = set_cpus_allowed(current, new_mask);
-
-               if (err)
-                       return err;
-
-               /*
-                * If we got scheduled out just after the return from
-                * set_cpus_allowed() before running the work, this ensures
-                * we stay locked.
-                */
-               curr_cpu = get_cpu();
-
-               if (curr_cpu != cpu) {
-                       err = -EAGAIN;
-                       goto ret;
-               } else {
-                       /*
-                        * Do work : But cant sleep, since get_cpu() disables preempt
-                        */
-               }
-               ret:
-                       put_cpu();
-                       set_cpus_allowed(current, saved_mask);
-                       return err;
-               }
-
+               int err;
+               get_online_cpus();
+               if (!cpu_online(cpu))
+                       err = -EINVAL;
+               else
+#if NEEDS_BLOCKING
+                       err = work_on_cpu(cpu, __my_func_on_cpu, NULL);
+#else
+                       smp_call_function_single(cpu, __my_func_on_cpu, &err,
+                                                true);
+#endif
+               put_online_cpus();
+               return err;
+       }
 
 Q: How do we determine how many CPUs are available for hotplug.
 A: There is no clear spec defined way from ACPI that can give us that
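
One note on the example added in the hunk above: the two paths expect
different callback prototypes.  smp_call_function_single() takes a
void-returning function that runs with interrupts disabled, while
work_on_cpu() takes a long-returning function that runs in process context
and may sleep.  The documentation names __my_func_on_cpu but never defines
it; the sketch below is purely illustrative (the _ipi variant in particular
is an invented name):

	#include <linux/smp.h>
	#include <linux/workqueue.h>

	/* work_on_cpu() form: process context, may block, the result is
	 * the long return value */
	static long __my_func_on_cpu(void *arg)
	{
		/* blocking work bound to the chosen cpu goes here */
		return 0;
	}

	/* smp_call_function_single() form: runs in IPI context, must not
	 * sleep; a result has to travel back through the info pointer */
	static void __my_func_on_cpu_ipi(void *info)
	{
		int *err = info;

		*err = 0;	/* non-blocking work goes here */
	}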
index a9df9441a9a240cd138b6492896cc0788a04cf1b..f125e5c551c0db827d33110dfd0d25688053e3c7 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1136,7 +1136,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
        if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
                return -ENOMEM;
 
-       cpumask_copy(oldmask, tsk_cpumask(current));
+       cpumask_copy(oldmask, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
index 1f20a042a4f508197ff45ebec146a9ff2daeeccd..dd253002cd50c973bd8e1ffcc5041415cf0abe1b 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
 static u8 i7300_idle_thrtlow_saved;
 static u32 i7300_idle_mc_saved;
 
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
 static ktime_t start_ktime;
 static unsigned long avg_idle_us;
 
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
        spin_lock_irqsave(&i7300_idle_lock, flags);
        if (val == IDLE_START) {
 
-               cpu_set(smp_processor_id(), idle_cpumask);
+               cpumask_set_cpu(smp_processor_id(), idle_cpumask);
 
-               if (cpus_weight(idle_cpumask) != num_online_cpus())
+               if (cpumask_weight(idle_cpumask) != num_online_cpus())
                        goto end;
 
                now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
                i7300_idle_ioat_start();
 
        } else if (val == IDLE_END) {
-               cpu_clear(smp_processor_id(), idle_cpumask);
-               if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+               cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+               if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
                        /* First CPU coming out of idle */
                        u64 idle_duration_us;
 
@@ -553,7 +553,6 @@ struct debugfs_file_info {
 static int __init i7300_idle_init(void)
 {
        spin_lock_init(&i7300_idle_lock);
-       cpus_clear(idle_cpumask);
        total_us = 0;
 
        if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
        if (i7300_idle_ioat_init())
                return -ENODEV;
 
+       if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+               return -ENOMEM;
+
        debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
        if (debugfs_dir) {
                int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
 static void __exit i7300_idle_exit(void)
 {
        idle_notifier_unregister(&i7300_idle_nb);
+       free_cpumask_var(idle_cpumask);
 
        if (debugfs_dir) {
                int i = 0;
index e3896fcb06e3c9f3bb4f94e468c4f80dbbbed17e..10be9f36a4cc01b3ce9ce14a0d78f88dd1a6a30a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1260,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
                                "CPU.\n");
 
                spin_unlock_bh(&fps->fcoe_rx_list.lock);
-               cpu = first_cpu(cpu_online_map);
+               cpu = cpumask_first(cpu_online_mask);
                fps = &per_cpu(fcoe_percpu, cpu);
                spin_lock_bh(&fps->fcoe_rx_list.lock);
                if (!fps->thread) {
index 211ed32befbd8cfec7065a236924987210877d42..e89857812be63c3b9ef5167cc4238291a337459a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1553,7 +1553,7 @@ struct task_struct {
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
index 8ab86988bd249e2368068d841520bb0e47f9ca29..97d1a3dd7a597a763eaa18509d58dd2ccabeedba 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1614,7 +1614,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
                 * offline CPU and activate it when the CPU comes up, but
                 * that's for later.
                 */
-               if (!cpu_isset(cpu, cpu_online_map))
+               if (!cpu_online(cpu))
                        return ERR_PTR(-ENODEV);
 
                cpuctx = &per_cpu(perf_cpu_context, cpu);
index 28265636b6c279311498d422401d29addbc1609b..bdfb8dd1050cfb64f4cca35f22b749b06a294dd1 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -237,10 +237,10 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        print_tickdevice(m, tick_get_broadcast_device(), -1);
        SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
-                  tick_get_broadcast_mask()->bits[0]);
+                  cpumask_bits(tick_get_broadcast_mask())[0]);
 #ifdef CONFIG_TICK_ONESHOT
        SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
-                  tick_get_broadcast_oneshot_mask()->bits[0]);
+                  cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
 #endif
        SEQ_printf(m, "\n");
 #endif
index e17cc2c337b8b6d2794a1a92e0117a8dec16bf0a..7d41f15b48d37ad45d9184441b5daf3640afd6bb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1132,7 +1132,7 @@ static void __cpuinit cpuup_canceled(long cpu)
                if (nc)
                        free_block(cachep, nc->entry, nc->avail, node);
 
-               if (!cpus_empty(*mask)) {
+               if (!cpumask_empty(mask)) {
                        spin_unlock_irq(&l3->list_lock);
                        goto free_array_cache;
                }