Merge branch 'rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author: Linus Torvalds <torvalds@linux-foundation.org>
Sat, 4 Apr 2009 00:34:12 +0000 (17:34 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Sat, 4 Apr 2009 00:34:12 +0000 (17:34 -0700)
* 'rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: rcu_barrier VS cpu_hotplug: Ensure callbacks in dead cpu are migrated to online cpu

kernel/rcupdate.c

index cae8a059cf47f4fab142b001c5f4d25ec8e95006..2c7b8457d0d234203517f7aa6a1ddc6b724ef447 100644 (file)
@@ -122,6 +122,8 @@ static void rcu_barrier_func(void *type)
        }
 }
 
+static inline void wait_migrated_callbacks(void);
+
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
@@ -147,6 +149,7 @@ static void _rcu_barrier(enum rcu_barrier type)
                complete(&rcu_barrier_completion);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
+       wait_migrated_callbacks();
 }
 
 /**
@@ -176,9 +179,50 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
+static void rcu_migrate_callback(struct rcu_head *notused)
+{
+       if (atomic_dec_and_test(&rcu_migrate_type_count))
+               wake_up(&rcu_migrate_wq);
+}
+
+static inline void wait_migrated_callbacks(void)
+{
+       wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
+
+static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
+               unsigned long action, void *hcpu)
+{
+       if (action == CPU_DYING) {
+               /*
+                * preempt_disable() in on_each_cpu() prevents stop_machine(),
+                * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+                * returns, all online cpus have queued rcu_barrier_func(),
+                * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
+                *
+                * These callbacks ensure _rcu_barrier() waits for all
+                * RCU callbacks of the specified type to complete.
+                */
+               atomic_set(&rcu_migrate_type_count, 3);
+               call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
+               call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
+               call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
+       } else if (action == CPU_POST_DEAD) {
+               /* rcu_migrate_head is protected by cpu_add_remove_lock */
+               wait_migrated_callbacks();
+       }
+
+       return NOTIFY_OK;
+}
+
 void __init rcu_init(void)
 {
        __rcu_init();
+       hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
 }
 
 void rcu_scheduler_starting(void)