Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/sched.c b/kernel/sched.c
index de0da71daf77d3c1107776501b4f766f7923094b..1d93cd0ae4d36a8c3cc4af5ff8e195dd1d9fcfda 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,9 +55,9 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
-#include <linux/kthread.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/stop_machine.h>
 #include <linux/sysctl.h>
 #include <linux/syscalls.h>
 #include <linux/times.h>
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+       /*
+        * Strictly speaking this rcu_read_lock() is not needed since the
+        * task_group is tied to the cgroup, which in turn can never go away
+        * as long as there are tasks attached to it.
+        *
+        * However, since task_group() uses task_subsys_state(), which is an
+        * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+        */
+       rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
        p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+       rcu_read_unlock();
 }
 
 #else
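
The rcu_read_lock()/rcu_read_unlock() pair added to set_task_rq() above exists
purely to satisfy CONFIG_PROVE_RCU, which complains whenever an
rcu_dereference() (here buried inside task_subsys_state()) runs outside a
read-side critical section. A minimal sketch of the pattern the checker
enforces; struct foo, gp and read_x() are illustrative names, not from this
patch:

	struct foo {
		int x;
	};
	static struct foo *gp;		/* published via rcu_assign_pointer() */

	static int read_x(void)
	{
		int x;

		rcu_read_lock();
		x = rcu_dereference(gp)->x;	/* PROVE_RCU-checked access */
		rcu_read_unlock();
		return x;
	}
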
@@ -539,15 +549,13 @@ struct rq {
        int post_schedule;
        int active_balance;
        int push_cpu;
+       struct cpu_stop_work active_balance_work;
        /* cpu of this runqueue: */
        int cpu;
        int online;
 
        unsigned long avg_load_per_task;
 
-       struct task_struct *migration_thread;
-       struct list_head migration_queue;
-
        u64 rt_avg;
        u64 age_stamp;
        u64 idle_stamp;
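
The new active_balance_work member is a struct cpu_stop_work, the per-cpu work
item of the cpu_stop machinery this merge switches the scheduler over to. For
reference, the interface as introduced in include/linux/stop_machine.h around
this time (quoted from memory; verify against the tree):

	typedef int (*cpu_stop_fn_t)(void *arg);

	struct cpu_stop_work {
		struct list_head	list;	/* cpu_stopper->works */
		cpu_stop_fn_t		fn;
		void			*arg;
		struct cpu_stop_done	*done;
	};

	int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
	void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_work *work_buf);
	int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
	int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);

stop_one_cpu() queues fn on the target CPU's stopper (a highest-priority task)
and waits for it to finish; the _nowait variant takes a caller-provided
cpu_stop_work such as active_balance_work, so it can be fired from atomic
context.
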
@@ -1815,7 +1823,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 }
 #endif
 
-static void calc_load_account_active(struct rq *this_rq);
+static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
 
@@ -2037,21 +2045,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        __set_task_cpu(p, new_cpu);
 }
 
-struct migration_req {
-       struct list_head list;
-
+struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
-
-       struct completion done;
 };
 
+static int migration_cpu_stop(void *data);
+
 /*
  * The task's runqueue lock must be held.
 * Returns true if you have to wait for the migration to complete.
  */
-static int
-migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
+static bool migrate_task(struct task_struct *p, int dest_cpu)
 {
        struct rq *rq = task_rq(p);
 
@@ -2059,58 +2064,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
         * If the task is not on a runqueue (and not running), then
         * the next wake-up will properly place the task.
         */
-       if (!p->se.on_rq && !task_running(rq, p))
-               return 0;
-
-       init_completion(&req->done);
-       req->task = p;
-       req->dest_cpu = dest_cpu;
-       list_add(&req->list, &rq->migration_queue);
-
-       return 1;
-}
-
-/*
- * wait_task_context_switch -  wait for a thread to complete at least one
- *                             context switch.
- *
- * @p must not be current.
- */
-void wait_task_context_switch(struct task_struct *p)
-{
-       unsigned long nvcsw, nivcsw, flags;
-       int running;
-       struct rq *rq;
-
-       nvcsw   = p->nvcsw;
-       nivcsw  = p->nivcsw;
-       for (;;) {
-               /*
-                * The runqueue is assigned before the actual context
-                * switch. We need to take the runqueue lock.
-                *
-                * We could check initially without the lock but it is
-                * very likely that we need to take the lock in every
-                * iteration.
-                */
-               rq = task_rq_lock(p, &flags);
-               running = task_running(rq, p);
-               task_rq_unlock(rq, &flags);
-
-               if (likely(!running))
-                       break;
-               /*
-                * The switch count is incremented before the actual
-                * context switch. We thus wait for two switches to be
-                * sure at least one completed.
-                */
-               if ((p->nvcsw - nvcsw) > 1)
-                       break;
-               if ((p->nivcsw - nivcsw) > 1)
-                       break;
-
-               cpu_relax();
-       }
+       return p->se.on_rq || task_running(rq, p);
 }
 
 /*
@@ -2168,7 +2122,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
-               trace_sched_wait_task(rq, p);
+               trace_sched_wait_task(p);
                running = task_running(rq, p);
                on_rq = p->se.on_rq;
                ncsw = 0;
@@ -2439,7 +2393,7 @@ out_activate:
        success = 1;
 
 out_running:
-       trace_sched_wakeup(rq, p, success);
+       trace_sched_wakeup(p, success);
        check_preempt_curr(rq, p, wake_flags);
 
        p->state = TASK_RUNNING;
@@ -2613,7 +2567,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
        rq = task_rq_lock(p, &flags);
        activate_task(rq, p, 0);
-       trace_sched_wakeup_new(rq, p, 1);
+       trace_sched_wakeup_new(p, 1);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
@@ -2833,7 +2787,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
-       trace_sched_switch(rq, prev, next);
+       trace_sched_switch(prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@ -2950,6 +2904,61 @@ static unsigned long calc_load_update;
 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
 
+static long calc_load_fold_active(struct rq *this_rq)
+{
+       long nr_active, delta = 0;
+
+       nr_active = this_rq->nr_running;
+       nr_active += (long) this_rq->nr_uninterruptible;
+
+       if (nr_active != this_rq->calc_load_active) {
+               delta = nr_active - this_rq->calc_load_active;
+               this_rq->calc_load_active = nr_active;
+       }
+
+       return delta;
+}
+
+#ifdef CONFIG_NO_HZ
+/*
+ * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+static atomic_long_t calc_load_tasks_idle;
+
+static void calc_load_account_idle(struct rq *this_rq)
+{
+       long delta;
+
+       delta = calc_load_fold_active(this_rq);
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks_idle);
+}
+
+static long calc_load_fold_idle(void)
+{
+       long delta = 0;
+
+       /*
+        * It's got a race; we don't care...
+        */
+       if (atomic_long_read(&calc_load_tasks_idle))
+               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+
+       return delta;
+}
+#else
+static void calc_load_account_idle(struct rq *this_rq)
+{
+}
+
+static inline long calc_load_fold_idle(void)
+{
+       return 0;
+}
+#endif
+
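
A toy user-space model of the fold arithmetic above: each runqueue remembers
the last nr_active value it contributed (calc_load_active) and pushes only the
delta to the global counter, so the global sum tracks the per-cpu values
without a global atomic on every tick. Standalone sketch, not kernel code:

	#include <stdio.h>

	static long global_tasks;		/* plays calc_load_tasks */

	struct toy_rq {
		long nr_running;
		long nr_uninterruptible;
		long calc_load_active;		/* last folded value */
	};

	static long toy_fold_active(struct toy_rq *rq)
	{
		long nr_active = rq->nr_running + rq->nr_uninterruptible;
		long delta = 0;

		if (nr_active != rq->calc_load_active) {
			delta = nr_active - rq->calc_load_active;
			rq->calc_load_active = nr_active;
		}
		return delta;
	}

	int main(void)
	{
		struct toy_rq rq = { .nr_running = 3, .nr_uninterruptible = 1 };

		global_tasks += toy_fold_active(&rq);	/* +4 */
		rq.nr_running = 1;			/* two tasks went away */
		global_tasks += toy_fold_active(&rq);	/* -2 */
		printf("%ld\n", global_tasks);		/* prints 2 */
		return 0;
	}

The NO_HZ variant merely parks such deltas in calc_load_tasks_idle so an
idling CPU need not touch calc_load_tasks outside the LOAD_FREQ window;
calc_load_fold_idle() drains them on the next update.
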
 /**
  * get_avenrun - get the load average array
  * @loads:     pointer to dest load array
@@ -2996,20 +3005,22 @@ void calc_global_load(void)
 }
 
 /*
- * Either called from update_cpu_load() or from a cpu going idle
+ * Called from update_cpu_load() to periodically update this CPU's
+ * active count.
  */
 static void calc_load_account_active(struct rq *this_rq)
 {
-       long nr_active, delta;
+       long delta;
 
-       nr_active = this_rq->nr_running;
-       nr_active += (long) this_rq->nr_uninterruptible;
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
 
-       if (nr_active != this_rq->calc_load_active) {
-               delta = nr_active - this_rq->calc_load_active;
-               this_rq->calc_load_active = nr_active;
+       delta  = calc_load_fold_active(this_rq);
+       delta += calc_load_fold_idle();
+       if (delta)
                atomic_long_add(delta, &calc_load_tasks);
-       }
+
+       this_rq->calc_load_update += LOAD_FREQ;
 }
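
The time_before() guard above is the wraparound-safe way to pace this work to
LOAD_FREQ: jiffies is an unsigned long that wraps, so a plain '<' would
misorder timestamps straddling the wrap. A user-space sketch of the
signed-delta trick behind it (toy macros, not the kernel headers):

	#include <stdio.h>

	#define toy_time_after(a, b)	((long)((b) - (a)) < 0)
	#define toy_time_before(a, b)	toy_time_after(b, a)

	int main(void)
	{
		unsigned long near_wrap = (unsigned long)-10;	/* just before wrap */
		unsigned long past_wrap = 5;			/* just after wrap */

		/* plain '<' claims past_wrap came first: wrong */
		printf("plain: %d\n", past_wrap < near_wrap);
		/* the signed delta orders them correctly */
		printf("safe:  %d\n", toy_time_before(near_wrap, past_wrap));
		return 0;
	}

Advancing calc_load_update by a fixed LOAD_FREQ step, rather than setting it
to jiffies + LOAD_FREQ, keeps successive updates aligned to the same grid even
when a tick is processed late.
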
 
 /*
@@ -3041,10 +3052,7 @@ static void update_cpu_load(struct rq *this_rq)
                this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
        }
 
-       if (time_after_eq(jiffies, this_rq->calc_load_update)) {
-               this_rq->calc_load_update += LOAD_FREQ;
-               calc_load_account_active(this_rq);
-       }
+       calc_load_account_active(this_rq);
 }
 
 #ifdef CONFIG_SMP
@@ -3056,7 +3064,6 @@ static void update_cpu_load(struct rq *this_rq)
 void sched_exec(void)
 {
        struct task_struct *p = current;
-       struct migration_req req;
        unsigned long flags;
        struct rq *rq;
        int dest_cpu;
@@ -3070,17 +3077,11 @@ void sched_exec(void)
         * select_task_rq() can race against ->cpus_allowed
         */
        if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-           likely(cpu_active(dest_cpu)) &&
-           migrate_task(p, dest_cpu, &req)) {
-               /* Need to wait for migration thread (might exit: take ref). */
-               struct task_struct *mt = rq->migration_thread;
+           likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
+               struct migration_arg arg = { p, dest_cpu };
 
-               get_task_struct(mt);
                task_rq_unlock(rq, &flags);
-               wake_up_process(mt);
-               put_task_struct(mt);
-               wait_for_completion(&req.done);
-
+               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                return;
        }
 unlock:
@@ -3607,7 +3608,7 @@ need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
-       rcu_sched_qs(cpu);
+       rcu_note_context_switch(cpu);
        prev = rq->curr;
        switch_count = &prev->nivcsw;
 
@@ -3690,7 +3691,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
         * the mutex owner just released it and exited.
         */
        if (probe_kernel_address(&owner->cpu, cpu))
-               goto out;
+               return 0;
 #else
        cpu = owner->cpu;
 #endif
@@ -3700,14 +3701,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
         * the cpu field may no longer be valid.
         */
        if (cpu >= nr_cpumask_bits)
-               goto out;
+               return 0;
 
        /*
         * We need to validate that we can do a
         * get_cpu() and that we have the percpu area.
         */
        if (!cpu_online(cpu))
-               goto out;
+               return 0;
 
        rq = cpu_rq(cpu);
 
@@ -3726,7 +3727,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
                cpu_relax();
        }
-out:
+
        return 1;
 }
 #endif
@@ -3949,8 +3950,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
        if (!x->done) {
                DECLARE_WAITQUEUE(wait, current);
 
-               wait.flags |= WQ_FLAG_EXCLUSIVE;
-               __add_wait_queue_tail(&x->wait, &wait);
+               __add_wait_queue_tail_exclusive(&x->wait, &wait);
                do {
                        if (signal_pending_state(state, current)) {
                                timeout = -ERESTARTSYS;
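
For reference, __add_wait_queue_tail_exclusive() simply folds the two removed
lines into one helper; its definition in include/linux/wait.h of this era
looks like the following (quoted from memory):

	static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
							   wait_queue_t *new)
	{
		new->flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(q, new);
	}
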
@@ -5236,17 +5236,15 @@ static inline void sched_init_granularity(void)
 /*
  * This is how migration works:
  *
- * 1) we queue a struct migration_req structure in the source CPU's
- *    runqueue and wake up that CPU's migration thread.
- * 2) we down() the locked semaphore => thread blocks.
- * 3) migration thread wakes up (implicitly it forces the migrated
- *    thread off the CPU)
- * 4) it gets the migration request and checks whether the migrated
- *    task is still in the wrong runqueue.
- * 5) if it's in the wrong runqueue then the migration thread removes
+ * 1) we invoke migration_cpu_stop() on the target CPU using
+ *    stop_one_cpu().
+ * 2) stopper starts to run (implicitly forcing the migrated thread
+ *    off the CPU)
+ * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the stopper removes
  *    it and puts it into the right queue.
- * 6) migration thread up()s the semaphore.
- * 7) we wake up and the migration is done.
+ * 5) stopper completes and stop_one_cpu() returns and the migration
+ *    is done.
  */
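
Condensed, the caller side of this flow (mirroring set_cpus_allowed_ptr()
below) looks like:

	struct migration_arg arg = { p, dest_cpu };

	rq = task_rq_lock(p, &flags);
	if (migrate_task(p, dest_cpu)) {
		/* p is queued or running: have the stopper move it */
		task_rq_unlock(rq, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
	} else {
		/* neither queued nor running: the next wakeup places p */
		task_rq_unlock(rq, &flags);
	}

Because arg lives on the caller's stack and stop_one_cpu() waits for the
callback to finish, no reference counting or completion bookkeeping is needed,
which is what lets struct migration_req shrink to migration_arg.
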
 
 /*
@@ -5260,9 +5258,9 @@ static inline void sched_init_granularity(void)
  */
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
-       struct migration_req req;
        unsigned long flags;
        struct rq *rq;
+       unsigned int dest_cpu;
        int ret = 0;
 
        /*
@@ -5300,15 +5298,12 @@ again:
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;
 
-       if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
+       dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+       if (migrate_task(p, dest_cpu)) {
+               struct migration_arg arg = { p, dest_cpu };
                /* Need help from the stopper: drop lock and wait. */
-               struct task_struct *mt = rq->migration_thread;
-
-               get_task_struct(mt);
                task_rq_unlock(rq, &flags);
-               wake_up_process(mt);
-               put_task_struct(mt);
-               wait_for_completion(&req.done);
+               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
        }
@@ -5366,70 +5361,22 @@ fail:
        return ret;
 }
 
-#define RCU_MIGRATION_IDLE     0
-#define RCU_MIGRATION_NEED_QS  1
-#define RCU_MIGRATION_GOT_QS   2
-#define RCU_MIGRATION_MUST_SYNC        3
-
 /*
- * migration_thread - this is a highprio system thread that performs
- * thread migration by bumping thread off CPU then 'pushing' onto
- * another runqueue.
+ * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * and performs thread migration by bumping the thread off its CPU, then
+ * 'pushing' it onto another runqueue.
  */
-static int migration_thread(void *data)
+static int migration_cpu_stop(void *data)
 {
-       int badcpu;
-       int cpu = (long)data;
-       struct rq *rq;
-
-       rq = cpu_rq(cpu);
-       BUG_ON(rq->migration_thread != current);
-
-       set_current_state(TASK_INTERRUPTIBLE);
-       while (!kthread_should_stop()) {
-               struct migration_req *req;
-               struct list_head *head;
-
-               raw_spin_lock_irq(&rq->lock);
-
-               if (cpu_is_offline(cpu)) {
-                       raw_spin_unlock_irq(&rq->lock);
-                       break;
-               }
-
-               if (rq->active_balance) {
-                       active_load_balance(rq, cpu);
-                       rq->active_balance = 0;
-               }
-
-               head = &rq->migration_queue;
-
-               if (list_empty(head)) {
-                       raw_spin_unlock_irq(&rq->lock);
-                       schedule();
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       continue;
-               }
-               req = list_entry(head->next, struct migration_req, list);
-               list_del_init(head->next);
-
-               if (req->task != NULL) {
-                       raw_spin_unlock(&rq->lock);
-                       __migrate_task(req->task, cpu, req->dest_cpu);
-               } else if (likely(cpu == (badcpu = smp_processor_id()))) {
-                       req->dest_cpu = RCU_MIGRATION_GOT_QS;
-                       raw_spin_unlock(&rq->lock);
-               } else {
-                       req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-                       raw_spin_unlock(&rq->lock);
-                       WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
-               }
-               local_irq_enable();
-
-               complete(&req->done);
-       }
-       __set_current_state(TASK_RUNNING);
+       struct migration_arg *arg = data;
 
+       /*
+        * The original target cpu might have gone down and we might
+        * be on another cpu but it doesn't matter.
+        */
+       local_irq_disable();
+       __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
+       local_irq_enable();
        return 0;
 }
 
@@ -5796,35 +5743,20 @@ static void set_rq_offline(struct rq *rq)
 static int __cpuinit
 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
-       struct task_struct *p;
        int cpu = (long)hcpu;
        unsigned long flags;
-       struct rq *rq;
+       struct rq *rq = cpu_rq(cpu);
 
        switch (action) {
 
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
-               if (IS_ERR(p))
-                       return NOTIFY_BAD;
-               kthread_bind(p, cpu);
-               /* Must be high prio: stop_machine expects to yield to it. */
-               rq = task_rq_lock(p, &flags);
-               __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
-               task_rq_unlock(rq, &flags);
-               get_task_struct(p);
-               cpu_rq(cpu)->migration_thread = p;
                rq->calc_load_update = calc_load_update;
                break;
 
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               /* Strictly unnecessary, as first user will wake it. */
-               wake_up_process(cpu_rq(cpu)->migration_thread);
-
                /* Update our root-domain */
-               rq = cpu_rq(cpu);
                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -5835,25 +5767,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               if (!cpu_rq(cpu)->migration_thread)
-                       break;
-               /* Unbind it from offline cpu so it can run. Fall thru. */
-               kthread_bind(cpu_rq(cpu)->migration_thread,
-                            cpumask_any(cpu_online_mask));
-               kthread_stop(cpu_rq(cpu)->migration_thread);
-               put_task_struct(cpu_rq(cpu)->migration_thread);
-               cpu_rq(cpu)->migration_thread = NULL;
-               break;
-
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_live_tasks(cpu);
-               rq = cpu_rq(cpu);
-               kthread_stop(rq->migration_thread);
-               put_task_struct(rq->migration_thread);
-               rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
                raw_spin_lock_irq(&rq->lock);
                deactivate_task(rq, rq->idle, 0);
@@ -5864,29 +5780,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
                calc_global_load_remove(rq);
-               /*
-                * No need to migrate the tasks: it was best-effort if
-                * they didn't take sched_hotcpu_mutex. Just wake up
-                * the requestors.
-                */
-               raw_spin_lock_irq(&rq->lock);
-               while (!list_empty(&rq->migration_queue)) {
-                       struct migration_req *req;
-
-                       req = list_entry(rq->migration_queue.next,
-                                        struct migration_req, list);
-                       list_del_init(&req->list);
-                       raw_spin_unlock_irq(&rq->lock);
-                       complete(&req->done);
-                       raw_spin_lock_irq(&rq->lock);
-               }
-               raw_spin_unlock_irq(&rq->lock);
                break;
 
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /* Update our root-domain */
-               rq = cpu_rq(cpu);
                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -6217,6 +6115,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
 
+       for (tmp = sd; tmp; tmp = tmp->parent)
+               tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
+
        /* Remove the sched domains which do not contribute to scheduling. */
        for (tmp = sd; tmp; ) {
                struct sched_domain *parent = tmp->parent;
@@ -7700,10 +7601,8 @@ void __init sched_init(void)
                rq->push_cpu = 0;
                rq->cpu = i;
                rq->online = 0;
-               rq->migration_thread = NULL;
                rq->idle_stamp = 0;
                rq->avg_idle = 2*sysctl_sched_migration_cost;
-               INIT_LIST_HEAD(&rq->migration_queue);
                rq_attach_root(rq, &def_root_domain);
 #endif
                init_rq_hrtick(rq);
@@ -8997,43 +8896,32 @@ struct cgroup_subsys cpuacct_subsys = {
 
 #ifndef CONFIG_SMP
 
-int rcu_expedited_torture_stats(char *page)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
-
 void synchronize_sched_expedited(void)
 {
+       barrier();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
 #else /* #ifndef CONFIG_SMP */
 
-static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
-static DEFINE_MUTEX(rcu_sched_expedited_mutex);
-
-#define RCU_EXPEDITED_STATE_POST -2
-#define RCU_EXPEDITED_STATE_IDLE -1
-
-static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
 
-int rcu_expedited_torture_stats(char *page)
+static int synchronize_sched_expedited_cpu_stop(void *data)
 {
-       int cnt = 0;
-       int cpu;
-
-       cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
-       for_each_online_cpu(cpu) {
-                cnt += sprintf(&page[cnt], " %d:%d",
-                               cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
-       }
-       cnt += sprintf(&page[cnt], "\n");
-       return cnt;
+       /*
+        * There must be a full memory barrier on each affected CPU
+        * between the time that try_stop_cpus() is called and the
+        * time that it returns.
+        *
+        * In the current initial implementation of cpu_stop, the
+        * above condition is already met by the time control reaches
+        * this point, so the following smp_mb() is not strictly
+        * necessary.  Do smp_mb() anyway for documentation and
+        * robustness against future implementation changes.
+        */
+       smp_mb(); /* See above comment block. */
+       return 0;
 }
-EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
-
-static long synchronize_sched_expedited_count;
 
 /*
  * Wait for an rcu-sched grace period to elapse, but use "big hammer"
@@ -9047,18 +8935,14 @@ static long synchronize_sched_expedited_count;
  */
 void synchronize_sched_expedited(void)
 {
-       int cpu;
-       unsigned long flags;
-       bool need_full_sync = 0;
-       struct rq *rq;
-       struct migration_req *req;
-       long snap;
-       int trycount = 0;
+       int snap, trycount = 0;
 
        smp_mb();  /* ensure prior mod happens before capturing snap. */
-       snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+       snap = atomic_read(&synchronize_sched_expedited_count) + 1;
        get_online_cpus();
-       while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+       while (try_stop_cpus(cpu_online_mask,
+                            synchronize_sched_expedited_cpu_stop,
+                            NULL) == -EAGAIN) {
                put_online_cpus();
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
@@ -9066,41 +8950,15 @@ void synchronize_sched_expedited(void)
                        synchronize_sched();
                        return;
                }
-               if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+               if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
                        smp_mb(); /* ensure test happens before caller kfree */
                        return;
                }
                get_online_cpus();
        }
-       rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
-       for_each_online_cpu(cpu) {
-               rq = cpu_rq(cpu);
-               req = &per_cpu(rcu_migration_req, cpu);
-               init_completion(&req->done);
-               req->task = NULL;
-               req->dest_cpu = RCU_MIGRATION_NEED_QS;
-               raw_spin_lock_irqsave(&rq->lock, flags);
-               list_add(&req->list, &rq->migration_queue);
-               raw_spin_unlock_irqrestore(&rq->lock, flags);
-               wake_up_process(rq->migration_thread);
-       }
-       for_each_online_cpu(cpu) {
-               rcu_expedited_state = cpu;
-               req = &per_cpu(rcu_migration_req, cpu);
-               rq = cpu_rq(cpu);
-               wait_for_completion(&req->done);
-               raw_spin_lock_irqsave(&rq->lock, flags);
-               if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
-                       need_full_sync = 1;
-               req->dest_cpu = RCU_MIGRATION_IDLE;
-               raw_spin_unlock_irqrestore(&rq->lock, flags);
-       }
-       rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
-       synchronize_sched_expedited_count++;
-       mutex_unlock(&rcu_sched_expedited_mutex);
+       atomic_inc(&synchronize_sched_expedited_count);
+       smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
        put_online_cpus();
-       if (need_full_sync)
-               synchronize_sched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
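
The counter dance above admits a compact model: snapshot the completion count
before attempting, and if the count ever moves past the snapshot, some other
caller's later grace period fully covers ours and we can return without
retrying. A standalone toy sketch in C11 atomics, where try_do_grace_period()
stands in for try_stop_cpus():

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int gp_count;		/* completed expedited GPs */

	/* stand-in for try_stop_cpus(): fail once to show the retry path */
	static bool try_do_grace_period(void)
	{
		static int calls;
		return ++calls > 1;
	}

	static void toy_expedited(void)
	{
		int snap = atomic_load(&gp_count) + 1;

		while (!try_do_grace_period()) {
			/* a GP numbered > snap fully covers ours: done */
			if (atomic_load(&gp_count) - snap > 0)
				return;
			/* else back off and retry; the kernel udelay()s and
			 * eventually falls back to synchronize_sched() */
		}
		atomic_fetch_add(&gp_count, 1);	/* publish completion */
	}

	int main(void)
	{
		toy_expedited();
		printf("completed GPs: %d\n", atomic_load(&gp_count));
		return 0;
	}
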