Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df9f1fe5689b04006907283f798b91959c0b9d51..7fa8e74ad2ab4003457d266df57373f41f0e0d2a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3486,8 +3486,36 @@ void scheduler_tick(void)
 
 struct tick_work {
        int                     cpu;
+       atomic_t                state;
        struct delayed_work     work;
 };
+/* Values for ->state, see diagram below. */
+#define TICK_SCHED_REMOTE_OFFLINE      0
+#define TICK_SCHED_REMOTE_OFFLINING    1
+#define TICK_SCHED_REMOTE_RUNNING      2
+
+/*
+ * State diagram for ->state:
+ *
+ *
+ *          TICK_SCHED_REMOTE_OFFLINE
+ *                    |   ^
+ *                    |   |
+ *                    |   | sched_tick_remote()
+ *                    |   |
+ *                    |   |
+ *                    +--TICK_SCHED_REMOTE_OFFLINING
+ *                    |   ^
+ *                    |   |
+ * sched_tick_start() |   | sched_tick_stop()
+ *                    |   |
+ *                    V   |
+ *          TICK_SCHED_REMOTE_RUNNING
+ *
+ *
+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
+ * and sched_tick_start() are happy to leave the state in RUNNING.
+ */
 
 static struct tick_work __percpu *tick_work_cpu;
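The state machine in the comment above can be exercised outside the kernel. Below is a minimal userspace sketch, with C11 atomics standing in for the kernel's atomic_t API; the model_tick_*() helpers, the work_queued counter and the bare OFFLINE/OFFLINING/RUNNING names are illustrative stand-ins, not kernel code.

/* Illustrative userspace model of the tick_work state machine above. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

enum { OFFLINE = 0, OFFLINING = 1, RUNNING = 2 };   /* mirrors TICK_SCHED_REMOTE_* */

static _Atomic int state = OFFLINE;
static int work_queued;         /* pending instances of the delayed work */

/* CPU coming up: unconditionally enter RUNNING (the kernel uses atomic_xchg()). */
static void model_tick_start(void)
{
        int os = atomic_exchange(&state, RUNNING);

        assert(os != RUNNING);          /* a double start would be warned */
        if (os == OFFLINE)
                work_queued++;          /* only the first start queues the work */
        /* os == OFFLINING: the old work is still queued and keeps going */
}

/* CPU going down: request retirement, but do not cancel the queued work. */
static void model_tick_stop(void)
{
        int os = atomic_exchange(&state, OFFLINING);

        assert(os == RUNNING);          /* must have been started */
        /* Nothing is canceled: the queued work will notice OFFLINING. */
}

/* One pass of the remote tick: requeue only while still RUNNING. */
static void model_tick_remote(void)
{
        int expected = OFFLINING;

        /* Stands in for atomic_fetch_add_unless(&state, -1, RUNNING). */
        if (atomic_compare_exchange_strong(&state, &expected, OFFLINE)) {
                work_queued--;          /* acknowledged the stop: retire */
                return;
        }
        assert(expected == RUNNING);    /* seeing OFFLINE here would be a bug */
        /* still RUNNING: the work requeues itself for the next second */
}

int main(void)
{
        model_tick_start();             /* OFFLINE   -> RUNNING, work queued       */
        model_tick_remote();            /* RUNNING   -> RUNNING, work requeues     */
        model_tick_stop();              /* RUNNING   -> OFFLINING, nothing canceled */
        model_tick_remote();            /* OFFLINING -> OFFLINE, work retires      */
        printf("work instances left queued: %d\n", work_queued);   /* 0 */
        return 0;
}
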
 
@@ -3500,6 +3528,7 @@ static void sched_tick_remote(struct work_struct *work)
        struct task_struct *curr;
        struct rq_flags rf;
        u64 delta;
+       int os;
 
        /*
         * Handle the tick only if it appears the remote CPU is running in full
@@ -3513,7 +3542,7 @@ static void sched_tick_remote(struct work_struct *work)
 
        rq_lock_irq(rq, &rf);
        curr = rq->curr;
-       if (is_idle_task(curr))
+       if (is_idle_task(curr) || cpu_is_offline(cpu))
                goto out_unlock;
 
        update_rq_clock(rq);
@@ -3533,13 +3562,18 @@ out_requeue:
        /*
         * Run the remote tick once per second (1Hz). This arbitrary
         * frequency is large enough to avoid overload but short enough
-        * to keep scheduler internal stats reasonably up to date.
+        * to keep scheduler internal stats reasonably up to date.  But
+        * first update state to reflect hotplug activity if required.
         */
-       queue_delayed_work(system_unbound_wq, dwork, HZ);
+       os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
+       WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
+       if (os == TICK_SCHED_REMOTE_RUNNING)
+               queue_delayed_work(system_unbound_wq, dwork, HZ);
 }
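
The single atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING) call both inspects and updates the state: it adds -1 unless the value is already RUNNING, and returns the old value either way. So an old value of RUNNING means "still online, requeue", OFFLINING quietly decays to OFFLINE so the work retires, and OFFLINE should be impossible, hence the WARN_ON_ONCE(). A rough standalone emulation of those semantics (fetch_add_unless() here is a hand-rolled stand-in, not the kernel helper):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for atomic_fetch_add_unless(v, a, u): add a to *v unless *v == u;
 * return the old value either way. */
static int fetch_add_unless(_Atomic int *v, int a, int u)
{
        int c = atomic_load(v);

        while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
                ;       /* the failed CAS reloaded c; retry */
        return c;
}

int main(void)
{
        _Atomic int state = 0;
        int os;

        atomic_store(&state, 2);                /* RUNNING */
        os = fetch_add_unless(&state, -1, 2);
        printf("old=%d new=%d -> requeue the work\n", os, atomic_load(&state));        /* 2 2 */

        atomic_store(&state, 1);                /* OFFLINING */
        os = fetch_add_unless(&state, -1, 2);
        printf("old=%d new=%d -> retire quietly\n", os, atomic_load(&state));          /* 1 0 */

        atomic_store(&state, 0);                /* OFFLINE: the WARN_ON_ONCE() case */
        os = fetch_add_unless(&state, -1, 2);
        printf("old=%d new=%d -> would trip the warning\n", os, atomic_load(&state));  /* 0 -1 */
        return 0;
}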
 
 static void sched_tick_start(int cpu)
 {
+       int os;
        struct tick_work *twork;
 
        if (housekeeping_cpu(cpu, HK_FLAG_TICK))
@@ -3548,15 +3582,20 @@ static void sched_tick_start(int cpu)
        WARN_ON_ONCE(!tick_work_cpu);
 
        twork = per_cpu_ptr(tick_work_cpu, cpu);
-       twork->cpu = cpu;
-       INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
-       queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+       os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
+       WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
+       if (os == TICK_SCHED_REMOTE_OFFLINE) {
+               twork->cpu = cpu;
+               INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+               queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+       }
 }
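
The interesting case for sched_tick_start() is a CPU that goes offline and comes back online before the pending work has fired: the atomic_xchg() then returns OFFLINING, and the function deliberately does not queue a second work item, because the existing one will observe RUNNING on its next pass and keep going. A compact userspace sketch of that interleaving (names and constants illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

enum { OFFLINE, OFFLINING, RUNNING };

static _Atomic int state = OFFLINE;
static int queued;              /* pending instances of the delayed work */

int main(void)
{
        int os, expected;

        /* sched_tick_start(): first online, queue the work. */
        os = atomic_exchange(&state, RUNNING);
        if (os == OFFLINE)
                queued++;

        /* sched_tick_stop(): CPU going down; no cancel, the work stays queued. */
        os = atomic_exchange(&state, OFFLINING);
        assert(os == RUNNING);

        /* sched_tick_start() again, before the work had a chance to run:
         * the old state is OFFLINING, so the existing work is reused as-is. */
        os = atomic_exchange(&state, RUNNING);
        if (os == OFFLINE)
                queued++;               /* not taken: avoids a double queue */

        /* The pending work finally runs, sees RUNNING, and keeps requeueing. */
        expected = OFFLINING;
        if (!atomic_compare_exchange_strong(&state, &expected, OFFLINE))
                assert(expected == RUNNING);

        printf("queued instances: %d\n", queued);       /* 1 */
        return 0;
}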
 
 #ifdef CONFIG_HOTPLUG_CPU
 static void sched_tick_stop(int cpu)
 {
        struct tick_work *twork;
+       int os;
 
        if (housekeeping_cpu(cpu, HK_FLAG_TICK))
                return;
@@ -3564,7 +3603,10 @@ static void sched_tick_stop(int cpu)
        WARN_ON_ONCE(!tick_work_cpu);
 
        twork = per_cpu_ptr(tick_work_cpu, cpu);
-       cancel_delayed_work_sync(&twork->work);
+       /* There cannot be competing actions, but don't rely on stop-machine. */
+       os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
+       WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
+       /* Don't cancel, as this would mess up the state machine. */
 }
 #endif /* CONFIG_HOTPLUG_CPU */
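
sched_tick_stop() now only flips the state to OFFLINING; the next sched_tick_remote() pass moves it to OFFLINE and skips the requeue, so the work item drains itself instead of being cancelled from the hotplug path. A userspace analogue of that "flag it, let it retire itself" pattern, using a pthread in place of the self-requeueing delayed work (all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum { OFFLINE, OFFLINING, RUNNING };

static _Atomic int state = RUNNING;     /* as if the start path already ran */

static void *worker(void *arg)
{
        (void)arg;
        for (;;) {
                int expected = OFFLINING;

                usleep(1000);           /* one "tick" of work */

                /* Same decision as sched_tick_remote(): requeue vs. retire. */
                if (atomic_compare_exchange_strong(&state, &expected, OFFLINE)) {
                        puts("worker: saw OFFLINING, retiring");
                        return NULL;
                }
                /* expected == RUNNING: loop again, i.e. "requeue" */
        }
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        usleep(5000);

        /* The "stop" path: just flip the state, no cancellation, no waiting. */
        atomic_exchange(&state, OFFLINING);

        pthread_join(tid, NULL);        /* only so the demo exits cleanly */
        printf("final state: %d (OFFLINE)\n", atomic_load(&state));
        return 0;
}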
 
@@ -3572,7 +3614,6 @@ int __init sched_tick_offload_init(void)
 {
        tick_work_cpu = alloc_percpu(struct tick_work);
        BUG_ON(!tick_work_cpu);
-
        return 0;
 }