rcu: Avoid needlessly IPIing CPUs at GP end
author	Paul E. McKenney <paul.mckenney@linaro.org>
Wed, 23 Nov 2011 01:46:19 +0000 (17:46 -0800)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Sun, 11 Dec 2011 18:32:01 +0000 (10:32 -0800)
If a CPU enters dyntick-idle mode with callbacks pending, it will need
an IPI at the end of the grace period.  However, if it exits dyntick-idle
mode before the grace period ends, it will be needlessly IPIed at the
end of the grace period.
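
The pattern at issue can be sketched as follows.  This is an
illustrative sketch only: the helper names and the use of
smp_send_reschedule() as the IPI are assumptions for clarity, not
the kernel's exact wakeup path.

static DEFINE_PER_CPU(int, rcu_awake_at_gp_end);

/* Entering dyntick-idle with callbacks pending: request a wakeup. */
static void sketch_enter_dyntick_idle(int cpu)
{
        per_cpu(rcu_awake_at_gp_end, cpu) = 1;
}

/*
 * At grace-period end: without the fix, a CPU that has already
 * exited dyntick-idle still has its flag set and is IPIed for
 * nothing.
 */
static void sketch_gp_end(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                if (per_cpu(rcu_awake_at_gp_end, cpu))
                        smp_send_reschedule(cpu);
}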

Therefore, this commit clears the per-CPU rcu_awake_at_gp_end flag
when a CPU determines that it no longer needs the end-of-grace-period
wakeup.  This in turn requires disabling interrupts across much of
rcu_prepare_for_idle() in order to avoid having nested interrupts
clear this state out from under us.
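
The locking pattern this implies can be sketched as follows, where
sketch_cpu_has_callbacks() is a hypothetical stand-in for
rcu_cpu_has_callbacks(): interrupts are disabled before the per-CPU
state is examined and re-enabled only on the way out of each return
path, so an interrupt arriving mid-function cannot run this same code
and clear rcu_awake_at_gp_end between the check and the update.

static bool sketch_cpu_has_callbacks(int cpu);  /* hypothetical */

static void sketch_prepare_for_idle(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);  /* fence off nested interrupts */
        if (!sketch_cpu_has_callbacks(cpu)) {
                /* No callbacks, so no wakeup needed at GP end. */
                per_cpu(rcu_awake_at_gp_end, cpu) = 0;
                local_irq_restore(flags);
                return;
        }
        /* Callbacks pending: keep the wakeup request armed. */
        per_cpu(rcu_awake_at_gp_end, cpu) = 1;
        local_irq_restore(flags);
}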

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcutree_plugin.h

index 45790bfb6e8c779ac344bdee041e5c47c059d424..c4daf1e19e01faa2ce3ddafdab7c70c246555a71 100644 (file)
@@ -2027,6 +2027,9 @@ int rcu_needs_cpu(int cpu)
 static void rcu_prepare_for_idle(int cpu)
 {
        int c = 0;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        /*
         * If there are no callbacks on this CPU or if RCU has no further
@@ -2036,14 +2039,17 @@ static void rcu_prepare_for_idle(int cpu)
        if (!rcu_cpu_has_callbacks(cpu)) {
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
                per_cpu(rcu_dyntick_drain, cpu) = 0;
+               per_cpu(rcu_awake_at_gp_end, cpu) = 0;
+               local_irq_restore(flags);
                trace_rcu_prep_idle("No callbacks");
                return;
        }
        if (!rcu_pending(cpu)) {
-               trace_rcu_prep_idle("Dyntick with callbacks");
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
                per_cpu(rcu_dyntick_drain, cpu) = 0;
                per_cpu(rcu_awake_at_gp_end, cpu) = 1;
+               local_irq_restore(flags);
+               trace_rcu_prep_idle("Dyntick with callbacks");
                return;  /* Nothing to do immediately. */
        }
 
@@ -2052,6 +2058,7 @@ static void rcu_prepare_for_idle(int cpu)
         * refrained from disabling the scheduling-clock tick.
         */
        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+               local_irq_restore(flags);
                trace_rcu_prep_idle("In holdoff");
                return;
        }
@@ -2060,9 +2067,11 @@ static void rcu_prepare_for_idle(int cpu)
        if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* First time through, initialize the counter. */
                per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+               per_cpu(rcu_awake_at_gp_end, cpu) = 0;
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+               local_irq_restore(flags);
                trace_rcu_prep_idle("Begin holdoff");
                invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
                return;
@@ -2095,10 +2104,13 @@ static void rcu_prepare_for_idle(int cpu)
         * So try forcing the callbacks through the grace period.
         */
        if (c) {
+               local_irq_restore(flags);
                trace_rcu_prep_idle("More callbacks");
                invoke_rcu_core();
-       } else
+       } else {
+               local_irq_restore(flags);
                trace_rcu_prep_idle("Callbacks drained");
+       }
 }
 
 /*