rcu: Add RCU_NONIDLE() for idle-loop RCU read-side critical sections
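
RCU, RCU_TRACE, and lockdep-RCU assume that no RCU read-side critical
sections execute from the idle loop.  Code that nevertheless needs RCU
protection while nominally idle (event tracing reached from cpuidle code,
for example) can wrap the offending region in RCU_NONIDLE(), which briefly
tells RCU that the CPU is not idle; exporting rcu_idle_enter() and
rcu_idle_exit() below is what lets that macro be used from modules.  A
minimal usage sketch -- the traced function is illustrative only, and the
expansion shown is an assumption about the rcupdate.h definition added by
this series:

	/* Somewhere inside the idle loop: */
	RCU_NONIDLE(trace_some_event_rcuidle(arg));

	/* Assumed expansion: momentarily leave idle around the statement. */
	rcu_idle_exit();
	trace_some_event_rcuidle(arg);
	rcu_idle_enter();
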
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 977296dca0a41100b1d549a5ee2fc8a3bd9e22c3..37a5444204d2895193b8ffafbe8b7552c0f83acc 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,7 +53,7 @@ static void __call_rcu(struct rcu_head *head,
 
 #include "rcutiny_plugin.h"
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
+static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 
 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
 static void rcu_idle_enter_common(long long oldval)
@@ -88,10 +88,16 @@ void rcu_idle_enter(void)
 
        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
-       rcu_dynticks_nesting = 0;
+       WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
+       if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
+           DYNTICK_TASK_NEST_VALUE)
+               rcu_dynticks_nesting = 0;
+       else
+               rcu_dynticks_nesting  -= DYNTICK_TASK_NEST_VALUE;
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
 /*
  * Exit an interrupt handler towards idle.
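
The rcu_idle_enter() rewrite above replaces the old "just zero the counter"
behavior: the task-level nesting count is kept in the upper bits of
rcu_dynticks_nesting so that rcu_idle_exit()/rcu_idle_enter() pairs (as
generated by RCU_NONIDLE()) can nest.  A hedged sketch of the constants the
masking assumes -- check include/linux/rcupdate.h in this series for the
authoritative definitions:

	#include <limits.h>   /* LLONG_MAX, if compiled outside the kernel */

	/* One unit of task-level nesting, stored in the counter's top bits. */
	#define DYNTICK_TASK_NEST_WIDTH  7
	#define DYNTICK_TASK_NEST_VALUE  ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
	#define DYNTICK_TASK_NEST_MASK   (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
	/* Flag bits below the nest count, marking "this task is not idle". */
	#define DYNTICK_TASK_FLAG        ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
	/* Initial value: exactly one level of task nesting plus the flag. */
	#define DYNTICK_TASK_EXIT_IDLE   (DYNTICK_TASK_NEST_VALUE + DYNTICK_TASK_FLAG)

With this layout, the "== DYNTICK_TASK_NEST_VALUE" test above distinguishes
the outermost idle entry (the counter really does go to zero) from a nested
one (the counter merely drops by one nesting unit).
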
@@ -140,11 +146,15 @@ void rcu_idle_exit(void)
 
        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
-       WARN_ON_ONCE(oldval != 0);
-       rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
+       WARN_ON_ONCE(rcu_dynticks_nesting < 0);
+       if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
+               rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+       else
+               rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
 /*
  * Enter an interrupt handler, moving away from idle.
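
Taken together, the two functions above implement a nesting counter that
tolerates rcu_idle_exit()/rcu_idle_enter() pairs invoked from within the
idle loop.  Below is a small user-space model of that bookkeeping, using
made-up constants rather than the real rcupdate.h values, purely to make
the transitions easy to trace; it is an illustration, not the kernel code:

	#include <stdio.h>

	#define NEST_VALUE 0x100LL              /* one unit of task-level nesting    */
	#define NEST_MASK  (~0xffLL)            /* bits holding the nest count       */
	#define EXIT_IDLE  (NEST_VALUE + 0x40)  /* "running normally": 1 nest + flag */

	static long long nesting = EXIT_IDLE;

	static void model_idle_enter(void)      /* mirrors rcu_idle_enter() */
	{
		if ((nesting & NEST_MASK) == NEST_VALUE)
			nesting = 0;            /* outermost entry: CPU is idle  */
		else
			nesting -= NEST_VALUE;  /* nested entry: just count down */
	}

	static void model_idle_exit(void)       /* mirrors rcu_idle_exit() */
	{
		if (nesting & NEST_MASK)
			nesting += NEST_VALUE;  /* nested exit: count up   */
		else
			nesting = EXIT_IDLE;    /* leaving genuine idle    */
	}

	int main(void)
	{
		model_idle_enter();     /* idle loop entered    -> 0         */
		model_idle_exit();      /* RCU_NONIDLE() begins -> EXIT_IDLE */
		model_idle_enter();     /* RCU_NONIDLE() ends   -> 0         */
		printf("final nesting = %#llx\n", nesting);
		return 0;
	}
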
@@ -258,7 +268,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 
        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
-               RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+               RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
@@ -269,7 +279,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 
        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
-       RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+       RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
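
The two tracing hunks above reflect a widened rcu_batch_start tracepoint:
the call sites now report both the count of lazy callbacks (always 0 for
tiny RCU) and the total callback queue length.  A hedged reading of the
argument order the new calls assume -- the authoritative TRACE_EVENT lives
in include/trace/events/rcu.h:

	/*
	 * trace_rcu_batch_start(rcuname,    - RCU flavor name
	 *                       qlen_lazy,  - lazy callbacks queued (0 here)
	 *                       qlen,       - total callbacks queued
	 *                       blimit);    - batch limit; -1 means "no limit"
	 */
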
@@ -319,6 +329,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 void synchronize_sched(void)
 {
+       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
+                          !lock_is_held(&rcu_lock_map) &&
+                          !lock_is_held(&rcu_sched_lock_map),
+                          "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
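
The new rcu_lockdep_assert() gives lockdep a chance to flag a class of bug
that tiny RCU would otherwise silently tolerate: even though this
synchronize_sched() is just cond_resched(), waiting for a grace period from
inside any RCU read-side critical section is illegal.  A hedged example of
the pattern that now produces a splat (names are illustrative, not from
this patch):

	void buggy_reader(void)
	{
		rcu_read_lock();
		do_something_with(rcu_dereference(gp)); /* illustrative reader */
		synchronize_sched();  /* illegal: a grace period cannot be   */
		                      /* waited out from inside a read-side  */
		                      /* critical section                    */
		rcu_read_unlock();
	}
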