rcu/nocb: Process batch locally as long as offloading isn't complete
author Frederic Weisbecker <frederic@kernel.org>
Fri, 13 Nov 2020 12:13:27 +0000 (13:13 +0100)
committer Paul E. McKenney <paulmck@kernel.org>
Thu, 7 Jan 2021 00:24:59 +0000 (16:24 -0800)
This commit makes sure to process the callbacks locally (via either
RCU_SOFTIRQ or the rcuc kthread) whenever the segcblist isn't entirely
offloaded.  This ensures that callbacks are invoked one way or another
while a CPU is in the middle of a toggle operation.
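For illustration only, here is a minimal standalone sketch of the check this
commit introduces: local batch processing is skipped only once all three
segcblist flags are set, i.e. once the offloading toggle has fully completed.
The flag values below are placeholders, not the kernel's actual definitions,
and the snippet is not the kernel code itself.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative values only; the kernel defines these elsewhere. */
	#define SEGCBLIST_OFFLOADED   0x1
	#define SEGCBLIST_KTHREAD_CB  0x2
	#define SEGCBLIST_KTHREAD_GP  0x4

	/* Mirrors the new rcu_segcblist_completely_offloaded() test. */
	static bool completely_offloaded(int flags)
	{
		int all = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB |
			  SEGCBLIST_KTHREAD_GP;

		return (flags & all) == all;
	}

	int main(void)
	{
		/* Mid-toggle: offload requested, kthreads not yet in charge. */
		int mid_toggle = SEGCBLIST_OFFLOADED;
		/* Fully offloaded: both kthreads have taken over. */
		int done = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB |
			   SEGCBLIST_KTHREAD_GP;

		printf("mid-toggle: process batch locally = %d\n",
		       !completely_offloaded(mid_toggle));
		printf("offloaded:  process batch locally = %d\n",
		       !completely_offloaded(done));
		return 0;
	}

In rcu_core(), the negation of this test becomes the do_batch condition that
gates rcu_do_batch(), as shown in the diff below.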

Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Inspired-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/rcu_segcblist.h
kernel/rcu/tree.c

index 28c9a5225afc6ac421d0dfd735d52371a9ae49d3..afad6fc6311c296170f6940ab70be435b6a6129f 100644 (file)
@@ -95,6 +95,18 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
        return false;
 }
 
+static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
+{
+       int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
+
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
+               if ((rsclp->flags & flags) == flags)
+                       return true;
+       }
+
+       return false;
+}
+
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
index 4ef59a5416a30887c33305f9376886ecd6b6e56d..ec14c017c0e3b961f0cea418823ecdbdfc6d0b43 100644 (file)
@@ -2700,6 +2700,7 @@ static __latent_entropy void rcu_core(void)
        struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
+       const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
 
        if (cpu_is_offline(smp_processor_id()))
                return;
@@ -2729,7 +2730,7 @@ static __latent_entropy void rcu_core(void)
        rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
        /* If there are callbacks ready, invoke them. */
-       if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+       if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
            likely(READ_ONCE(rcu_scheduler_fully_active)))
                rcu_do_batch(rdp);