rcu: Fix missing nocb gp wake on rcu_barrier()
author:     Frederic Weisbecker <frederic@kernel.org>
            Sun, 16 Oct 2022 16:22:53 +0000 (16:22 +0000)
committer:  Paul E. McKenney <paulmck@kernel.org>
            Tue, 18 Oct 2022 22:01:31 +0000 (15:01 -0700)
In preparation for RCU lazy changes, wake up the RCU nocb gp thread if
needed after entraining the rcu_barrier() callback.  This prevents the
barrier callback from sitting in the queue for several seconds while the
lazy callbacks ahead of it wait to be serviced.

Reported-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_nocb.h

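Before the diff itself, here is a minimal, self-contained sketch of the wake
decision this patch adds to rcu_barrier_entrain().  This is not kernel code:
the struct and helpers below are a toy model standing in for
rcu_rdp_is_offloaded(), rcu_segcblist_pend_cbs() and rcu_nocb_flush_bypass(),
used only to illustrate when wake_nocb_gp() would be called, namely when
flushing the bypass puts callbacks onto a previously empty regular queue of
an offloaded CPU.

/*
 * Standalone sketch (toy model, not kernel code) of the wake decision
 * added by this patch to rcu_barrier_entrain().
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rdp {
	bool offloaded;   /* callbacks handled by a nocb (rcuog/rcuoc) kthread */
	bool pend_cbs;    /* regular (non-bypass) queue has pending callbacks  */
	bool bypass_cbs;  /* bypass list holds (possibly lazy) callbacks       */
};

/* Toy stand-in for rcu_nocb_flush_bypass(): move bypass CBs to the queue. */
static void toy_flush_bypass(struct toy_rdp *rdp)
{
	if (rdp->bypass_cbs) {
		rdp->pend_cbs = true;
		rdp->bypass_cbs = false;
	}
}

/* Toy model of the entrain path: returns true when rcuog should be woken. */
static bool toy_barrier_entrain(struct toy_rdp *rdp)
{
	/* Was the regular queue empty before flushing the bypass? */
	bool was_alldone = rdp->offloaded && !rdp->pend_cbs;
	bool wake_nocb;

	toy_flush_bypass(rdp);
	/* Wake only if the flush made callbacks appear on an empty queue. */
	wake_nocb = was_alldone && rdp->pend_cbs;
	rdp->pend_cbs = true;   /* entrain the rcu_barrier() callback */
	return wake_nocb;
}

int main(void)
{
	/* Offloaded CPU whose only callbacks sit (lazily) in the bypass. */
	struct toy_rdp rdp = {
		.offloaded = true, .pend_cbs = false, .bypass_cbs = true,
	};

	printf("wake rcuog: %s\n", toy_barrier_entrain(&rdp) ? "yes" : "no");
	return 0;
}

In this scenario the sketch prints "wake rcuog: yes": without the wake, the
entrained barrier callback would sit behind the flushed lazy callbacks until
the bypass timer fired, which is the multi-second delay the patch avoids.
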
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6bb8e72bc8151ef2eb4093f2464f32c225672f88..fb7a1b95af71e19be7d78045fdd627b770d0f60a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3894,6 +3894,8 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
 {
        unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
        unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
+       bool wake_nocb = false;
+       bool was_alldone = false;
 
        lockdep_assert_held(&rcu_state.barrier_lock);
        if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
@@ -3902,7 +3904,14 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
        rdp->barrier_head.func = rcu_barrier_callback;
        debug_rcu_head_queue(&rdp->barrier_head);
        rcu_nocb_lock(rdp);
+       /*
+        * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
+        * queue. This way we don't wait for bypass timer that can reach seconds
+        * if it's fully lazy.
+        */
+       was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
        WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
+       wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
        if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
                atomic_inc(&rcu_state.barrier_cpu_count);
        } else {
@@ -3910,6 +3919,8 @@ static void rcu_barrier_entrain(struct rcu_data *rdp)
                rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
        }
        rcu_nocb_unlock(rdp);
+       if (wake_nocb)
+               wake_nocb_gp(rdp, false);
        smp_store_release(&rdp->barrier_seq_snap, gseq);
 }
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d4a97e40ea9c3e2ff3e9fd2eea268c6d9d371856..925dd98f8b23b65a6a66e89773f0cea5361484fa 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -439,6 +439,7 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                  unsigned long j);
 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index f77a6d7e1356435c63f8a285eaa66eed0c63ace7..094fd454b6c381bd3e52f60ca0690627bd99408e 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1558,6 +1558,11 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 {
 }
 
+static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
+{
+       return false;
+}
+
 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
                                  unsigned long j)
 {