rcu/nocb: Print no-CBs diagnostics when rcutorture writer unduly delayed
Author: Paul E. McKenney <paulmck@linux.ibm.com>
Tue, 25 Jun 2019 20:32:51 +0000 (13:32 -0700)
Committer: Paul E. McKenney <paulmck@linux.ibm.com>
Tue, 13 Aug 2019 21:38:24 +0000 (14:38 -0700)
This commit causes locking, sleeping, and callback state to be printed
for no-CBs CPUs when the rcutorture writer is delayed sufficiently for
rcutorture to complain.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
kernel/rcu/rcutorture.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h

index b22947324423b87ff76e79777ed1ba54369b381b..3c9feca1eab17e231750afd27606c557799ddc38 100644 (file)
@@ -2176,6 +2176,7 @@ rcu_torture_cleanup(void)
                return;
        }
 
+       show_rcu_gp_kthreads();
        rcu_torture_barrier_cleanup();
        torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
        torture_stop_kthread(rcu_torture_stall, stall_task);
index e4df86db8137e951237278bb0571ac484ec32faa..c612f306fe89032a5561a0ccf7028d873f8d5c73 100644 (file)
@@ -212,7 +212,11 @@ struct rcu_data {
        /* The following fields are used by GP kthread, hence own cacheline. */
        raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
        struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */
-       bool nocb_gp_sleep;             /* Is the nocb GP thread asleep? */
+       u8 nocb_gp_sleep;               /* Is the nocb GP thread asleep? */
+       u8 nocb_gp_bypass;              /* Found a bypass on last scan? */
+       u8 nocb_gp_gp;                  /* GP to wait for on last scan? */
+       unsigned long nocb_gp_seq;      /*  If so, ->gp_seq to wait for. */
+       unsigned long nocb_gp_loops;    /* # passes through wait code. */
        struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
        bool nocb_cb_sleep;             /* Is the nocb CB thread asleep? */
        struct task_struct *nocb_cb_kthread;
@@ -438,6 +442,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_cpu_nocb_kthread(int cpu);
 static void __init rcu_spawn_nocb_kthreads(void);
+static void show_rcu_nocb_state(struct rcu_data *rdp);
 static void rcu_nocb_lock(struct rcu_data *rdp);
 static void rcu_nocb_unlock(struct rcu_data *rdp);
 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
index 97c730753a6d0d807685aebcdb33bb5276c69b7d..25a53742ca68e076f7678d5964dc6eaa8a67a039 100644 (file)
@@ -2021,6 +2021,9 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
                        rcu_gp_kthread_wake();
        }
 
+       my_rdp->nocb_gp_bypass = bypass;
+       my_rdp->nocb_gp_gp = needwait_gp;
+       my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
        if (bypass && !rcu_nocb_poll) {
                // At least one child with non-empty ->nocb_bypass, so set
                // timer in order to avoid stranding its callbacks.
@@ -2055,6 +2058,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
                WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
                raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
        }
+       my_rdp->nocb_gp_seq = -1;
        WARN_ON(signal_pending(current));
 }
 
@@ -2071,6 +2075,7 @@ static int rcu_nocb_gp_kthread(void *arg)
        struct rcu_data *rdp = arg;
 
        for (;;) {
+               WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
                nocb_gp_wait(rdp);
                cond_resched_tasks_rcu_qs();
        }
@@ -2362,6 +2367,79 @@ void rcu_bind_current_to_nocb(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
 
+/*
+ * Dump out nocb grace-period kthread state for the specified rcu_data
+ * structure.
+ */
+static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
+{
+       struct rcu_node *rnp = rdp->mynode;
+
+       pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n",
+               rdp->cpu,
+               "kK"[!!rdp->nocb_gp_kthread],
+               "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "tT"[timer_pending(&rdp->nocb_timer)],
+               "bB"[timer_pending(&rdp->nocb_bypass_timer)],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[swait_active(&rdp->nocb_gp_wq)],
+               ".W"[swait_active(&rnp->nocb_gp_wq[0])],
+               ".W"[swait_active(&rnp->nocb_gp_wq[1])],
+               ".B"[!!rdp->nocb_gp_bypass],
+               ".G"[!!rdp->nocb_gp_gp],
+               (long)rdp->nocb_gp_seq,
+               rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
+}
+
+/* Dump out nocb kthread state for the specified rcu_data structure. */
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+       struct rcu_segcblist *rsclp = &rdp->cblist;
+       bool waslocked;
+       bool wastimer;
+       bool wassleep;
+
+       if (rdp->nocb_gp_rdp == rdp)
+               show_rcu_nocb_gp_state(rdp);
+
+       pr_info("   CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n",
+               rdp->cpu, rdp->nocb_gp_rdp->cpu,
+               "kK"[!!rdp->nocb_cb_kthread],
+               "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
+               "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
+               "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
+               "sS"[!!rdp->nocb_cb_sleep],
+               ".W"[swait_active(&rdp->nocb_cb_wq)],
+               jiffies - rdp->nocb_bypass_first,
+               jiffies - rdp->nocb_nobypass_last,
+               rdp->nocb_nobypass_count,
+               ".D"[rcu_segcblist_ready_cbs(rsclp)],
+               ".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
+               ".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
+               ".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
+               ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
+               rcu_segcblist_n_cbs(&rdp->cblist));
+
+       /* It is OK for GP kthreads to have GP state. */
+       if (rdp->nocb_gp_rdp == rdp)
+               return;
+
+       waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
+       wastimer = timer_pending(&rdp->nocb_timer);
+       wassleep = swait_active(&rdp->nocb_gp_wq);
+       if (!rdp->nocb_defer_wakeup && !rdp->nocb_gp_sleep &&
+           !waslocked && !wastimer && !wassleep)
+               return;  /* Nothing untowards. */
+
+       pr_info("   !!! %c%c%c%c %c\n",
+               "lL"[waslocked],
+               "dD"[!!rdp->nocb_defer_wakeup],
+               "tT"[wastimer],
+               "sS"[!!rdp->nocb_gp_sleep],
+               ".W"[wassleep]);
+}
+
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 /* No ->nocb_lock to acquire.  */
@@ -2439,6 +2517,10 @@ static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
+static void show_rcu_nocb_state(struct rcu_data *rdp)
+{
+}
+
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /*
index 0627a66699a602a3ab11baf4e2697c0b98fb7bc6..841ab43f3e60d4b26df3a63477fe65814947a826 100644 (file)
@@ -589,6 +589,11 @@ void show_rcu_gp_kthreads(void)
                                cpu, (long)rdp->gp_seq_needed);
                }
        }
+       for_each_possible_cpu(cpu) {
+               rdp = per_cpu_ptr(&rcu_data, cpu);
+               if (rcu_segcblist_is_offloaded(&rdp->cblist))
+                       show_rcu_nocb_state(rdp);
+       }
        /* sched_show_task(rcu_state.gp_kthread); */
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);