rcu: Add lockdep_assert_irqs_disabled() to rcu_sched_clock_irq() and callees
Author: Paul E. McKenney <paulmck@kernel.org>
Thu, 19 Nov 2020 18:13:06 +0000 (10:13 -0800)
Committer: Paul E. McKenney <paulmck@kernel.org>
Mon, 4 Jan 2021 23:54:49 +0000 (15:54 -0800)
This commit adds a number of lockdep_assert_irqs_disabled() calls
to rcu_sched_clock_irq() and a number of the functions that it calls.
The point of this is to help track down a situation where lockdep appears
to be insisting that interrupts are enabled within these functions, which
should only ever be invoked from the scheduling-clock interrupt handler.

Link: https://lore.kernel.org/lkml/20201111133813.GA81547@elver.google.com/
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h

index bd04b09b84b327cde1aa8472986dfc0a5ff39719..f70634f7c3aa4bf553c6072ee64cce5cb485c2fe 100644 (file)
@@ -2553,6 +2553,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_sched_clock_irq(int user)
 {
        trace_rcu_utilization(TPS("Start scheduler-tick"));
+       lockdep_assert_irqs_disabled();
        raw_cpu_inc(rcu_data.ticks_this_gp);
        /* The load-acquire pairs with the store-release setting to true. */
        if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
@@ -2566,6 +2567,7 @@ void rcu_sched_clock_irq(int user)
        rcu_flavor_sched_clock_irq(user);
        if (rcu_pending(user))
                invoke_rcu_core();
+       lockdep_assert_irqs_disabled();
 
        trace_rcu_utilization(TPS("End scheduler-tick"));
 }
@@ -3690,6 +3692,8 @@ static int rcu_pending(int user)
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
 
+       lockdep_assert_irqs_disabled();
+
        /* Check for CPU stalls, if enabled. */
        check_cpu_stall(rdp);
 
index fd8a52e9a88749839a9f8d4f0ddef6143e4e450d..cb76e70da8ca8d80b169ad4f6850a6cc6152b977 100644 (file)
@@ -682,6 +682,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 {
        struct task_struct *t = current;
 
+       lockdep_assert_irqs_disabled();
        if (user || rcu_is_cpu_rrupt_from_idle()) {
                rcu_note_voluntary_context_switch(current);
        }
index ca21d28a0f98f8d82a6031aa65469d0a7915fda4..4024dcc78aac9a126678a8d9005c2826fda9ddb4 100644 (file)
@@ -260,6 +260,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
        struct task_struct *t;
        struct task_struct *ts[8];
 
+       lockdep_assert_irqs_disabled();
        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
@@ -284,6 +285,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
                                ".q"[rscr.rs.b.need_qs],
                                ".e"[rscr.rs.b.exp_hint],
                                ".l"[rscr.on_blkd_list]);
+               lockdep_assert_irqs_disabled();
                put_task_struct(t);
                ndetected++;
        }
@@ -472,6 +474,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
        struct rcu_node *rnp;
        long totqlen = 0;
 
+       lockdep_assert_irqs_disabled();
+
        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_stall_is_suppressed())
@@ -493,6 +497,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
                                }
                }
                ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
+               lockdep_assert_irqs_disabled();
        }
 
        for_each_possible_cpu(cpu)
@@ -538,6 +543,8 @@ static void print_cpu_stall(unsigned long gps)
        struct rcu_node *rnp = rcu_get_root();
        long totqlen = 0;
 
+       lockdep_assert_irqs_disabled();
+
        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_stall_is_suppressed())
@@ -592,6 +599,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
        unsigned long js;
        struct rcu_node *rnp;
 
+       lockdep_assert_irqs_disabled();
        if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
            !rcu_gp_in_progress())
                return;