irq_work: Convert flags to atomic_t
author     Frederic Weisbecker <frederic@kernel.org>
           Fri, 8 Nov 2019 16:08:55 +0000 (17:08 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 11 Nov 2019 08:02:56 +0000 (09:02 +0100)
We need to convert flags to atomic_t in order to later fix an ordering
issue on atomic_cmpxchg() failure. This will allow us to use atomic_fetch_or().

Also clarify the nature of those flags.
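For illustration only (a hypothetical sketch, not part of this patch): once the
flags field is an atomic_t, a follow-up could claim the work with a single
atomic_fetch_or(), roughly along these lines:

    /* Sketch only: assumes a later patch switches the claim path to atomic_fetch_or() */
    static bool irq_work_claim(struct irq_work *work)
    {
            int oflags;

            oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
            /* If the work was already pending, the previous claimer handles it */
            if (oflags & IRQ_WORK_PENDING)
                    return false;

            return true;
    }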

[ mingo: Converted two more usage sites the original patch missed. ]

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191108160858.31665-2-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/irq_work.h
kernel/bpf/stackmap.c
kernel/irq_work.c
kernel/printk/printk.c
kernel/trace/bpf_trace.c

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index b11fcdfd077076e9593e9af3ea64f443d34572a2..02da997ad12cedc723ff42b0027323f30001ec7b 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -22,7 +22,7 @@
 #define IRQ_WORK_CLAIMED       (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
 
 struct irq_work {
-       unsigned long flags;
+       atomic_t flags;
        struct llist_node llnode;
        void (*func)(struct irq_work *);
 };
@@ -30,11 +30,15 @@ struct irq_work {
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-       work->flags = 0;
+       atomic_set(&work->flags, 0);
        work->func = func;
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {     \
+               .flags = ATOMIC_INIT(0),                        \
+               .func  = (_f)                                   \
+}
+
 
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 052580c33d2687e18d56a00067c6a6b81da8d409..4d31284095e29f0ddba9a667078fcb43c3da627d 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -289,7 +289,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 
        if (in_nmi()) {
                work = this_cpu_ptr(&up_read_work);
-               if (work->irq_work.flags & IRQ_WORK_BUSY)
+               if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
                        /* cannot queue more up_read, fallback */
                        irq_work_busy = true;
        }
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index d42acaf818861a8ee8e7d733139840d267be6ee0..df0dbf4d859b46eea8cdee40bbc05182f6f6b19f 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -29,16 +29,16 @@ static DEFINE_PER_CPU(struct llist_head, lazy_list);
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-       unsigned long flags, oflags, nflags;
+       int flags, oflags, nflags;
 
        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
-       flags = work->flags & ~IRQ_WORK_PENDING;
+       flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
-               oflags = cmpxchg(&work->flags, flags, nflags);
+               oflags = atomic_cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
@@ -61,7 +61,7 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
        /* If the work is "lazy", handle it from next tick if any */
-       if (work->flags & IRQ_WORK_LAZY) {
+       if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
@@ -143,7 +143,7 @@ static void irq_work_run_list(struct llist_head *list)
 {
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
-       unsigned long flags;
+       int flags;
 
        BUG_ON(!irqs_disabled());
 
@@ -159,15 +159,15 @@ static void irq_work_run_list(struct llist_head *list)
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
-               flags = work->flags & ~IRQ_WORK_PENDING;
-               xchg(&work->flags, flags);
+               flags = atomic_read(&work->flags) & ~IRQ_WORK_PENDING;
+               atomic_xchg(&work->flags, flags);
 
                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
-               (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+               (void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
 }
 
@@ -199,7 +199,7 @@ void irq_work_sync(struct irq_work *work)
 {
        lockdep_assert_irqs_enabled();
 
-       while (work->flags & IRQ_WORK_BUSY)
+       while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
                cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ca65327a6de8cb3b9fe92867d34c6bcfd67b6bd6..865727373a3bf2818908931da8ef1409365482a6 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2961,7 +2961,7 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
 
 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
        .func = wake_up_klogd_work_func,
-       .flags = IRQ_WORK_LAZY,
+       .flags = ATOMIC_INIT(IRQ_WORK_LAZY),
 };
 
 void wake_up_klogd(void)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 44bd08f2443b3bec8928ab5c9f9080af1c8a4c02..ff467a4e263925e775961dc2847f88239b5a3eff 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -660,7 +660,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
                        return -EINVAL;
 
                work = this_cpu_ptr(&send_signal_work);
-               if (work->irq_work.flags & IRQ_WORK_BUSY)
+               if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
                        return -EBUSY;
 
                /* Add the current task, which is the target of sending signal,