tasklets: Prevent tasklet_unlock_spin_wait() deadlock on RT
[sfrench/cifs-2.6.git] / kernel / softirq.c
index ba89ca77698ab62f01988bdeeb8afb8347e72776..f1eb83dc3d508f9caf75790ae564c3f6449cae72 100644 (file)
@@ -620,6 +620,32 @@ void tasklet_init(struct tasklet_struct *t,
 }
 EXPORT_SYMBOL(tasklet_init);
 
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	for (;;) {
		/* Done once the tasklet drops TASKLET_STATE_RUN. */
		if (!test_bit(TASKLET_STATE_RUN, &t->state))
			break;

		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
			continue;
		}

		/*
		 * On PREEMPT_RT a plain spin could live lock when the
		 * current task preempted soft interrupt processing or
		 * keeps ksoftirqd from running. Briefly disabling and
		 * re-enabling BH lets that pending work make progress.
		 * If the tasklet runs on a different CPU, this is just
		 * a harmless BH disable/enable dance.
		 */
		local_bh_disable();
		local_bh_enable();
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif
+
 void tasklet_kill(struct tasklet_struct *t)
 {
        if (in_interrupt())
@@ -633,7 +659,7 @@ void tasklet_kill(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(tasklet_kill);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 void tasklet_unlock(struct tasklet_struct *t)
 {
        smp_mb__before_atomic();