[PATCH] lockdep: irqtrace subsystem, core
author     Ingo Molnar <mingo@elte.hu>
           Mon, 3 Jul 2006 07:24:42 +0000 (00:24 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Mon, 3 Jul 2006 22:27:03 +0000 (15:27 -0700)

Accurate hard-IRQ-flags and softirq-flags state tracing.

This allows us to attach extra functionality to IRQ flags on/off
events (such as trace-on/off).
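
As a rough illustration of that wrapping idea, here is a minimal user-space
sketch (not kernel code; every demo_* name is hypothetical).  It only mirrors
the ordering the new <linux/irqflags.h> wrappers use: disable first and then
report "off", report "on" first and then enable:

    #include <stdio.h>

    static int demo_irqs_enabled = 1;           /* stand-in for the CPU IRQ flag */

    static void demo_raw_irq_disable(void) { demo_irqs_enabled = 0; }
    static void demo_raw_irq_enable(void)  { demo_irqs_enabled = 1; }

    /* stand-ins for the callbacks this patch adds as trace_hardirqs_off()/on() */
    static void demo_trace_hardirqs_off(void) { printf("hardirqs: off\n"); }
    static void demo_trace_hardirqs_on(void)  { printf("hardirqs: on\n"); }

    #define demo_local_irq_disable() \
            do { demo_raw_irq_disable(); demo_trace_hardirqs_off(); } while (0)
    #define demo_local_irq_enable() \
            do { demo_trace_hardirqs_on(); demo_raw_irq_enable(); } while (0)

    int main(void)
    {
            demo_local_irq_disable();       /* critical section would go here */
            demo_local_irq_enable();
            return 0;
    }

The real wrappers also cover local_irq_save()/local_irq_restore(); the
restore path inspects the saved flags to decide which callback to invoke.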

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/powerpc/kernel/irq.c
include/asm-powerpc/irqflags.h [new file with mode: 0644]
include/linux/hardirq.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/irqflags.h [new file with mode: 0644]
include/linux/sched.h
kernel/fork.c
kernel/sched.c
kernel/softirq.c

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 525baab45d2d83d667801ad7ba977b8863b94baf..027728b95429faced458aa63efdb22bee68e2183 100644
@@ -429,7 +429,7 @@ void do_softirq(void)
                local_bh_disable();
                do_softirq_onstack();
                account_system_vtime(current);
-               __local_bh_enable();
+               _local_bh_enable();
        }
 
        local_irq_restore(flags);
diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h
new file mode 100644
index 0000000..7970cba
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * include/asm-powerpc/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+/*
+ * Get definitions for raw_local_save_flags(x), etc.
+ */
+#include <asm-powerpc/hw_irq.h>
+
+/*
+ * Do the CPU's IRQ-state tracing from assembly code. We call a
+ * C function, so save all the C-clobbered registers:
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS
+
+#else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+
+#endif
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 114ae583cca9a28480ba684c342a54b97bf46686..b1d4332b5cf0f22e2bf6f2fabf6aadb0b41f1680 100644
@@ -86,9 +86,6 @@ extern void synchronize_irq(unsigned int irq);
 # define synchronize_irq(irq)  barrier()
 #endif
 
-#define nmi_enter()            irq_enter()
-#define nmi_exit()             sub_preempt_count(HARDIRQ_OFFSET)
-
 struct task_struct;
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +94,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
 #define irq_enter()                                    \
        do {                                            \
                account_system_vtime(current);          \
                add_preempt_count(HARDIRQ_OFFSET);      \
+               trace_hardirq_enter();                  \
+       } while (0)
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit()                                   \
+       do {                                            \
+               trace_hardirq_exit();                   \
+               account_system_vtime(current);          \
+               sub_preempt_count(HARDIRQ_OFFSET);      \
        } while (0)
 
+/*
+ * Exit irq context and process softirqs if needed:
+ */
 extern void irq_exit(void);
 
+#define nmi_enter()            irq_enter()
+#define nmi_exit()             __irq_exit()
+
 #endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1b7bb37624bb0ed67cfd3932888e02b444d8ba07..444a3ae0de2a44e76235c80415911de14cbd63f7 100644
@@ -3,6 +3,7 @@
 
 #include <linux/file.h>
 #include <linux/rcupdate.h>
+#include <linux/irqflags.h>
 
 #define INIT_FDTABLE \
 {                                                      \
@@ -124,6 +125,7 @@ extern struct group_info init_groups;
        .cpu_timers     = INIT_CPU_TIMERS(tsk.cpu_timers),              \
        .fs_excl        = ATOMIC_INIT(0),                               \
        .pi_lock        = SPIN_LOCK_UNLOCKED,                           \
+       INIT_TRACE_IRQFLAGS                                             \
 }
 
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 73463fbb38e4d84ae66fe532651bf8603a43bcab..d5afee95fd435294f2ee894cd75a31b7cc96be7b 100644
@@ -10,6 +10,7 @@
 #include <linux/irqreturn.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/irqflags.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -199,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)        save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-               do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-               do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 0000000..412e025
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * include/linux/irqflags.h
+ *
+ * IRQ flags tracing: follow the state of the hardirq and softirq flags and
+ * provide callbacks for transitions between ON and OFF states.
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _LINUX_TRACE_IRQFLAGS_H
+#define _LINUX_TRACE_IRQFLAGS_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+  extern void trace_hardirqs_on(void);
+  extern void trace_hardirqs_off(void);
+  extern void trace_softirqs_on(unsigned long ip);
+  extern void trace_softirqs_off(unsigned long ip);
+# define trace_hardirq_context(p)      ((p)->hardirq_context)
+# define trace_softirq_context(p)      ((p)->softirq_context)
+# define trace_hardirqs_enabled(p)     ((p)->hardirqs_enabled)
+# define trace_softirqs_enabled(p)     ((p)->softirqs_enabled)
+# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+# define trace_hardirq_exit()  do { current->hardirq_context--; } while (0)
+# define trace_softirq_enter() do { current->softirq_context++; } while (0)
+# define trace_softirq_exit()  do { current->softirq_context--; } while (0)
+# define INIT_TRACE_IRQFLAGS   .softirqs_enabled = 1,
+#else
+# define trace_hardirqs_on()           do { } while (0)
+# define trace_hardirqs_off()          do { } while (0)
+# define trace_softirqs_on(ip)         do { } while (0)
+# define trace_softirqs_off(ip)                do { } while (0)
+# define trace_hardirq_context(p)      0
+# define trace_softirq_context(p)      0
+# define trace_hardirqs_enabled(p)     0
+# define trace_softirqs_enabled(p)     0
+# define trace_hardirq_enter()         do { } while (0)
+# define trace_hardirq_exit()          do { } while (0)
+# define trace_softirq_enter()         do { } while (0)
+# define trace_softirq_exit()          do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+
+#include <asm/irqflags.h>
+
+#define local_irq_enable() \
+       do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
+#define local_irq_disable() \
+       do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+#define local_irq_save(flags) \
+       do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
+
+#define local_irq_restore(flags)                               \
+       do {                                                    \
+               if (raw_irqs_disabled_flags(flags)) {           \
+                       raw_local_irq_restore(flags);           \
+                       trace_hardirqs_off();                   \
+               } else {                                        \
+                       trace_hardirqs_on();                    \
+                       raw_local_irq_restore(flags);           \
+               }                                               \
+       } while (0)
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+/*
+ * The local_irq_*() APIs are equal to the raw_local_irq*()
+ * if !TRACE_IRQFLAGS.
+ */
+# define raw_local_irq_disable()       local_irq_disable()
+# define raw_local_irq_enable()                local_irq_enable()
+# define raw_local_irq_save(flags)     local_irq_save(flags)
+# define raw_local_irq_restore(flags)  local_irq_restore(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#define safe_halt()                                            \
+       do {                                                    \
+               trace_hardirqs_on();                            \
+               raw_safe_halt();                                \
+       } while (0)
+
+#define local_save_flags(flags)                raw_local_save_flags(flags)
+
+#define irqs_disabled()                                                \
+({                                                             \
+       unsigned long flags;                                    \
+                                                               \
+       raw_local_save_flags(flags);                            \
+       raw_irqs_disabled_flags(flags);                         \
+})
+
+#define irqs_disabled_flags(flags)     raw_irqs_disabled_flags(flags)
+#endif		/* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bdabeee10a782acc03b176ca3a29cdcefbeaee57..ad7a89014d29719181ffc475391d7a1d63524a04 100644
@@ -871,6 +871,21 @@ struct task_struct {
        /* mutex deadlock detection */
        struct mutex_waiter *blocked_on;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+       unsigned int irq_events;
+       int hardirqs_enabled;
+       unsigned long hardirq_enable_ip;
+       unsigned int hardirq_enable_event;
+       unsigned long hardirq_disable_ip;
+       unsigned int hardirq_disable_event;
+       int softirqs_enabled;
+       unsigned long softirq_disable_ip;
+       unsigned int softirq_disable_event;
+       unsigned long softirq_enable_ip;
+       unsigned int softirq_enable_event;
+       int hardirq_context;
+       int softirq_context;
+#endif
 
 /* journalling filesystem info */
        void *journal_info;
diff --git a/kernel/fork.c b/kernel/fork.c
index 1cd46a4fb0d38b614bbb576003663b0c4d793de1..b7db7fb74f531ac6d1b3818a1c61dcf27c22c361 100644
@@ -968,6 +968,10 @@ static task_t *copy_process(unsigned long clone_flags,
        if (!p)
                goto fork_out;
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+       DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+       DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
@@ -1042,6 +1046,21 @@ static task_t *copy_process(unsigned long clone_flags,
        }
        mpol_fix_fork_child_flag(p);
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+       p->irq_events = 0;
+       p->hardirqs_enabled = 0;
+       p->hardirq_enable_ip = 0;
+       p->hardirq_enable_event = 0;
+       p->hardirq_disable_ip = _THIS_IP_;
+       p->hardirq_disable_event = 0;
+       p->softirqs_enabled = 1;
+       p->softirq_enable_ip = _THIS_IP_;
+       p->softirq_enable_event = 0;
+       p->softirq_disable_ip = 0;
+       p->softirq_disable_event = 0;
+       p->hardirq_context = 0;
+       p->softirq_context = 0;
+#endif
 
        rt_mutex_init_task(p);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 48c1faa60a676061c3d365b417b2db9fcd82b041..9118299665344b94f432f2b76c37a3b020fe45fe 100644
@@ -4462,7 +4462,9 @@ int __sched cond_resched_softirq(void)
        BUG_ON(!in_softirq());
 
        if (need_resched() && __resched_legal()) {
-               __local_bh_enable();
+               raw_local_irq_disable();
+               _local_bh_enable();
+               raw_local_irq_enable();
                __cond_resched();
                local_bh_disable();
                return 1;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8f03e3b89b5540a0e21b1bab99dcb455bb74ffd5..584609b6a66e68297c2f1ef03d1f312ae1700e4a 100644
@@ -61,6 +61,119 @@ static inline void wakeup_softirqd(void)
                wake_up_process(tsk);
 }
 
+/*
+ * This one is for softirq.c-internal use,
+ * where hardirqs are disabled legitimately:
+ */
+static void __local_bh_disable(unsigned long ip)
+{
+       unsigned long flags;
+
+       WARN_ON_ONCE(in_irq());
+
+       raw_local_irq_save(flags);
+       add_preempt_count(SOFTIRQ_OFFSET);
+       /*
+        * Were softirqs turned off above:
+        */
+       if (softirq_count() == SOFTIRQ_OFFSET)
+               trace_softirqs_off(ip);
+       raw_local_irq_restore(flags);
+}
+
+void local_bh_disable(void)
+{
+       __local_bh_disable((unsigned long)__builtin_return_address(0));
+}
+
+EXPORT_SYMBOL(local_bh_disable);
+
+void __local_bh_enable(void)
+{
+       WARN_ON_ONCE(in_irq());
+
+       /*
+        * softirqs should never be enabled by __local_bh_enable(),
+        * it always nests inside local_bh_enable() sections:
+        */
+       WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
+
+       sub_preempt_count(SOFTIRQ_OFFSET);
+}
+EXPORT_SYMBOL_GPL(__local_bh_enable);
+
+/*
+ * Special-case - softirqs can safely be enabled in
+ * cond_resched_softirq(), or by __do_softirq(),
+ * without processing still-pending softirqs:
+ */
+void _local_bh_enable(void)
+{
+       WARN_ON_ONCE(in_irq());
+       WARN_ON_ONCE(!irqs_disabled());
+
+       if (softirq_count() == SOFTIRQ_OFFSET)
+               trace_softirqs_on((unsigned long)__builtin_return_address(0));
+       sub_preempt_count(SOFTIRQ_OFFSET);
+}
+
+EXPORT_SYMBOL(_local_bh_enable);
+
+void local_bh_enable(void)
+{
+       unsigned long flags;
+
+       WARN_ON_ONCE(in_irq());
+       WARN_ON_ONCE(irqs_disabled());
+
+       local_irq_save(flags);
+       /*
+        * Are softirqs going to be turned on now:
+        */
+       if (softirq_count() == SOFTIRQ_OFFSET)
+               trace_softirqs_on((unsigned long)__builtin_return_address(0));
+       /*
+        * Keep preemption disabled until we are done with
+        * softirq processing:
+        */
+       sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+       if (unlikely(!in_interrupt() && local_softirq_pending()))
+               do_softirq();
+
+       dec_preempt_count();
+       local_irq_restore(flags);
+       preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
+void local_bh_enable_ip(unsigned long ip)
+{
+       unsigned long flags;
+
+       WARN_ON_ONCE(in_irq());
+
+       local_irq_save(flags);
+       /*
+        * Are softirqs going to be turned on now:
+        */
+       if (softirq_count() == SOFTIRQ_OFFSET)
+               trace_softirqs_on(ip);
+       /*
+        * Keep preemption disabled until we are done with
+        * softirq processing:
+        */
+       sub_preempt_count(SOFTIRQ_OFFSET - 1);
+
+       if (unlikely(!in_interrupt() && local_softirq_pending()))
+               do_softirq();
+
+       dec_preempt_count();
+       local_irq_restore(flags);
+       preempt_check_resched();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -80,8 +193,9 @@ asmlinkage void __do_softirq(void)
        int cpu;
 
        pending = local_softirq_pending();
+       __local_bh_disable((unsigned long)__builtin_return_address(0));
+       trace_softirq_enter();
 
-       local_bh_disable();
        cpu = smp_processor_id();
 restart:
        /* Reset the pending bitmask before enabling irqs */
@@ -109,7 +223,8 @@ restart:
        if (pending)
                wakeup_softirqd();
 
-       __local_bh_enable();
+       trace_softirq_exit();
+       _local_bh_enable();
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -136,23 +251,6 @@ EXPORT_SYMBOL(do_softirq);
 
 #endif
 
-void local_bh_enable(void)
-{
-       WARN_ON(irqs_disabled());
-       /*
-        * Keep preemption disabled until we are done with
-        * softirq processing:
-        */
-       sub_preempt_count(SOFTIRQ_OFFSET - 1);
-
-       if (unlikely(!in_interrupt() && local_softirq_pending()))
-               do_softirq();
-
-       dec_preempt_count();
-       preempt_check_resched();
-}
-EXPORT_SYMBOL(local_bh_enable);
-
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
 # define invoke_softirq()      __do_softirq()
 #else
@@ -165,6 +263,7 @@ EXPORT_SYMBOL(local_bh_enable);
 void irq_exit(void)
 {
        account_system_vtime(current);
+       trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
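
The kernel/softirq.c hunks above fire trace_softirqs_off()/on() only when
softirq_count() crosses SOFTIRQ_OFFSET, i.e. on the outermost
disable/enable, never on nested ones.  A minimal user-space sketch of that
nesting rule (all demo_* names are hypothetical, not kernel APIs):

    #include <stdio.h>

    #define DEMO_SOFTIRQ_OFFSET 1

    static int demo_softirq_count;      /* stand-in for the softirq bits of preempt_count() */

    static void demo_trace_softirqs_off(void) { printf("softirqs: off\n"); }
    static void demo_trace_softirqs_on(void)  { printf("softirqs: on\n"); }

    static void demo_local_bh_disable(void)
    {
            demo_softirq_count += DEMO_SOFTIRQ_OFFSET;
            if (demo_softirq_count == DEMO_SOFTIRQ_OFFSET)  /* 0 -> 1: outermost */
                    demo_trace_softirqs_off();
    }

    static void demo_local_bh_enable(void)
    {
            if (demo_softirq_count == DEMO_SOFTIRQ_OFFSET)  /* 1 -> 0: outermost */
                    demo_trace_softirqs_on();
            demo_softirq_count -= DEMO_SOFTIRQ_OFFSET;
    }

    int main(void)
    {
            demo_local_bh_disable();    /* outermost: "off" callback fires */
            demo_local_bh_disable();    /* nested: silent */
            demo_local_bh_enable();     /* nested: silent */
            demo_local_bh_enable();     /* outermost: "on" callback fires */
            return 0;
    }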