Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[sfrench/cifs-2.6.git] / arch/powerpc/include/asm/hw_irq.h
index 3818fa0164f0b678144ed56354130f657f20161e..88e5e8f17e9896e5a051845235bfa0c822684552 100644
 #define PACA_IRQ_DEC           0x08 /* Or FIT */
 #define PACA_IRQ_EE_EDGE       0x10 /* BookE only */
 #define PACA_IRQ_HMI           0x20
+#define PACA_IRQ_PMI           0x40
+
+/*
+ * flags for paca->irq_soft_mask
+ */
+#define IRQS_ENABLED           0
+#define IRQS_DISABLED          1 /* interrupts masked by local_irq_disable() */
+#define IRQS_PMI_DISABLED      2
+#define IRQS_ALL_DISABLED      (IRQS_DISABLED | IRQS_PMI_DISABLED)
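Note that the encoding is inverted relative to the old soft_enabled byte, which was 1 when interrupts were soft-enabled: irq_soft_mask is 0 when nothing is masked and accumulates bits as interrupt classes get masked, so callers test individual bits rather than comparing against zero. A minimal sketch of the idea, not taken from the patch (the helper name is made up):

	/* Illustrative only: IRQS_ALL_DISABLED is just the union of the two
	 * mask bits, so "are PMIs masked?" is a simple bit test.
	 */
	static inline bool example_pmis_masked(unsigned long mask)
	{
		return (mask & IRQS_PMI_DISABLED) != 0;
	}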
 
 #endif /* CONFIG_PPC64 */
 
@@ -43,46 +52,112 @@ extern void unknown_exception(struct pt_regs *regs);
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long irq_soft_mask_return(void)
 {
        unsigned long flags;
 
        asm volatile(
                "lbz %0,%1(13)"
                : "=r" (flags)
-               : "i" (offsetof(struct paca_struct, soft_enabled)));
+               : "i" (offsetof(struct paca_struct, irq_soft_mask)));
+
+       return flags;
+}
+
+/*
+ * The "memory" clobber acts as a compiler barrier for the critical
+ * section, and also tells the compiler that paca->irq_soft_mask has
+ * changed, so it must not reuse a value cached across this point.
+ */
+static inline notrace void irq_soft_mask_set(unsigned long mask)
+{
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+       /*
+        * The irq mask must always include the STD bit if any are set,
+        * and interrupts don't get replayed until the standard
+        * interrupt (local_irq_disable()) is unmasked.
+        *
+        * Other masks must only provide additional masking beyond
+        * the standard, and they are also not replayed until the
+        * standard interrupt becomes unmasked.
+        *
+        * This could be changed, but it will require partial
+        * unmasks to be replayed, among other things. For now, take
+        * the simple approach.
+        */
+       WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
+       asm volatile(
+               "stb %0,%1(13)"
+               :
+               : "r" (mask),
+                 "i" (offsetof(struct paca_struct, irq_soft_mask))
+               : "memory");
+}
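For readers less used to the inline asm, here is a rough C-level sketch of what irq_soft_mask_set() does. It is illustrative only: the real code must perform a single byte store through r13 that the compiler cannot tear or move, which is exactly what the asm plus the "memory" clobber guarantees. The sketch assumes the usual kernel context (local_paca from asm/paca.h, barrier() from linux/compiler.h).

	/* Conceptual equivalent only, not a drop-in replacement. */
	static inline void irq_soft_mask_set_sketch(unsigned long mask)
	{
		/* Compiler barriers on both sides: accesses inside the
		 * soft-masked critical section must not be reordered
		 * across the mask update.
		 */
		barrier();
		local_paca->irq_soft_mask = mask;
		barrier();
	}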
+
+static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
+{
+       unsigned long flags;
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+       WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
+       asm volatile(
+               "lbz %0,%1(13); stb %2,%1(13)"
+               : "=&r" (flags)
+               : "i" (offsetof(struct paca_struct, irq_soft_mask)),
+                 "r" (mask)
+               : "memory");
 
        return flags;
 }
 
-static inline unsigned long arch_local_irq_disable(void)
+static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 {
-       unsigned long flags, zero;
+       unsigned long flags, tmp;
 
        asm volatile(
-               "li %1,0; lbz %0,%2(13); stb %1,%2(13)"
-               : "=r" (flags), "=&r" (zero)
-               : "i" (offsetof(struct paca_struct, soft_enabled))
+               "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
+               : "=&r" (flags), "=r" (tmp)
+               : "i" (offsetof(struct paca_struct, irq_soft_mask)),
+                 "r" (mask)
                : "memory");
 
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+       WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
+#endif
+
        return flags;
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+       return irq_soft_mask_return();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+       irq_soft_mask_set(IRQS_DISABLED);
+}
+
 extern void arch_local_irq_restore(unsigned long);
 
 static inline void arch_local_irq_enable(void)
 {
-       arch_local_irq_restore(1);
+       arch_local_irq_restore(IRQS_ENABLED);
 }
 
 static inline unsigned long arch_local_irq_save(void)
 {
-       return arch_local_irq_disable();
+       return irq_soft_mask_set_return(IRQS_DISABLED);
 }
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-       return flags == 0;
+       return flags & IRQS_DISABLED;
 }
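Because the flags value is now a mask, "interrupts disabled" means the IRQS_DISABLED bit is set rather than the whole value being non-zero; the debug WARN_ONs above ensure a PMI-only mask never exists on its own. An illustrative check, not from the patch (the helper name is made up):

	/* Illustrative only: how the new encoding behaves. */
	static inline void example_mask_semantics(void)
	{
		WARN_ON(arch_irqs_disabled_flags(IRQS_ENABLED));	/* 0 => nothing masked */
		WARN_ON(!arch_irqs_disabled_flags(IRQS_DISABLED));
		WARN_ON(!arch_irqs_disabled_flags(IRQS_ALL_DISABLED));
	}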
 
 static inline bool arch_irqs_disabled(void)
@@ -90,6 +165,55 @@ static inline bool arch_irqs_disabled(void)
        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * To support disabling and enabling of irqs together with PMIs, a new
+ * pair of macros, powerpc_local_irq_pmu_save() and
+ * powerpc_local_irq_pmu_restore(), is added. They are implemented using
+ * the generic Linux local_irq_* code from include/linux/irqflags.h.
+ */
+#define raw_local_irq_pmu_save(flags)                                  \
+       do {                                                            \
+               typecheck(unsigned long, flags);                        \
+               flags = irq_soft_mask_or_return(IRQS_DISABLED | \
+                               IRQS_PMI_DISABLED);                     \
+       } while(0)
+
+#define raw_local_irq_pmu_restore(flags)                               \
+       do {                                                            \
+               typecheck(unsigned long, flags);                        \
+               arch_local_irq_restore(flags);                          \
+       } while(0)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define powerpc_local_irq_pmu_save(flags)                      \
+        do {                                                   \
+               raw_local_irq_pmu_save(flags);                  \
+               trace_hardirqs_off();                           \
+       } while(0)
+#define powerpc_local_irq_pmu_restore(flags)                   \
+       do {                                                    \
+               if (raw_irqs_disabled_flags(flags)) {           \
+                       raw_local_irq_pmu_restore(flags);       \
+                       trace_hardirqs_off();                   \
+               } else {                                        \
+                       trace_hardirqs_on();                    \
+                       raw_local_irq_pmu_restore(flags);       \
+               }                                               \
+       } while(0)
+#else
+#define powerpc_local_irq_pmu_save(flags)                      \
+       do {                                                    \
+               raw_local_irq_pmu_save(flags);                  \
+       } while(0)
+#define powerpc_local_irq_pmu_restore(flags)                   \
+       do {                                                    \
+               raw_local_irq_pmu_restore(flags);               \
+       } while (0)
+#endif  /* CONFIG_TRACE_IRQFLAGS */
+
+#endif /* CONFIG_PPC_BOOK3S */
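A usage sketch for the new PMU-aware masking macros; the caller and the protected state are made up, but the pattern shows the intended bracket-style use described in the comment above:

	/* Illustrative caller, not part of the patch: keep both normal
	 * interrupts and PMIs out of the critical section, then restore
	 * whatever masking was in effect before.
	 */
	static void example_touch_pmu_state(void)
	{
		unsigned long flags;

		powerpc_local_irq_pmu_save(flags);
		/* ... update state also touched by the PMI handler ... */
		powerpc_local_irq_pmu_restore(flags);
	}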
+
 #ifdef CONFIG_PPC_BOOK3E
 #define __hard_irq_enable()    asm volatile("wrteei 1" : : : "memory")
 #define __hard_irq_disable()   asm volatile("wrteei 0" : : : "memory")
@@ -98,14 +222,13 @@ static inline bool arch_irqs_disabled(void)
 #define __hard_irq_disable()   __mtmsrd(local_paca->kernel_msr, 1)
 #endif
 
-#define hard_irq_disable()     do {                    \
-       u8 _was_enabled;                                \
-       __hard_irq_disable();                           \
-       _was_enabled = local_paca->soft_enabled;        \
-       local_paca->soft_enabled = 0;                   \
-       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;  \
-       if (_was_enabled)                               \
-               trace_hardirqs_off();                   \
+#define hard_irq_disable()     do {                                    \
+       unsigned long flags;                                            \
+       __hard_irq_disable();                                           \
+       flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);            \
+       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;                  \
+       if (!arch_irqs_disabled_flags(flags))                           \
+               trace_hardirqs_off();                                   \
 } while(0)
 
 static inline bool lazy_irq_pending(void)
@@ -127,7 +250,7 @@ static inline void may_hard_irq_enable(void)
 
 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 {
-       return !regs->softe;
+       return (regs->softe & IRQS_DISABLED);
 }
 
 extern bool prep_irq_for_idle(void);