powerpc/64s: make PACA_IRQ_HARD_DIS track MSR[EE] closely
Author:     Nicholas Piggin <npiggin@gmail.com>
AuthorDate: Sun, 3 Jun 2018 12:24:32 +0000 (22:24 +1000)
Commit:     Michael Ellerman <mpe@ellerman.id.au>
CommitDate: Tue, 24 Jul 2018 12:03:14 +0000 (22:03 +1000)
When the masked interrupt handler clears MSR[EE] for an interrupt in
the PACA_IRQ_MUST_HARD_MASK set, it does not set PACA_IRQ_HARD_DIS.
This lets MSR[EE] and PACA_IRQ_HARD_DIS get out of sync.
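
In rough C terms (the real path is the masked_interrupt assembly in
exceptions-64s.S / exceptions-64e.S, so this is only an illustrative
sketch, not code from this patch), the pre-patch behaviour is:

	/*
	 * Illustrative sketch only: the real masked interrupt path is
	 * assembly, and this helper does not exist in the tree.
	 */
	static void masked_interrupt_sketch(unsigned long paca_irq)
	{
		/* remember the interrupt that arrived while soft-disabled */
		local_paca->irq_happened |= paca_irq;

		if (paca_irq & PACA_IRQ_MUST_HARD_MASK) {
			/*
			 * MSR[EE] is cleared in the saved SRR1 so the source
			 * stays hard masked on return, but nothing sets
			 * PACA_IRQ_HARD_DIS, so the flag and MSR[EE] disagree.
			 */
		}
	}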

With that fixed, the only places the two can be out of sync are low level
irq manipulation (and interrupt entry before the IRQ state is reconciled).
This makes the code less surprising.

It also allows the IRQ replay code to rely on the PACA_IRQ_HARD_DIS value
and avoid another mtmsrd in this case (e.g., for an external interrupt
that has been masked). The bigger benefit may simply be that there is no
longer an element of surprise in these two bits of state.
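
With the patch applied, the intended invariant (outside of the low level
masking/replay code and interrupt entry before the IRQ state is
reconciled) can be spelled out as a hypothetical debug check, in the
style of the existing CONFIG_PPC_IRQ_SOFT_MASK_DEBUG warnings; this
helper is not part of the patch:

	/* Hypothetical helper, not added by this patch. */
	static inline void check_hard_dis_tracks_ee(void)
	{
		if (local_paca->irq_happened & PACA_IRQ_HARD_DIS)
			WARN_ON_ONCE(mfmsr() & MSR_EE);    /* flag set => EE clear */
		else
			WARN_ON_ONCE(!(mfmsr() & MSR_EE)); /* flag clear => EE set */
	}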

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/idle_book3e.S
arch/powerpc/kernel/irq.c

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e151774cb5772da5dc92d524a12b595a5750bcc4..32a18f2f49bc80ed98f7af4e101ba6c8f47bacb2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -253,14 +253,16 @@ static inline bool lazy_irq_pending(void)
 
 /*
  * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts when soft-disabled after having
- * cleared the source of the interrupt
+ * re-enable hard interrupts after having cleared the source
+ * of the interrupt. They are kept disabled if there is a different
+ * soft-masked interrupt pending that requires hard masking.
  */
 static inline void may_hard_irq_enable(void)
 {
-       get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-       if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
+       if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
+               get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
                __hard_irq_enable();
+       }
 }
 
 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 729e9ef4d3bb844510072bfe4153cd38d7e7e9a5..0357f87a013c3ee16567dd9d9b734fdc2cd7b6ec 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -992,6 +992,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        or      r4,r4,r3
        std     r4,_TRAP(r1)
 
+       /*
+        * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+        * to reconcile the IRQ state. Tracing is already accounted for.
+        */
+       lbz     r4,PACAIRQHAPPENED(r13)
+       ori     r4,r4,PACA_IRQ_HARD_DIS
+       stb     r4,PACAIRQHAPPENED(r13)
+
        /*
         * Then find the right handler and call it. Interrupts are
         * still soft-disabled and we keep them that way.
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9b6e653e501a1264a8bfa398400dd88ecfa38d25..3325f721e7b251c17fb5a10645e1fb31f8d8417c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -949,7 +949,11 @@ kernel_dbg_exc:
 
 .macro masked_interrupt_book3e paca_irq full_mask
        lbz     r10,PACAIRQHAPPENED(r13)
+       .if \full_mask == 1
+       ori     r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
+       .else
        ori     r10,r10,\paca_irq
+       .endif
        stb     r10,PACAIRQHAPPENED(r13)
 
        .if \full_mask == 1
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 76a14702cb9c21da8acbfc4486a72d55b9404c1b..36aec1f1cb2d8c4c408977c89cf2d980c457cdec 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1498,7 +1498,10 @@ masked_##_H##interrupt:                                  \
        mfspr   r10,SPRN_##_H##SRR1;                    \
        xori    r10,r10,MSR_EE; /* clear MSR_EE */      \
        mtspr   SPRN_##_H##SRR1,r10;                    \
-2:     mtcrf   0x80,r9;                                \
+       ori     r11,r11,PACA_IRQ_HARD_DIS;              \
+       stb     r11,PACAIRQHAPPENED(r13);               \
+2:     /* done */                                      \
+       mtcrf   0x80,r9;                                \
        std     r1,PACAR1(r13);                         \
        ld      r9,PACA_EXGEN+EX_R9(r13);               \
        ld      r10,PACA_EXGEN+EX_R10(r13);             \
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 2b269315d37701a3cf3c11e654da681c5ec3c0a5..4e0d94d02030afb2a59da4898a6d90da9e54c457 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -36,7 +36,7 @@ _GLOBAL(\name)
         */
        lbz     r3,PACAIRQHAPPENED(r13)
        cmpwi   cr0,r3,0
-       bnelr
+       bne     2f
 
        /* Now we are going to mark ourselves as soft and hard enabled in
         * order to be able to take interrupts while asleep. We inform lockdep
@@ -72,6 +72,11 @@ _GLOBAL(\name)
        wrteei  1
        \loop
 
+2:
+       lbz     r10,PACAIRQHAPPENED(r13)
+       ori     r10,r10,PACA_IRQ_HARD_DIS
+       stb     r10,PACAIRQHAPPENED(r13)
+       blr
 .endm
 
 .macro BOOK3E_IDLE_LOOP
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0682fef1f38599d6e63fdac97812d8c3ef4ffa59..ca941f1e83a96eb6647fb4d0e0905928d3f63101 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -145,8 +145,20 @@ notrace unsigned int __check_irq_replay(void)
        trace_hardirqs_on();
        trace_hardirqs_off();
 
+       /*
+        * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
+        * not be set, which means interrupts have only just been hard
+        * disabled as part of the local_irq_restore or interrupt return
+        * code. In that case, skip the decrementer check because it's
+        * expensive to read the TB.
+        *
+        * HARD_DIS then gets cleared here, but it's reconciled later.
+        * Either local_irq_enable will replay the interrupt and that
+        * will reconcile state like other hard interrupts. Or interrupt
+        * return will replay the interrupt and in that case it sets
+        * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
+        */
        if (happened & PACA_IRQ_HARD_DIS) {
-               /* Clear bit 0 which we wouldn't clear otherwise */
                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
 
                /*
@@ -248,24 +260,26 @@ notrace void arch_local_irq_restore(unsigned long mask)
         * cannot have preempted.
         */
        irq_happened = get_irq_happened();
-       if (!irq_happened)
+       if (!irq_happened) {
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+               WARN_ON(!(mfmsr() & MSR_EE));
+#endif
                return;
+       }
 
        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
-        *
-        * We know that if the value in irq_happened is exactly 0x01
-        * then we are already hard disabled (there are other less
-        * common cases that we'll ignore for now), so we skip the
-        * (expensive) mtmsrd.
         */
-       if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
+       if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+               WARN_ON(!(mfmsr() & MSR_EE));
+#endif
                __hard_irq_disable();
 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-       else {
+       } else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's dbl check it and
@@ -274,8 +288,8 @@ notrace void arch_local_irq_restore(unsigned long mask)
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
-       }
 #endif
+       }
 
        irq_soft_mask_set(IRQS_ALL_DISABLED);
        trace_hardirqs_off();