powerpc/64: allow alternate return locations for soft-masked interrupts
author Nicholas Piggin <npiggin@gmail.com>
Thu, 17 Jun 2021 15:51:08 +0000 (01:51 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 24 Jun 2021 14:06:56 +0000 (00:06 +1000)
The exception table fixup adjusts a failed page fault's interrupt return
location, if the fault was taken at an address listed in the exception
table, to the corresponding fixup handler address.
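
For illustration, that existing mechanism boils down to roughly the
following (a simplified sketch, not code from this patch; the helper
name is invented):

	#include <linux/extable.h>
	#include <asm/extable.h>
	#include <asm/ptrace.h>

	/*
	 * If the faulting NIP has an entry in __ex_table, redirect the
	 * interrupt return to the fixup handler instead of retrying the
	 * faulting access.
	 */
	static bool fixup_from_exception_table(struct pt_regs *regs)
	{
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->nip);
		if (!entry)
			return false;

		regs_set_return_ip(regs, extable_fixup(entry));
		return true;
	}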

Introduce a variation of that idea which adds a fixup table for NMIs and
soft-masked asynchronous interrupts. This will be used to protect
certain critical sections that are sensitive to being clobbered by
incoming interrupts (because they use the same SPRs and/or the same irq
soft-mask state).
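
A minimal C sketch of the new lookup-and-redirect step, condensed from
the interrupt exit changes below (the standalone helper name is
invented for illustration):

	#include <asm/hw_irq.h>
	#include <asm/interrupt.h>
	#include <asm/ptrace.h>

	/* Redirect the interrupt return if it hit a registered region. */
	static void fixup_from_restart_table(struct pt_regs *regs)
	{
		unsigned long rst;

		/* Only soft-masked (or NMI) contexts consult the table */
		if (!arch_irq_disabled_regs(regs))
			return;

		rst = search_kernel_restart_table(regs->nip);
		if (rst)
			regs_set_return_ip(regs, rst);
	}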

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-10-npiggin@gmail.com
arch/powerpc/include/asm/interrupt.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/Makefile
arch/powerpc/lib/restart_table.c [new file with mode: 0644]

index de36fb5d9c518a5d8ec8638395e00d8e3c17768c..a4bf6c0013bb1d967694ade8241b24837e113b74 100644 (file)
 #include <asm/kprobes.h>
 #include <asm/runlatch.h>
 
+#ifdef CONFIG_PPC64
+extern char __end_soft_masked[];
+unsigned long search_kernel_restart_table(unsigned long addr);
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 static inline void srr_regs_clobbered(void)
 {
@@ -269,6 +274,14 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
         * new work to do (must use irq_work for that).
         */
 
+#ifdef CONFIG_PPC64
+       if (arch_irq_disabled_regs(regs)) {
+               unsigned long rst = search_kernel_restart_table(regs->nip);
+               if (rst)
+                       regs_set_return_ip(regs, rst);
+       }
+#endif
+
 #ifdef CONFIG_PPC64
        if (nmi_disables_ftrace(regs))
                this_cpu_set_ftrace_enabled(state->ftrace_enabled);
index d6739d700f0a3bd75afc834fe42c0e7a7c30b677..c9c2c36c1f8f270f84444f5f58f6e0b796805456 100644 (file)
@@ -762,6 +762,14 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
        stringify_in_c(.long (_target) - . ;)   \
        stringify_in_c(.previous)
 
+#define RESTART_TABLE(_start, _end, _target)   \
+       stringify_in_c(.section __restart_table,"a";)\
+       stringify_in_c(.balign 8;)              \
+       stringify_in_c(.llong (_start);)        \
+       stringify_in_c(.llong (_end);)          \
+       stringify_in_c(.llong (_target);)       \
+       stringify_in_c(.previous)
+
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #define BTB_FLUSH(reg)                 \
        lis reg,BUCSR_INIT@h;           \
index b35c97c7082f3b8283d51c7cf5525185b8f1073a..1b79f8a75298e6c31a8e87dbd4a0f025fe14d09d 100644 (file)
@@ -901,6 +901,28 @@ kernel_dbg_exc:
        bl      unknown_exception
        b       interrupt_return
 
+.macro SEARCH_RESTART_TABLE
+       LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
+       LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
+300:
+       cmpd    r14,r15
+       beq     302f
+       ld      r11,0(r14)
+       cmpld   r10,r11
+       blt     301f
+       ld      r11,8(r14)
+       cmpld   r10,r11
+       bge     301f
+       ld      r11,16(r14)
+       b       303f
+301:
+       addi    r14,r14,24
+       b       300b
+302:
+       li      r11,0
+303:
+.endm
+
 /*
  * An interrupt came in while soft-disabled; We mark paca->irq_happened
  * accordingly and if the interrupt is level sensitive, we hard disable
@@ -909,6 +931,9 @@ kernel_dbg_exc:
  */
 
 .macro masked_interrupt_book3e paca_irq full_mask
+       std     r14,PACA_EXGEN+EX_R14(r13)
+       std     r15,PACA_EXGEN+EX_R15(r13)
+
        lbz     r10,PACAIRQHAPPENED(r13)
        .if \full_mask == 1
        ori     r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
@@ -918,15 +943,23 @@ kernel_dbg_exc:
        stb     r10,PACAIRQHAPPENED(r13)
 
        .if \full_mask == 1
-       rldicl  r10,r11,48,1            /* clear MSR_EE */
-       rotldi  r11,r10,16
+       xori    r11,r11,MSR_EE          /* clear MSR_EE */
        mtspr   SPRN_SRR1,r11
        .endif
 
+       mfspr   r10,SPRN_SRR0
+       SEARCH_RESTART_TABLE
+       cmpdi   r11,0
+       beq     1f
+       mtspr   SPRN_SRR0,r11           /* return to restart address */
+1:
+
        lwz     r11,PACA_EXGEN+EX_CR(r13)
        mtcr    r11
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
+       ld      r14,PACA_EXGEN+EX_R14(r13)
+       ld      r15,PACA_EXGEN+EX_R15(r13)
        mfspr   r13,SPRN_SPRG_GEN_SCRATCH
        rfi
        b       .
index ee4283d014a44eacf0e24b3d7e4d8e238b3e597e..b8ed1ce8c34718b7aad0ee7755ededa2d4d37c95 100644 (file)
@@ -591,6 +591,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        __GEN_COMMON_BODY \name
 .endm
 
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+       mr      r12,r2
+       ld      r2,PACATOC(r13)
+       LOAD_REG_ADDR(r9, __start___restart_table)
+       LOAD_REG_ADDR(r10, __stop___restart_table)
+       mr      r2,r12
+#else
+       LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
+       LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
+#endif
+300:
+       cmpd    r9,r10
+       beq     302f
+       ld      r12,0(r9)
+       cmpld   r11,r12
+       blt     301f
+       ld      r12,8(r9)
+       cmpld   r11,r12
+       bge     301f
+       ld      r12,16(r9)
+       b       303f
+301:
+       addi    r9,r9,24
+       b       300b
+302:
+       li      r12,0
+303:
+.endm
+
 /*
  * Restore all registers including H/SRR0/1 saved in a stack frame of a
  * standard exception.
@@ -2646,6 +2676,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
        mtmsrd  r9,1
 
        kuap_kernel_restore r9, r10
+
        EXCEPTION_RESTORE_REGS hsrr=0
        RFI_TO_KERNEL
 
@@ -2703,6 +2734,16 @@ masked_interrupt:
        stb     r9,PACASRR_VALID(r13)
        .endif
 
+       SEARCH_RESTART_TABLE
+       cmpdi   r12,0
+       beq     3f
+       .if \hsrr
+       mtspr   SPRN_HSRR0,r12
+       .else
+       mtspr   SPRN_SRR0,r12
+       .endif
+3:
+
        ld      r9,PACA_EXGEN+EX_CTR(r13)
        mtctr   r9
        lwz     r9,PACA_EXGEN+EX_CCR(r13)
index 72fa3c00229a56ebfab492c563a2678c8fbc8bc2..16c5e13e00c40f2e6f08f0be8c80dd8e429ef247 100644 (file)
@@ -9,6 +9,14 @@
 #define EMITS_PT_NOTE
 #define RO_EXCEPTION_TABLE_ALIGN       0
 
+#define RESTART_TABLE(align)                                           \
+       . = ALIGN(align);                                               \
+       __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) {     \
+               __start___restart_table = .;                            \
+               KEEP(*(__restart_table))                                \
+               __stop___restart_table = .;                             \
+       }
+
 #include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
@@ -124,6 +132,8 @@ SECTIONS
        RO_DATA(PAGE_SIZE)
 
 #ifdef CONFIG_PPC64
+       RESTART_TABLE(8)
+
        . = ALIGN(8);
        __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_entry_barrier_fixup = .;
index cc1a8a0f311e7a9b2e082f5e733334b8ca34315b..4c92c80454f30aa84a164dc4c525dd1467ba6e6c 100644 (file)
@@ -42,7 +42,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
                               memcpy_power7.o
 
 obj64-y        += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
-          memcpy_64.o copy_mc_64.o
+          memcpy_64.o copy_mc_64.o restart_table.o
 
 ifndef CONFIG_PPC_QUEUED_SPINLOCKS
 obj64-$(CONFIG_SMP)    += locks.o
diff --git a/arch/powerpc/lib/restart_table.c b/arch/powerpc/lib/restart_table.c
new file mode 100644 (file)
index 0000000..7cd2075
--- /dev/null
@@ -0,0 +1,30 @@
+#include <asm/interrupt.h>
+#include <asm/kprobes.h>
+
+struct restart_table_entry {
+       unsigned long start;
+       unsigned long end;
+       unsigned long fixup;
+};
+
+extern struct restart_table_entry __start___restart_table[];
+extern struct restart_table_entry __stop___restart_table[];
+
+/* Given an address, look it up in the kernel restart table */
+unsigned long search_kernel_restart_table(unsigned long addr)
+{
+       struct restart_table_entry *rte = __start___restart_table;
+
+       while (rte < __stop___restart_table) {
+               unsigned long start = rte->start;
+               unsigned long end = rte->end;
+               unsigned long fixup = rte->fixup;
+
+               if (addr >= start && addr < end)
+                       return fixup;
+
+               rte++;
+       }
+       return 0;
+}
+NOKPROBE_SYMBOL(search_kernel_restart_table);