Merge branches 'fixes', 'misc' and 'spectre' into for-linus
author		Russell King <rmk+kernel@armlinux.org.uk>
		Tue, 5 Jun 2018 09:03:27 +0000 (10:03 +0100)
committer	Russell King <rmk+kernel@armlinux.org.uk>
		Tue, 5 Jun 2018 09:03:27 +0000 (10:03 +0100)
32 files changed:
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/include/asm/assembler.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/bugs.h
arch/arm/include/asm/cp15.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/proc-fns.h
arch/arm/include/asm/system_misc.h
arch/arm/include/uapi/asm/siginfo.h [deleted file]
arch/arm/kernel/Makefile
arch/arm/kernel/bugs.c [new file with mode: 0644]
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/smp.c
arch/arm/kernel/suspend.c
arch/arm/kernel/traps.c
arch/arm/kvm/hyp/hyp-entry.S
arch/arm/lib/getuser.S
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/fault.c
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-v7-2level.S
arch/arm/mm/proc-v7-bugs.c [new file with mode: 0644]
arch/arm/mm/proc-v7.S
arch/arm/probes/kprobes/opt-arm.c
arch/arm/vfp/vfpmodule.c

index 6a5c4ac97703b198ba08c61a9a8c3a63a93e5a5f..a3c5fbcad4abf08bebc2e7303309eea156bd6b01 100644 (file)
@@ -117,11 +117,9 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
-               perl -e 'while (<>) { \
-                       $$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \
-                       $$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \
-               }; printf "%d\n", $$bss_end - $$bss_start;')
+KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
+               sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
+                      -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
index 45c8823c37503d3bfdc2beee80304c556db8bf53..517e0e18f0b8307855447abfec63f0827cb72cec 100644 (file)
 #if defined(CONFIG_DEBUG_ICEDCC)
 
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
-               .macro  loadsp, rb, tmp
+               .macro  loadsp, rb, tmp1, tmp2
                .endm
                .macro  writeb, ch, rb
                mcr     p14, 0, \ch, c0, c5, 0
                .endm
 #elif defined(CONFIG_CPU_XSCALE)
-               .macro  loadsp, rb, tmp
+               .macro  loadsp, rb, tmp1, tmp2
                .endm
                .macro  writeb, ch, rb
                mcr     p14, 0, \ch, c8, c0, 0
                .endm
 #else
-               .macro  loadsp, rb, tmp
+               .macro  loadsp, rb, tmp1, tmp2
                .endm
                .macro  writeb, ch, rb
                mcr     p14, 0, \ch, c1, c0, 0
@@ -57,7 +57,7 @@
                .endm
 
 #if defined(CONFIG_ARCH_SA1100)
-               .macro  loadsp, rb, tmp
+               .macro  loadsp, rb, tmp1, tmp2
                mov     \rb, #0x80000000        @ physical base address
 #ifdef CONFIG_DEBUG_LL_SER3
                add     \rb, \rb, #0x00050000   @ Ser3
@@ -66,8 +66,8 @@
 #endif
                .endm
 #else
-               .macro  loadsp, rb, tmp
-               addruart \rb, \tmp
+               .macro  loadsp, rb, tmp1, tmp2
+               addruart \rb, \tmp1, \tmp2
                .endm
 #endif
 #endif
@@ -561,8 +561,6 @@ not_relocated:      mov     r0, #0
                bl      decompress_kernel
                bl      cache_clean_flush
                bl      cache_off
-               mov     r1, r7                  @ restore architecture number
-               mov     r2, r8                  @ restore atags pointer
 
 #ifdef CONFIG_ARM_VIRT_EXT
                mrs     r0, spsr                @ Get saved CPU boot mode
@@ -1297,7 +1295,7 @@ phex:             adr     r3, phexbuf
                b       1b
 
 @ puts corrupts {r0, r1, r2, r3}
-puts:          loadsp  r3, r1
+puts:          loadsp  r3, r2, r1
 1:             ldrb    r2, [r0], #1
                teq     r2, #0
                moveq   pc, lr
@@ -1314,8 +1312,8 @@ puts:             loadsp  r3, r1
 @ putc corrupts {r0, r1, r2, r3}
 putc:
                mov     r2, r0
+               loadsp  r3, r1, r0
                mov     r0, #0
-               loadsp  r3, r1
                b       2b
 
 @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
@@ -1365,6 +1363,8 @@ __hyp_reentry_vectors:
 
 __enter_kernel:
                mov     r0, #0                  @ must be 0
+               mov     r1, r7                  @ restore architecture number
+               mov     r2, r8                  @ restore atags pointer
  ARM(          mov     pc, r4          )       @ call kernel
  M_CLASS(      add     r4, r4, #1      )       @ enter in Thumb mode for M class
  THUMB(                bx      r4              )       @ entry point is always ARM for A/R classes
index bc8d4bbd82e27719a990c7972fd77bfca9dc7aef..0cd4dccbae788626ddd277376d1dc78c719a9ff2 100644 (file)
@@ -447,6 +447,14 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
        .size \name , . - \name
        .endm
 
+       .macro  csdb
+#ifdef CONFIG_THUMB2_KERNEL
+       .inst.w 0xf3af8014
+#else
+       .inst   0xe320f014
+#endif
+       .endm
+
        .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
 #ifndef CONFIG_CPU_USE_DOMAINS
        adds    \tmp, \addr, #\size - 1
@@ -536,4 +544,14 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
 #endif
        .endm
 
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)                           \
+       .pushsection "_kprobe_blacklist", "aw" ;        \
+       .balign 4 ;                                     \
+       .long entry;                                    \
+       .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
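The _ASM_NOKPROBE() macro added above is the assembly-side counterpart of the kernel's NOKPROBE_SYMBOL(): both record an address in the _kprobe_blacklist section so the kprobes core refuses to plant a probe there. A minimal sketch of the C-side usage for comparison (the helper function is invented for illustration):

	#include <linux/kprobes.h>

	/* Illustrative only: blacklist a C function from kprobes, just as
	 * _ASM_NOKPROBE blacklists the assembly entry points in getuser.S
	 * later in this merge. */
	static int demo_sensitive_helper(void)
	{
		return 0;
	}
	NOKPROBE_SYMBOL(demo_sensitive_helper)
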
index 40f5c410fd8c30300e094f3323adffe63743161d..69772e742a0acdc16dbf76f2130a8025af41b6c0 100644 (file)
 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB   ".inst.w 0xf3af8014"
+#else
+#define CSDB   ".inst  0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                                    : : "r" (0) : "memory")
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
 #ifdef CONFIG_ARM_HEAVY_MB
 extern void (*soc_mb)(void);
 extern void arm_heavy_mb(void);
@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
 #define __smp_rmb()    __smp_mb()
 #define __smp_wmb()    dmb(ishst)
 
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+                                                   unsigned long sz)
+{
+       unsigned long mask;
+
+       asm volatile(
+               "cmp    %1, %2\n"
+       "       sbc     %0, %1, %1\n"
+       CSDB
+       : "=r" (mask)
+       : "r" (idx), "Ir" (sz)
+       : "cc");
+
+       return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
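A caller is expected to AND an untrusted index with the returned mask before the dependent load, so an out-of-range value becomes 0 even on a speculative path. The self-contained demo below uses the portable fallback formula (the one include/linux/nospec.h supplies when no asm variant exists) rather than the ARM asm above; all names are invented for the demo, and it assumes sz is nonzero and below LONG_MAX:

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(long))

	/* Same contract as the asm version: ~0UL when 0 <= idx < sz,
	 * 0 otherwise -- computed without a branch, so the result holds
	 * under speculation too. */
	static unsigned long demo_mask_nospec(unsigned long idx, unsigned long sz)
	{
		return ~(long)(idx | (sz - 1 - idx)) >> (BITS_PER_LONG - 1);
	}

	int main(void)
	{
		int table[4] = { 10, 20, 30, 40 };
		unsigned long idx = 7;			/* out of range */

		idx &= demo_mask_nospec(idx, 4);	/* forced to 0 */
		printf("%d\n", table[idx]);		/* never indexes past the table */
		return 0;
	}
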
index a97f1ea708d19f339649a2ece287a69a5fe32a40..73a99c72a930a98498a94423fe484c28aab0f583 100644 (file)
 #ifndef __ASM_BUGS_H
 #define __ASM_BUGS_H
 
-#ifdef CONFIG_MMU
 extern void check_writebuffer_bugs(void);
 
-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_bugs(void);
+extern void check_other_bugs(void);
 #else
 #define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
 #endif
 
 #endif
index 4c9fa72b59f5751c652d8bb0a89de1fce98e7303..07e27f212dc754b42b0361536e33ed8bed144f5a 100644 (file)
@@ -65,6 +65,9 @@
 #define __write_sysreg(v, r, w, c, t)  asm volatile(w " " c : : "r" ((t)(v)))
 #define write_sysreg(v, ...)           __write_sysreg(v, __VA_ARGS__)
 
+#define BPIALL                         __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU                                __ACCESS_CP15(c7, 0, c5, 0)
+
 extern unsigned long cr_alignment;     /* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
index cb546425da8a09437cfb036bdc260ec7fce9f24f..26021980504db178d4efdfa8d8957b3a6c32ff82 100644 (file)
 #define ARM_CPU_PART_CORTEX_A12                0x4100c0d0
 #define ARM_CPU_PART_CORTEX_A17                0x4100c0e0
 #define ARM_CPU_PART_CORTEX_A15                0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53                0x4100d030
+#define ARM_CPU_PART_CORTEX_A57                0x4100d070
+#define ARM_CPU_PART_CORTEX_A72                0x4100d080
+#define ARM_CPU_PART_CORTEX_A73                0x4100d090
+#define ARM_CPU_PART_CORTEX_A75                0x4100d0a0
 #define ARM_CPU_PART_MASK              0xff00fff0
 
+/* Broadcom cores */
+#define ARM_CPU_PART_BRAHMA_B15                0x420000f0
+
 /* DEC implemented cores */
 #define ARM_CPU_PART_SA1100            0x4400a110
 
index 5a953ecb0d7888404fe97af4724281a4f578dbcb..231e87ad45d5660f97800e90b39951d8f7c88180 100644 (file)
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
index c6a749568dd6c413603ef3b7554561f356b33cc9..8467e05360d7a3395d1a50b9b97c12f021e4ffc9 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cputype.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -308,8 +309,17 @@ static inline void kvm_arm_vhe_guest_exit(void) {}
 
 static inline bool kvm_arm_harden_branch_predictor(void)
 {
-       /* No way to detect it yet, pretend it is not there. */
-       return false;
+       switch (read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       case ARM_CPU_PART_BRAHMA_B15:
+       case ARM_CPU_PART_CORTEX_A12:
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_CORTEX_A17:
+               return true;
+#endif
+       default:
+               return false;
+       }
 }
 
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
index 707a1f06dc5d5e207f0d2c18e0b37844569ca199..cf2eae51f9a05dc5707c65e3493588841d6c4fad 100644 (file)
@@ -311,7 +311,28 @@ static inline unsigned int kvm_get_vmid_bits(void)
 
 static inline void *kvm_get_hyp_vector(void)
 {
-       return kvm_ksym_ref(__kvm_hyp_vector);
+       switch (read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       case ARM_CPU_PART_CORTEX_A12:
+       case ARM_CPU_PART_CORTEX_A17:
+       {
+               extern char __kvm_hyp_vector_bp_inv[];
+               return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+       }
+
+       case ARM_CPU_PART_BRAHMA_B15:
+       case ARM_CPU_PART_CORTEX_A15:
+       {
+               extern char __kvm_hyp_vector_ic_inv[];
+               return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+       }
+#endif
+       default:
+       {
+               extern char __kvm_hyp_vector[];
+               return kvm_ksym_ref(__kvm_hyp_vector);
+       }
+       }
 }
 
 static inline int kvm_map_vectors(void)
index f2e1af45bd6fae33e8f22a9c8d8c0d8cf7a5b7c9..e25f4392e1b2868446de858701d408aaaee26eab 100644 (file)
@@ -36,6 +36,10 @@ extern struct processor {
         * Set up any processor specifics
         */
        void (*_proc_init)(void);
+       /*
+        * Check for processor bugs
+        */
+       void (*check_bugs)(void);
        /*
         * Disable any processor specifics
         */
index 78f6db114faf6e7b7dd3c0e9621fc0347931f14a..8e76db83c49878209a424d19e0e0f9789c276291 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 #include <linux/reboot.h>
+#include <linux/percpu.h>
 
 extern void cpu_init(void);
 
@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 extern void (*arm_pm_idle)(void);
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+       harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+                                                 smp_processor_id());
+       if (fn)
+               fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
 #define UDBG_UNDEFINED (1 << 0)
 #define UDBG_SYSCALL   (1 << 1)
 #define UDBG_BADABORT  (1 << 2)
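harden_branch_predictor() dispatches through a per-CPU function pointer that proc-v7-bugs.c (later in this merge) fills in with whatever invalidation the core needs; until then it is a no-op. A user-space model of the pattern, with the per-CPU machinery reduced to a plain array and every name invented for the demo:

	#include <stdio.h>

	#define DEMO_NR_CPUS 4

	typedef void (*harden_fn_t)(void);
	static harden_fn_t demo_harden_fn[DEMO_NR_CPUS];	/* models DEFINE_PER_CPU */

	static void demo_bpiall(void) { puts("invalidate branch predictor"); }

	static unsigned int demo_cpu(void) { return 0; }	/* models smp_processor_id() */

	static void demo_harden_branch_predictor(void)
	{
		harden_fn_t fn = demo_harden_fn[demo_cpu()];

		if (fn)			/* no workaround installed: no-op */
			fn();
	}

	int main(void)
	{
		demo_harden_branch_predictor();		/* silent: nothing installed */
		demo_harden_fn[0] = demo_bpiall;	/* what cpu_v7_spectre_init() does */
		demo_harden_branch_predictor();		/* now invalidates */
		return 0;
	}
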
diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h
deleted file mode 100644 (file)
index d051388..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SIGINFO_H
-#define __ASM_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-/*
- * SIGFPE si_codes
- */
-#ifdef __KERNEL__
-#define FPE_FIXME      0       /* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
-#endif
index b59ac4bf82b8a9d2c99a5c7ef062db9f62722adc..8cad59465af39ac44eafecc881e0ed9a6afc9893 100644 (file)
@@ -31,6 +31,7 @@ else
 obj-y          += entry-armv.o
 endif
 
+obj-$(CONFIG_MMU)              += bugs.o
 obj-$(CONFIG_CPU_IDLE)         += cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)      += dma.o
 obj-$(CONFIG_FIQ)              += fiq.o fiqasm.o
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
new file mode 100644 (file)
index 0000000..7be5113
--- /dev/null
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <asm/bugs.h>
+#include <asm/proc-fns.h>
+
+void check_other_bugs(void)
+{
+#ifdef MULTI_CPU
+       if (processor.check_bugs)
+               processor.check_bugs();
+#endif
+}
+
+void __init check_bugs(void)
+{
+       check_writebuffer_bugs();
+       check_other_bugs();
+}
index 3c4f88701f224984bfb5fecaf304d96918b56548..20df608bf343d22f0435d158fc7d11b17b4391f7 100644 (file)
@@ -242,9 +242,7 @@ local_restart:
        tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace
 
-       cmp     scno, #NR_syscalls              @ check upper syscall limit
-       badr    lr, ret_fast_syscall            @ return address
-       ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
+       invoke_syscall tbl, scno, r10, ret_fast_syscall
 
        add     r1, sp, #S_OFF
 2:     cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -278,14 +276,8 @@ __sys_trace:
        mov     r1, scno
        add     r0, sp, #S_OFF
        bl      syscall_trace_enter
-
-       badr    lr, __sys_trace_return          @ return address
-       mov     scno, r0                        @ syscall number (possibly new)
-       add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
-       cmp     scno, #NR_syscalls              @ check upper syscall limit
-       ldmccia r1, {r0 - r6}                   @ have to reload r0 - r6
-       stmccia sp, {r4, r5}                    @ and update the stack args
-       ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
+       mov     scno, r0
+       invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
        cmp     scno, #-1                       @ skip the syscall?
        bne     2b
        add     sp, sp, #S_OFF                  @ restore stack
@@ -363,6 +355,10 @@ sys_syscall:
                bic     scno, r0, #__NR_OABI_SYSCALL_BASE
                cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
                cmpne   scno, #NR_syscalls      @ check range
+#ifdef CONFIG_CPU_SPECTRE
+               movhs   scno, #0
+               csdb
+#endif
                stmloia sp, {r5, r6}            @ shuffle args
                movlo   r0, r1
                movlo   r1, r2
index 0f07579af472c8ec869c5d87fd8d1e105a24dcba..773424843d6efcc2ebeb0ec0cfa88d67643213cc 100644 (file)
 #endif
        .endm
 
+       .macro  invoke_syscall, table, nr, tmp, ret, reload=0
+#ifdef CONFIG_CPU_SPECTRE
+       mov     \tmp, \nr
+       cmp     \tmp, #NR_syscalls              @ check upper syscall limit
+       movcs   \tmp, #0
+       csdb
+       badr    lr, \ret                        @ return address
+       .if     \reload
+       add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
+       ldmccia r1, {r0 - r6}                   @ reload r0-r6
+       stmccia sp, {r4, r5}                    @ update stack arguments
+       .endif
+       ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
+#else
+       cmp     \nr, #NR_syscalls               @ check upper syscall limit
+       badr    lr, \ret                        @ return address
+       .if     \reload
+       add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
+       ldmccia r1, {r0 - r6}                   @ reload r0-r6
+       stmccia sp, {r4, r5}                    @ update stack arguments
+       .endif
+       ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
+#endif
+       .endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
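Restated in C, the CONFIG_CPU_SPECTRE body of invoke_syscall clamps the table index to 0 when the syscall number is out of range (the movcs), fences speculation with csdb, and still refuses to dispatch architecturally out-of-range numbers (the ldrcc condition). A rough self-contained model, with the dispatch table invented for the demo:

	#include <stdio.h>

	#define DEMO_NR_SYSCALLS 2

	static long demo_sys_restart(void) { return 0; }	/* slot 0 */
	static long demo_sys_exit(void)    { return 1; }

	static long (*const demo_call_table[DEMO_NR_SYSCALLS])(void) = {
		demo_sys_restart,
		demo_sys_exit,
	};

	static long demo_invoke_syscall(unsigned int nr)
	{
		unsigned int idx = nr;

		if (idx >= DEMO_NR_SYSCALLS)
			idx = 0;	/* models movcs \tmp, #0: the table index
					 * is harmless even speculatively */
		/* csdb() sits here in the kernel to bound the speculation */
		if (nr >= DEMO_NR_SYSCALLS)	/* models the ldrcc condition: */
			return -1;		/* out-of-range never dispatches */
		return demo_call_table[idx]();
	}

	int main(void)
	{
		printf("%ld\n", demo_invoke_syscall(1));	/* 1 */
		printf("%ld\n", demo_invoke_syscall(99));	/* -1, index clamped */
		return 0;
	}
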
index 6b38d7a634c19ffd279f98ca8cc3a113484d1fd3..dd2eb5f76b9f0a7d64f50169dd0d04a402b2ae67 100644 (file)
@@ -83,7 +83,7 @@ void machine_crash_nonpanic_core(void *unused)
 {
        struct pt_regs regs;
 
-       crash_setup_regs(&regs, NULL);
+       crash_setup_regs(&regs, get_irq_regs());
        printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
               smp_processor_id());
        crash_save_cpu(&regs, smp_processor_id());
@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
                cpu_relax();
 }
 
+void crash_smp_send_stop(void)
+{
+       static int cpus_stopped;
+       unsigned long msecs;
+
+       if (cpus_stopped)
+               return;
+
+       atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+       smp_call_function(machine_crash_nonpanic_core, NULL, false);
+       msecs = 1000; /* Wait at most a second for the other cpus to stop */
+       while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+               mdelay(1);
+               msecs--;
+       }
+       if (atomic_read(&waiting_for_crash_ipi) > 0)
+               pr_warn("Non-crashing CPUs did not react to IPI\n");
+
+       cpus_stopped = 1;
+}
+
 static void machine_kexec_mask_interrupts(void)
 {
        unsigned int i;
@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-       unsigned long msecs;
-
        local_irq_disable();
-
-       atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-       smp_call_function(machine_crash_nonpanic_core, NULL, false);
-       msecs = 1000; /* Wait at most a second for the other cpus to stop */
-       while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-               mdelay(1);
-               msecs--;
-       }
-       if (atomic_read(&waiting_for_crash_ipi) > 0)
-               pr_warn("Non-crashing CPUs did not react to IPI\n");
+       crash_smp_send_stop();
 
        crash_save_cpu(regs, smp_processor_id());
        machine_kexec_mask_interrupts();
index b9e08f50df41845fa7d36cc54bbf519ed5b3045c..0978282d5fc27a7c4a5e6b0e274da8bfc4c14c8d 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/irq_work.h>
 
 #include <linux/atomic.h>
+#include <asm/bugs.h>
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
@@ -404,6 +405,9 @@ asmlinkage void secondary_start_kernel(void)
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
+
+       check_other_bugs();
+
        complete(&cpu_running);
 
        local_irq_enable();
index a40ebb7c0896b2f7be17ebe7eea11c961b20fba7..d08099269e35b9a6392e74ec2a52d3b81f439616 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/slab.h>
 #include <linux/mm_types.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/pgalloc.h>
@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                cpu_switch_mm(mm->pgd, mm);
                local_flush_bp_all();
                local_flush_tlb_all();
+               check_other_bugs();
        }
 
        return ret;
index 5e3633c24e636575c19059cdb99bd247a666b5f5..2fe87109ae468bce6d38ff575f395fc2f41cc72e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
        raw_spin_unlock_irqrestore(&undef_lock, flags);
 }
 
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
        struct undef_hook *hook;
        unsigned long flags;
@@ -490,6 +492,7 @@ die_sig:
 
        arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)
 
 /*
  * Handle FIQ similarly to NMI on x86 systems.
index 95a2faefc070f5c4b172c65dfdb5dd85b8b3df89..aa3f9a9837acafb43e8d97acdac7fac03a37d2b2 100644 (file)
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -71,6 +72,90 @@ __kvm_hyp_vector:
        W(b)    hyp_irq
        W(b)    hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       .align 5
+__kvm_hyp_vector_ic_inv:
+       .global __kvm_hyp_vector_ic_inv
+
+       /*
+        * We encode the exception entry in the bottom 3 bits of
+        * SP, and have to guarantee that it stays 8-byte aligned.
+        */
+       W(add)  sp, sp, #1      /* Reset          7 */
+       W(add)  sp, sp, #1      /* Undef          6 */
+       W(add)  sp, sp, #1      /* Syscall        5 */
+       W(add)  sp, sp, #1      /* Prefetch abort 4 */
+       W(add)  sp, sp, #1      /* Data abort     3 */
+       W(add)  sp, sp, #1      /* HVC            2 */
+       W(add)  sp, sp, #1      /* IRQ            1 */
+       W(nop)                  /* FIQ            0 */
+
+       mcr     p15, 0, r0, c7, c5, 0   /* ICIALLU */
+       isb
+
+       b       decode_vectors
+
+       .align 5
+__kvm_hyp_vector_bp_inv:
+       .global __kvm_hyp_vector_bp_inv
+
+       /*
+        * We encode the exception entry in the bottom 3 bits of
+        * SP, and have to guarantee that it stays 8-byte aligned.
+        */
+       W(add)  sp, sp, #1      /* Reset          7 */
+       W(add)  sp, sp, #1      /* Undef          6 */
+       W(add)  sp, sp, #1      /* Syscall        5 */
+       W(add)  sp, sp, #1      /* Prefetch abort 4 */
+       W(add)  sp, sp, #1      /* Data abort     3 */
+       W(add)  sp, sp, #1      /* HVC            2 */
+       W(add)  sp, sp, #1      /* IRQ            1 */
+       W(nop)                  /* FIQ            0 */
+
+       mcr     p15, 0, r0, c7, c5, 6   /* BPIALL */
+       isb
+
+decode_vectors:
+
+#ifdef CONFIG_THUMB2_KERNEL
+       /*
+        * Yet another silly hack: Use VPIDR as a temp register.
+        * Thumb2 is really a pain, as SP cannot be used with most
+        * of the bitwise instructions. The vect_br macro ensures
+        * things gets cleaned-up.
+        */
+       mcr     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mov     r0, sp
+       and     r0, r0, #7
+       sub     sp, sp, r0
+       push    {r1, r2}
+       mov     r1, r0
+       mrc     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mrc     p15, 0, r2, c0, c0, 0   /* MIDR  */
+       mcr     p15, 4, r2, c0, c0, 0   /* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(   eor     sp, sp, #\val   )
+ARM(   tst     sp, #7          )
+ARM(   eorne   sp, sp, #\val   )
+
+THUMB( cmp     r1, #\val       )
+THUMB( popeq   {r1, r2}        )
+
+       beq     \targ
+.endm
+
+       vect_br 0, hyp_fiq
+       vect_br 1, hyp_irq
+       vect_br 2, hyp_hvc
+       vect_br 3, hyp_dabt
+       vect_br 4, hyp_pabt
+       vect_br 5, hyp_svc
+       vect_br 6, hyp_undef
+       vect_br 7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
        .align
 \label:        mov     r0, #\cause
@@ -118,7 +203,7 @@ hyp_hvc:
        lsr     r2, r2, #16
        and     r2, r2, #0xff
        cmp     r2, #0
-       bne     guest_trap              @ Guest called HVC
+       bne     guest_hvc_trap          @ Guest called HVC
 
        /*
         * Getting here means host called HVC, we shift parameters and branch
@@ -149,7 +234,14 @@ hyp_hvc:
        bx      ip
 
 1:
-       push    {lr}
+       /*
+        * Pushing r2 here is just a way of keeping the stack aligned to
+        * 8 bytes on any path that can trigger a HYP exception. Here,
+        * we may well be about to jump into the guest, and the guest
+        * exit would otherwise be badly decoded by our fancy
+        * "decode-exception-without-a-branch" code...
+        */
+       push    {r2, lr}
 
        mov     lr, r0
        mov     r0, r1
@@ -159,7 +251,21 @@ hyp_hvc:
 THUMB( orr     lr, #1)
        blx     lr                      @ Call the HYP function
 
-       pop     {lr}
+       pop     {r2, lr}
+       eret
+
+guest_hvc_trap:
+       movw    r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+       movt    r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+       ldr     r0, [sp]                @ Guest's r0
+       teq     r0, r2
+       bne     guest_trap
+       add     sp, sp, #12
+       @ Returns:
+       @ r0 = 0
+       @ r1 = HSR value (perfectly predictable)
+       @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
+       mov     r0, #0
        eret
 
 guest_trap:
index df73914e81c8344feccac5df8d5791dcbe92ed60..746e7801dcdf70fed9e339c2d6800b3f275c49b7 100644 (file)
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
 
 ENTRY(__get_user_2)
        check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb    .req    r0
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
 
 ENTRY(__get_user_4)
        check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)
 
 ENTRY(__get_user_8)
        check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
        check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
        check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb  .req    r0
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
        check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
        ret     lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)
 
 .pushsection __ex_table, "a"
        .long   1b, __get_user_bad
index 7f14acf67caff57b2daf8cca2b97f28d120d8532..9357ff52c2217ee3567a2f1a797ba408b87f631b 100644 (file)
@@ -415,6 +415,7 @@ config CPU_V7
        select CPU_CP15_MPU if !MMU
        select CPU_HAS_ASID if MMU
        select CPU_PABRT_V7
+       select CPU_SPECTRE if MMU
        select CPU_THUMB_CAPABLE
        select CPU_TLB_V7 if MMU
 
@@ -826,6 +827,28 @@ config CPU_BPREDICT_DISABLE
        help
          Say Y here to disable branch prediction.  If unsure, say N.
 
+config CPU_SPECTRE
+       bool
+
+config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+       depends on CPU_SPECTRE
+       default y
+       help
+          Speculation attacks against some high-performance processors rely
+          on being able to manipulate the branch predictor for a victim
+          context by executing aliasing branches in the attacker context.
+          Such attacks can be partially mitigated by clearing
+          internal branch predictor state and limiting the prediction
+          logic in some situations.
+
+          This config option will take CPU-specific actions to harden
+          the branch predictor against aliasing attacks and may rely on
+          specific instruction sequences or control bits being set by
+          the system firmware.
+
+          If unsure, say Y.
+
 config TLS_REG_EMUL
        bool
        select NEED_KUSER_HELPERS
index d19b209e04e02807f79004ce062c3e8ad1ba9e89..7cb1699fbfc4f488c128d804ae3a36abf1f68d55 100644 (file)
@@ -97,7 +97,7 @@ obj-$(CONFIG_CPU_MOHAWK)      += proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON)     += proc-feroceon.o
 obj-$(CONFIG_CPU_V6)           += proc-v6.o
 obj-$(CONFIG_CPU_V6K)          += proc-v6.o
-obj-$(CONFIG_CPU_V7)           += proc-v7.o
+obj-$(CONFIG_CPU_V7)           += proc-v7.o proc-v7-bugs.o
 obj-$(CONFIG_CPU_V7M)          += proc-v7m.o
 
 AFLAGS_proc-v6.o       :=-Wa,-march=armv6
index b75eada23d0a3be3e635b612adb431e5c06e34ef..3b1ba003c4f9c56b2f20b46db0e96012945494f7 100644 (file)
@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
        struct siginfo si;
 
+       if (addr > TASK_SIZE)
+               harden_branch_predictor();
+
 #ifdef CONFIG_DEBUG_USER
        if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
            ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
index f10e31d0730afa572cb6604d65813eb6df1dcfa9..81d0efb055c66080e976f9504c69866f7699b1a6 100644 (file)
        mcr     p15, 0, ip, c7, c10, 4          @ data write barrier
        .endm
 
-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
+.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
        .type   \name\()_processor_functions, #object
        .align 2
 ENTRY(\name\()_processor_functions)
        .word   \dabort
        .word   \pabort
        .word   cpu_\name\()_proc_init
+       .word   \bugs
        .word   cpu_\name\()_proc_fin
        .word   cpu_\name\()_reset
        .word   cpu_\name\()_do_idle
index c6141a5435c3c719f2b135fbbcf209f84000d386..f8d45ad2a515bf8db86175852e1f27ff81f7525e 100644 (file)
  *     even on Cortex-A8 revisions not affected by 430973.
  *     If IBE is not set, the flush BTAC/BTB won't do anything.
  */
-ENTRY(cpu_ca8_switch_mm)
-#ifdef CONFIG_MMU
-       mov     r2, #0
-       mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
-#endif
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
        mmid    r1, r1                          @ get mm->context.id
@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
 #endif
        bx      lr
 ENDPROC(cpu_v7_switch_mm)
-ENDPROC(cpu_ca8_switch_mm)
 
 /*
  *     cpu_v7_set_pte_ext(ptep, pte)
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
new file mode 100644 (file)
index 0000000..5544b82
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/psci.h>
+#include <linux/smp.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/proc-fns.h>
+#include <asm/system_misc.h>
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+
+extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+
+static void harden_branch_predictor_bpiall(void)
+{
+       write_sysreg(0, BPIALL);
+}
+
+static void harden_branch_predictor_iciallu(void)
+{
+       write_sysreg(0, ICIALLU);
+}
+
+static void __maybe_unused call_smc_arch_workaround_1(void)
+{
+       arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void __maybe_unused call_hvc_arch_workaround_1(void)
+{
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void cpu_v7_spectre_init(void)
+{
+       const char *spectre_v2_method = NULL;
+       int cpu = smp_processor_id();
+
+       if (per_cpu(harden_branch_predictor_fn, cpu))
+               return;
+
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A8:
+       case ARM_CPU_PART_CORTEX_A9:
+       case ARM_CPU_PART_CORTEX_A12:
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+               if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+                       goto bl_error;
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_bpiall;
+               spectre_v2_method = "BPIALL";
+               break;
+
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+               if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+                       goto bl_error;
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+               break;
+
+#ifdef CONFIG_ARM_PSCI
+       default:
+               /* Other ARM CPUs require no workaround */
+               if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+                       break;
+               /* fallthrough */
+               /* Cortex A57/A72 require firmware workaround */
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72: {
+               struct arm_smccc_res res;
+
+               if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+                       break;
+
+               switch (psci_ops.conduit) {
+               case PSCI_CONDUIT_HVC:
+                       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+                       if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+                               goto bl_error;
+                       per_cpu(harden_branch_predictor_fn, cpu) =
+                               call_hvc_arch_workaround_1;
+                       processor.switch_mm = cpu_v7_hvc_switch_mm;
+                       spectre_v2_method = "hypervisor";
+                       break;
+
+               case PSCI_CONDUIT_SMC:
+                       arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+                       if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+                               goto bl_error;
+                       per_cpu(harden_branch_predictor_fn, cpu) =
+                               call_smc_arch_workaround_1;
+                       processor.switch_mm = cpu_v7_smc_switch_mm;
+                       spectre_v2_method = "firmware";
+                       break;
+
+               default:
+                       break;
+               }
+       }
+#endif
+       }
+
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
+       return;
+
+bl_error:
+       pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+               cpu);
+}
+#else
+static void cpu_v7_spectre_init(void)
+{
+}
+#endif
+
+static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+{
+       u32 aux_cr;
+
+       asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
+
+       if ((aux_cr & mask) != mask) {
+               if (!*warned)
+                       pr_err("CPU%u: %s", smp_processor_id(), msg);
+               *warned = true;
+               return false;
+       }
+       return true;
+}
+
+static DEFINE_PER_CPU(bool, spectre_warned);
+
+static bool check_spectre_auxcr(bool *warned, u32 bit)
+{
+       return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
+               cpu_v7_check_auxcr_set(warned, bit,
+                                      "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
+}
+
+void cpu_v7_ca8_ibe(void)
+{
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+               cpu_v7_spectre_init();
+}
+
+void cpu_v7_ca15_ibe(void)
+{
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+               cpu_v7_spectre_init();
+}
+
+void cpu_v7_bugs_init(void)
+{
+       cpu_v7_spectre_init();
+}
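To restate the firmware path of cpu_v7_spectre_init() above: the kernel first asks, through the SMCCC 1.1 conduit, whether the firmware implements ARCH_WORKAROUND_1, and only on success routes predictor invalidation through that conduit. A hedged distillation of the probe-then-use sequence (kernel context; the HVC conduit is shown, the SMC case is symmetric, and this fragment will not build outside the kernel):

	#include <linux/arm-smccc.h>

	static bool demo_have_workaround_1(void)
	{
		struct arm_smccc_res res;

		/* SMCCC 1.1 feature probe: any nonzero a0 means the firmware
		 * does not offer the workaround on this conduit. */
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		return (int)res.a0 == 0;
	}

	static void demo_invalidate_predictor(void)
	{
		/* Each call invalidates branch-predictor state, which is why
		 * cpu_v7_hvc_switch_mm issues it on every context switch. */
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
	}
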
index b528a15f460dc91edced923b26eae9d13fdb9d27..6fe52819e0148c6f3f04b11c75e278cd0b04a1f9 100644 (file)
@@ -9,6 +9,7 @@
  *
  *  This is the "shell" of the ARMv7 processor support.
  */
+#include <linux/arm-smccc.h>
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
        ret     lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
+#ifdef CONFIG_ARM_PSCI
+       .arch_extension sec
+ENTRY(cpu_v7_smc_switch_mm)
+       stmfd   sp!, {r0 - r3}
+       movw    r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+       movt    r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+       smc     #0
+       ldmfd   sp!, {r0 - r3}
+       b       cpu_v7_switch_mm
+ENDPROC(cpu_v7_smc_switch_mm)
+       .arch_extension virt
+ENTRY(cpu_v7_hvc_switch_mm)
+       stmfd   sp!, {r0 - r3}
+       movw    r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+       movt    r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+       hvc     #0
+       ldmfd   sp!, {r0 - r3}
+       b       cpu_v7_switch_mm
+ENDPROC(cpu_v7_hvc_switch_mm)
+#endif
+ENTRY(cpu_v7_iciallu_switch_mm)
+       mov     r3, #0
+       mcr     p15, 0, r3, c7, c5, 0           @ ICIALLU
+       b       cpu_v7_switch_mm
+ENDPROC(cpu_v7_iciallu_switch_mm)
+ENTRY(cpu_v7_bpiall_switch_mm)
+       mov     r3, #0
+       mcr     p15, 0, r3, c7, c5, 6           @ flush BTAC/BTB
+       b       cpu_v7_switch_mm
+ENDPROC(cpu_v7_bpiall_switch_mm)
+
        string  cpu_v7_name, "ARMv7 Processor"
        .align
 
@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
 ENDPROC(cpu_v7_do_resume)
 #endif
 
-/*
- * Cortex-A8
- */
-       globl_equ       cpu_ca8_proc_init,      cpu_v7_proc_init
-       globl_equ       cpu_ca8_proc_fin,       cpu_v7_proc_fin
-       globl_equ       cpu_ca8_reset,          cpu_v7_reset
-       globl_equ       cpu_ca8_do_idle,        cpu_v7_do_idle
-       globl_equ       cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
-       globl_equ       cpu_ca8_set_pte_ext,    cpu_v7_set_pte_ext
-       globl_equ       cpu_ca8_suspend_size,   cpu_v7_suspend_size
-#ifdef CONFIG_ARM_CPU_SUSPEND
-       globl_equ       cpu_ca8_do_suspend,     cpu_v7_do_suspend
-       globl_equ       cpu_ca8_do_resume,      cpu_v7_do_resume
-#endif
-
-/*
- * Cortex-A9 processor functions
- */
-       globl_equ       cpu_ca9mp_proc_init,    cpu_v7_proc_init
-       globl_equ       cpu_ca9mp_proc_fin,     cpu_v7_proc_fin
-       globl_equ       cpu_ca9mp_reset,        cpu_v7_reset
-       globl_equ       cpu_ca9mp_do_idle,      cpu_v7_do_idle
-       globl_equ       cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
-       globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_switch_mm
-       globl_equ       cpu_ca9mp_set_pte_ext,  cpu_v7_set_pte_ext
 .globl cpu_ca9mp_suspend_size
 .equ   cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
 #ifdef CONFIG_ARM_CPU_SUSPEND
@@ -547,12 +554,79 @@ __v7_setup_stack:
 
        __INITDATA
 
+       .weak cpu_v7_bugs_init
+
        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
-       define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+       define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       @ generic v7 bpiall on context switch
+       globl_equ       cpu_v7_bpiall_proc_init,        cpu_v7_proc_init
+       globl_equ       cpu_v7_bpiall_proc_fin,         cpu_v7_proc_fin
+       globl_equ       cpu_v7_bpiall_reset,            cpu_v7_reset
+       globl_equ       cpu_v7_bpiall_do_idle,          cpu_v7_do_idle
+       globl_equ       cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
+       globl_equ       cpu_v7_bpiall_set_pte_ext,      cpu_v7_set_pte_ext
+       globl_equ       cpu_v7_bpiall_suspend_size,     cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+       globl_equ       cpu_v7_bpiall_do_suspend,       cpu_v7_do_suspend
+       globl_equ       cpu_v7_bpiall_do_resume,        cpu_v7_do_resume
+#endif
+       define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+
+#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
+#else
+#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
+#endif
+
 #ifndef CONFIG_ARM_LPAE
-       define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
-       define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+       @ Cortex-A8 - always needs bpiall switch_mm implementation
+       globl_equ       cpu_ca8_proc_init,      cpu_v7_proc_init
+       globl_equ       cpu_ca8_proc_fin,       cpu_v7_proc_fin
+       globl_equ       cpu_ca8_reset,          cpu_v7_reset
+       globl_equ       cpu_ca8_do_idle,        cpu_v7_do_idle
+       globl_equ       cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+       globl_equ       cpu_ca8_set_pte_ext,    cpu_v7_set_pte_ext
+       globl_equ       cpu_ca8_switch_mm,      cpu_v7_bpiall_switch_mm
+       globl_equ       cpu_ca8_suspend_size,   cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+       globl_equ       cpu_ca8_do_suspend,     cpu_v7_do_suspend
+       globl_equ       cpu_ca8_do_resume,      cpu_v7_do_resume
+#endif
+       define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
+
+       @ Cortex-A9 - needs more registers preserved across suspend/resume
+       @ and bpiall switch_mm for hardening
+       globl_equ       cpu_ca9mp_proc_init,    cpu_v7_proc_init
+       globl_equ       cpu_ca9mp_proc_fin,     cpu_v7_proc_fin
+       globl_equ       cpu_ca9mp_reset,        cpu_v7_reset
+       globl_equ       cpu_ca9mp_do_idle,      cpu_v7_do_idle
+       globl_equ       cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_bpiall_switch_mm
+#else
+       globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_switch_mm
+#endif
+       globl_equ       cpu_ca9mp_set_pte_ext,  cpu_v7_set_pte_ext
+       define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
 #endif
+
+       @ Cortex-A15 - needs iciallu switch_mm for hardening
+       globl_equ       cpu_ca15_proc_init,     cpu_v7_proc_init
+       globl_equ       cpu_ca15_proc_fin,      cpu_v7_proc_fin
+       globl_equ       cpu_ca15_reset,         cpu_v7_reset
+       globl_equ       cpu_ca15_do_idle,       cpu_v7_do_idle
+       globl_equ       cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       globl_equ       cpu_ca15_switch_mm,     cpu_v7_iciallu_switch_mm
+#else
+       globl_equ       cpu_ca15_switch_mm,     cpu_v7_switch_mm
+#endif
+       globl_equ       cpu_ca15_set_pte_ext,   cpu_v7_set_pte_ext
+       globl_equ       cpu_ca15_suspend_size,  cpu_v7_suspend_size
+       globl_equ       cpu_ca15_do_suspend,    cpu_v7_do_suspend
+       globl_equ       cpu_ca15_do_resume,     cpu_v7_do_resume
+       define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
 #ifdef CONFIG_CPU_PJ4B
        define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 #endif
@@ -669,7 +743,7 @@ __v7_ca7mp_proc_info:
 __v7_ca12mp_proc_info:
        .long   0x410fc0d0
        .long   0xff0ffff0
-       __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
+       __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
        .size   __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
 
        /*
@@ -679,7 +753,7 @@ __v7_ca12mp_proc_info:
 __v7_ca15mp_proc_info:
        .long   0x410fc0f0
        .long   0xff0ffff0
-       __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
+       __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
        .size   __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
        /*
@@ -689,7 +763,7 @@ __v7_ca15mp_proc_info:
 __v7_b15mp_proc_info:
        .long   0x420f00f0
        .long   0xff0ffff0
-       __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns
+       __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns
        .size   __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
 
        /*
@@ -699,9 +773,25 @@ __v7_b15mp_proc_info:
 __v7_ca17mp_proc_info:
        .long   0x410fc0e0
        .long   0xff0ffff0
-       __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
+       __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
        .size   __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
 
+       /* ARM Ltd. Cortex A73 processor */
+       .type   __v7_ca73_proc_info, #object
+__v7_ca73_proc_info:
+       .long   0x410fd090
+       .long   0xff0ffff0
+       __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+       .size   __v7_ca73_proc_info, . - __v7_ca73_proc_info
+
+       /* ARM Ltd. Cortex A75 processor */
+       .type   __v7_ca75_proc_info, #object
+__v7_ca75_proc_info:
+       .long   0x410fd0a0
+       .long   0xff0ffff0
+       __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+       .size   __v7_ca75_proc_info, . - __v7_ca75_proc_info
+
        /*
         * Qualcomm Inc. Krait processors.
         */
index bcdecc25461bcaa51f6df405807935bb481de2c5..b2aa9b32bff2b5e9d2e6d102a4cd58f6cf8c5676 100644 (file)
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
        unsigned long flags;
        struct kprobe *p = &op->kp;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       struct kprobe_ctlblk *kcb;
 
        /* Save skipped registers */
        regs->ARM_pc = (unsigned long)op->kp.addr;
        regs->ARM_ORIG_r0 = ~0UL;
 
        local_irq_save(flags);
+       kcb = get_kprobe_ctlblk();
 
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 
        local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
index 4c375e11ae9531bec8b5a05bd14ab8424ccd6653..af4ee2cef2f9650e699de3a335eaedc58dc9d37b 100644 (file)
@@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 
        if (exceptions == VFP_EXCEPTION_ERROR) {
                vfp_panic("unhandled bounce", inst);
-               vfp_raise_sigfpe(FPE_FIXME, regs);
+               vfp_raise_sigfpe(FPE_FLTINV, regs);
                return;
        }