arm64: Implement Shadow Call Stack
author: Sami Tolvanen <samitolvanen@google.com>
Mon, 27 Apr 2020 16:00:16 +0000 (09:00 -0700)
committer: Will Deacon <will@kernel.org>
Fri, 15 May 2020 15:35:50 +0000 (16:35 +0100)
This change implements shadow stack switching, initial SCS set-up,
and interrupt shadow stacks for arm64.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/Kconfig
arch/arm64/include/asm/scs.h [new file with mode: 0644]
arch/arm64/include/asm/thread_info.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/process.c
arch/arm64/kernel/scs.c [new file with mode: 0644]

index 40fb05d96c6072c9357cf69965ca006c0a5fdb27..c380a16533f616be516b86bfbd196a77d7b212a3 100644 (file)
@@ -64,6 +64,7 @@ config ARM64
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
        select ARCH_SUPPORTS_MEMORY_FAILURE
+       select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
        select ARCH_SUPPORTS_NUMA_BALANCING
@@ -1025,6 +1026,10 @@ config ARCH_HAS_CACHE_LINE_SIZE
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
        def_bool y if PGTABLE_LEVELS > 2
 
+# Supported by clang >= 7.0
+config CC_HAVE_SHADOW_CALL_STACK
+       def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
+
 config SECCOMP
        bool "Enable seccomp to safely compute untrusted bytecode"
        ---help---
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
new file mode 100644 (file)
index 0000000..9654935
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+       .macro scs_load tsk, tmp
+       ldp     x18, \tmp, [\tsk, #TSK_TI_SCS_BASE]
+       add     x18, x18, \tmp
+       .endm
+
+       .macro scs_save tsk, tmp
+       ldr     \tmp, [\tsk, #TSK_TI_SCS_BASE]
+       sub     \tmp, x18, \tmp
+       str     \tmp, [\tsk, #TSK_TI_SCS_OFFSET]
+       .endm
+#else
+       .macro scs_load tsk, tmp
+       .endm
+
+       .macro scs_save tsk, tmp
+       .endm
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#else /* __ASSEMBLY__ */
+
+#include <linux/scs.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+static inline void scs_overflow_check(struct task_struct *tsk)
+{
+       if (unlikely(scs_corrupted(tsk)))
+               panic("corrupted shadow stack detected inside scheduler\n");
+}
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+static inline void scs_overflow_check(struct task_struct *tsk) {}
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
index 512174a8e7891527ef071d9dd275723ec3930378..9df79c0a4c4368d4b6df44bb3a90893101638367 100644 (file)
@@ -41,6 +41,10 @@ struct thread_info {
 #endif
                } preempt;
        };
+#ifdef CONFIG_SHADOW_CALL_STACK
+       void                    *scs_base;
+       unsigned long           scs_offset;
+#endif
 };
 
 #define thread_saved_pc(tsk)   \
@@ -100,11 +104,20 @@ void arch_release_task_struct(struct task_struct *tsk);
                                 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
                                 _TIF_SYSCALL_EMU)
 
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define INIT_SCS                                                       \
+       .scs_base       = init_shadow_call_stack,                       \
+       .scs_offset     = 0,
+#else
+#define INIT_SCS
+#endif
+
 #define INIT_THREAD_INFO(tsk)                                          \
 {                                                                      \
        .flags          = _TIF_FOREIGN_FPSTATE,                         \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
+       INIT_SCS                                                        \
 }
 
 #endif /* __ASM_THREAD_INFO_H */
index 4e5b8ee314423ac9404c3c35ad6d7632cb9964e6..151f28521f1ece843ca9ac74bf92811354da090f 100644 (file)
@@ -63,6 +63,7 @@ obj-$(CONFIG_CRASH_CORE)              += crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)                += sdei.o
 obj-$(CONFIG_ARM64_SSBD)               += ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)           += pointer_auth.o
+obj-$(CONFIG_SHADOW_CALL_STACK)                += scs.o
 
 obj-y                                  += vdso/ probes/
 obj-$(CONFIG_COMPAT_VDSO)              += vdso32/
index 9981a0a5a87f13b86067520f8ae1cf23a2a6fd9a..d7934250b68c1b7451d88de38454283e94c9707a 100644 (file)
@@ -33,6 +33,10 @@ int main(void)
   DEFINE(TSK_TI_ADDR_LIMIT,    offsetof(struct task_struct, thread_info.addr_limit));
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
   DEFINE(TSK_TI_TTBR0,         offsetof(struct task_struct, thread_info.ttbr0));
+#endif
+#ifdef CONFIG_SHADOW_CALL_STACK
+  DEFINE(TSK_TI_SCS_BASE,      offsetof(struct task_struct, thread_info.scs_base));
+  DEFINE(TSK_TI_SCS_OFFSET,    offsetof(struct task_struct, thread_info.scs_offset));
 #endif
   DEFINE(TSK_STACK,            offsetof(struct task_struct, stack));
 #ifdef CONFIG_STACKPROTECTOR
index ddcde093c433b83e0c86d28d7a5d5f81367d6c86..244268d5ae4789f1f16064c3ef30a10c33560f07 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/scs.h>
 #include <asm/thread_info.h>
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
@@ -179,6 +180,8 @@ alternative_cb_end
        apply_ssbd 1, x22, x23
 
        ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
+
+       scs_load tsk, x20
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_current_task tsk
@@ -343,6 +346,8 @@ alternative_else_nop_endif
        msr     cntkctl_el1, x1
 4:
 #endif
+       scs_save tsk, x0
+
        /* No kernel C function calls after this as user keys are set. */
        ptrauth_keys_install_user tsk, x0, x1, x2
 
@@ -388,6 +393,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
+#ifdef CONFIG_SHADOW_CALL_STACK
+       mov     x24, x18                // preserve the original shadow stack
+#endif
 
        /*
         * Compare sp with the base of the task stack.
@@ -405,15 +413,25 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 
        /* switch to the irq stack */
        mov     sp, x26
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+       /* also switch to the irq shadow stack */
+       adr_this_cpu x18, irq_shadow_call_stack, x26
+#endif
+
 9998:
        .endm
 
        /*
-        * x19 should be preserved between irq_stack_entry and
-        * irq_stack_exit.
+        * The callee-saved regs (x19-x29) should be preserved between
+        * irq_stack_entry and irq_stack_exit, but note that kernel_entry
+        * uses x20-x23 to store data for later use.
         */
        .macro  irq_stack_exit
        mov     sp, x19
+#ifdef CONFIG_SHADOW_CALL_STACK
+       mov     x18, x24
+#endif
        .endm
 
 /* GPRs used by entry code */
@@ -901,6 +919,8 @@ SYM_FUNC_START(cpu_switch_to)
        mov     sp, x9
        msr     sp_el0, x1
        ptrauth_keys_install_kernel x1, 1, x8, x9, x10
+       scs_save x0, x8
+       scs_load x1, x8
        ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
index 57a91032b4c21ca2dc74447c10093f0d4dd0cec7..2b01c19c548314a97161b43b07db4b098d9c3c26 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/scs.h>
 #include <asm/smp.h>
 #include <asm/sysreg.h>
 #include <asm/thread_info.h>
@@ -424,6 +425,10 @@ SYM_FUNC_START_LOCAL(__primary_switched)
        stp     xzr, x30, [sp, #-16]!
        mov     x29, sp
 
+#ifdef CONFIG_SHADOW_CALL_STACK
+       adr_l   x18, init_shadow_call_stack     // Set shadow call stack
+#endif
+
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
 
        ldr_l   x4, kimage_vaddr                // Save the offset between
@@ -737,6 +742,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
        msr     sp_el0, x2
+       scs_load x2, x3
        mov     x29, #0
        mov     x30, #0
        b       secondary_start_kernel
index 56be4cbf771f604a849f958382aec9acdf4e837f..a35d3318492c31e97856b3625b436aab84fb1f5c 100644 (file)
@@ -52,6 +52,7 @@
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
+#include <asm/scs.h>
 #include <asm/stacktrace.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
@@ -515,6 +516,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        entry_task_switch(next);
        uao_thread_switch(next);
        ssbs_thread_switch(next);
+       scs_overflow_check(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c
new file mode 100644 (file)
index 0000000..acc6741
--- /dev/null
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#include <linux/percpu.h>
+#include <asm/scs.h>
+
+/* Allocate a static per-CPU shadow stack */
+#define DEFINE_SCS(name)                                               \
+       DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name)     \
+
+DEFINE_SCS(irq_shadow_call_stack);