Merge branch 'kvm-arm64/hyp-pcpu' into kvmarm-master/next
author Marc Zyngier <maz@kernel.org>
Wed, 30 Sep 2020 13:05:35 +0000 (14:05 +0100)
committer Marc Zyngier <maz@kernel.org>
Wed, 30 Sep 2020 13:05:35 +0000 (14:05 +0100)
Signed-off-by: Marc Zyngier <maz@kernel.org>
43 files changed:
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/hyp_image.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/spectre.h [new file with mode: 0644]
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/image-vars.h
arch/arm64/kernel/process.c
arch/arm64/kernel/proton-pack.c [new file with mode: 0644]
arch/arm64/kernel/ssbd.c [deleted file]
arch/arm64/kernel/suspend.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/Kconfig
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/Makefile
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/include/hyp/debug-sr.h
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/.gitignore [new file with mode: 0644]
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/hyp/nvhe/hyp.lds.S [new file with mode: 0644]
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/pmu.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c

index 6d232837cbeee8abb0c5a9c37b05c8566260156f..51259274a819aac5013b967195f4add0554e906e 100644 (file)
@@ -1165,32 +1165,6 @@ config UNMAP_KERNEL_AT_EL0
 
          If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-       bool "Harden the branch predictor against aliasing attacks" if EXPERT
-       default y
-       help
-         Speculation attacks against some high-performance processors rely on
-         being able to manipulate the branch predictor for a victim context by
-         executing aliasing branches in the attacker context.  Such attacks
-         can be partially mitigated against by clearing internal branch
-         predictor state and limiting the prediction logic in some situations.
-
-         This config option will take CPU-specific actions to harden the
-         branch predictor against aliasing attacks and may rely on specific
-         instruction sequences or control bits being set by the system
-         firmware.
-
-         If unsure, say Y.
-
-config ARM64_SSBD
-       bool "Speculative Store Bypass Disable" if EXPERT
-       default y
-       help
-         This enables mitigation of the bypassing of previous stores
-         by speculative loads.
-
-         If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
        bool "Apply r/o permissions of VM areas also to their linear aliases"
        default y
index 54d1811776566e5ba5fe9410aec9f1c031b20c1f..ddbe6bf00e336d936457e5f9fbf414ec2a99a224 100644 (file)
@@ -218,6 +218,23 @@ lr .req    x30             // link register
        str     \src, [\tmp, :lo12:\sym]
        .endm
 
+       /*
+        * @dst: destination register
+        */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+       .macro  this_cpu_offset, dst
+       mrs     \dst, tpidr_el2
+       .endm
+#else
+       .macro  this_cpu_offset, dst
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       mrs     \dst, tpidr_el1
+alternative_else
+       mrs     \dst, tpidr_el2
+alternative_endif
+       .endm
+#endif
+
        /*
         * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
         * @sym: The name of the per-cpu variable
@@ -226,11 +243,7 @@ lr .req    x30             // link register
        .macro adr_this_cpu, dst, sym, tmp
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     \tmp, tpidr_el1
-alternative_else
-       mrs     \tmp, tpidr_el2
-alternative_endif
+       this_cpu_offset \tmp
        add     \dst, \dst, \tmp
        .endm
 
@@ -241,11 +254,7 @@ alternative_endif
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     \tmp, tpidr_el1
-alternative_else
-       mrs     \tmp, tpidr_el2
-alternative_endif
+       this_cpu_offset \tmp
        ldr     \dst, [\dst, \tmp]
        .endm
 
index 07b643a707100559ae4e4df475bc969eb6083177..c4ac9a13ad5facf8e19f339b5641d9150725218d 100644 (file)
 #define ARM64_HAS_DCPOP                                21
 #define ARM64_SVE                              22
 #define ARM64_UNMAP_KERNEL_AT_EL0              23
-#define ARM64_HARDEN_BRANCH_PREDICTOR          24
+#define ARM64_SPECTRE_V2                       24
 #define ARM64_HAS_RAS_EXTN                     25
 #define ARM64_WORKAROUND_843419                        26
 #define ARM64_HAS_CACHE_IDC                    27
 #define ARM64_HAS_CACHE_DIC                    28
 #define ARM64_HW_DBM                           29
-#define ARM64_SSBD                             30
+#define ARM64_SPECTRE_V4                       30
 #define ARM64_MISMATCHED_CACHE_TYPE            31
 #define ARM64_HAS_STAGE2_FWB                   32
 #define ARM64_HAS_CRC32                                33
index 89b4f0142c28784de876c5572bc412b15ee1d453..fba6700b457b7b04bcf91ca5554109071fc95e50 100644 (file)
@@ -698,30 +698,6 @@ static inline bool system_supports_tlb_range(void)
                cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-#define ARM64_BP_HARDEN_UNKNOWN                -1
-#define ARM64_BP_HARDEN_WA_NEEDED      0
-#define ARM64_BP_HARDEN_NOT_REQUIRED   1
-
-int get_spectre_v2_workaround_state(void);
-
-#define ARM64_SSBD_UNKNOWN             -1
-#define ARM64_SSBD_FORCE_DISABLE       0
-#define ARM64_SSBD_KERNEL              1
-#define ARM64_SSBD_FORCE_ENABLE                2
-#define ARM64_SSBD_MITIGATED           3
-
-static inline int arm64_get_ssbd_state(void)
-{
-#ifdef CONFIG_ARM64_SSBD
-       extern int ssbd_state;
-       return ssbd_state;
-#else
-       return ARM64_SSBD_UNKNOWN;
-#endif
-}
-
-void arm64_set_ssbd_mitigation(bool state);
-
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h
new file mode 100644 (file)
index 0000000..daa1a1d
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Google LLC.
+ * Written by David Brazdil <dbrazdil@google.com>
+ */
+
+#ifndef __ARM64_HYP_IMAGE_H__
+#define __ARM64_HYP_IMAGE_H__
+
+/*
+ * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
+ * to separate it from the kernel proper.
+ */
+#define kvm_nvhe_sym(sym)      __kvm_nvhe_##sym
+
+#ifdef LINKER_SCRIPT
+
+/*
+ * KVM nVHE ELF section names are prefixed with .hyp, to separate them
+ * from the kernel proper.
+ */
+#define HYP_SECTION_NAME(NAME) .hyp##NAME
+
+/* Defines an ELF hyp section from input section @NAME and its subsections. */
+#define HYP_SECTION(NAME) \
+       HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }
+
+/*
+ * Defines a linker script alias of a kernel-proper symbol referenced by
+ * KVM nVHE hyp code.
+ */
+#define KVM_NVHE_ALIAS(sym)    kvm_nvhe_sym(sym) = sym;
+
+#endif /* LINKER_SCRIPT */
+
+#endif /* __ARM64_HYP_IMAGE_H__ */
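
A minimal standalone sketch of the token pasting done by kvm_nvhe_sym() above; the macro is copied here purely for illustration and is not part of the patch:

/* Standalone sketch: shows the prefix produced by kvm_nvhe_sym().
 * The macro is copied from hyp_image.h for illustration only. */
#include <stdio.h>

#define kvm_nvhe_sym(sym)		__kvm_nvhe_##sym
#define STRINGIFY(x)			#x
#define EXPAND_AND_STRINGIFY(x)		STRINGIFY(x)

int main(void)
{
	/* Prints "__kvm_nvhe___kvm_hyp_vector" */
	puts(EXPAND_AND_STRINGIFY(kvm_nvhe_sym(__kvm_hyp_vector)));
	return 0;
}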
index 3e4577013d33c333a376ae281ae7bac1555d5a05..54387ccd1ab26ad11822e5bb85ad8f852e16e85d 100644 (file)
@@ -7,11 +7,9 @@
 #ifndef __ARM_KVM_ASM_H__
 #define __ARM_KVM_ASM_H__
 
+#include <asm/hyp_image.h>
 #include <asm/virt.h>
 
-#define        VCPU_WORKAROUND_2_FLAG_SHIFT    0
-#define        VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)    ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
 
 #include <linux/mm.h>
 
-/*
- * Translate name of a symbol defined in nVHE hyp to the name seen
- * by kernel proper. All nVHE symbols are prefixed by the build system
- * to avoid clashes with the VHE variants.
- */
-#define kvm_nvhe_sym(sym)      __kvm_nvhe_##sym
-
 #define DECLARE_KVM_VHE_SYM(sym)       extern char sym[]
 #define DECLARE_KVM_NVHE_SYM(sym)      extern char kvm_nvhe_sym(sym)[]
 
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)
 
+#define DECLARE_KVM_VHE_PER_CPU(type, sym)     \
+       DECLARE_PER_CPU(type, sym)
+#define DECLARE_KVM_NVHE_PER_CPU(type, sym)    \
+       DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+
+#define DECLARE_KVM_HYP_PER_CPU(type, sym)     \
+       DECLARE_KVM_VHE_PER_CPU(type, sym);     \
+       DECLARE_KVM_NVHE_PER_CPU(type, sym)
+
+/*
+ * Compute pointer to a symbol defined in nVHE percpu region.
+ * Returns NULL if percpu memory has not been allocated yet.
+ */
+#define this_cpu_ptr_nvhe_sym(sym)     per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
+#define per_cpu_ptr_nvhe_sym(sym, cpu)                                         \
+       ({                                                                      \
+               unsigned long base, off;                                        \
+               base = kvm_arm_hyp_percpu_base[cpu];                            \
+               off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
+                     (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
+               base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
+       })
+
 #if defined(__KVM_NVHE_HYPERVISOR__)
 
-#define CHOOSE_HYP_SYM(sym)    CHOOSE_NVHE_SYM(sym)
 #define CHOOSE_NVHE_SYM(sym)   sym
+#define CHOOSE_HYP_SYM(sym)    CHOOSE_NVHE_SYM(sym)
+
 /* The nVHE hypervisor shouldn't even try to access VHE symbols */
 extern void *__nvhe_undefined_symbol;
-#define CHOOSE_VHE_SYM(sym)    __nvhe_undefined_symbol
+#define CHOOSE_VHE_SYM(sym)            __nvhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)      (&__nvhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)  (&__nvhe_undefined_symbol)
 
-#elif defined(__KVM_VHE_HYPERVISOR)
+#elif defined(__KVM_VHE_HYPERVISOR__)
 
-#define CHOOSE_HYP_SYM(sym)    CHOOSE_VHE_SYM(sym)
 #define CHOOSE_VHE_SYM(sym)    sym
+#define CHOOSE_HYP_SYM(sym)    CHOOSE_VHE_SYM(sym)
+
 /* The VHE hypervisor shouldn't even try to access nVHE symbols */
 extern void *__vhe_undefined_symbol;
-#define CHOOSE_NVHE_SYM(sym)   __vhe_undefined_symbol
+#define CHOOSE_NVHE_SYM(sym)           __vhe_undefined_symbol
+#define this_cpu_ptr_hyp_sym(sym)      (&__vhe_undefined_symbol)
+#define per_cpu_ptr_hyp_sym(sym, cpu)  (&__vhe_undefined_symbol)
 
 #else
 
@@ -113,8 +133,18 @@ extern void *__vhe_undefined_symbol;
  * - Don't let the nVHE hypervisor have access to this, as it will
  *   pick the *wrong* symbol (yes, it runs at EL2...).
  */
-#define CHOOSE_HYP_SYM(sym)    (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+#define CHOOSE_HYP_SYM(sym)            (is_kernel_in_hyp_mode()        \
+                                          ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))
+
+#define this_cpu_ptr_hyp_sym(sym)      (is_kernel_in_hyp_mode()        \
+                                          ? this_cpu_ptr(&sym)         \
+                                          : this_cpu_ptr_nvhe_sym(sym))
+
+#define per_cpu_ptr_hyp_sym(sym, cpu)  (is_kernel_in_hyp_mode()        \
+                                          ? per_cpu_ptr(&sym, cpu)     \
+                                          : per_cpu_ptr_nvhe_sym(sym, cpu))
+
 #define CHOOSE_VHE_SYM(sym)    sym
 #define CHOOSE_NVHE_SYM(sym)   kvm_nvhe_sym(sym)
 
@@ -141,11 +171,13 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_host_vector  CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
 #define __kvm_hyp_vector       CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+DECLARE_KVM_NVHE_SYM(__per_cpu_start);
+DECLARE_KVM_NVHE_SYM(__per_cpu_end);
+
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs   CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
@@ -188,26 +220,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
                addr;                                                   \
        })
 
-/*
- * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
- * provided that sym is really a *symbol* and not a pointer obtained from
- * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
- * sparse quiet.
- */
-#define __hyp_this_cpu_ptr(sym)                                                \
-       ({                                                              \
-               void *__ptr;                                            \
-               __verify_pcpu_ptr(&sym);                                \
-               __ptr = hyp_symbol_addr(sym);                           \
-               __ptr += read_sysreg(tpidr_el2);                        \
-               (typeof(sym) __kernel __force *)__ptr;                  \
-        })
-
-#define __hyp_this_cpu_read(sym)                                       \
-       ({                                                              \
-               *__hyp_this_cpu_ptr(sym);                               \
-        })
-
 #define __KVM_EXTABLE(from, to)                                                \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
@@ -238,20 +250,8 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
 #else /* __ASSEMBLY__ */
 
-.macro hyp_adr_this_cpu reg, sym, tmp
-       adr_l   \reg, \sym
-       mrs     \tmp, tpidr_el2
-       add     \reg, \reg, \tmp
-.endm
-
-.macro hyp_ldr_this_cpu reg, sym, tmp
-       adr_l   \reg, \sym
-       mrs     \tmp, tpidr_el2
-       ldr     \reg,  [\reg, \tmp]
-.endm
-
 .macro get_host_ctxt reg, tmp
-       hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+       adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
@@ -261,12 +261,12 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .endm
 
 .macro get_loaded_vcpu vcpu, ctxt
-       hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+       adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
 .macro set_loaded_vcpu vcpu, ctxt, tmp
-       hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+       adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
        str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
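
For reference, a minimal user-space sketch of the address arithmetic performed by per_cpu_ptr_nvhe_sym() above: the symbol's offset from __per_cpu_start is added to the per-CPU base recorded at init time, and NULL is returned before the region is allocated. All names below are illustrative stand-ins, not kernel symbols:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS		4
#define SYM_OFFSET	16	/* offset of the symbol from __per_cpu_start */

/* Stand-in for kvm_arm_hyp_percpu_base[]: one base address per CPU,
 * zero until the per-CPU region has been allocated. */
static uintptr_t hyp_percpu_base[NR_CPUS];
static char hyp_percpu_image[NR_CPUS][64];

static void *per_cpu_ptr_sketch(int cpu)
{
	uintptr_t base = hyp_percpu_base[cpu];

	return base ? (void *)(base + SYM_OFFSET) : NULL;
}

int main(void)
{
	int cpu;

	printf("before alloc: %p\n", per_cpu_ptr_sketch(1));	/* (nil) */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		hyp_percpu_base[cpu] = (uintptr_t)hyp_percpu_image[cpu];

	printf("after alloc:  %p\n", per_cpu_ptr_sketch(1));
	return 0;
}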
 
index 49a55be2b9a20a260cd6fd2c0a31c7f875a8c692..96eccb107ec2e7fc2009ec0e48652bdf719f395e 100644 (file)
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-                                                     bool flag)
-{
-       if (flag)
-               vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-       else
-               vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
        if (vcpu_mode_is_32bit(vcpu)) {
index d56d67c3787e34e37ddebf3fda6bd97dcb37fe18..0aecbab6a7fb3bfc941f0776d5b31e32ca913195 100644 (file)
@@ -568,7 +568,7 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
@@ -634,46 +634,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-#define KVM_BP_HARDEN_UNKNOWN          -1
-#define KVM_BP_HARDEN_WA_NEEDED                0
-#define KVM_BP_HARDEN_NOT_REQUIRED     1
-
-static inline int kvm_arm_harden_branch_predictor(void)
-{
-       switch (get_spectre_v2_workaround_state()) {
-       case ARM64_BP_HARDEN_WA_NEEDED:
-               return KVM_BP_HARDEN_WA_NEEDED;
-       case ARM64_BP_HARDEN_NOT_REQUIRED:
-               return KVM_BP_HARDEN_NOT_REQUIRED;
-       case ARM64_BP_HARDEN_UNKNOWN:
-       default:
-               return KVM_BP_HARDEN_UNKNOWN;
-       }
-}
-
-#define KVM_SSBD_UNKNOWN               -1
-#define KVM_SSBD_FORCE_DISABLE         0
-#define KVM_SSBD_KERNEL                1
-#define KVM_SSBD_FORCE_ENABLE          2
-#define KVM_SSBD_MITIGATED             3
-
-static inline int kvm_arm_have_ssbd(void)
-{
-       switch (arm64_get_ssbd_state()) {
-       case ARM64_SSBD_FORCE_DISABLE:
-               return KVM_SSBD_FORCE_DISABLE;
-       case ARM64_SSBD_KERNEL:
-               return KVM_SSBD_KERNEL;
-       case ARM64_SSBD_FORCE_ENABLE:
-               return KVM_SSBD_FORCE_ENABLE;
-       case ARM64_SSBD_MITIGATED:
-               return KVM_SSBD_MITIGATED;
-       case ARM64_SSBD_UNKNOWN:
-       default:
-               return KVM_SSBD_UNKNOWN;
-       }
-}
-
 void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
 
index c490fe8089b3ee1a36ca4d11c3d86431ffa093e8..331394306ccee34633d178dbec9c1800612ac910 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -207,19 +208,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
        return ret;
 }
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
  *
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- *   hardening sequence is placed in one of the vector slots, which is
- *   executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ *   placed in one of the vector slots, which is executed before jumping
+ *   to the real vectors.
  *
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- *   hardening sequence is mapped next to the idmap page, and executed
- *   before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ *   containing the hardening sequence is mapped next to the idmap page,
+ *   and executed before jumping to the real vectors.
  *
  * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
  *   empty slot is selected, mapped next to the idmap page, and
@@ -229,19 +228,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
-/*  This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();
        void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
        int slot = -1;
 
-       if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+       if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
                vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
                slot = data->hyp_vectors_slot;
        }
@@ -258,76 +254,6 @@ static inline void *kvm_get_hyp_vector(void)
        return vect;
 }
 
-/*  This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-       /*
-        * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
-        * HEL2 = ARM64_HARDEN_EL2_VECTORS
-        *
-        * !HBP + !HEL2 -> use direct vectors
-        *  HBP + !HEL2 -> use hardened vectors in place
-        * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
-        *  HBP +  HEL2 -> use hardened vertors and use exec mapping
-        */
-       if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-       }
-
-       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-               /*
-                * Always allocate a spare vector slot, as we don't
-                * know yet which CPUs have a BP hardening slot that
-                * we can reuse.
-                */
-               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-               return create_hyp_exec_mappings(vect_pa, size,
-                                               &__kvm_bp_vect_base);
-       }
-
-       return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-       return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_ARM64_SSBD
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
-       int cpu, err;
-
-       for_each_possible_cpu(cpu) {
-               u64 *ptr;
-
-               ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
-               err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-#else
-static inline int hyp_map_aux_data(void)
-{
-       return 0;
-}
-#endif
-
 #define kvm_phys_to_vttbr(addr)                phys_to_ttbr(addr)
 
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
index a7a5ecaa2e836b509c373feaa9c5474df38c73b9..cbff2d42c1d8eabdc508ee75b33066786a551d5f 100644 (file)
@@ -45,7 +45,6 @@ struct bp_hardening_data {
        bp_hardening_cb_t       fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -57,21 +56,13 @@ static inline void arm64_apply_bp_hardening(void)
 {
        struct bp_hardening_data *d;
 
-       if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+       if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
                return;
 
        d = arm64_get_bp_hardening_data();
        if (d->fn)
                d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-       return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)      { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
index 0b6409b89e5e014b623cb4096c391559237412c8..1599e17379d8687b83266017947cb82c5e8c607c 100644 (file)
@@ -19,7 +19,16 @@ static inline void set_my_cpu_offset(unsigned long off)
                        :: "r" (off) : "memory");
 }
 
-static inline unsigned long __my_cpu_offset(void)
+static inline unsigned long __hyp_my_cpu_offset(void)
+{
+       /*
+        * Non-VHE hyp code runs with preemption disabled. No need to hazard
+        * the register access against barrier() as in __kern_my_cpu_offset.
+        */
+       return read_sysreg(tpidr_el2);
+}
+
+static inline unsigned long __kern_my_cpu_offset(void)
 {
        unsigned long off;
 
@@ -35,7 +44,12 @@ static inline unsigned long __my_cpu_offset(void)
 
        return off;
 }
-#define __my_cpu_offset __my_cpu_offset()
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define __my_cpu_offset __hyp_my_cpu_offset()
+#else
+#define __my_cpu_offset __kern_my_cpu_offset()
+#endif
 
 #define PERCPU_RW_OPS(sz)                                              \
 static inline unsigned long __percpu_read_##sz(void *ptr)              \
@@ -227,4 +241,14 @@ PERCPU_RET_OP(add, add, ldadd)
 
 #include <asm-generic/percpu.h>
 
+/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */
+#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT)
+#undef this_cpu_ptr
+#define        this_cpu_ptr            raw_cpu_ptr
+#undef __this_cpu_read
+#define        __this_cpu_read         raw_cpu_read
+#undef __this_cpu_write
+#define        __this_cpu_write        raw_cpu_write
+#endif
+
 #endif /* __ASM_PERCPU_H */
index 240fe5e5b7209776b772d8a4fbbcbb66f7275a26..7d90ea2e20639b957190dfe1ebe37b02e78fb19a 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pointer_auth.h>
 #include <asm/ptrace.h>
+#include <asm/spectre.h>
 #include <asm/types.h>
 
 /*
@@ -197,40 +198,15 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
                regs->pmr_save = GIC_PRIO_IRQON;
 }
 
-static inline void set_ssbs_bit(struct pt_regs *regs)
-{
-       regs->pstate |= PSR_SSBS_BIT;
-}
-
-static inline void set_compat_ssbs_bit(struct pt_regs *regs)
-{
-       regs->pstate |= PSR_AA32_SSBS_BIT;
-}
-
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
 {
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;
-
-       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-               set_ssbs_bit(regs);
-
+       spectre_v4_enable_task_mitigation(current);
        regs->sp = sp;
 }
 
-static inline bool is_ttbr0_addr(unsigned long addr)
-{
-       /* entry assembly clears tags for TTBR0 addrs */
-       return addr < TASK_SIZE;
-}
-
-static inline bool is_ttbr1_addr(unsigned long addr)
-{
-       /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
-       return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
-}
-
 #ifdef CONFIG_COMPAT
 static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
@@ -244,13 +220,23 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
        regs->pstate |= PSR_AA32_E_BIT;
 #endif
 
-       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-               set_compat_ssbs_bit(regs);
-
+       spectre_v4_enable_task_mitigation(current);
        regs->compat_sp = sp;
 }
 #endif
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+       /* entry assembly clears tags for TTBR0 addrs */
+       return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+       /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+       return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
+}
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
 
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
new file mode 100644 (file)
index 0000000..fcdfbce
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interface for managing mitigations for Spectre vulnerabilities.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <will@kernel.org>
+ */
+
+#ifndef __ASM_SPECTRE_H
+#define __ASM_SPECTRE_H
+
+#include <asm/cpufeature.h>
+
+/* Watch out, ordering is important here. */
+enum mitigation_state {
+       SPECTRE_UNAFFECTED,
+       SPECTRE_MITIGATED,
+       SPECTRE_VULNERABLE,
+};
+
+struct task_struct;
+
+enum mitigation_state arm64_get_spectre_v2_state(void);
+bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+
+enum mitigation_state arm64_get_spectre_v4_state(void);
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
+
+#endif /* __ASM_SPECTRE_H */
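
The "ordering is important" comment refers to update_mitigation_state() in proton-pack.c further down: the state only ever moves to a numerically larger (worse) value, so a late CPU can downgrade the system but never upgrade it. A single-threaded sketch of that rule (the kernel version uses cmpxchg_relaxed() and warns if capabilities are already finalized):

#include <stdio.h>

enum mitigation_state {
	SPECTRE_UNAFFECTED,
	SPECTRE_MITIGATED,
	SPECTRE_VULNERABLE,
};

/* Sketch: only "worse" (numerically larger) states are ever recorded. */
static void update_state(enum mitigation_state *oldp, enum mitigation_state new)
{
	if (new > *oldp)
		*oldp = new;
}

int main(void)
{
	enum mitigation_state state = SPECTRE_UNAFFECTED;

	update_state(&state, SPECTRE_MITIGATED);	/* boot CPU needs the workaround */
	update_state(&state, SPECTRE_VULNERABLE);	/* late CPU has no mitigation */
	update_state(&state, SPECTRE_MITIGATED);	/* ignored: cannot go back */

	printf("final state: %d (SPECTRE_VULNERABLE)\n", state);
	return 0;
}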
index 7b1511d6ce44af20249ec8e3d46d3a9a9f32e4f8..1c17c3a24411d6b0dfd33424d1b493d4750d0221 100644 (file)
@@ -257,6 +257,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL          0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL              1
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED       2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2    KVM_REG_ARM_FW_REG(2)
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL          0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN            1
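
A hedged sketch of how a VMM on an arm64 host might read this firmware pseudo-register through the standard KVM_GET_ONE_REG ioctl; vcpu_fd is assumed to come from the usual KVM setup and error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t get_wa2_state(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id	= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
		.addr	= (uint64_t)&val,
	};

	/* Per the comment above, the host narrows the returned value to
	 * NOT_AVAIL or NOT_REQUIRED. */
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return val;
}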
index a561cbb91d4dc5f5f91a367e96d5d89b663454e6..bd12b9a2ab4a918bedf5275eef05b6ca6b0a3ede 100644 (file)
@@ -19,7 +19,7 @@ obj-y                 := debug-monitors.o entry.o irq.o fpsimd.o              \
                           return_address.o cpuinfo.o cpu_errata.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
                           smp.o smp_spin_table.o topology.o smccc-call.o       \
-                          syscall.o
+                          syscall.o proton-pack.o
 
 targets                        += efi-entry.o
 
@@ -59,7 +59,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)               += crash_dump.o
 obj-$(CONFIG_CRASH_CORE)               += crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)                += sdei.o
-obj-$(CONFIG_ARM64_SSBD)               += ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)           += pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)                += scs.o
 
index c332d49780dc96c6afe8cbe7b88f7cbef3d727b9..6c8303559bebe26e050e0848f6d5e5e910cf812e 100644 (file)
@@ -106,365 +106,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-                               const char *hyp_vecs_end)
-{
-       void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
-       int i;
-
-       for (i = 0; i < SZ_2K; i += 0x80)
-               memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-       __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                   const char *hyp_vecs_start,
-                                   const char *hyp_vecs_end)
-{
-       static DEFINE_RAW_SPINLOCK(bp_lock);
-       int cpu, slot = -1;
-
-       /*
-        * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
-        * we're a guest. Skip the hyp-vectors work.
-        */
-       if (!hyp_vecs_start) {
-               __this_cpu_write(bp_hardening_data.fn, fn);
-               return;
-       }
-
-       raw_spin_lock(&bp_lock);
-       for_each_possible_cpu(cpu) {
-               if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-                       slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-                       break;
-               }
-       }
-
-       if (slot == -1) {
-               slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-               BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-               __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-       }
-
-       __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-       __this_cpu_write(bp_hardening_data.fn, fn);
-       raw_spin_unlock(&bp_lock);
-}
-#else
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                     const char *hyp_vecs_start,
-                                     const char *hyp_vecs_end)
-{
-       __this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-
-#include <linux/arm-smccc.h>
-
-static void __maybe_unused call_smc_arch_workaround_1(void)
-{
-       arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
-       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
-       u64 tmp;
-
-       asm volatile("mov       %0, x30         \n"
-                    ".rept     16              \n"
-                    "bl        . + 4           \n"
-                    ".endr                     \n"
-                    "mov       x30, %0         \n"
-                    : "=&r" (tmp));
-}
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
-       __nospectre_v2 = true;
-       return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- *  0: No workaround required
- *  1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
-       bp_hardening_cb_t cb;
-       void *smccc_start, *smccc_end;
-       struct arm_smccc_res res;
-       u32 midr = read_cpuid_id();
-
-       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                            ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-
-       switch ((int)res.a0) {
-       case 1:
-               /* Firmware says we're just fine */
-               return 0;
-       case 0:
-               break;
-       default:
-               return -1;
-       }
-
-       switch (arm_smccc_1_1_get_conduit()) {
-       case SMCCC_CONDUIT_HVC:
-               cb = call_hvc_arch_workaround_1;
-               /* This is a guest, no need to patch KVM vectors */
-               smccc_start = NULL;
-               smccc_end = NULL;
-               break;
-
-#if IS_ENABLED(CONFIG_KVM)
-       case SMCCC_CONDUIT_SMC:
-               cb = call_smc_arch_workaround_1;
-               smccc_start = __smccc_workaround_1_smc;
-               smccc_end = __smccc_workaround_1_smc +
-                       __SMCCC_WORKAROUND_1_SMC_SZ;
-               break;
-#endif
-
-       default:
-               return -1;
-       }
-
-       if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-           ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
-               cb = qcom_link_stack_sanitization;
-
-       if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-               install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
-       return 1;
-}
-
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
-       const char      *str;
-       int             state;
-} ssbd_options[] = {
-       { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
-       { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
-       { "kernel",     ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
-       int i;
-
-       if (!buf || !buf[0])
-               return -EINVAL;
-
-       for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
-               int len = strlen(ssbd_options[i].str);
-
-               if (strncmp(buf, ssbd_options[i].str, len))
-                       continue;
-
-               ssbd_state = ssbd_options[i].state;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-                                      __le32 *origptr, __le32 *updptr,
-                                      int nr_inst)
-{
-       u32 insn;
-
-       BUG_ON(nr_inst != 1);
-
-       switch (arm_smccc_1_1_get_conduit()) {
-       case SMCCC_CONDUIT_HVC:
-               insn = aarch64_insn_get_hvc_value();
-               break;
-       case SMCCC_CONDUIT_SMC:
-               insn = aarch64_insn_get_smc_value();
-               break;
-       default:
-               return;
-       }
-
-       *updptr = cpu_to_le32(insn);
-}
-
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-                                     __le32 *origptr, __le32 *updptr,
-                                     int nr_inst)
-{
-       BUG_ON(nr_inst != 1);
-       /*
-        * Only allow mitigation on EL1 entry/exit and guest
-        * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
-        * be flipped.
-        */
-       if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
-               *updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
-void arm64_set_ssbd_mitigation(bool state)
-{
-       int conduit;
-
-       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-               pr_info_once("SSBD disabled by kernel configuration\n");
-               return;
-       }
-
-       if (this_cpu_has_cap(ARM64_SSBS)) {
-               if (state)
-                       asm volatile(SET_PSTATE_SSBS(0));
-               else
-                       asm volatile(SET_PSTATE_SSBS(1));
-               return;
-       }
-
-       conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
-                                      NULL);
-
-       WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
-                                   int scope)
-{
-       struct arm_smccc_res res;
-       bool required = true;
-       s32 val;
-       bool this_cpu_safe = false;
-       int conduit;
-
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-       if (cpu_mitigations_off())
-               ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
-       /* delay setting __ssb_safe until we get a firmware response */
-       if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
-               this_cpu_safe = true;
-
-       if (this_cpu_has_cap(ARM64_SSBS)) {
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               required = false;
-               goto out_printmsg;
-       }
-
-       conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                      ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
-       if (conduit == SMCCC_CONDUIT_NONE) {
-               ssbd_state = ARM64_SSBD_UNKNOWN;
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               return false;
-       }
-
-       val = (s32)res.a0;
-
-       switch (val) {
-       case SMCCC_RET_NOT_SUPPORTED:
-               ssbd_state = ARM64_SSBD_UNKNOWN;
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               return false;
-
-       /* machines with mixed mitigation requirements must not return this */
-       case SMCCC_RET_NOT_REQUIRED:
-               pr_info_once("%s mitigation not required\n", entry->desc);
-               ssbd_state = ARM64_SSBD_MITIGATED;
-               return false;
-
-       case SMCCC_RET_SUCCESS:
-               __ssb_safe = false;
-               required = true;
-               break;
-
-       case 1: /* Mitigation not required on this CPU */
-               required = false;
-               break;
-
-       default:
-               WARN_ON(1);
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               return false;
-       }
-
-       switch (ssbd_state) {
-       case ARM64_SSBD_FORCE_DISABLE:
-               arm64_set_ssbd_mitigation(false);
-               required = false;
-               break;
-
-       case ARM64_SSBD_KERNEL:
-               if (required) {
-                       __this_cpu_write(arm64_ssbd_callback_required, 1);
-                       arm64_set_ssbd_mitigation(true);
-               }
-               break;
-
-       case ARM64_SSBD_FORCE_ENABLE:
-               arm64_set_ssbd_mitigation(true);
-               required = true;
-               break;
-
-       default:
-               WARN_ON(1);
-               break;
-       }
-
-out_printmsg:
-       switch (ssbd_state) {
-       case ARM64_SSBD_FORCE_DISABLE:
-               pr_info_once("%s disabled from command-line\n", entry->desc);
-               break;
-
-       case ARM64_SSBD_FORCE_ENABLE:
-               pr_info_once("%s forced from command-line\n", entry->desc);
-               break;
-       }
-
-       return required;
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-       {},
-};
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 
@@ -519,83 +160,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)
 
-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-int get_spectre_v2_workaround_state(void)
-{
-       if (__spectrev2_safe)
-               return ARM64_BP_HARDEN_NOT_REQUIRED;
-
-       if (!__hardenbp_enab)
-               return ARM64_BP_HARDEN_UNKNOWN;
-
-       return ARM64_BP_HARDEN_WA_NEEDED;
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-       MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-       { /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
-       int need_wa;
-
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-       /* If the CPU has CSV2 set, we're safe */
-       if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
-                                                ID_AA64PFR0_CSV2_SHIFT))
-               return false;
-
-       /* Alternatively, we have a list of unaffected CPUs */
-       if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
-               return false;
-
-       /* Fallback to firmware detection */
-       need_wa = detect_harden_bp_fw();
-       if (!need_wa)
-               return false;
-
-       __spectrev2_safe = false;
-
-       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-               __hardenbp_enab = false;
-               return false;
-       }
-
-       /* forced off */
-       if (__nospectre_v2 || cpu_mitigations_off()) {
-               pr_info_once("spectrev2 mitigation disabled by command line option\n");
-               __hardenbp_enab = false;
-               return false;
-       }
-
-       if (need_wa < 0) {
-               pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
-               __hardenbp_enab = false;
-       }
-
-       return (need_wa > 0);
-}
-
 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
@@ -887,9 +451,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        },
 #endif
        {
-               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               .desc = "Spectre-v2",
+               .capability = ARM64_SPECTRE_V2,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-               .matches = check_branch_predictor,
+               .matches = has_spectre_v2,
+               .cpu_enable = spectre_v2_enable_mitigation,
        },
 #ifdef CONFIG_RANDOMIZE_BASE
        {
@@ -899,11 +465,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        },
 #endif
        {
-               .desc = "Speculative Store Bypass Disable",
-               .capability = ARM64_SSBD,
+               .desc = "Spectre-v4",
+               .capability = ARM64_SPECTRE_V4,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-               .matches = has_ssbd_mitigation,
-               .midr_range_list = arm64_ssb_cpus,
+               .matches = has_spectre_v4,
+               .cpu_enable = spectre_v4_enable_mitigation,
        },
 #ifdef CONFIG_ARM64_ERRATUM_1418040
        {
@@ -956,40 +522,3 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
        }
 };
-
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
-                           char *buf)
-{
-       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
-               char *buf)
-{
-       switch (get_spectre_v2_workaround_state()) {
-       case ARM64_BP_HARDEN_NOT_REQUIRED:
-               return sprintf(buf, "Not affected\n");
-        case ARM64_BP_HARDEN_WA_NEEDED:
-               return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-        case ARM64_BP_HARDEN_UNKNOWN:
-       default:
-               return sprintf(buf, "Vulnerable\n");
-       }
-}
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       if (__ssb_safe)
-               return sprintf(buf, "Not affected\n");
-
-       switch (ssbd_state) {
-       case ARM64_SSBD_KERNEL:
-       case ARM64_SSBD_FORCE_ENABLE:
-               if (IS_ENABLED(CONFIG_ARM64_SSBD))
-                       return sprintf(buf,
-                           "Mitigation: Speculative Store Bypass disabled via prctl\n");
-       }
-
-       return sprintf(buf, "Vulnerable\n");
-}
index 6424584be01e6dd0a6d5e846d8e205dad6a6f1c0..a4debb63ebfbf329d9e6b006b3029b933a382aaf 100644 (file)
@@ -227,7 +227,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
                                    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
        ARM64_FTR_END,
@@ -487,7 +487,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_pfr2[] = {
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
@@ -1583,48 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
        WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
-{
-       if (user_mode(regs))
-               return 1;
-
-       if (instr & BIT(PSTATE_Imm_shift))
-               regs->pstate |= PSR_SSBS_BIT;
-       else
-               regs->pstate &= ~PSR_SSBS_BIT;
-
-       arm64_skip_faulting_instruction(regs, 4);
-       return 0;
-}
-
-static struct undef_hook ssbs_emulation_hook = {
-       .instr_mask     = ~(1U << PSTATE_Imm_shift),
-       .instr_val      = 0xd500401f | PSTATE_SSBS,
-       .fn             = ssbs_emulation_handler,
-};
-
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
-{
-       static bool undef_hook_registered = false;
-       static DEFINE_RAW_SPINLOCK(hook_lock);
-
-       raw_spin_lock(&hook_lock);
-       if (!undef_hook_registered) {
-               register_undef_hook(&ssbs_emulation_hook);
-               undef_hook_registered = true;
-       }
-       raw_spin_unlock(&hook_lock);
-
-       if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-               sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
-               arm64_set_ssbd_mitigation(false);
-       } else {
-               arm64_set_ssbd_mitigation(true);
-       }
-}
-#endif /* CONFIG_ARM64_SSBD */
-
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1976,19 +1934,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
                .min_field_value = 1,
        },
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypassing Safe (SSBS)",
                .capability = ARM64_SSBS,
-               .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64PFR1_EL1,
                .field_pos = ID_AA64PFR1_SSBS_SHIFT,
                .sign = FTR_UNSIGNED,
                .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
-               .cpu_enable = cpu_enable_ssbs,
        },
-#endif
 #ifdef CONFIG_ARM64_CNP
        {
                .desc = "Common not Private translations",
index 55af8b504b65abb2164f350e7019db108785d482..aeb337029d567410419f718658f116ec0c7cd33c 100644 (file)
@@ -132,9 +132,8 @@ alternative_else_nop_endif
         * them if required.
         */
        .macro  apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
-       b       .L__asm_ssbd_skip\@
+alternative_cb spectre_v4_patch_fw_mitigation_enable
+       b       .L__asm_ssbd_skip\@             // Patched to NOP
 alternative_cb_end
        ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
        cbz     \tmp2,  .L__asm_ssbd_skip\@
@@ -142,11 +141,10 @@ alternative_cb_end
        tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
        mov     w1, #\state
-alternative_cb arm64_update_smccc_conduit
+alternative_cb spectre_v4_patch_fw_mitigation_conduit
        nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
        .endm
 
        .macro  kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
        bl      trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        tbz     x22, #55, 1f
        bl      do_el0_irq_bp_hardening
 1:
-#endif
        irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
index 68e14152d6e9b09a83cec2893809c970ac569d50..c7b00120dc3e4b22aedb37dd945ac6babec07eb5 100644 (file)
@@ -332,11 +332,7 @@ int swsusp_arch_suspend(void)
                 * mitigation off behind our back, let's set the state
                 * to what we expect it to be.
                 */
-               switch (arm64_get_ssbd_state()) {
-               case ARM64_SSBD_FORCE_ENABLE:
-               case ARM64_SSBD_KERNEL:
-                       arm64_set_ssbd_mitigation(true);
-               }
+               spectre_v4_enable_mitigation(NULL);
        }
 
        local_daif_restore(flags);
index 9f419e4fc66bfab9910cd09d379dc588b1030c49..fbd4b6b1fde5d3ffc90b425b14aa8da214fc5139 100644 (file)
@@ -61,18 +61,11 @@ __efistub__ctype            = _ctype;
  * memory mappings.
  */
 
-#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
-
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 
 /* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
-KVM_NVHE_ALIAS(kvm_host_data);
-KVM_NVHE_ALIAS(kvm_hyp_ctxt);
-KVM_NVHE_ALIAS(kvm_hyp_vector);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
 /* Kernel constant needed to compute idmap addresses. */
index f1804496b93508caad6174a635dbe91e98dfeedf..085d8ca39e47df59e80815c5b017184df58e12a3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/lockdep.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -421,8 +422,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                    cpus_have_const_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;
 
-               if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-                       set_ssbs_bit(childregs);
+               spectre_v4_enable_task_mitigation(p);
 
                if (system_uses_irq_prio_masking())
                        childregs->pmr_save = GIC_PRIO_IRQON;
@@ -472,8 +472,6 @@ void uao_thread_switch(struct task_struct *next)
  */
 static void ssbs_thread_switch(struct task_struct *next)
 {
-       struct pt_regs *regs = task_pt_regs(next);
-
        /*
         * Nothing to do for kernel threads, but 'regs' may be junk
         * (e.g. idle task) so check the flags and bail early.
@@ -485,18 +483,10 @@ static void ssbs_thread_switch(struct task_struct *next)
         * If all CPUs implement the SSBS extension, then we just need to
         * context-switch the PSTATE field.
         */
-       if (cpu_have_feature(cpu_feature(SSBS)))
-               return;
-
-       /* If the mitigation is enabled, then we leave SSBS clear. */
-       if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
-           test_tsk_thread_flag(next, TIF_SSBD))
+       if (cpus_have_const_cap(ARM64_SSBS))
                return;
 
-       if (compat_user_mode(regs))
-               set_compat_ssbs_bit(regs);
-       else if (user_mode(regs))
-               set_ssbs_bit(regs);
+       spectre_v4_enable_task_mitigation(next);
 }
 
 /*
@@ -620,6 +610,11 @@ void arch_setup_new_exec(void)
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
 
        ptrauth_thread_init_user(current);
+
+       if (task_spec_ssb_noexec(current)) {
+               arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
+                                        PR_SPEC_ENABLE);
+       }
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
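
The arch_setup_new_exec() hunk above re-enables speculation at execve() for tasks that chose PR_SPEC_DISABLE_NOEXEC. A user-space sketch of the prctl() interface involved, assuming the libc headers expose the PR_SPEC_* constants:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Disable Speculative Store Bypass, but only until the next execve(). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	printf("ssb state: 0x%x\n",
	       (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}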
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
new file mode 100644 (file)
index 0000000..68b710f
--- /dev/null
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * detailed at:
+ *
+ *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
+ *
+ * This code was originally written hastily under an awful lot of stress and so
+ * aspects of it are somewhat hacky. Unfortunately, changing anything in here
+ * instantly makes me feel ill. Thanks, Jann. Thann.
+ *
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ * Copyright (C) 2020 Google LLC
+ *
+ * "If there's something strange in your neighbourhood, who you gonna call?"
+ *
+ * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/spectre.h>
+#include <asm/traps.h>
+
+/*
+ * We try to ensure that the mitigation state can never change as the result of
+ * onlining a late CPU.
+ */
+static void update_mitigation_state(enum mitigation_state *oldp,
+                                   enum mitigation_state new)
+{
+       enum mitigation_state state;
+
+       do {
+               state = READ_ONCE(*oldp);
+               if (new <= state)
+                       break;
+
+               /* Userspace almost certainly can't deal with this. */
+               if (WARN_ON(system_capabilities_finalized()))
+                       break;
+       } while (cmpxchg_relaxed(oldp, state, new) != state);
+}
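
A standalone sketch of the ratchet above (userspace, C11 atomics). Assumption: the enum in the new asm/spectre.h is ordered SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE, so the recorded state can only move towards "worse" and never improves once set.

#include <stdatomic.h>
#include <stdio.h>

enum mitigation_state {         /* assumed ordering, mirrors asm/spectre.h */
        SPECTRE_UNAFFECTED,
        SPECTRE_MITIGATED,
        SPECTRE_VULNERABLE,
};

static _Atomic int demo_state = SPECTRE_UNAFFECTED;

static void demo_update(int new)
{
        int state;

        do {
                state = atomic_load_explicit(&demo_state, memory_order_relaxed);
                if (new <= state)       /* never move back towards "better" */
                        return;
        } while (!atomic_compare_exchange_weak_explicit(&demo_state, &state, new,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
}

int main(void)
{
        demo_update(SPECTRE_MITIGATED);         /* boot CPU needs a mitigation */
        demo_update(SPECTRE_UNAFFECTED);        /* late "clean" CPU: no change */
        printf("final state: %d\n", (int)demo_state);   /* 1 == SPECTRE_MITIGATED */
        return 0;
}
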
+
+/*
+ * Spectre v1.
+ *
+ * The kernel can't protect userspace for this one: it's each person for
+ * themselves. Advertise what we're doing and be done with it.
+ */
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+/*
+ * Spectre v2.
+ *
+ * This one sucks. A CPU is either:
+ *
+ * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in software by firmware.
+ * - Mitigated in software by a CPU-specific dance in the kernel.
+ * - Vulnerable.
+ *
+ * It's not unlikely for different CPUs in a big.LITTLE system to fall into
+ * different camps.
+ */
+static enum mitigation_state spectre_v2_state;
+
+static bool __read_mostly __nospectre_v2;
+static int __init parse_spectre_v2_param(char *str)
+{
+       __nospectre_v2 = true;
+       return 0;
+}
+early_param("nospectre_v2", parse_spectre_v2_param);
+
+static bool spectre_v2_mitigations_off(void)
+{
+       bool ret = __nospectre_v2 || cpu_mitigations_off();
+
+       if (ret)
+               pr_info_once("spectre-v2 mitigation disabled by command line option\n");
+
+       return ret;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+       switch (spectre_v2_state) {
+       case SPECTRE_UNAFFECTED:
+               return sprintf(buf, "Not affected\n");
+       case SPECTRE_MITIGATED:
+               return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+       case SPECTRE_VULNERABLE:
+               fallthrough;
+       default:
+               return sprintf(buf, "Vulnerable\n");
+       }
+}
+
+static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
+{
+       u64 pfr0;
+       static const struct midr_range spectre_v2_safe_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+               MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+               MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+               { /* sentinel */ }
+       };
+
+       /* If the CPU has CSV2 set, we're safe */
+       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+       if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+               return SPECTRE_UNAFFECTED;
+
+       /* Alternatively, we have a list of unaffected CPUs */
+       if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+               return SPECTRE_UNAFFECTED;
+
+       return SPECTRE_VULNERABLE;
+}
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED   (1)
+
+static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
+{
+       int ret;
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+
+       ret = res.a0;
+       switch (ret) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               return SPECTRE_UNAFFECTED;
+       default:
+               fallthrough;
+       case SMCCC_RET_NOT_SUPPORTED:
+               return SPECTRE_VULNERABLE;
+       }
+}
+
+bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
+               return false;
+
+       if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
+               return false;
+
+       return true;
+}
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+enum mitigation_state arm64_get_spectre_v2_state(void)
+{
+       return spectre_v2_state;
+}
+
+#ifdef CONFIG_KVM
+#include <asm/cacheflush.h>
+#include <asm/kvm_asm.h>
+
+atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+{
+       void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
+       int i;
+
+       for (i = 0; i < SZ_2K; i += 0x80)
+               memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+       __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
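
For reference, the geometry the loop above relies on: each slot is one complete EL2 vector table, and the firmware-call sequence is copied to the head of every entry in the chosen slot before the I-cache range is flushed. Derived values only, no new policy:

#define DEMO_SLOT_SIZE          0x800   /* SZ_2K: one full 16-entry vector table */
#define DEMO_VECTOR_STRIDE      0x80    /* 128 bytes per vector entry            */
#define DEMO_ENTRIES_PER_SLOT   (DEMO_SLOT_SIZE / DEMO_VECTOR_STRIDE)   /* = 16  */
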
+
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+       static DEFINE_RAW_SPINLOCK(bp_lock);
+       int cpu, slot = -1;
+       const char *hyp_vecs_start = __smccc_workaround_1_smc;
+       const char *hyp_vecs_end = __smccc_workaround_1_smc +
+                                  __SMCCC_WORKAROUND_1_SMC_SZ;
+
+       /*
+        * If EL2 isn't available then we're running as a guest and there are
+        * no hyp vectors to patch: just record the callback and return.
+        */
+       if (!is_hyp_mode_available()) {
+               __this_cpu_write(bp_hardening_data.fn, fn);
+               return;
+       }
+
+       raw_spin_lock(&bp_lock);
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+                       slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+                       break;
+               }
+       }
+
+       if (slot == -1) {
+               slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+               BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+               __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+       }
+
+       __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+       __this_cpu_write(bp_hardening_data.fn, fn);
+       raw_spin_unlock(&bp_lock);
+}
+#else
+static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+{
+       __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void call_smc_arch_workaround_1(void)
+{
+       arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void qcom_link_stack_sanitisation(void)
+{
+       u64 tmp;
+
+       asm volatile("mov       %0, x30         \n"
+                    ".rept     16              \n"
+                    "bl        . + 4           \n"
+                    ".endr                     \n"
+                    "mov       x30, %0         \n"
+                    : "=&r" (tmp));
+}
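
An annotated rendition of the sequence above (annotation only; the behaviour of `bl . + 4` is the assumption being spelled out):

        /*
         *   mov  %0, x30     // stash the real return address in a temporary
         *   .rept 16
         *   bl   . + 4       // branch-and-link to the very next instruction;
         *   .endr            // each iteration writes x30 and pushes a benign
         *                    // entry onto the return-address (link-stack)
         *                    // predictor
         *   mov  x30, %0     // restore the real return address
         *
         * i.e. the predictor's link-stack history is overwritten with harmless
         * targets on the affected Falkor parts before execution continues.
         */
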
+
+static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
+{
+       bp_hardening_cb_t cb;
+       enum mitigation_state state;
+
+       state = spectre_v2_get_cpu_fw_mitigation_state();
+       if (state != SPECTRE_MITIGATED)
+               return state;
+
+       if (spectre_v2_mitigations_off())
+               return SPECTRE_VULNERABLE;
+
+       switch (arm_smccc_1_1_get_conduit()) {
+       case SMCCC_CONDUIT_HVC:
+               cb = call_hvc_arch_workaround_1;
+               break;
+
+       case SMCCC_CONDUIT_SMC:
+               cb = call_smc_arch_workaround_1;
+               break;
+
+       default:
+               return SPECTRE_VULNERABLE;
+       }
+
+       install_bp_hardening_cb(cb);
+       return SPECTRE_MITIGATED;
+}
+
+static enum mitigation_state spectre_v2_enable_sw_mitigation(void)
+{
+       u32 midr;
+
+       if (spectre_v2_mitigations_off())
+               return SPECTRE_VULNERABLE;
+
+       midr = read_cpuid_id();
+       if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
+           ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
+               return SPECTRE_VULNERABLE;
+
+       install_bp_hardening_cb(qcom_link_stack_sanitisation);
+       return SPECTRE_MITIGATED;
+}
+
+void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+       enum mitigation_state state;
+
+       WARN_ON(preemptible());
+
+       state = spectre_v2_get_cpu_hw_mitigation_state();
+       if (state == SPECTRE_VULNERABLE)
+               state = spectre_v2_enable_fw_mitigation();
+       if (state == SPECTRE_VULNERABLE)
+               state = spectre_v2_enable_sw_mitigation();
+
+       update_mitigation_state(&spectre_v2_state, state);
+}
+
+/*
+ * Spectre v4.
+ *
+ * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
+ * either:
+ *
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in hardware via PSTATE.SSBS.
+ * - Mitigated in software by firmware (sometimes referred to as SSBD).
+ *
+ * Wait, that doesn't sound so bad, does it? Keep reading...
+ *
+ * A major source of headaches is that the software mitigation is enabled on
+ * a per-task basis, but can also be forced on for the kernel, necessitating
+ * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
+ * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
+ * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
+ * so you can have systems that have both firmware and SSBS mitigations. This
+ * means we actually have to reject late onlining of CPUs with mitigations if
+ * all of the currently onlined CPUs are safelisted, as the mitigation tends to
+ * be opt-in for userspace. Yes, really, the cure is worse than the disease.
+ *
+ * The only good part is that if the firmware mitigation is present, then it is
+ * present for all CPUs, meaning we don't have to worry about late onlining of a
+ * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
+ *
+ * Give me a VAX-11/780 any day of the week...
+ */
+static enum mitigation_state spectre_v4_state;
+
+/* This is the per-cpu state tracking whether we need to talk to firmware */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+enum spectre_v4_policy {
+       SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
+       SPECTRE_V4_POLICY_MITIGATION_ENABLED,
+       SPECTRE_V4_POLICY_MITIGATION_DISABLED,
+};
+
+static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
+
+static const struct spectre_v4_param {
+       const char              *str;
+       enum spectre_v4_policy  policy;
+} spectre_v4_params[] = {
+       { "force-on",   SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
+       { "force-off",  SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
+       { "kernel",     SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
+};
+static int __init parse_spectre_v4_param(char *str)
+{
+       int i;
+
+       if (!str || !str[0])
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
+               const struct spectre_v4_param *param = &spectre_v4_params[i];
+
+               if (strncmp(str, param->str, strlen(param->str)))
+                       continue;
+
+               __spectre_v4_policy = param->policy;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+early_param("ssbd", parse_spectre_v4_param);
+
+/*
+ * Because this was all written in a rush by people working in different silos,
+ * we've ended up with multiple command line options to control the same thing.
+ * Wrap these up in some helpers, which prefer disabling the mitigation if faced
+ * with contradictory parameters. The mitigation is always either "off",
+ * "dynamic" or "on".
+ */
+static bool spectre_v4_mitigations_off(void)
+{
+       bool ret = cpu_mitigations_off() ||
+                  __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
+
+       if (ret)
+               pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
+
+       return ret;
+}
+
+/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
+static bool spectre_v4_mitigations_dynamic(void)
+{
+       return !spectre_v4_mitigations_off() &&
+              __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
+}
+
+static bool spectre_v4_mitigations_on(void)
+{
+       return !spectre_v4_mitigations_off() &&
+              __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
+}
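
Concretely, the mapping implemented by the parser and the three helpers above ("kernel" is also the effective default, since the static __spectre_v4_policy zero-initialises to SPECTRE_V4_POLICY_MITIGATION_DYNAMIC):

        /*
         *   ssbd=force-off or mitigations=off -> spectre_v4_mitigations_off()
         *   ssbd=force-on                     -> spectre_v4_mitigations_on()
         *   ssbd=kernel or no option at all   -> spectre_v4_mitigations_dynamic()
         *
         * Contradictory options resolve towards "off": both of the other
         * helpers check spectre_v4_mitigations_off() first.
         */
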
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       switch (spectre_v4_state) {
+       case SPECTRE_UNAFFECTED:
+               return sprintf(buf, "Not affected\n");
+       case SPECTRE_MITIGATED:
+               return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
+       case SPECTRE_VULNERABLE:
+               fallthrough;
+       default:
+               return sprintf(buf, "Vulnerable\n");
+       }
+}
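
The cpu_show_*() handlers in this file back the generic files under /sys/devices/system/cpu/vulnerabilities/. A minimal userspace sketch of reading the reported state back:

#include <stdio.h>

int main(void)
{
        static const char *const files[] = {
                "/sys/devices/system/cpu/vulnerabilities/spectre_v1",
                "/sys/devices/system/cpu/vulnerabilities/spectre_v2",
                "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
        };
        char line[128];

        for (unsigned int i = 0; i < 3; i++) {
                FILE *f = fopen(files[i], "r");

                if (f && fgets(line, sizeof(line), f))
                        printf("%s: %s", files[i], line);
                if (f)
                        fclose(f);
        }

        return 0;
}
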
+
+enum mitigation_state arm64_get_spectre_v4_state(void)
+{
+       return spectre_v4_state;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
+{
+       static const struct midr_range spectre_v4_safe_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+               MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+               { /* sentinel */ },
+       };
+
+       if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+               return SPECTRE_UNAFFECTED;
+
+       /* CPU features are detected first */
+       if (this_cpu_has_cap(ARM64_SSBS))
+               return SPECTRE_MITIGATED;
+
+       return SPECTRE_VULNERABLE;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
+{
+       int ret;
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+
+       ret = res.a0;
+       switch (ret) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               fallthrough;
+       case SMCCC_RET_NOT_REQUIRED:
+               return SPECTRE_UNAFFECTED;
+       default:
+               fallthrough;
+       case SMCCC_RET_NOT_SUPPORTED:
+               return SPECTRE_VULNERABLE;
+       }
+}
+
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
+{
+       enum mitigation_state state;
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       state = spectre_v4_get_cpu_hw_mitigation_state();
+       if (state == SPECTRE_VULNERABLE)
+               state = spectre_v4_get_cpu_fw_mitigation_state();
+
+       return state != SPECTRE_UNAFFECTED;
+}
+
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+       if (user_mode(regs))
+               return 1;
+
+       if (instr & BIT(PSTATE_Imm_shift))
+               regs->pstate |= PSR_SSBS_BIT;
+       else
+               regs->pstate &= ~PSR_SSBS_BIT;
+
+       arm64_skip_faulting_instruction(regs, 4);
+       return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+       .instr_mask     = ~(1U << PSTATE_Imm_shift),
+       .instr_val      = 0xd500401f | PSTATE_SSBS,
+       .fn             = ssbs_emulation_handler,
+};
+
+static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+{
+       static bool undef_hook_registered = false;
+       static DEFINE_RAW_SPINLOCK(hook_lock);
+       enum mitigation_state state;
+
+       /*
+        * If the system is mitigated but this CPU doesn't have SSBS, then
+        * we must be on the safelist and there's nothing more to do.
+        */
+       state = spectre_v4_get_cpu_hw_mitigation_state();
+       if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
+               return state;
+
+       raw_spin_lock(&hook_lock);
+       if (!undef_hook_registered) {
+               register_undef_hook(&ssbs_emulation_hook);
+               undef_hook_registered = true;
+       }
+       raw_spin_unlock(&hook_lock);
+
+       if (spectre_v4_mitigations_off()) {
+               sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+               asm volatile(SET_PSTATE_SSBS(1));
+               return SPECTRE_VULNERABLE;
+       }
+
+       /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+       asm volatile(SET_PSTATE_SSBS(0));
+       return SPECTRE_MITIGATED;
+}
+
+/*
+ * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
+ * we fall through and check whether firmware needs to be called on this CPU.
+ */
+void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
+                                                 __le32 *origptr,
+                                                 __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+       if (spectre_v4_mitigations_off())
+               return;
+
+       if (cpus_have_final_cap(ARM64_SSBS))
+               return;
+
+       if (spectre_v4_mitigations_dynamic())
+               *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/*
+ * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+ * to call into firmware to adjust the mitigation state.
+ */
+void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
+                                                  __le32 *origptr,
+                                                  __le32 *updptr, int nr_inst)
+{
+       u32 insn;
+
+       BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
+
+       switch (arm_smccc_1_1_get_conduit()) {
+       case SMCCC_CONDUIT_HVC:
+               insn = aarch64_insn_get_hvc_value();
+               break;
+       case SMCCC_CONDUIT_SMC:
+               insn = aarch64_insn_get_smc_value();
+               break;
+       default:
+               return;
+       }
+
+       *updptr = cpu_to_le32(insn);
+}
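
Taken together, the two callbacks above give the kernel entry/exit hook roughly the following shape (illustrative pseudo-assembly under stated assumptions, not the literal entry.S text):

        /*
         *         b       skip          // left in place unless the "dynamic"
         *                               // policy applies; NOPed out by
         *                               // spectre_v4_patch_fw_mitigation_enable()
         *         <load this CPU's arm64_ssbd_callback_required>
         *         cbz     x0, skip      // no firmware call needed on this CPU
         *         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
         *         mov     w1, #<enable/disable>
         *         nop                   // patched to HVC/SMC #0 by
         *                               // spectre_v4_patch_fw_mitigation_conduit()
         * skip:
         */
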
+
+static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
+{
+       enum mitigation_state state;
+
+       state = spectre_v4_get_cpu_fw_mitigation_state();
+       if (state != SPECTRE_MITIGATED)
+               return state;
+
+       if (spectre_v4_mitigations_off()) {
+               arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
+               return SPECTRE_VULNERABLE;
+       }
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
+
+       if (spectre_v4_mitigations_dynamic())
+               __this_cpu_write(arm64_ssbd_callback_required, 1);
+
+       return SPECTRE_MITIGATED;
+}
+
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+       enum mitigation_state state;
+
+       WARN_ON(preemptible());
+
+       state = spectre_v4_enable_hw_mitigation();
+       if (state == SPECTRE_VULNERABLE)
+               state = spectre_v4_enable_fw_mitigation();
+
+       update_mitigation_state(&spectre_v4_state, state);
+}
+
+static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
+{
+       u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+       if (state)
+               regs->pstate |= bit;
+       else
+               regs->pstate &= ~bit;
+}
+
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
+{
+       struct pt_regs *regs = task_pt_regs(tsk);
+       bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
+
+       if (spectre_v4_mitigations_off())
+               ssbs = true;
+       else if (spectre_v4_mitigations_dynamic() && !kthread)
+               ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
+
+       __update_pstate_ssbs(regs, ssbs);
+}
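
Restated as a table plus a standalone helper (sketch, not kernel code). PSTATE.SSBS == 1 means speculative store bypass is permitted, i.e. the mitigation is off for that task:

#include <stdbool.h>

/*
 *   policy      TIF_SSBD   kthread   resulting PSTATE.SSBS
 *   force-off      any       any              1
 *   force-on       any       any              0
 *   dynamic         0        no               1
 *   dynamic         1        no               0
 *   dynamic        any       yes              0  (kernel threads stay mitigated)
 */
enum demo_policy { DEMO_DYNAMIC, DEMO_FORCE_ON, DEMO_FORCE_OFF };

static bool demo_task_ssbs(enum demo_policy p, bool tif_ssbd, bool kthread)
{
        if (p == DEMO_FORCE_OFF)
                return true;            /* mitigation globally disabled        */
        if (p == DEMO_DYNAMIC && !kthread)
                return !tif_ssbd;       /* honour the per-task prctl()/TIF bit */
        return false;                   /* force-on, or a kernel thread        */
}
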
+
+/*
+ * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
+ * This is interesting because the "speculation disabled" behaviour can be
+ * configured so that it is preserved across exec(), which means that the
+ * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
+ * from userspace.
+ */
+static void ssbd_prctl_enable_mitigation(struct task_struct *task)
+{
+       task_clear_spec_ssb_noexec(task);
+       task_set_spec_ssb_disable(task);
+       set_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static void ssbd_prctl_disable_mitigation(struct task_struct *task)
+{
+       task_clear_spec_ssb_noexec(task);
+       task_clear_spec_ssb_disable(task);
+       clear_tsk_thread_flag(task, TIF_SSBD);
+}
+
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               /* Enable speculation: disable mitigation */
+               /*
+                * Force disabled speculation prevents it from being
+                * re-enabled.
+                */
+               if (task_spec_ssb_force_disable(task))
+                       return -EPERM;
+
+               /*
+                * If the mitigation is forced on, then speculation is forced
+                * off and we again prevent it from being re-enabled.
+                */
+               if (spectre_v4_mitigations_on())
+                       return -EPERM;
+
+               ssbd_prctl_disable_mitigation(task);
+               break;
+       case PR_SPEC_FORCE_DISABLE:
+               /* Force disable speculation: force enable mitigation */
+               /*
+                * If the mitigation is forced off, then speculation is forced
+                * on and we prevent it from being disabled.
+                */
+               if (spectre_v4_mitigations_off())
+                       return -EPERM;
+
+               task_set_spec_ssb_force_disable(task);
+               fallthrough;
+       case PR_SPEC_DISABLE:
+               /* Disable speculation: enable mitigation */
+               /* Same as PR_SPEC_FORCE_DISABLE */
+               if (spectre_v4_mitigations_off())
+                       return -EPERM;
+
+               ssbd_prctl_enable_mitigation(task);
+               break;
+       case PR_SPEC_DISABLE_NOEXEC:
+               /* Disable speculation until execve(): enable mitigation */
+               /*
+                * If the mitigation state is forced one way or the other, then
+                * we must fail now before we try to toggle it on execve().
+                */
+               if (task_spec_ssb_force_disable(task) ||
+                   spectre_v4_mitigations_off() ||
+                   spectre_v4_mitigations_on()) {
+                       return -EPERM;
+               }
+
+               ssbd_prctl_enable_mitigation(task);
+               task_set_spec_ssb_noexec(task);
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       spectre_v4_enable_task_mitigation(task);
+       return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+                            unsigned long ctrl)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssbd_prctl_set(task, ctrl);
+       default:
+               return -ENODEV;
+       }
+}
+
+static int ssbd_prctl_get(struct task_struct *task)
+{
+       switch (spectre_v4_state) {
+       case SPECTRE_UNAFFECTED:
+               return PR_SPEC_NOT_AFFECTED;
+       case SPECTRE_MITIGATED:
+               if (spectre_v4_mitigations_on())
+                       return PR_SPEC_NOT_AFFECTED;
+
+               if (spectre_v4_mitigations_dynamic())
+                       break;
+
+               /* Mitigations are disabled, so we're vulnerable. */
+               fallthrough;
+       case SPECTRE_VULNERABLE:
+               fallthrough;
+       default:
+               return PR_SPEC_ENABLE;
+       }
+
+       /* Check the mitigation state for this task */
+       if (task_spec_ssb_force_disable(task))
+               return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+
+       if (task_spec_ssb_noexec(task))
+               return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
+
+       if (task_spec_ssb_disable(task))
+               return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+
+       return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssbd_prctl_get(task);
+       default:
+               return -ENODEV;
+       }
+}
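
A userspace sketch of exercising this interface (constants from <linux/prctl.h>; error handling kept minimal):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        int state;

        /* "Disable speculation" == enable the SSB mitigation for this task. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Read it back; this lands in ssbd_prctl_get() above. */
        state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
        if (state < 0)
                perror("PR_GET_SPECULATION_CTRL");
        else
                printf("spec_store_bypass state: 0x%x\n", state);

        return 0;
}
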
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
deleted file mode 100644 (file)
index b26955f..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
- */
-
-#include <linux/compat.h>
-#include <linux/errno.h>
-#include <linux/prctl.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/thread_info.h>
-
-#include <asm/cpufeature.h>
-
-static void ssbd_ssbs_enable(struct task_struct *task)
-{
-       u64 val = is_compat_thread(task_thread_info(task)) ?
-                 PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
-
-       task_pt_regs(task)->pstate |= val;
-}
-
-static void ssbd_ssbs_disable(struct task_struct *task)
-{
-       u64 val = is_compat_thread(task_thread_info(task)) ?
-                 PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
-
-       task_pt_regs(task)->pstate &= ~val;
-}
-
-/*
- * prctl interface for SSBD
- */
-static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
-{
-       int state = arm64_get_ssbd_state();
-
-       /* Unsupported */
-       if (state == ARM64_SSBD_UNKNOWN)
-               return -ENODEV;
-
-       /* Treat the unaffected/mitigated state separately */
-       if (state == ARM64_SSBD_MITIGATED) {
-               switch (ctrl) {
-               case PR_SPEC_ENABLE:
-                       return -EPERM;
-               case PR_SPEC_DISABLE:
-               case PR_SPEC_FORCE_DISABLE:
-                       return 0;
-               }
-       }
-
-       /*
-        * Things are a bit backward here: the arm64 internal API
-        * *enables the mitigation* when the userspace API *disables
-        * speculation*. So much fun.
-        */
-       switch (ctrl) {
-       case PR_SPEC_ENABLE:
-               /* If speculation is force disabled, enable is not allowed */
-               if (state == ARM64_SSBD_FORCE_ENABLE ||
-                   task_spec_ssb_force_disable(task))
-                       return -EPERM;
-               task_clear_spec_ssb_disable(task);
-               clear_tsk_thread_flag(task, TIF_SSBD);
-               ssbd_ssbs_enable(task);
-               break;
-       case PR_SPEC_DISABLE:
-               if (state == ARM64_SSBD_FORCE_DISABLE)
-                       return -EPERM;
-               task_set_spec_ssb_disable(task);
-               set_tsk_thread_flag(task, TIF_SSBD);
-               ssbd_ssbs_disable(task);
-               break;
-       case PR_SPEC_FORCE_DISABLE:
-               if (state == ARM64_SSBD_FORCE_DISABLE)
-                       return -EPERM;
-               task_set_spec_ssb_disable(task);
-               task_set_spec_ssb_force_disable(task);
-               set_tsk_thread_flag(task, TIF_SSBD);
-               ssbd_ssbs_disable(task);
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       return 0;
-}
-
-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
-                            unsigned long ctrl)
-{
-       switch (which) {
-       case PR_SPEC_STORE_BYPASS:
-               return ssbd_prctl_set(task, ctrl);
-       default:
-               return -ENODEV;
-       }
-}
-
-static int ssbd_prctl_get(struct task_struct *task)
-{
-       switch (arm64_get_ssbd_state()) {
-       case ARM64_SSBD_UNKNOWN:
-               return -ENODEV;
-       case ARM64_SSBD_FORCE_ENABLE:
-               return PR_SPEC_DISABLE;
-       case ARM64_SSBD_KERNEL:
-               if (task_spec_ssb_force_disable(task))
-                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
-               if (task_spec_ssb_disable(task))
-                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
-               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-       case ARM64_SSBD_FORCE_DISABLE:
-               return PR_SPEC_ENABLE;
-       default:
-               return PR_SPEC_NOT_AFFECTED;
-       }
-}
-
-int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
-{
-       switch (which) {
-       case PR_SPEC_STORE_BYPASS:
-               return ssbd_prctl_get(task);
-       default:
-               return -ENODEV;
-       }
-}
index c1dee9066ff97115fce01ccf16e1b1320b9be265..584c14ce3c860d814cbf60658e2dec9aba0f701a 100644 (file)
@@ -72,8 +72,7 @@ void notrace __cpu_suspend_exit(void)
         * have turned the mitigation on. If the user has forcefully
         * disabled it, make sure their wishes are obeyed.
         */
-       if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-               arm64_set_ssbd_mitigation(false);
+       spectre_v4_enable_mitigation(NULL);
 }
 
 /*
index 7cba7623fcec75a7411c3388a3d5b516e46c49cc..d52e6b5dbfd3e95aa4d76addb44f0944898b0fb5 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
+#include <asm/hyp_image.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/page.h>
@@ -21,12 +22,23 @@ ENTRY(_text)
 jiffies = jiffies_64;
 
 
+#ifdef CONFIG_KVM
 #define HYPERVISOR_EXTABLE                                     \
        . = ALIGN(SZ_8);                                        \
        __start___kvm_ex_table = .;                             \
        *(__kvm_ex_table)                                       \
        __stop___kvm_ex_table = .;
 
+#define HYPERVISOR_PERCPU_SECTION                              \
+       . = ALIGN(PAGE_SIZE);                                   \
+       HYP_SECTION_NAME(.data..percpu) : {                     \
+               *(HYP_SECTION_NAME(.data..percpu))              \
+       }
+#else /* CONFIG_KVM */
+#define HYPERVISOR_EXTABLE
+#define HYPERVISOR_PERCPU_SECTION
+#endif
+
 #define HYPERVISOR_TEXT                                        \
        /*                                              \
         * Align to 4 KB so that                        \
@@ -190,6 +202,7 @@ SECTIONS
        }
 
        PERCPU_SECTION(L1_CACHE_BYTES)
+       HYPERVISOR_PERCPU_SECTION
 
        .rela.dyn : ALIGN(8) {
                *(.rela .rela*)
index 318c8f2df2452ef03b8bcf84f34b10172d6bb0ac..043756db8f6ec27c72fc0fc1d4c261d82f87b445 100644 (file)
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
          Adds support for a virtual Performance Monitoring Unit (PMU) in
          virtual machines.
 
-config KVM_INDIRECT_VECTORS
-       def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
 endif # KVM
 
 endif # VIRTUALIZATION
index e49189012af11c4fddeae8a9f9e5eb622c79145f..f56122eedffc84866395218d6006c47037b061a5 100644 (file)
 __asm__(".arch_extension       virt");
 #endif
 
-DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
-DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
-DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
@@ -1263,6 +1263,53 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 }
 
+static unsigned long nvhe_percpu_size(void)
+{
+       return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
+               (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
+}
+
+static unsigned long nvhe_percpu_order(void)
+{
+       unsigned long size = nvhe_percpu_size();
+
+       return size ? get_order(size) : 0;
+}
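
A worked example of the rounding above, as a standalone sketch (assumptions: 4 KiB pages and an illustrative 9 KiB hyp .data..percpu image):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

/* Same rounding as the kernel's get_order(): pages needed, as a power of two. */
static unsigned int demo_order(unsigned long size)
{
        unsigned int order = 0;

        size = (size + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;  /* pages, rounded up */
        while ((1UL << order) < size)                           /* next power of two */
                order++;
        return order;
}

int main(void)
{
        /* 9 KiB of hyp per-cpu data -> 3 pages -> order 2 -> 16 KiB per CPU. */
        printf("order = %u\n", demo_order(9 * 1024));
        return 0;
}
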
+
+static int kvm_map_vectors(void)
+{
+       /*
+        * SV2  = ARM64_SPECTRE_V2
+        * HEL2 = ARM64_HARDEN_EL2_VECTORS
+        *
+        * !SV2 + !HEL2 -> use direct vectors
+        *  SV2 + !HEL2 -> use hardened vectors in place
+        * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+        *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+        */
+       if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+       }
+
+       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+               /*
+                * Always allocate a spare vector slot, as we don't
+                * know yet which CPUs have a BP hardening slot that
+                * we can reuse.
+                */
+               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+               return create_hyp_exec_mappings(vect_pa, size,
+                                               &__kvm_bp_vect_base);
+       }
+
+       return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
        phys_addr_t pgd_ptr;
@@ -1279,8 +1326,8 @@ static void cpu_init_hyp_mode(void)
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
         */
-       tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-                    (unsigned long)kvm_ksym_ref(&kvm_host_data));
+       tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+                   (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
        pgd_ptr = kvm_mmu_get_httbr();
        hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
@@ -1303,7 +1350,7 @@ static void cpu_init_hyp_mode(void)
         * at EL2.
         */
        if (this_cpu_has_cap(ARM64_SSBS) &&
-           arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+           arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
                kvm_call_hyp_nvhe(__kvm_enable_ssbs);
        }
 }
@@ -1316,11 +1363,11 @@ static void cpu_hyp_reset(void)
 
 static void cpu_hyp_reinit(void)
 {
-       kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+       kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
 
        cpu_hyp_reset();
 
-       __this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());
+       *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
 
        if (is_kernel_in_hyp_mode())
                kvm_timer_init_vhe();
@@ -1472,8 +1519,10 @@ static void teardown_hyp_mode(void)
        int cpu;
 
        free_hyp_pgds();
-       for_each_possible_cpu(cpu)
+       for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+               free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+       }
 }
 
 /**
@@ -1506,6 +1555,24 @@ static int init_hyp_mode(void)
                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }
 
+       /*
+        * Allocate and initialize pages for Hypervisor-mode percpu regions.
+        */
+       for_each_possible_cpu(cpu) {
+               struct page *page;
+               void *page_addr;
+
+               page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
+               if (!page) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+
+               page_addr = page_address(page);
+               memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
+               kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+       }
+
        /*
         * Map the Hyp-code called directly from the host
         */
@@ -1550,40 +1617,21 @@ static int init_hyp_mode(void)
                }
        }
 
+       /*
+        * Map Hyp percpu pages
+        */
        for_each_possible_cpu(cpu) {
-               struct kvm_host_data *cpu_data;
-               struct kvm_cpu_context *hyp_ctxt;
-               unsigned long *vector;
+               char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+               char *percpu_end = percpu_begin + nvhe_percpu_size();
 
-               cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
-               err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
+               err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
 
                if (err) {
-                       kvm_err("Cannot map host CPU state: %d\n", err);
-                       goto out_err;
-               }
-
-               hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu);
-               err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP);
-
-               if (err) {
-                       kvm_err("Cannot map hyp context: %d\n", err);
-                       goto out_err;
-               }
-
-               vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
-               err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
-
-               if (err) {
-                       kvm_err("Cannot map hyp guest vector address\n");
+                       kvm_err("Cannot map hyp percpu region\n");
                        goto out_err;
                }
        }
 
-       err = hyp_map_aux_data();
-       if (err)
-               kvm_err("Cannot map host auxiliary data: %d\n", err);
-
        return 0;
 
 out_err:
index 607b8a898826601aff841f74c5bfc702e73385d2..4a81eddabcd83f064fc47adc246c7b203c783323 100644 (file)
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir)                               \
                    -DDISABLE_BRANCH_PROFILING          \
                    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
index afaa8d1f24854cd158d91ae51ef3de94e22a6c6a..b0afad7a99c6e81c0f5f8bfb70cd9acfc67ff68c 100644 (file)
@@ -25,7 +25,7 @@ SYM_FUNC_START(__guest_enter)
        // x1-x17: clobbered by macros
        // x29: guest context
 
-       hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
+       adr_this_cpu x1, kvm_hyp_ctxt, x2
 
        // Store the hyp regs
        save_callee_saved_regs x1
@@ -93,7 +93,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
        // return address to tail call into hyp_panic. As a side effect, the
        // current state is saved to the guest context but it will only be
        // accurate if the guest had been completely restored.
-       hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+       adr_this_cpu x0, kvm_hyp_ctxt, x1
        adr     x1, hyp_panic
        str     x1, [x0, #CPU_XREG_OFFSET(30)]
 
@@ -131,7 +131,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // Store the guest's sp_el0
        save_sp_el0     x1, x2
 
-       hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
+       adr_this_cpu x2, kvm_hyp_ctxt, x3
 
        // Macro ptrauth_switch_to_hyp format:
        //      ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
index bc9f53df46f5f789efaf30c963df58b64ac29c8d..0a5b36eb54b3e9e2fb17becf14644b29b6903e1c 100644 (file)
@@ -63,35 +63,6 @@ el1_sync:                            // Guest trapped into EL2
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap
 
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
-       b       wa2_end
-alternative_cb_end
-       get_vcpu_ptr    x2, x0
-       ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-       // Sanitize the argument and update the guest flags
-       ldr     x1, [sp, #8]                    // Guest's x1
-       clz     w1, w1                          // Murphy's device:
-       lsr     w1, w1, #5                      // w1 = !!w1 without using
-       eor     w1, w1, #1                      // the flags...
-       bfi     x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-       str     x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-       /* Check that we actually need to perform the call */
-       hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-       cbz     x0, wa2_end
-
-       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-       smc     #0
-
-       /* Don't leak data from the SMC call */
-       mov     x3, xzr
-wa2_end:
-       mov     x2, xzr
-       mov     x1, xzr
-#endif
-
 wa_epilogue:
        mov     x0, xzr
        add     sp, sp, #16
@@ -216,7 +187,6 @@ SYM_CODE_START(__kvm_hyp_vector)
        valid_vect      el1_error               // Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 .macro hyp_ventry
        .align 7
 1:     esb
@@ -266,4 +236,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:     .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
        .org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
index 5e28ea6aa097b026c0b61fd37f5be18ab34ba649..4ebe9f558f3af4def340a8c40e8943ce2dbb5b59 100644 (file)
@@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
                return;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        guest_ctxt = &vcpu->arch.ctxt;
        host_dbg = &vcpu->arch.host_debug_state.regs;
        guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
                return;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        guest_ctxt = &vcpu->arch.ctxt;
        host_dbg = &vcpu->arch.host_debug_state.regs;
        guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
index 4536b50ddc06c97439c59d8d208c905cc05ff863..eeac62b685a9e50a2722441dcf7b52e0411e29f7 100644 (file)
@@ -383,7 +383,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
            !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
                return false;
 
-       ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
+       ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
        __ptrauth_save_key(ctxt, APIA);
        __ptrauth_save_key(ctxt, APIB);
        __ptrauth_save_key(ctxt, APDA);
@@ -476,39 +476,6 @@ exit:
        return false;
 }
 
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
-{
-       if (!cpus_have_final_cap(ARM64_SSBD))
-               return false;
-
-       return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
-
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-       /*
-        * The host runs with the workaround always present. If the
-        * guest wants it disabled, so be it...
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
-
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-       /*
-        * If the guest has disabled the workaround, bring it back on.
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
-}
-
 static inline void __kvm_unexpected_el2_exception(void)
 {
        extern char __guest_exit_panic[];
diff --git a/arch/arm64/kvm/hyp/nvhe/.gitignore b/arch/arm64/kvm/hyp/nvhe/.gitignore
new file mode 100644 (file)
index 0000000..695d73d
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+hyp.lds
index 46c89e8c30bcacc75335162b702578e1c626b78e..ddde15fe85f2fc65087f0d77fa5bc26855f635eb 100644 (file)
@@ -10,40 +10,46 @@ obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
         ../fpsimd.o ../hyp-entry.o
 
-obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
-extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
+##
+## Build rules for compiling nVHE hyp code
+## Output of this folder is `kvm_nvhe.o`, a partially linked object
+## file containing all nVHE hyp code and data.
+##
 
-$(obj)/%.hyp.tmp.o: $(src)/%.c FORCE
+hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
+obj-y := kvm_nvhe.o
+extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+
+# 1) Compile all source files to `.nvhe.o` object files. The file extension
+#    avoids file name clashes for files shared with VHE.
+$(obj)/%.nvhe.o: $(src)/%.c FORCE
        $(call if_changed_rule,cc_o_c)
-$(obj)/%.hyp.tmp.o: $(src)/%.S FORCE
+$(obj)/%.nvhe.o: $(src)/%.S FORCE
        $(call if_changed_rule,as_o_S)
-$(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
-       $(call if_changed,hypcopy)
 
-# Disable reordering functions by GCC (enabled at -O2).
-# This pass puts functions into '.text.*' sections to aid the linker
-# in optimizing ELF layout. See HYPCOPY comment below for more info.
-ccflags-y += $(call cc-option,-fno-reorder-functions)
+# 2) Compile linker script.
+$(obj)/hyp.lds: $(src)/hyp.lds.S FORCE
+       $(call if_changed_dep,cpp_lds_S)
+
+# 3) Partially link all '.nvhe.o' files and apply the linker script.
+#    Prefixes names of ELF sections with '.hyp', e.g. '.hyp.text'.
+#    Note: The following rule assumes that the 'ld' rule puts LDFLAGS before
+#          the list of dependencies to form '-T $(obj)/hyp.lds'. This is to
+#          keep the dependency on the target while avoiding an error from
+#          GNU ld if the linker script is passed to it twice.
+LDFLAGS_kvm_nvhe.tmp.o := -r -T
+$(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
+       $(call if_changed,ld)
+
+# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+#    Prefixes names of ELF symbols with '__kvm_nvhe_'.
+$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+       $(call if_changed,hypcopy)
 
 # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
-# and relevant ELF section names to avoid clashes with VHE code/data.
-#
-# Hyp code is assumed to be in the '.text' section of the input object
-# files (with the exception of specialized sections such as
-# '.hyp.idmap.text'). This assumption may be broken by a compiler that
-# divides code into sections like '.text.unlikely' so as to optimize
-# ELF layout. HYPCOPY checks that no such sections exist in the input
-# using `objdump`, otherwise they would be linked together with other
-# kernel code and not memory-mapped correctly at runtime.
+# to avoid clashes with VHE code/data.
 quiet_cmd_hypcopy = HYPCOPY $@
-      cmd_hypcopy =                                                    \
-       if $(OBJDUMP) -h $< | grep -F '.text.'; then                    \
-               echo "$@: function reordering not supported in nVHE hyp code" >&2; \
-               /bin/false;                                             \
-       fi;                                                             \
-       $(OBJCOPY) --prefix-symbols=__kvm_nvhe_                         \
-                  --rename-section=.text=.hyp.text                     \
-                  $< $@
+      cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
 
 # Remove ftrace and Shadow Call Stack CFLAGS.
 # This is equivalent to the 'notrace' and '__noscs' annotations.
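
The practical effect of the prefixing: a symbol defined in this folder, e.g. __kvm_vcpu_run from nvhe/switch.c, is linked into vmlinux as __kvm_nvhe___kvm_vcpu_run. Host code refers to such symbols through a paste macro along these lines (sketch of the helper living in the hyp headers):

#define kvm_nvhe_sym(sym)       __kvm_nvhe_##sym

/* e.g. declaring the EL2 per-cpu section bounds that kvm/arm.c measures: */
extern char kvm_nvhe_sym(__per_cpu_start)[], kvm_nvhe_sym(__per_cpu_end)[];
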
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
new file mode 100644 (file)
index 0000000..bb2d986
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Google LLC.
+ * Written by David Brazdil <dbrazdil@google.com>
+ *
+ * Linker script used for partial linking of nVHE EL2 object files.
+ */
+
+#include <asm/hyp_image.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+
+SECTIONS {
+       HYP_SECTION(.text)
+       HYP_SECTION_NAME(.data..percpu) : {
+               PERCPU_INPUT(L1_CACHE_BYTES)
+       }
+}
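
What the script above roughly expands to once the asm/hyp_image.h macros are applied (illustrative expansion under the assumption that HYP_SECTION_NAME() simply prepends a ".hyp" prefix):

        /*
         *   SECTIONS {
         *       .hyp.text : { *(.text .text.*) }
         *       .hyp.data..percpu : {
         *           __per_cpu_start = .;
         *           ... per-cpu input sections, L1-cache aligned ...
         *           __per_cpu_end = .;
         *       }
         *   }
         *
         * After the objcopy prefixing step, those bounds become
         * __kvm_nvhe___per_cpu_start/__kvm_nvhe___per_cpu_end, which is what
         * CHOOSE_NVHE_SYM(__per_cpu_start) resolves to in kvm/arm.c.
         */
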
index a29f247f35e3a395babdaa77b32374eca180cc4d..a457a0306e031fbd3ecf5993f8a14173dfddff8e 100644 (file)
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 
+/* Non-VHE specific context */
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 val;
@@ -42,7 +47,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
        }
 
        write_sysreg(val, cptr_el2);
-       write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
+       write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
 
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -176,7 +181,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                pmr_sync();
        }
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
 
@@ -203,8 +208,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
        __debug_switch_to_guest(vcpu);
 
-       __set_guest_arch_workaround_state(vcpu);
-
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);
@@ -212,8 +215,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
-       __set_host_arch_workaround_state(vcpu);
-
        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
@@ -254,7 +255,7 @@ void __noreturn hyp_panic(void)
        struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;
 
        if (vcpu) {
index cf477f856e51bf15814a6e8fea285128675e9fcc..fe69de16dadc698030954c5d5a60b237b1b201a8 100644 (file)
 
 const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
 
+/* VHE specific context */
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 val;
@@ -108,7 +113,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        struct kvm_cpu_context *guest_ctxt;
        u64 exit_code;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
 
@@ -131,8 +136,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
 
-       __set_guest_arch_workaround_state(vcpu);
-
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);
@@ -140,8 +143,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
-       __set_host_arch_workaround_state(vcpu);
-
        sysreg_save_guest_state_vhe(guest_ctxt);
 
        __deactivate_traps(vcpu);
@@ -197,7 +198,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
        struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;
 
        __deactivate_traps(vcpu);
index 996471e4c138099148a545580a9175f49162dfdf..2a0b8c88d74fc53359551afa35061b0a05910faf 100644 (file)
@@ -66,7 +66,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
        struct kvm_cpu_context *host_ctxt;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        __sysreg_save_user_state(host_ctxt);
 
        /*
@@ -100,7 +100,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
        struct kvm_cpu_context *host_ctxt;
 
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        deactivate_traps_vhe_put();
 
        __sysreg_save_el1_state(guest_ctxt);
index 550dfa3e53cddd3a7c5b567f488e4a16a0e90ab9..9824025ccc5c047067f9456c1860606a1afaeb7c 100644 (file)
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
-                       switch (kvm_arm_harden_branch_predictor()) {
-                       case KVM_BP_HARDEN_UNKNOWN:
+                       switch (arm64_get_spectre_v2_state()) {
+                       case SPECTRE_VULNERABLE:
                                break;
-                       case KVM_BP_HARDEN_WA_NEEDED:
+                       case SPECTRE_MITIGATED:
                                val = SMCCC_RET_SUCCESS;
                                break;
-                       case KVM_BP_HARDEN_NOT_REQUIRED:
+                       case SPECTRE_UNAFFECTED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
-                       switch (kvm_arm_have_ssbd()) {
-                       case KVM_SSBD_FORCE_DISABLE:
-                       case KVM_SSBD_UNKNOWN:
+                       switch (arm64_get_spectre_v4_state()) {
+                       case SPECTRE_VULNERABLE:
                                break;
-                       case KVM_SSBD_KERNEL:
-                               val = SMCCC_RET_SUCCESS;
-                               break;
-                       case KVM_SSBD_FORCE_ENABLE:
-                       case KVM_SSBD_MITIGATED:
+                       case SPECTRE_MITIGATED:
+                               /*
+                                * SSBS everywhere: Indicate no firmware
+                                * support, as the SSBS support will be
+                                * indicated to the guest and the default is
+                                * safe.
+                                *
+                                * Otherwise, expose a permanent mitigation
+                                * to the guest, and hide SSBS so that the
+                                * guest stays protected.
+                                */
+                               if (cpus_have_final_cap(ARM64_SSBS))
+                                       break;
+                               fallthrough;
+                       case SPECTRE_UNAFFECTED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
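
The guest-facing half of this: a guest is expected to probe before relying on the workaround. A sketch of such a probe, reusing the arm_smccc_1_1_invoke() helper already used elsewhere in this series (the real guest-side logic lives in the guest's own mitigation code):

#include <linux/arm-smccc.h>
#include <linux/types.h>

/*
 * Sketch only: only a SMCCC_RET_SUCCESS answer means ARCH_WORKAROUND_2
 * calls are both implemented and actually needed; NOT_REQUIRED means the
 * guest is safe without them.
 */
static bool guest_has_fw_ssbd_workaround(void)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_2, &res);

        return res.a0 == SMCCC_RET_SUCCESS;
}
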
index 3c224162b3ddd7b150bb9d695e282b93350c9b8d..faf32a44ba04a0ac237f42eaaeab199185ab4c16 100644 (file)
@@ -31,9 +31,9 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-       struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+       struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
 
-       if (!kvm_pmu_switch_needed(attr))
+       if (!ctx || !kvm_pmu_switch_needed(attr))
                return;
 
        if (!attr->exclude_host)
@@ -47,7 +47,10 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-       struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+       struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+
+       if (!ctx)
+               return;
 
        ctx->pmu_events.events_host &= ~clr;
        ctx->pmu_events.events_guest &= ~clr;
@@ -173,7 +176,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
                return;
 
        preempt_disable();
-       host = this_cpu_ptr(&kvm_host_data);
+       host = this_cpu_ptr_hyp_sym(kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;
 
@@ -193,7 +196,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
        if (!has_vhe())
                return;
 
-       host = this_cpu_ptr(&kvm_host_data);
+       host = this_cpu_ptr_hyp_sym(kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;
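/*
 * Minimal stand-alone sketch (not the kernel code) of the pattern adopted by
 * the pmu.c hunks above: the hypervisor-symbol per-CPU accessor can return
 * NULL until KVM has set up its per-CPU data, so every caller now checks the
 * pointer before touching it.  The types and the accessor are simplified
 * stand-ins for kvm_host_data and this_cpu_ptr_hyp_sym().
 */
#include <stdio.h>

struct pmu_events { unsigned int events_host, events_guest; };
struct host_data  { struct pmu_events pmu_events; };

static struct host_data *host_data_ptr;	/* NULL until "initialisation" */

static struct host_data *get_host_data(void)
{
	return host_data_ptr;			/* may be NULL */
}

static void clr_pmu_events(unsigned int clr)
{
	struct host_data *ctx = get_host_data();

	if (!ctx)				/* nothing to clear before init */
		return;

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}

int main(void)
{
	static struct host_data data = { .pmu_events = { 0x3, 0x3 } };

	clr_pmu_events(0x1);			/* safe: accessor returns NULL */

	host_data_ptr = &data;			/* pretend KVM initialised */
	clr_pmu_events(0x1);
	printf("%u %u\n", data.pmu_events.events_host,
	       data.pmu_events.events_guest);	/* 2 2 */
	return 0;
}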
 
index 83415e96b589fff983322b813ee5bf414c8bb1ce..db4056ecccfda9319233e7be71c27d8d9db16c84 100644 (file)
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
 {
        switch (regid) {
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-               switch (kvm_arm_harden_branch_predictor()) {
-               case KVM_BP_HARDEN_UNKNOWN:
+               switch (arm64_get_spectre_v2_state()) {
+               case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
-               case KVM_BP_HARDEN_WA_NEEDED:
+               case SPECTRE_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
-               case KVM_BP_HARDEN_NOT_REQUIRED:
+               case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-               switch (kvm_arm_have_ssbd()) {
-               case KVM_SSBD_FORCE_DISABLE:
-                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
-               case KVM_SSBD_KERNEL:
-                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
-               case KVM_SSBD_FORCE_ENABLE:
-               case KVM_SSBD_MITIGATED:
+               switch (arm64_get_spectre_v4_state()) {
+               case SPECTRE_MITIGATED:
+                       /*
+                        * As for the hypercall discovery, we pretend we
+                        * don't have any FW mitigation if SSBS is there at
+                        * all times.
+                        */
+                       if (cpus_have_final_cap(ARM64_SSBS))
+                               return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+                       fallthrough;
+               case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
-               case KVM_SSBD_UNKNOWN:
-               default:
-                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+               case SPECTRE_VULNERABLE:
+                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                }
        }
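/*
 * Stand-alone sketch of the WORKAROUND_2 level computed by
 * get_kernel_wa_level() above for the firmware pseudo-register.  Not part of
 * the patch: the level constants are redefined locally and only assumed to
 * be ordered NOT_AVAIL < AVAIL < NOT_REQUIRED (the canonical values live in
 * the KVM UAPI headers); WORKAROUND_1 follows the same shape without the
 * SSBS special case.
 */
#include <stdbool.h>
#include <stdio.h>

enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

enum { WA2_NOT_AVAIL, WA2_AVAIL, WA2_NOT_REQUIRED };

static int wa2_reg_level(enum mitigation_state v4_state, bool have_ssbs)
{
	switch (v4_state) {
	case SPECTRE_MITIGATED:
		/*
		 * As for hypercall discovery: with SSBS on every CPU, pretend
		 * there is no firmware mitigation at all.
		 */
		if (have_ssbs)
			return WA2_NOT_AVAIL;
		return WA2_NOT_REQUIRED;	/* permanently mitigated */
	case SPECTRE_UNAFFECTED:
		return WA2_NOT_REQUIRED;
	case SPECTRE_VULNERABLE:
	default:
		return WA2_NOT_AVAIL;
	}
}

int main(void)
{
	printf("%d\n", wa2_reg_level(SPECTRE_MITIGATED, false));	/* 2 */
	return 0;
}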
 
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                val = kvm_psci_version(vcpu, vcpu->kvm);
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-               val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-               break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
-               if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-                   kvm_arm_get_vcpu_workaround_2_flag(vcpu))
-                       val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
                break;
        default:
                return -ENOENT;
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                            KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
                        return -EINVAL;
 
-               wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
-               if (get_kernel_wa_level(reg->id) < wa_level)
-                       return -EINVAL;
-
                /* The enabled bit must not be set unless the level is AVAIL. */
-               if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-                   wa_level != val)
+               if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+                   (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
                        return -EINVAL;
 
-               /* Are we finished or do we need to check the enable bit ? */
-               if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
-                       return 0;
-
                /*
-                * If this kernel supports the workaround to be switched on
-                * or off, make sure it matches the requested setting.
+                * Map all the possible incoming states to the only two we
+                * really want to deal with.
                 */
-               switch (wa_level) {
-               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
-                       kvm_arm_set_vcpu_workaround_2_flag(vcpu,
-                           val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+               switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+                       wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        break;
+               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
-                       kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+                       wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                        break;
+               default:
+                       return -EINVAL;
                }
 
+               /*
+                * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+                * other way around.
+                */
+               if (get_kernel_wa_level(reg->id) < wa_level)
+                       return -EINVAL;
+
                return 0;
        default:
                return -ENOENT;
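/*
 * Self-contained sketch (not the kernel code) of the normalisation done by
 * kvm_arm_set_fw_reg() above when userspace restores the WORKAROUND_2
 * register, e.g. on migration.  The constants are local stand-ins for the
 * KVM UAPI definitions and are assumed to be ordered
 * NOT_AVAIL < UNKNOWN < AVAIL < NOT_REQUIRED, which is what makes the final
 * "<" comparison meaningful.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { WA2_NOT_AVAIL, WA2_UNKNOWN, WA2_AVAIL, WA2_NOT_REQUIRED };

#define WA2_LEVEL_MASK	0xfULL
#define WA2_ENABLED	(1ULL << 4)

static int set_wa2_reg(uint64_t val, int kernel_level)
{
	int wa_level;

	/* The enabled bit only makes sense together with level AVAIL. */
	if ((val & WA2_ENABLED) && (val & WA2_LEVEL_MASK) != WA2_AVAIL)
		return -EINVAL;

	/* Collapse the four incoming levels onto the two KVM now exposes. */
	switch (val & WA2_LEVEL_MASK) {
	case WA2_NOT_AVAIL:
	case WA2_UNKNOWN:
		wa_level = WA2_NOT_AVAIL;
		break;
	case WA2_AVAIL:
	case WA2_NOT_REQUIRED:
		wa_level = WA2_NOT_REQUIRED;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * A weaker claim than reality (NOT_AVAIL on a NOT_REQUIRED host) is
	 * fine; a stronger one is not.
	 */
	if (kernel_level < wa_level)
		return -EINVAL;

	return 0;
}

int main(void)
{
	printf("%d\n", set_wa2_reg(WA2_NOT_AVAIL, WA2_NOT_REQUIRED));	/* 0 */
	printf("%d\n", set_wa2_reg(WA2_NOT_REQUIRED, WA2_NOT_AVAIL));	/* -EINVAL */
	return 0;
}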
index 2202b710d44c9dc001d6dd4c0eb186754b03e975..f32490229a4c79b80cda1dd602522e0cad3b76b2 100644 (file)
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                vcpu->arch.reset_state.reset = false;
        }
 
-       /* Default workaround setup is enabled (if supported) */
-       if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-               vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
        /* Reset timer */
        ret = kvm_timer_vcpu_reset(vcpu);
 out:
index 20ab2a7d37cacf2084a64cea839a992678d48938..3c203cb8c103ff6c87450e2365a0424c0f166551 100644 (file)
@@ -1128,6 +1128,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                if (!vcpu_has_sve(vcpu))
                        val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
                val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+               if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+                   arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+                       val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
        } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
                val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
                         (0xfUL << ID_AA64ISAR1_API_SHIFT) |
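/*
 * Stand-alone sketch of the ID_AA64PFR0_EL1.CSV2 fixup in read_id_reg()
 * above: when the CPU does not advertise CSV2 but the host has established
 * that it is unaffected by Spectre-v2, the field is raised to 1 so the guest
 * does not deploy mitigations it does not need.  Not part of the patch; the
 * shift assumes CSV2 sits in bits [59:56] of the register, and the enum is
 * redefined locally.
 */
#include <stdint.h>
#include <stdio.h>

enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

#define ID_AA64PFR0_CSV2_SHIFT	56

static uint64_t fixup_csv2(uint64_t pfr0, enum mitigation_state v2_state)
{
	if (!(pfr0 & (0xfULL << ID_AA64PFR0_CSV2_SHIFT)) &&
	    v2_state == SPECTRE_UNAFFECTED)
		pfr0 |= 1ULL << ID_AA64PFR0_CSV2_SHIFT;

	return pfr0;
}

int main(void)
{
	/* CSV2 == 0 on an unaffected host becomes CSV2 == 1. */
	printf("%#llx\n",
	       (unsigned long long)fixup_csv2(0, SPECTRE_UNAFFECTED));
	return 0;
}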