arm64: Force SSBS on context switch
author	Marc Zyngier <marc.zyngier@arm.com>
Mon, 22 Jul 2019 13:53:09 +0000 (14:53 +0100)
committer	Will Deacon <will@kernel.org>
Mon, 22 Jul 2019 14:24:16 +0000 (15:24 +0100)
On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.

Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
[will: inverted logic and added comments]
Signed-off-by: Will Deacon <will@kernel.org>
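
For readers who want the failure mode in isolation, here is a minimal standalone C sketch (not kernel code: fake_pt_regs, eret_and_resave and the constants are illustrative stand-ins) of the mechanism the patch addresses: on a CPU without SSBS, PSTATE bit 12 is RES0, so the live value comes back with the bit cleared, and the scheduler has to re-assert it in the task's saved registers on every context switch.

/*
 * Standalone model, not kernel code: simulates a non-SSBS CPU treating
 * PSTATE[12] as RES0 and the scheduler forcing the bit back at switch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_SSBS_BIT	(1UL << 12)	/* PSTATE.SSBS lives in bit 12 */

struct fake_pt_regs {
	uint64_t pstate;
};

/* Exception return on a CPU without SSBS: bit 12 reads back as zero. */
static uint64_t eret_and_resave(uint64_t pstate, bool cpu_has_ssbs)
{
	return cpu_has_ssbs ? pstate : (pstate & ~PSR_SSBS_BIT);
}

/* What the patch adds: re-assert the bit in the saved regs at switch. */
static void ssbs_thread_switch(struct fake_pt_regs *regs)
{
	regs->pstate |= PSR_SSBS_BIT;
}

int main(void)
{
	struct fake_pt_regs regs = { .pstate = PSR_SSBS_BIT };

	/* Task runs on a CPU without SSBS: the bit is silently dropped. */
	regs.pstate = eret_and_resave(regs.pstate, false);
	printf("after non-SSBS CPU:  SSBS=%d\n", !!(regs.pstate & PSR_SSBS_BIT));

	/* Migrate back to an SSBS-capable CPU: the switch path restores it. */
	ssbs_thread_switch(&regs);
	printf("after switch fixup:  SSBS=%d\n", !!(regs.pstate & PSR_SSBS_BIT));
	return 0;
}

The real ssbs_thread_switch() in the diff below is more careful than this sketch: it skips kernel threads and leaves SSBS clear when the mitigation is force-enabled or the task has TIF_SSBD set.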
arch/arm64/include/asm/processor.h
arch/arm64/kernel/process.c

index fd5b1a4efc70eda285ba8941d62f6e0f3ee13f05..844e2964b0f5e5b0aad4b866283f24e26f794afe 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
                regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+       regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+       regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
        regs->pstate = PSR_MODE_EL0t;
 
        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-               regs->pstate |= PSR_SSBS_BIT;
+               set_ssbs_bit(regs);
 
        regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-               regs->pstate |= PSR_AA32_SSBS_BIT;
+               set_compat_ssbs_bit(regs);
 
        regs->compat_sp = sp;
 }
index 8d836d0abc964677ff0001d84ba3183ce66c7c7f..f674f28df663b29aa26f4856aa837c14e1da4234 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                        childregs->pstate |= PSR_UAO_BIT;
 
                if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-                       childregs->pstate |= PSR_SSBS_BIT;
+                       set_ssbs_bit(childregs);
 
                if (system_uses_irq_prio_masking())
                        childregs->pmr_save = GIC_PRIO_IRQON;
@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
        }
 }
 
+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+       struct pt_regs *regs = task_pt_regs(next);
+
+       /*
+        * Nothing to do for kernel threads, but 'regs' may be junk
+        * (e.g. idle task) so check the flags and bail early.
+        */
+       if (unlikely(next->flags & PF_KTHREAD))
+               return;
+
+       /* If the mitigation is enabled, then we leave SSBS clear. */
+       if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+           test_tsk_thread_flag(next, TIF_SSBD))
+               return;
+
+       if (compat_user_mode(regs))
+               set_compat_ssbs_bit(regs);
+       else if (user_mode(regs))
+               set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        entry_task_switch(next);
        uao_thread_switch(next);
        ptrauth_thread_switch(next);
+       ssbs_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case