// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/neon.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For
 * (b), we add the per-cpu variable 'fpsimd_last_state' (below), which contains
 * the address of the userland FPSIMD state of the task that was loaded onto
 * the CPU most recently, or NULL if kernel mode NEON has been performed after
 * that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of
 * this CPU currently contain the most recent userland FPSIMD state of the
 * current task. If the task is behaving as a VMM, then this will be managed
 * by KVM which will clear it to indicate that the vcpu FPSIMD state is
 * currently loaded on the CPU, allowing the state to be saved if a FPSIMD-aware
 * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
 * flag the register state as invalid.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may be
 * called from softirq context, which will save the task's FPSIMD context back
 * to task_struct. To prevent this from racing with the manipulation of the
 * task's FPSIMD state from task context and thereby corrupting the state, it
 * is necessary to protect any manipulation of a task's fpsimd_state or
 * TIF_FOREIGN_FPSTATE flag with get_cpu_fpsimd_context(), which will suspend
 * softirq servicing entirely until put_cpu_fpsimd_context() is called.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state
 *   per-cpu variable points to the task's fpsimd_state, the
 *   TIF_FOREIGN_FPSTATE flag is cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */

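/*
 * Illustrative sketch only: the deferred check described above boils down to
 * the following test on the return-to-userland path (the real logic lives in
 * fpsimd_restore_current_state() below):
 *
 *	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 *		task_fpsimd_load();
 *		fpsimd_bind_task_to_cpu();
 *	}
 */
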
static DEFINE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);

__ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
#ifdef CONFIG_ARM64_SVE
	[ARM64_VEC_SVE] = {
		.type			= ARM64_VEC_SVE,
		.name			= "SVE",
		.min_vl			= SVE_VL_MIN,
		.max_vl			= SVE_VL_MIN,
		.max_virtualisable_vl	= SVE_VL_MIN,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[ARM64_VEC_SME] = {
		.type			= ARM64_VEC_SME,
		.name			= "SME",
	},
#endif
};

static unsigned int vec_vl_inherit_flag(enum vec_type type)
{
	switch (type) {
	case ARM64_VEC_SVE:
		return TIF_SVE_VL_INHERIT;
	case ARM64_VEC_SME:
		return TIF_SME_VL_INHERIT;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

struct vl_config {
	int __default_vl;		/* Default VL for tasks */
};

static struct vl_config vl_config[ARM64_VEC_MAX];

static inline int get_default_vl(enum vec_type type)
{
	return READ_ONCE(vl_config[type].__default_vl);
}

#ifdef CONFIG_ARM64_SVE

static inline int get_sve_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SVE);
}

static inline void set_default_vl(enum vec_type type, int val)
{
	WRITE_ONCE(vl_config[type].__default_vl, val);
}

static inline void set_sve_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SVE, val);
}

static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int get_sme_default_vl(void)
{
	return get_default_vl(ARM64_VEC_SME);
}

static void set_sme_default_vl(int val)
{
	set_default_vl(ARM64_VEC_SME, val);
}

static void sme_free(struct task_struct *);

#else

static inline void sme_free(struct task_struct *t) { }

#endif

static void fpsimd_bind_task_to_cpu(void);

/*
 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 *
 * The caller may freely manipulate the FPSIMD context metadata until
 * put_cpu_fpsimd_context() is called.
 *
 * On RT kernels local_bh_disable() is not sufficient because it only
 * serializes soft interrupt related sections via a local lock, but stays
 * preemptible. Disabling preemption is the right choice here as bottom
 * half processing is always in thread context on RT kernels so it
 * implicitly prevents bottom half processing as well.
 */
static void get_cpu_fpsimd_context(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

/*
 * Release the CPU FPSIMD context.
 *
 * Must be called from a context in which get_cpu_fpsimd_context() was
 * previously called, with no call to put_cpu_fpsimd_context() in the
 * meantime.
 */
static void put_cpu_fpsimd_context(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}

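/*
 * Canonical usage pattern (illustrative; see e.g.
 * fpsimd_preserve_current_state() below for a real caller):
 *
 *	get_cpu_fpsimd_context();
 *	fpsimd_save_user_state();	// or other fpsimd_state manipulation
 *	put_cpu_fpsimd_context();
 */
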
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
	return task->thread.vl[type];
}

void task_set_vl(struct task_struct *task, enum vec_type type,
		 unsigned long vl)
{
	task->thread.vl[type] = vl;
}

unsigned int task_get_vl_onexec(const struct task_struct *task,
				enum vec_type type)
{
	return task->thread.vl_onexec[type];
}

void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
			unsigned long vl)
{
	task->thread.vl_onexec[type] = vl;
}

/*
 * TIF_SME controls whether a task can use SME without trapping while
 * in userspace, when TIF_SME is set then we must have storage
 * allocated in sve_state and sme_state to store the contents of both ZA
 * and the SVE registers for both streaming and non-streaming modes.
 *
 * If both SVCR.ZA and SVCR.SM are disabled then at any point we
 * may disable TIF_SME and reenable traps.
 */

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also (together with TIF_SME) the way a task's
 * FPSIMD/SVE state is stored in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked. If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE or SVCR.SM set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 * The data will be stored in one of two formats:
 *
 *  * FPSIMD only - FP_STATE_FPSIMD:
 *
 *    When the FPSIMD only state is stored, task->thread.fp_type is set to
 *    FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view. For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced and any data stored
 *    there should be considered stale and not referenced.
 *
 *  * SVE state - FP_STATE_SVE:
 *
 *    When the full SVE state is stored, task->thread.fp_type is set to
 *    FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl or, if SVCR.SM is set,
 *    task->thread.sme_vl. The storage for the vector registers in
 *    task->thread.uw.fpsimd_state should be ignored.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size. The data stored in
 *    task->thread.uw.fpsimd_state.vregs should be considered stale
 *    and not referenced.
 *
 * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *   irrespective of whether TIF_SVE is clear or set, since these are
 *   not vector length dependent.
 */

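/*
 * Illustrative sketch only (not a real helper in this file): code inspecting
 * a stopped task's vector state must dispatch on fp_type to pick the buffer
 * that is currently authoritative:
 *
 *	switch (task->thread.fp_type) {
 *	case FP_STATE_FPSIMD:
 *		// V0-V31, FPSR and FPCR live in task->thread.uw.fpsimd_state
 *		break;
 *	case FP_STATE_SVE:
 *		// Z0-Z31, P0-P15 and FFR live in task->thread.sve_state
 *		break;
 *	}
 */
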
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 */
static void task_fpsimd_load(void)
{
	bool restore_sve_regs = false;
	bool restore_ffr;

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(preemptible());
	WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));

	if (system_supports_sve() || system_supports_sme()) {
		switch (current->thread.fp_type) {
		case FP_STATE_FPSIMD:
			/* Stop tracking SVE for this task until next use. */
			if (test_and_clear_thread_flag(TIF_SVE))
				sve_user_disable();
			break;
		case FP_STATE_SVE:
			if (!thread_sm_enabled(&current->thread) &&
			    !WARN_ON_ONCE(!test_and_set_thread_flag(TIF_SVE)))
				sve_user_enable();

			if (test_thread_flag(TIF_SVE))
				sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);

			restore_sve_regs = true;
			restore_ffr = true;
			break;
		default:
			/*
			 * This indicates either a bug in
			 * fpsimd_save_user_state() or memory corruption, we
			 * should always record an explicit format
			 * when we save. We always at least have the
			 * memory allocated for FPSIMD registers so
			 * try that and hope for the best.
			 */
			WARN_ON_ONCE(1);
			clear_thread_flag(TIF_SVE);
			break;
		}
	}

	/* Restore SME, override SVE register configuration if needed */
	if (system_supports_sme()) {
		unsigned long sme_vl = task_get_sme_vl(current);

		/* Ensure VL is set up for restoring data */
		if (test_thread_flag(TIF_SME))
			sme_set_vq(sve_vq_from_vl(sme_vl) - 1);

		write_sysreg_s(current->thread.svcr, SYS_SVCR);

		if (thread_za_enabled(&current->thread))
			sme_load_state(current->thread.sme_state,
				       system_supports_sme2());

		if (thread_sm_enabled(&current->thread))
			restore_ffr = system_supports_fa64();
	}

	if (restore_sve_regs) {
		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE);
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       restore_ffr);
	} else {
		WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD);
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
	}
}

/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers. Note carefully that the
 * current context is the context last bound to the CPU stored in
 * last, if KVM is involved this may be the guest VM context rather
 * than the host thread for the VM pointed to by current. This means
 * that we must always reference the state storage via last rather
 * than via current, if we are saving KVM state then it will have
 * ensured that the type of registers to save is set in last->to_save.
 */
static void fpsimd_save_user_state(void)
{
	struct cpu_fp_state const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
	bool save_sve_regs = false;
	bool save_ffr;
	unsigned int vl;

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(preemptible());

	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		return;

	/*
	 * If a task is in a syscall the ABI allows us to only
	 * preserve the state shared with FPSIMD so don't bother
	 * saving the full SVE state in that case.
	 */
	if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE) &&
	     !in_syscall(current_pt_regs())) ||
	    last->to_save == FP_STATE_SVE) {
		save_sve_regs = true;
		save_ffr = true;
		vl = last->sve_vl;
	}

	if (system_supports_sme()) {
		u64 *svcr = last->svcr;

		*svcr = read_sysreg_s(SYS_SVCR);

		if (*svcr & SVCR_ZA_MASK)
			sme_save_state(last->sme_state,
				       system_supports_sme2());

		/* If we are in streaming mode override regular SVE. */
		if (*svcr & SVCR_SM_MASK) {
			save_sve_regs = true;
			save_ffr = system_supports_fa64();
			vl = last->sme_vl;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
		/* Get the configured VL from RDVL, will account for SM */
		if (WARN_ON(sve_get_vl() != vl)) {
			/*
			 * Can't save the user regs, so current would
			 * re-enter user with corrupt state.
			 * There's no way to recover, so kill it:
			 */
			force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
			return;
		}

		sve_save_state((char *)last->sve_state +
					sve_ffr_offset(vl),
			       &last->st->fpsr, save_ffr);
		*last->fp_type = FP_STATE_SVE;
	} else {
		fpsimd_save_state(last->st);
		*last->fp_type = FP_STATE_FPSIMD;
	}
}

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(enum vec_type type,
						 unsigned int vl)
{
	struct vl_info *info = &vl_info[type];
	int bit;
	int max_vl = info->max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = info->min_vl;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = info->min_vl;

	if (vl > max_vl)
		vl = max_vl;
	if (vl < info->min_vl)
		vl = info->min_vl;

	bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}

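/*
 * Worked example (assuming a hypothetical machine): if the supported VQ set
 * is {1, 2, 4} (VLs of 16, 32 and 64 bytes) and userspace asks for VL 48
 * (VQ 3), the bitmap search above rounds down to the next supported length
 * and returns 32. A request above max_vl is clamped to max_vl first.
 */
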
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int vec_proc_do_default_vl(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	struct vl_info *info = table->extra1;
	enum vec_type type = info->type;
	int ret;
	int vl = get_default_vl(type);
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = info->max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_default_vl(type, find_supported_vector_length(type, vl));
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SVE],
	},
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */

#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL)
static struct ctl_table sme_default_vl_table[] = {
	{
		.procname	= "sme_default_vector_length",
		.mode		= 0644,
		.proc_handler	= vec_proc_do_default_vl,
		.extra1		= &vl_info[ARM64_VEC_SME],
	},
};

static int __init sme_sysctl_init(void)
{
	if (system_supports_sme())
		if (!register_sysctl("abi", sme_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
static int __init sme_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */

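/*
 * Illustrative: with CONFIG_SYSCTL these controls appear under
 * /proc/sys/abi/, e.g. (as root):
 *
 *	echo 32 > /proc/sys/abi/sve_default_vector_length
 *	echo -1 > /proc/sys/abi/sme_default_vector_length   # "set to max"
 */
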
#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

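/*
 * Worked example: SVE_SIG_ZREG_OFFSET(vq, n) advances by vq * 16 bytes per
 * Z register, so with VL = 32 bytes (vq = 2), ZREG(sve_state, 2, 3) points
 * 3 * 2 * 16 = 96 bytes into the Z register block of sve_state.
 */
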
#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)

static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
			    unsigned int vq)
{
	unsigned int i;
	__uint128_t *p;

	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!system_supports_sve() && !system_supports_sme())
		return;

	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
	__fpsimd_to_sve(sst, fst, vq);
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq, vl;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve() && !system_supports_sme())
		return;

	vl = thread_get_cur_vl(&task->thread);
	vq = sve_vq_from_vl(vl);
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}

#ifdef CONFIG_ARM64_SVE
/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	unsigned int vl = 0;

	if (system_supports_sve())
		vl = task_get_sve_vl(task);
	if (system_supports_sme())
		vl = max(vl, task_get_sme_vl(task));

	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
}

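/*
 * Worked example: for VL = 64 bytes (vq = 4) this is approximately
 * 32 Z regs * 64 bytes + (16 P regs + FFR) * 8 bytes = 2048 + 136 = 2184
 * bytes; at the architectural maximum VL of 256 bytes it comes to the
 * "~8KB" mentioned in sve_alloc() below.
 */
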
/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task, bool flush)
{
	if (task->thread.sve_state) {
		if (flush)
			memset(task->thread.sve_state, 0,
			       sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);
}

/*
 * Force the FPSIMD state shared with SVE to be updated in the SVE state
 * even if the SVE state is the current active state.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_force_sync_to_sve(struct task_struct *task)
{
	fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE) &&
	    !thread_sm_enabled(&task->thread))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (task->thread.fp_type == FP_STATE_SVE)
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!test_tsk_thread_flag(task, TIF_SVE) &&
	    !thread_sm_enabled(&task->thread))
		return;

	vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}

int vec_set_vector_length(struct task_struct *task, enum vec_type type,
			  unsigned long vl, unsigned long flags)
{
	bool free_sme = false;

	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic code
	 * can work with. A flag may be assigned in the future to
	 * allow setting of larger vector lengths without confusing
	 * older software.
	 */
	if (vl > VL_ARCH_MAX)
		vl = VL_ARCH_MAX;

	vl = find_supported_vector_length(type, vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task_set_vl_onexec(task, type, vl);
	else
		/* Reset VL to system default on next exec: */
		task_set_vl_onexec(task, type, 0);

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task_get_vl(task, type))
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * regular FPSIMD thread.
	 */
	if (task == current) {
		get_cpu_fpsimd_context();

		fpsimd_save_user_state();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
	    thread_sm_enabled(&task->thread)) {
		sve_to_fpsimd(task);
		task->thread.fp_type = FP_STATE_FPSIMD;
	}

	if (system_supports_sme()) {
		if (type == ARM64_VEC_SME ||
		    !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
			/*
			 * We are changing the SME VL or weren't using
			 * SME anyway, discard the state and force a
			 * reallocation.
			 */
			task->thread.svcr &= ~(SVCR_SM_MASK |
					       SVCR_ZA_MASK);
			clear_tsk_thread_flag(task, TIF_SME);
			free_sme = true;
		}
	}

	if (task == current)
		put_cpu_fpsimd_context();

	task_set_vl(task, type, vl);

	/*
	 * Free the changed states if they are not in use, SME will be
	 * reallocated to the correct size on next use and we just
	 * allocate SVE now in case it is needed for use in streaming
	 * mode.
	 */
	if (system_supports_sve()) {
		sve_free(task);
		sve_alloc(task, true);
	}

	if (free_sme)
		sme_free(task);

out:
	update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 * SVE and SME use the same bits for _ONEXEC and _INHERIT.
 *
 * flags are as for vec_set_vector_length().
 */
static int vec_prctl_status(enum vec_type type, unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = task_get_vl_onexec(current, type);
	else
		ret = task_get_vl(current, type);

	if (test_thread_flag(vec_vl_inherit_flag(type)))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

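/*
 * Worked example: a task with VL 32 and TIF_SVE_VL_INHERIT set gets back
 * 32 | PR_SVE_VL_INHERIT, i.e. 0x20020 with the current uapi encoding.
 */
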
/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	ret = vec_set_vector_length(current, ARM64_VEC_SVE, vl, flags);
	if (ret)
		return ret;

	return vec_prctl_status(ARM64_VEC_SVE, flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	return vec_prctl_status(ARM64_VEC_SVE, 0);
}

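/*
 * Illustrative userspace usage (not part of this file):
 *
 *	#include <sys/prctl.h>
 *
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *	if (ret < 0)
 *		perror("PR_SVE_SET_VL");
 *	int vl = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK;
 */
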
#ifdef CONFIG_ARM64_SME
/* PR_SME_SET_VL */
int sme_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SME_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sme() || is_compat_task())
		return -EINVAL;

	ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags);
	if (ret)
		return ret;

	return vec_prctl_status(ARM64_VEC_SME, flags);
}

/* PR_SME_GET_VL */
int sme_get_current_vl(void)
{
	if (!system_supports_sme() || is_compat_task())
		return -EINVAL;

	return vec_prctl_status(ARM64_VEC_SME, 0);
}
#endif /* CONFIG_ARM64_SME */

static void vec_probe_vqs(struct vl_info *info,
			  DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;

	bitmap_zero(map, SVE_VQ_MAX);

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_vl(info->type, vq - 1); /* self-syncing */

		switch (info->type) {
		case ARM64_VEC_SVE:
			vl = sve_get_vl();
			break;
		case ARM64_VEC_SME:
			vl = sme_get_vl();
			break;
		default:
			vl = 0;
			break;
		}

		/* Minimum VL identified? */
		if (sve_vq_from_vl(vl) > vq)
			break;

		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}

/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init vec_init_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	vec_probe_vqs(info, info->vq_map);
	bitmap_copy(info->vq_partial_map, info->vq_map, SVE_VQ_MAX);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void vec_update_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	vec_probe_vqs(info, tmp_map);
	bitmap_and(info->vq_map, info->vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(info->vq_partial_map, info->vq_partial_map, tmp_map,
		  SVE_VQ_MAX);
}

/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int vec_verify_vq_map(enum vec_type type)
{
	struct vl_info *info = &vl_info[type];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	vec_probe_vqs(info, tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, info->vq_map, SVE_VQ_MAX)) {
		pr_warn("%s: cpu%d: Required vector length(s) missing\n",
			info->name, smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, info->vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= info->max_virtualisable_vl) {
		pr_warn("%s: cpu%d: Unsupported vector length(s) present\n",
			info->name, smp_processor_id());
		return -EINVAL;
	}

	return 0;
}

static void __init sve_efi_setup(void)
{
	int max_vl = 0;
	int i;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	for (i = 0; i < ARRAY_SIZE(vl_info); i++)
		max_vl = max(vl_info[i].max_vl, max_vl);

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

void __init sve_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SVE];
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;
	int max_bit;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);

	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(ARM64_VEC_SVE, 64));

	bitmap_andnot(tmp_map, info->vq_partial_map, info->vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		info->max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs? This is architecturally forbidden. */
		info->max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		info->max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (info->max_virtualisable_vl > info->max_vl)
		info->max_virtualisable_vl = info->max_vl;

	pr_info("%s: maximum available vector length %u bytes per vector\n",
		info->name, info->max_vl);
	pr_info("%s: default vector length %u bytes per vector\n",
		info->name, get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl() < sve_max_vl())
		pr_warn("%s: unvirtualisable vector lengths present\n",
			info->name);

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
	sme_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

/*
 * Ensure that task->thread.sme_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sme_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability; the architecture
 * guarantees that when ZA is enabled it will be zeroed.
 */
void sme_alloc(struct task_struct *task, bool flush)
{
	if (task->thread.sme_state) {
		if (flush)
			memset(task->thread.sme_state, 0,
			       sme_state_size(task));
		return;
	}

	/* This could potentially be up to 64K. */
	task->thread.sme_state =
		kzalloc(sme_state_size(task), GFP_KERNEL);
}

static void sme_free(struct task_struct *task)
{
	kfree(task->thread.sme_state);
	task->thread.sme_state = NULL;
}

void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* Set priority for all PEs to architecturally defined minimum */
	write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
		       SYS_SMPRI_EL1);

	/* Allow SME in kernel */
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
	isb();

	/* Allow EL0 to access TPIDR2 */
	write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
	isb();
}

void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* This must be enabled after SME */
	BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);

	/* Allow use of ZT0 */
	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
		       SYS_SMCR_EL1);
}

void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* This must be enabled after SME */
	BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);

	/* Allow use of FA64 */
	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
		       SYS_SMCR_EL1);
}

void __init sme_setup(void)
{
	struct vl_info *info = &vl_info[ARM64_VEC_SME];
	int min_bit, max_bit;

	if (!system_supports_sme())
		return;

	/*
	 * SME doesn't require any particular vector length be
	 * supported but it does require at least one. We should have
	 * disabled the feature entirely while bringing up CPUs but
	 * let's double check here. The bitmap is SVE_VQ_MAX sized for
	 * sharing with the SVE code.
	 */
	WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX));

	min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);
	info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));

	max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
	info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));

	WARN_ON(info->min_vl > info->max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 32
	 * (256 bits) if there is one since this is guaranteed not to
	 * grow the signal frame when in streaming mode, otherwise the
	 * minimum available VL will be used.
	 */
	set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32));

	pr_info("SME: minimum available vector length %u bytes per vector\n",
		info->min_vl);
	pr_info("SME: maximum available vector length %u bytes per vector\n",
		info->max_vl);
	pr_info("SME: default vector length %u bytes per vector\n",
		get_sme_default_vl());
}

#endif /* CONFIG_ARM64_SME */

static void sve_init_regs(void)
{
	/*
	 * Convert the FPSIMD state to SVE, zeroing all the state that
	 * is not shared with FPSIMD. If (as is likely) the current
	 * state is live in the registers then do this there and
	 * update our metadata for the current task including
	 * disabling the trap, otherwise update our in-memory copy.
	 * We are guaranteed to not be in streaming mode, we can only
	 * take a SVE trap when not in streaming mode and we can't be
	 * in streaming mode when taking a SME trap.
	 */
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		unsigned long vq_minus_one =
			sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_set_vq(vq_minus_one);
		sve_flush_live(true, vq_minus_one);
		fpsimd_bind_task_to_cpu();
	} else {
		fpsimd_to_sve(current);
		current->thread.fp_type = FP_STATE_SVE;
	}
}

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and the access trap is
 * disabled.
 *
 * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
void do_sve_acc(unsigned long esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		force_sig(SIGKILL);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	/*
	 * Even if the task can have used streaming mode we can only
	 * generate SVE access traps in normal SVE mode and
	 * transitioning out of streaming mode may discard any
	 * streaming mode state. Always clear the high bits to avoid
	 * any potential errors tracking what is properly initialised.
	 */
	sve_init_regs();

	put_cpu_fpsimd_context();
}

/*
 * Trapped SME access
 *
 * Storage is allocated for the full SVE and SME state, the current
 * FPSIMD register contents are migrated to SVE if SVE is not already
 * active, and the access trap is disabled.
 *
 * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
 * would have disabled the SME access trap for userspace during
 * ret_to_user, making an SME access trap impossible in that case.
 */
void do_sme_acc(unsigned long esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SME, the hardware could still trap: */
	if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	/*
	 * If this is not a trap due to SME being disabled then something
	 * is being used in the wrong mode, report as SIGILL.
	 */
	if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	sve_alloc(current, false);
	sme_alloc(current, true);
	if (!current->thread.sve_state || !current->thread.sme_state) {
		force_sig(SIGKILL);
		return;
	}

	get_cpu_fpsimd_context();

	/* With TIF_SME userspace shouldn't generate any traps */
	if (test_and_set_thread_flag(TIF_SME))
		WARN_ON(1);

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		unsigned long vq_minus_one =
			sve_vq_from_vl(task_get_sme_vl(current)) - 1;
		sme_set_vq(vq_minus_one);

		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}

/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
{
	/* Even if we chose not to use FPSIMD, the hardware could still trap: */
	if (!system_supports_fpsimd()) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	/*
	 * When FPSIMD is enabled, we should never take a trap unless something
	 * has gone very wrong.
	 */
	BUG();
}

/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}

static void fpsimd_load_kernel_state(struct task_struct *task)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	/*
	 * Elide the load if this CPU holds the most recent kernel mode
	 * FPSIMD context of the current task.
	 */
	if (last->st == &task->thread.kernel_fpsimd_state &&
	    task->thread.kernel_fpsimd_cpu == smp_processor_id())
		return;

	fpsimd_load_state(&task->thread.kernel_fpsimd_state);
}

static void fpsimd_save_kernel_state(struct task_struct *task)
{
	struct cpu_fp_state cpu_fp_state = {
		.st		= &task->thread.kernel_fpsimd_state,
		.to_save	= FP_STATE_FPSIMD,
	};

	fpsimd_save_state(&task->thread.kernel_fpsimd_state);
	fpsimd_bind_state_to_cpu(&cpu_fp_state);

	task->thread.kernel_fpsimd_cpu = smp_processor_id();
}

void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	WARN_ON_ONCE(!irqs_disabled());

	/* Save unsaved fpsimd state, if any: */
	if (test_thread_flag(TIF_KERNEL_FPSTATE))
		fpsimd_save_kernel_state(current);
	else
		fpsimd_save_user_state();

	if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
		fpsimd_load_kernel_state(next);
		set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
	} else {
		/*
		 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
		 * state. For kernel threads, FPSIMD registers are never
		 * loaded with user mode FPSIMD state and so wrong_task and
		 * wrong_cpu will always be true.
		 */
		wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
			&next->thread.uw.fpsimd_state;
		wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

		update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
				       wrong_task || wrong_cpu);
	}
}

static void fpsimd_flush_thread_vl(enum vec_type type)
{
	int vl, supported_vl;

	/*
	 * Reset the task vector length as required. This is where we
	 * ensure that all user tasks have a valid vector length
	 * configured: no kernel task can become a user task without
	 * an exec and hence a call to this function. By the time the
	 * first call to this function is made, all early hardware
	 * probing is complete, so __default_vl should be valid.
	 * If a bug causes this to go wrong, we make some noise and
	 * try to fudge thread.sve_vl to a safe value here.
	 */
	vl = task_get_vl_onexec(current, type);
	if (!vl)
		vl = get_default_vl(type);

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = vl_info[type].min_vl;

	supported_vl = find_supported_vector_length(type, vl);
	if (WARN_ON(supported_vl != vl))
		vl = supported_vl;

	task_set_vl(current, type, vl);

	/*
	 * If the task is not set to inherit, ensure that the vector
	 * length will be reset by a subsequent exec:
	 */
	if (!test_thread_flag(vec_vl_inherit_flag(type)))
		task_set_vl_onexec(current, type, 0);
}

void fpsimd_flush_thread(void)
{
	void *sve_state = NULL;
	void *sme_state = NULL;

	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);

		/* Defer kfree() while in atomic context */
		sve_state = current->thread.sve_state;
		current->thread.sve_state = NULL;

		fpsimd_flush_thread_vl(ARM64_VEC_SVE);
	}

	if (system_supports_sme()) {
		clear_thread_flag(TIF_SME);

		/* Defer kfree() while in atomic context */
		sme_state = current->thread.sme_state;
		current->thread.sme_state = NULL;

		fpsimd_flush_thread_vl(ARM64_VEC_SME);
		current->thread.svcr = 0;
	}

	current->thread.fp_type = FP_STATE_FPSIMD;

	put_cpu_fpsimd_context();
	kfree(sve_state);
	kfree(sme_state);
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save_user_state();
	put_cpu_fpsimd_context();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Called by KVM when entering the guest.
 */
void fpsimd_kvm_prepare(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * KVM does not save host SVE state since we can only enter
	 * the guest from a syscall so the ABI means that only the
	 * non-saved SVE state needs to be saved. If we have left
	 * SVE enabled for performance reasons then update the task
	 * state to be FPSIMD only.
	 */
	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_SVE)) {
		sve_to_fpsimd(current);
		current->thread.fp_type = FP_STATE_FPSIMD;
	}

	put_cpu_fpsimd_context();
}

/*
 * Associate current's FPSIMD context with this cpu
 * The caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 */
static void fpsimd_bind_task_to_cpu(void)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sme_state = current->thread.sme_state;
	last->sve_vl = task_get_sve_vl(current);
	last->sme_vl = task_get_sme_vl(current);
	last->svcr = &current->thread.svcr;
	last->fp_type = &current->thread.fp_type;
	last->to_save = FP_STATE_CURRENT;
	current->thread.fpsimd_cpu = smp_processor_id();

	/*
	 * Toggle SVE and SME trapping for userspace if needed, these
	 * are serialised by ret_to_user().
	 */
	if (system_supports_sme()) {
		if (test_thread_flag(TIF_SME))
			sme_user_enable();
		else
			sme_user_disable();
	}

	if (system_supports_sve()) {
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();
	}
}

void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
{
	struct cpu_fp_state *last = this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	*last = *state;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'. This is called when we are preparing to return to
 * userspace to ensure that userspace sees a good register state.
 */
void fpsimd_restore_current_state(void)
{
	/*
	 * TIF_FOREIGN_FPSTATE is set on the init task and copied by
	 * arch_dup_task_struct() regardless of whether FP/SIMD is detected.
	 * Thus user threads can have this set even when FP/SIMD hasn't been
	 * detected.
	 *
	 * When FP/SIMD is detected, begin_new_exec() will set
	 * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
	 * and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
	 * switching tasks. We detect FP/SIMD before we exec the first user
	 * process, ensuring this has TIF_FOREIGN_FPSTATE set and
	 * do_notify_resume() will call fpsimd_restore_current_state() to
	 * install the user FP/SIMD context.
	 *
	 * When FP/SIMD is not detected, nothing else will clear or set
	 * TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
	 * we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
	 * looping forever calling fpsimd_restore_current_state().
	 */
	if (!system_supports_fpsimd()) {
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'. This is used by the signal code to restore the
 * register state when returning from a signal handler in FPSIMD only cases,
 * any SVE context will be discarded.
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	get_cpu_fpsimd_context();

	current->thread.uw.fpsimd_state = *state;
	if (test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	put_cpu_fpsimd_context();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled. The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
	/*
	 * If we don't support fpsimd, bail out after we have
	 * reset the fpsimd_cpu for this task and clear the
	 * FPSTATE.
	 */
	if (!system_supports_fpsimd())
		return;
	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}

/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
 * before calling this function.
 */
static void fpsimd_flush_cpu_state(void)
{
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);

	/*
	 * Leaving streaming mode enabled will cause issues for any kernel
	 * NEON and leaving streaming mode or ZA enabled may increase power
	 * consumption.
	 */
	if (system_supports_sme())
		write_sysreg_s(0, SYS_SVCR);

	set_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * Save the FPSIMD state to memory and invalidate cpu view.
 * This function must be called with preemption disabled.
 */
void fpsimd_save_and_flush_cpu_state(void)
{
	unsigned long flags;

	if (!system_supports_fpsimd())
		return;
	WARN_ON(preemptible());
	local_irq_save(flags);
	fpsimd_save_user_state();
	fpsimd_flush_cpu_state();
	local_irq_restore(flags);
}

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	if (test_thread_flag(TIF_KERNEL_FPSTATE)) {
		BUG_ON(IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq());
		fpsimd_save_kernel_state(current);
	} else {
		fpsimd_save_user_state();

		/*
		 * Set the thread flag so that the kernel mode FPSIMD state
		 * will be context switched along with the rest of the task
		 * state.
		 *
		 * On non-PREEMPT_RT, softirqs may interrupt task level kernel
		 * mode FPSIMD, but the task will not be preemptible so setting
		 * TIF_KERNEL_FPSTATE for those would be both wrong (as it
		 * would mark the task context FPSIMD state as requiring a
		 * context switch) and unnecessary.
		 *
		 * On PREEMPT_RT, softirqs are serviced from a separate thread,
		 * which is scheduled as usual, and this guarantees that these
		 * softirqs are not interrupting use of the FPSIMD in kernel
		 * mode in task context. So in this case, setting the flag here
		 * is always appropriate.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) || !in_serving_softirq())
			set_thread_flag(TIF_KERNEL_FPSTATE);
	}

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();

	put_cpu_fpsimd_context();
}
EXPORT_SYMBOL_GPL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	if (!system_supports_fpsimd())
		return;

	/*
	 * If we are returning from a nested use of kernel mode FPSIMD, restore
	 * the task context kernel mode FPSIMD state. This can only happen when
	 * running in softirq context on non-PREEMPT_RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && in_serving_softirq() &&
	    test_thread_flag(TIF_KERNEL_FPSTATE))
		fpsimd_load_kernel_state(current);
	else
		clear_thread_flag(TIF_KERNEL_FPSTATE);
}
EXPORT_SYMBOL_GPL(kernel_neon_end);

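/*
 * Typical usage (illustrative; do_simd_work() and do_scalar_work() are
 * hypothetical callbacks, not part of this file):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		do_simd_work();
 *		kernel_neon_end();
 *	} else {
 *		do_scalar_work();
 *	}
 */
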
#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);
static DEFINE_PER_CPU(bool, efi_sm_state);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);
			bool ffr = true;
			u64 svcr;

			__this_cpu_write(efi_sve_state_used, true);

			if (system_supports_sme()) {
				svcr = read_sysreg_s(SYS_SVCR);

				__this_cpu_write(efi_sm_state,
						 svcr & SVCR_SM_MASK);

				/*
				 * Unless we have FA64 FFR does not
				 * exist in streaming mode.
				 */
				if (!system_supports_fa64())
					ffr = !(svcr & SVCR_SM_MASK);
			}

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       ffr);

			if (system_supports_sme())
				sysreg_clear_set_s(SYS_SVCR,
						   SVCR_SM_MASK, 0);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);
			bool ffr = true;

			/*
			 * Restore streaming mode; EFI calls are
			 * normal function calls so should not return in
			 * streaming mode.
			 */
			if (system_supports_sme()) {
				if (__this_cpu_read(efi_sm_state)) {
					sysreg_clear_set_s(SYS_SVCR,
							   0,
							   SVCR_SM_MASK);

					/*
					 * Unless we have FA64 FFR does not
					 * exist in streaming mode.
					 */
					if (!system_supports_fa64())
						ffr = false;
				}
			}

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       ffr);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
{
	unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;

	write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
	isb();
}

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	sve_sysctl_init();
	sme_sysctl_init();

	return 0;
}
core_initcall(fpsimd_init);