KVM: arm64: Provide KVM's own save/restore SVE primitives
author	Marc Zyngier <maz@kernel.org>
	Thu, 11 Mar 2021 11:52:38 +0000 (11:52 +0000)
committer	Marc Zyngier <maz@kernel.org>
	Thu, 18 Mar 2021 11:23:14 +0000 (11:23 +0000)
As we are about to change the way KVM deals with SVE, provide
KVM with its own save/restore SVE primitives.

No functional change intended.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/fpsimdmacros.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/kvm/hyp/fpsimd.S
arch/arm64/kvm/hyp/include/hyp/switch.h

arch/arm64/include/asm/fpsimdmacros.h
index af43367534c7a5db2c1092799bb8a17de92df90d..e9b72d35b8679382ea2db298c24ea81d32a6c14c 100644
@@ -6,6 +6,8 @@
  * Author: Catalin Marinas <catalin.marinas@arm.com>
  */
 
+#include <asm/assembler.h>
+
 .macro fpsimd_save state, tmpnr
        stp     q0, q1, [\state, #16 * 0]
        stp     q2, q3, [\state, #16 * 2]
arch/arm64/include/asm/kvm_hyp.h
index c0450828378b528280f4dcc2f472e5694fc462c2..e8b0f7fcd86b87e0e5020f5cb110ca1361dd219c 100644
@@ -85,6 +85,8 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+void __sve_save_state(void *sve_pffr, u32 *fpsr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
arch/arm64/kvm/hyp/fpsimd.S
index 01f114aa47b092de4f1128b5a08060d6abb5761b..95b22e10996cc56cb36c3cd6fc333ea5b342d5fb 100644
@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
        fpsimd_restore  x0, 1
        ret
 SYM_FUNC_END(__fpsimd_restore_state)
+
+SYM_FUNC_START(__sve_restore_state)
+       sve_load 0, x1, x2, 3, x4
+       ret
+SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+       sve_save 0, x1, 2
+       ret
+SYM_FUNC_END(__sve_save_state)
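
A note on the calling convention (commentary, not part of the patch): under the AAPCS64, the arguments of the new prototypes in kvm_hyp.h arrive in x0, x1 and x2, which is why the assembly bodies above hand the base register number 0, x1 and x2 straight to the sve_save/sve_load macros from fpsimdmacros.h, using the remaining caller-clobbered registers (x2 for the save, x3/x4 for the load) as scratch. A commented restatement of the prototypes, with the register mapping assumed from that convention:

/*
 * Sketch only: the prototypes added to kvm_hyp.h, annotated with the
 * AAPCS64 register each argument is passed in.
 */
void __sve_save_state(void *sve_pffr,		/* x0: pffr pointer into the SVE register save area */
		      u32 *fpsr);		/* x1: where FPSR is saved alongside the SVE regs */
void __sve_restore_state(void *sve_pffr,	/* x0: as above */
			 u32 *fpsr,		/* x1: FPSR to reload */
			 unsigned int vqminus1);	/* x2: vector length to use, expressed as VQ - 1 */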
arch/arm64/kvm/hyp/include/hyp/switch.h
index 54f4860cd87c06b1ed701eb3f2664176b6458806..807bc4734828a3269e691ddeb3919dbfb8dbe53c 100644
@@ -256,8 +256,8 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
                                vcpu->arch.host_fpsimd_state,
                                struct thread_struct, uw.fpsimd_state);
 
-                       sve_save_state(sve_pffr(thread),
-                                      &vcpu->arch.host_fpsimd_state->fpsr);
+                       __sve_save_state(sve_pffr(thread),
+                                        &vcpu->arch.host_fpsimd_state->fpsr);
                } else {
                        __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
                }
@@ -266,9 +266,9 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
        }
 
        if (sve_guest) {
-               sve_load_state(vcpu_sve_pffr(vcpu),
-                              &vcpu->arch.ctxt.fp_regs.fpsr,
-                              sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+               __sve_restore_state(vcpu_sve_pffr(vcpu),
+                                   &vcpu->arch.ctxt.fp_regs.fpsr,
+                                   sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
                write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
        } else {
                __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
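
Read together with the kvm_hyp.h and fpsimd.S hunks, this last change means hyp now drives SVE save/restore entirely through its own primitives rather than the kernel's sve_save_state()/sve_load_state(). Purely as a recap of the hunk above (no new code is introduced here), the guest restore path in __hyp_handle_fpsimd() now reads:

	if (sve_guest) {
		/* Reload the guest's SVE registers, FPSR and vector length */
		__sve_restore_state(vcpu_sve_pffr(vcpu),
				    &vcpu->arch.ctxt.fp_regs.fpsr,
				    sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
	}

with the host save path switched symmetrically from sve_save_state() to __sve_save_state(), preserving behaviour as the commit message states.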