KVM: arm64: nv: Add switching support for HFGxTR/HDFGxTR
author: Marc Zyngier <maz@kernel.org>
Tue, 15 Aug 2023 18:38:59 +0000 (19:38 +0100)
committer: Marc Zyngier <maz@kernel.org>
Thu, 17 Aug 2023 09:00:28 +0000 (10:00 +0100)
Now that we can evaluate the FGT registers, allow them to be merged
with the hypervisor's own configuration (in the case of HFG{RW}TR_EL2)
or simply set for HFGITR_EL2, HDFGRTR_EL2 and HDFGWTR_EL2.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Jing Zhang <jingzhangos@google.com>
Link: https://lore.kernel.org/r/20230815183903.2735724-26-maz@kernel.org
arch/arm64/kvm/hyp/include/hyp/switch.h

index e096b16e85fd595612d44c885fa01b8509d57d6a..a4750070563f7261ba3cfdd8067b125cf4bc9311 100644 (file)
@@ -70,6 +70,13 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
        }
 }
 
+#define compute_clr_set(vcpu, reg, clr, set)                           \
+       do {                                                            \
+               u64 hfg;                                                \
+               hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0;  \
+               set |= hfg & __ ## reg ## _MASK;                        \
+               clr |= ~hfg & __ ## reg ## _nMASK;                      \
+       } while(0)
 
 
 static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
@@ -97,6 +104,10 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
                w_set |= HFGxTR_EL2_TCR_EL1_MASK;
 
+       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+               compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
+               compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
+       }
 
        /* The default is not to trap anything but ACCDATA_EL1 */
        r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
@@ -109,6 +120,38 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
 
        write_sysreg_s(r_val, SYS_HFGRTR_EL2);
        write_sysreg_s(w_val, SYS_HFGWTR_EL2);
+
+       if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+               return;
+
+       ctxt_sys_reg(hctxt, HFGITR_EL2) = read_sysreg_s(SYS_HFGITR_EL2);
+
+       r_set = r_clr = 0;
+       compute_clr_set(vcpu, HFGITR_EL2, r_clr, r_set);
+       r_val = __HFGITR_EL2_nMASK;
+       r_val |= r_set;
+       r_val &= ~r_clr;
+
+       write_sysreg_s(r_val, SYS_HFGITR_EL2);
+
+       ctxt_sys_reg(hctxt, HDFGRTR_EL2) = read_sysreg_s(SYS_HDFGRTR_EL2);
+       ctxt_sys_reg(hctxt, HDFGWTR_EL2) = read_sysreg_s(SYS_HDFGWTR_EL2);
+
+       r_clr = r_set = w_clr = w_set = 0;
+
+       compute_clr_set(vcpu, HDFGRTR_EL2, r_clr, r_set);
+       compute_clr_set(vcpu, HDFGWTR_EL2, w_clr, w_set);
+
+       r_val = __HDFGRTR_EL2_nMASK;
+       r_val |= r_set;
+       r_val &= ~r_clr;
+
+       w_val = __HDFGWTR_EL2_nMASK;
+       w_val |= w_set;
+       w_val &= ~w_clr;
+
+       write_sysreg_s(r_val, SYS_HDFGRTR_EL2);
+       write_sysreg_s(w_val, SYS_HDFGWTR_EL2);
 }
 
 static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
@@ -121,7 +164,12 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
        write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
 
+       if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+               return;
 
+       write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
+       write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
+       write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
 }
 
 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)