KVM: x86: introduce ISA specific SMM entry/exit callbacks
author     Ladi Prosek <lprosek@redhat.com>
           Wed, 11 Oct 2017 14:54:40 +0000 (16:54 +0200)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 12 Oct 2017 12:01:55 +0000 (14:01 +0200)
Entering and exiting SMM may require ISA-specific handling under certain
circumstances. This commit adds two new callbacks with empty implementations.
Actual functionality will be added in following commits.

* pre_enter_smm() is to be called when injecting an SMI, before any
  SMM-related vcpu state has been changed
* pre_leave_smm() is to be called when emulating the RSM instruction,
  while the vcpu is still in real mode and before any SMM-related vcpu
  state has been restored

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/emulate.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

arch/x86/include/asm/kvm_emulate.h
index fa2558e120249e6c36e368e396d2c9e188c57056..ad38c5e918ecc97d02301f3b69e667569e960d27 100644 (file)
@@ -225,6 +225,8 @@ struct x86_emulate_ops {
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
        void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
+       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
arch/x86/include/asm/kvm_host.h
index c73e493adf0748108c31389ca62437267ea877e2..23a9a5339f3f82d3b25670092f55f7c1528d3e79 100644 (file)
@@ -1061,6 +1061,9 @@ struct kvm_x86_ops {
        void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
        void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+       int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
 };
 
 struct kvm_arch_async_pf {
arch/x86/kvm/emulate.c
index d90cdc77e077354f1407235e6b73f2fda21c430c..8079d141792af91994421d15c19c26d3bd386c59 100644 (file)
@@ -2591,6 +2591,15 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
 
        smbase = ctxt->ops->get_smbase(ctxt);
+
+       /*
+        * Give pre_leave_smm() a chance to make ISA-specific changes to the
+        * vCPU state (e.g. enter guest mode) before loading state from the SMM
+        * state-save area.
+        */
+       if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+               return X86EMUL_UNHANDLEABLE;
+
        if (emulator_has_longmode(ctxt))
                ret = rsm_load_state_64(ctxt, smbase + 0x8000);
        else
arch/x86/kvm/svm.c
index 84f18634d87cd23558344f3e337b84715d4a156f..c4e9b99d48d861ed91b2fc96dd43f97001e242c7 100644 (file)
@@ -5401,6 +5401,18 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
        vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+       /* TODO: Implement */
+       return 0;
+}
+
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+       /* TODO: Implement */
+       return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
@@ -5511,6 +5523,9 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .deliver_posted_interrupt = svm_deliver_avic_intr,
        .update_pi_irte = svm_update_pi_irte,
        .setup_mce = svm_setup_mce,
+
+       .pre_enter_smm = svm_pre_enter_smm,
+       .pre_leave_smm = svm_pre_leave_smm,
 };
 
 static int __init svm_init(void)
arch/x86/kvm/vmx.c
index c9214e3a01dfeaf0cff6c18b0cdfd91361185cc3..1305bb65688b7740b1829a2c8778ab648c532615 100644 (file)
@@ -11916,6 +11916,18 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
                        ~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+       /* TODO: Implement */
+       return 0;
+}
+
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+       /* TODO: Implement */
+       return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -12041,6 +12053,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 #endif
 
        .setup_mce = vmx_setup_mce,
+
+       .pre_enter_smm = vmx_pre_enter_smm,
+       .pre_leave_smm = vmx_pre_leave_smm,
 };
 
 static int __init vmx_init(void)
arch/x86/kvm/x86.c
index 4ac261000e7e616e0f5ebf53b7273b7f370b4477..9e85a69ccb124dd212176dd4546bd71aef96f888 100644 (file)
@@ -5281,6 +5281,11 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
        kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
 }
 
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+{
+       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
        .read_gpr            = emulator_read_gpr,
        .write_gpr           = emulator_write_gpr,
@@ -5322,6 +5327,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .set_nmi_mask        = emulator_set_nmi_mask,
        .get_hflags          = emulator_get_hflags,
        .set_hflags          = emulator_set_hflags,
+       .pre_leave_smm       = emulator_pre_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6647,13 +6653,20 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        u32 cr0;
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
-       vcpu->arch.hflags |= HF_SMM_MASK;
        memset(buf, 0, 512);
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
                enter_smm_save_state_32(vcpu, buf);
 
+       /*
+        * Give pre_enter_smm() a chance to make ISA-specific changes to the
+        * vCPU state (e.g. leave guest mode) after we've saved the state into
+        * the SMM state-save area.
+        */
+       kvm_x86_ops->pre_enter_smm(vcpu, buf);
+
+       vcpu->arch.hflags |= HF_SMM_MASK;
        kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
 
        if (kvm_x86_ops->get_nmi_mask(vcpu))