KVM: vmx: use MSR_IA32_TSX_CTRL to hard-disable TSX on guests that lack it
author Paolo Bonzini <pbonzini@redhat.com>
Mon, 18 Nov 2019 17:23:01 +0000 (12:23 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 21 Nov 2019 09:01:02 +0000 (10:01 +0100)
If X86_FEATURE_RTM is disabled, the guest should not be able to access
MSR_IA32_TSX_CTRL.  KVM can therefore program that MSR on the guest's
behalf, setting TSX_CTRL_RTM_DISABLE while the guest runs, to force all
transactions from the guest to abort.
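
For context, the MSR and its control bits are defined in
arch/x86/include/asm/msr-index.h; an excerpt (not part of this diff):

  #define MSR_IA32_TSX_CTRL       0x00000122
  #define TSX_CTRL_RTM_DISABLE    BIT(0)  /* force all RTM transactions to abort */
  #define TSX_CTRL_CPUID_CLEAR    BIT(1)  /* hide RTM/HLE from CPUID */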

Tested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c

index 73cbe02640b888b4b33e9ea9de924882e3e83f2d..813171bb802ae3428d1bc7669e154beef14b0066 100644
@@ -639,6 +639,23 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
        return NULL;
 }
 
+static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data)
+{
+       int ret = 0;
+
+       u64 old_msr_data = msr->data;
+       msr->data = data;
+       if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+               preempt_disable();
+               ret = kvm_set_shared_msr(msr->index, msr->data,
+                                        msr->mask);
+               preempt_enable();
+               if (ret)
+                       msr->data = old_msr_data;
+       }
+       return ret;
+}
+
 void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
        vmcs_clear(loaded_vmcs->vmcs);
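
The preempt_disable()/preempt_enable() pair in the new helper is needed
because kvm_set_shared_msr() touches per-CPU shared-MSR state and may
register a user-return notifier on the current CPU.  For orientation, an
abridged sketch of that helper as it looks in this era's
arch/x86/kvm/x86.c (simplified; exact details may differ):

  int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
  {
          unsigned int cpu = smp_processor_id();
          struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

          /* Only change the bits the caller owns; keep the host's others. */
          value = (value & mask) | (smsr->values[slot].host & ~mask);
          if (value == smsr->values[slot].curr)
                  return 0;
          if (wrmsrl_safe(shared_msrs_global.msrs[slot], value))
                  return 1;       /* lets vmx_set_guest_msr() restore msr->data */

          smsr->values[slot].curr = value;
          if (!smsr->registered) {
                  /* Restore the host value when returning to userspace. */
                  smsr->urn.on_user_return = kvm_on_user_return;
                  user_return_notifier_register(&smsr->urn);
                  smsr->registered = true;
          }
          return 0;
  }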
@@ -2174,20 +2191,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        default:
        find_shared_msr:
                msr = find_msr_entry(vmx, msr_index);
-               if (msr) {
-                       u64 old_msr_data = msr->data;
-                       msr->data = data;
-                       if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
-                               preempt_disable();
-                               ret = kvm_set_shared_msr(msr->index, msr->data,
-                                                        msr->mask);
-                               preempt_enable();
-                               if (ret)
-                                       msr->data = old_msr_data;
-                       }
-                       break;
-               }
-               ret = kvm_set_msr_common(vcpu, msr_info);
+               if (msr)
+                       ret = vmx_set_guest_msr(vmx, msr, data);
+               else
+                       ret = kvm_set_msr_common(vcpu, msr_info);
        }
 
        return ret;
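
With the helper factored out, both guest WRMSR exits and host-initiated
writes (KVM_SET_MSRS from the VMM) funnel through this same path.  A
minimal, hypothetical userspace sketch of a host-initiated write,
assuming vcpu_fd is an already-created vCPU file descriptor and with
error handling omitted:

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  struct {
          struct kvm_msrs hdr;
          struct kvm_msr_entry entry;
  } msrs = {
          .hdr.nmsrs = 1,
          .entry = {
                  .index = 0x122, /* MSR_IA32_TSX_CTRL */
                  .data  = 1,     /* TSX_CTRL_RTM_DISABLE */
          },
  };

  /* KVM_SET_MSRS returns the number of MSRs set (1 on success here). */
  ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);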
@@ -7142,6 +7149,15 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
                        guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
                update_intel_pt_cfg(vcpu);
+
+       if (boot_cpu_has(X86_FEATURE_RTM)) {
+               struct shared_msr_entry *msr;
+               msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL);
+               if (msr) {
+                       bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
+                       vmx_set_guest_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
+               }
+       }
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
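
Seen from inside a guest whose CPUID lacks RTM, the effect of the
vmx_cpuid_update() hunk is that even code which ignores CPUID and issues
XBEGIN anyway can never keep a transaction alive: every attempt aborts
immediately.  An illustrative guest-side snippet (compile with -mrtm;
this is a demonstration, not part of the kernel change):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int status = _xbegin();

          if (status == _XBEGIN_STARTED)
                  _xend();        /* commit the (empty) transaction */

          /* With TSX_CTRL_RTM_DISABLE set by the host, the "started"
           * branch is never taken: _xbegin() always reports an abort. */
          printf("RTM transaction %s\n",
                 status == _XBEGIN_STARTED ? "committed" : "aborted");
          return 0;
  }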