KVM: x86: Prevent host from panicking on shared MSR writes.
author Andy Honig <ahonig@google.com>
Wed, 27 Aug 2014 18:16:44 +0000 (11:16 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 24 Oct 2014 11:21:08 +0000 (13:21 +0200)
The previous patch blocked invalid writes directly when the MSR
is written.  As a precaution, prevent future similar mistakes by
gracefully handling GPs caused by writes to shared MSRs.

Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
[Remove parts obsoleted by Nadav's patch. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
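
For background on the mechanism: a plain wrmsrl() executes WRMSR with no
fixup, so a value the CPU rejects raises a #GP that oopses the host,
whereas wrmsrl_safe() attaches an exception-table fixup and turns the
fault into an error return.  A minimal user-space model of that
distinction follows; the names and the bit-63 rejection rule are
illustrative stand-ins, not kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Pretend this MSR faults on any value with bit 63 set. */
static int cpu_accepts(uint64_t value)
{
        return !(value >> 63);
}

/* Model of wrmsrl_safe(): 0 on success, non-zero if WRMSR would fault. */
static int model_wrmsrl_safe(uint64_t *msr_reg, uint64_t value)
{
        if (!cpu_accepts(value))
                return -1;      /* the #GP became an error code */
        *msr_reg = value;
        return 0;
}

int main(void)
{
        uint64_t msr = 0;

        printf("valid write:   %d\n", model_wrmsrl_safe(&msr, 0x3));
        printf("invalid write: %d\n", model_wrmsrl_safe(&msr, 1ULL << 63));
        return 0;
}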
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ccc94de4ac499db2681af23ae2b457ec5e813ff5..6ed0c30d6a0c347e6dd4c9ac0cbd26acfb9ec53b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1064,7 +1064,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                           unsigned long address);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 148020a7dd980e061d31649cd46f70fa4e7e7228..7e2c098b59c9a5f949200bbb0ac5f39be93c4834 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2659,12 +2659,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        default:
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
+                       u64 old_msr_data = msr->data;
                        msr->data = data;
                        if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
                                preempt_disable();
-                               kvm_set_shared_msr(msr->index, msr->data,
-                                                  msr->mask);
+                               ret = kvm_set_shared_msr(msr->index, msr->data,
+                                                        msr->mask);
                                preempt_enable();
+                               if (ret)
+                                       msr->data = old_msr_data;
                        }
                        break;
                }
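
The vmx.c hunk above adopts a snapshot-and-rollback idiom: remember the
cached value, attempt the write through kvm_set_shared_msr(), and restore
the snapshot when it reports failure, so the software copy never
advertises a value the hardware refused.  A compilable sketch of the
same idiom, with try_shared_msr_write() as a hypothetical stand-in for
kvm_set_shared_msr():

#include <assert.h>
#include <stdint.h>

struct guest_msr {
        uint64_t data;  /* software copy of the guest's MSR value */
};

/* Hypothetical stand-in for kvm_set_shared_msr(): non-zero on fault. */
static int try_shared_msr_write(uint64_t value)
{
        return (value >> 63) ? 1 : 0;
}

static int guest_msr_set(struct guest_msr *msr, uint64_t data)
{
        uint64_t old_msr_data = msr->data;      /* snapshot */
        int ret;

        msr->data = data;                       /* optimistic update */
        ret = try_shared_msr_write(msr->data);
        if (ret)
                msr->data = old_msr_data;       /* roll back on failure */
        return ret;
}

int main(void)
{
        struct guest_msr msr = { .data = 0x1 };

        assert(guest_msr_set(&msr, 0x2) == 0 && msr.data == 0x2);
        assert(guest_msr_set(&msr, 1ULL << 63) == 1 && msr.data == 0x2);
        return 0;
}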
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a7195573a327b263aee6ded80c07a34714e784b..0033df32a74585f69f5a8d83515f7182f1c7d7af 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
                shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
        unsigned int cpu = smp_processor_id();
        struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+       int err;
 
        if (((value ^ smsr->values[slot].curr) & mask) == 0)
-               return;
+               return 0;
        smsr->values[slot].curr = value;
-       wrmsrl(shared_msrs_global.msrs[slot], value);
+       err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
+       if (err)
+               return 1;
+
        if (!smsr->registered) {
                smsr->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&smsr->urn);
                smsr->registered = true;
        }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
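
With this change kvm_set_shared_msr() has three outcomes: return 0
without touching hardware when the masked bits are unchanged, return 1
when the WRMSR faults (leaving the user-return notifier unarmed), and
return 0 after a successful write, registering the notifier if needed.
A compilable model of that contract; the struct and fake_wrmsrl_safe()
below are illustrative stand-ins, not the kernel definitions.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct shared_msr_model {
        uint64_t curr;          /* last value handed to the hardware */
        bool registered;        /* user-return notifier armed? */
};

/* Stand-in for wrmsrl_safe(): reject values with the high bit set. */
static int fake_wrmsrl_safe(uint64_t value)
{
        return (value >> 63) ? -1 : 0;
}

static int set_shared_msr_model(struct shared_msr_model *m,
                                uint64_t value, uint64_t mask)
{
        if (((value ^ m->curr) & mask) == 0)
                return 0;               /* masked bits unchanged: no-op */
        m->curr = value;
        if (fake_wrmsrl_safe(value))
                return 1;               /* WRMSR faulted */
        if (!m->registered)
                m->registered = true;   /* arm the user-return notifier */
        return 0;
}

int main(void)
{
        struct shared_msr_model m = { 0, false };

        assert(set_shared_msr_model(&m, 0, ~0ULL) == 0);          /* no-op */
        assert(set_shared_msr_model(&m, 1ULL << 63, ~0ULL) == 1); /* fault */
        assert(set_shared_msr_model(&m, 0x10, ~0ULL) == 0 && m.registered);
        return 0;
}

Note that, as in the kernel code, curr is updated before the write is
attempted, so a faulting write leaves the cached value ahead of the
hardware; the caller-side rollback in vmx_set_msr() is what keeps the
guest-visible state consistent.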