Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Dec 2008 23:56:17 +0000 (15:56 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Dec 2008 23:56:17 +0000 (15:56 -0800)
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: avoid creation of unreachable pages in the shadow
  KVM: ppc: stop leaking host memory on VM exit
  KVM: MMU: fix sync of ptes addressed at owner pagetable
  KVM: ia64: Fix: Use correct calling convention for PAL_VPS_RESUME_HANDLER
  KVM: ia64: Fix incorrect kbuild CFLAGS override
  KVM: VMX: Fix interrupt loss during race with NMI
  KVM: s390: Fix problem state handling in guest sigp handler

arch/ia64/kvm/Makefile
arch/ia64/kvm/optvfault.S
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/44x_tlb.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/sigp.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c

diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 3ab4d6d507045379fc139836e2091560e4483a5a..92cef66ca268b8631db5b3b72b9887cb8f7dc446 100644
@@ -58,7 +58,7 @@ endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
        vtlb.o process.o
 #Add link memcpy and memset to avoid possible structure assignment error
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index 634abad979b5143953e6eae5191eff3b4a673bc1..32254ce9a1bda61a25f3edce6143206a7d651aa1 100644
@@ -107,10 +107,10 @@ END(kvm_vps_resume_normal)
 GLOBAL_ENTRY(kvm_vps_resume_handler)
        movl r30 = PAL_VPS_RESUME_HANDLER
        ;;
-       ld8 r27=[r25]
+       ld8 r26=[r25]
        shr r17=r17,IA64_ISR_IR_BIT
        ;;
-       dep r27=r17,r27,63,1   // bit 63 of r27 indicate whether enable CFLE
+       dep r26=r17,r26,63,1   // bit 63 of r26 indicate whether enable CFLE
        mov pr=r23,-2
        br.sptk.many kvm_vps_entry
 END(kvm_vps_resume_handler)
@@ -894,12 +894,15 @@ ENTRY(kvm_resume_to_guest)
        ;;
        ld8 r19=[r19]
        mov b0=r29
-       cmp.ne p6,p7 = r0,r0
+       mov r27=cr.isr
        ;;
-       tbit.z p6,p7 = r19,IA64_PSR_IC_BIT              // p1=vpsr.ic
+       tbit.z p6,p7 = r19,IA64_PSR_IC_BIT              // p7=vpsr.ic
+       shr r27=r27,IA64_ISR_IR_BIT
        ;;
        (p6) ld8 r26=[r25]
        (p7) mov b0=r28
+       ;;
+       (p6) dep r26=r27,r26,63,1
        mov pr=r31,-2
        br.sptk.many b0             // call pal service
        ;;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8931ba729d2b519ec4d4efb1d0851301d96e1d3a..bb62ad876de32750c70cc7ecd91adcb9f0539d5a 100644
@@ -104,4 +104,6 @@ static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
        }
 }
 
+extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2e227a412bc240f2bc889b0c7c1fe2e17a7bdd8c..ad72c6f9811f62ed05695ca23221bd2d9b87f31a 100644
@@ -124,6 +124,14 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
        }
 }
 
+void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       for (i = 0; i <= tlb_44x_hwater; i++)
+               kvmppc_44x_shadow_release(vcpu, i);
+}
+
 void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 {
        vcpu->arch.shadow_tlb_mod[i] = 1;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc422b238ccbd38becc6d36546c0a7ca0cd0..fda9baada132b1c5348dfec8b84ba99d8bc24dc1 100644
@@ -238,6 +238,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+       kvmppc_core_destroy_mmu(vcpu);
 }
 
 /* Note: clearing MSR[DE] just means that the debug interrupt will not be
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 170392687ce042f64c69ea7f4fe85d863025b768..2a01b9e02801f14afd348162f31679237cbd340a 100644
@@ -237,6 +237,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
        u8 order_code;
        int rc;
 
+       /* sigp in userspace can exit */
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu,
+                                                  PGM_PRIVILEGED_OPERATION);
+
        order_code = disp2;
        if (base2)
                order_code += vcpu->arch.guest_gprs[base2];
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1983d9477cd163bc13de766f1edd8ffee4c720b..410ddbc1aa2eb0fc2dfa45de560c829df364aa81 100644
@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        }
 
        rmap_write_protect(vcpu->kvm, sp->gfn);
+       kvm_unlink_unsync_page(vcpu->kvm, sp);
        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }
 
        kvm_mmu_flush_tlb(vcpu);
-       kvm_unlink_unsync_page(vcpu->kvm, sp);
        return 0;
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 613ec9aa674afe06cd76c99b8ebc06eaa3f91f32..84eee43bbe742b005bd1825b0cdac24ba5efffbb 100644
@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
                r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
                                          &curr_pte, sizeof(curr_pte));
                if (r || curr_pte != gw->ptes[level - 2]) {
+                       kvm_mmu_put_page(shadow_page, sptep);
                        kvm_release_pfn_clean(sw->pfn);
                        sw->sptep = NULL;
                        return 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d06b4dc0e2eac49c3d1df9d78ec4b6d6ad975c4f..a4018b01e1f973bad1619d6ca16e0e67e29aceab 100644
@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
        if (cpu_has_virtual_nmis()) {
                if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-                       if (vmx_nmi_enabled(vcpu)) {
+                       if (vcpu->arch.interrupt.pending) {
+                               enable_nmi_window(vcpu);
+                       } else if (vmx_nmi_enabled(vcpu)) {
                                vcpu->arch.nmi_pending = false;
                                vcpu->arch.nmi_injected = true;
                        } else {