KVM: remove kvm_guest_enter/exit wrappers
author: Paolo Bonzini <pbonzini@redhat.com>
Wed, 15 Jun 2016 13:18:26 +0000 (15:18 +0200)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 1 Jul 2016 09:03:21 +0000 (11:03 +0200)
Use the functions from context_tracking.h directly.

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/arm/kvm/arm.c
arch/mips/kvm/mips.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h

index f20ca84537f5d7849ac707d1eb939f42cba797d8..9ac4970882fefdcaa0a1e909dc9772acbb527d4b 100644 (file)
@@ -615,7 +615,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 * Enter the guest
                 */
                trace_kvm_entry(*vcpu_pc(vcpu));
-               __kvm_guest_enter();
+               guest_enter_irqoff();
                vcpu->mode = IN_GUEST_MODE;
 
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
@@ -641,14 +641,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                local_irq_enable();
 
                /*
-                * We do local_irq_enable() before calling kvm_guest_exit() so
+                * We do local_irq_enable() before calling guest_exit() so
                 * that if a timer interrupt hits while running the guest we
                 * account that tick as being spent in the guest.  We enable
-                * preemption after calling kvm_guest_exit() so that if we get
+                * preemption after calling guest_exit() so that if we get
                 * preempted we make sure ticks after that is not counted as
                 * guest time.
                 */
-               kvm_guest_exit();
+               guest_exit();
                trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
                /*
index 5a2b9034a05ce1fa7f4772e9c58a097c315bcca2..5f1163653b5062eca99c0465ea690603b1679e60 100644 (file)
@@ -406,7 +406,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));
 
-       __kvm_guest_enter();
+       guest_enter_irqoff();
 
        /* Disable hardware page table walking while in guest */
        htw_stop();
@@ -418,7 +418,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        /* Re-enable HTW before enabling interrupts */
        htw_start();
 
-       __kvm_guest_exit();
+       guest_exit_irqoff();
        local_irq_enable();
 
        if (vcpu->sigset_active)
index e20beae5ca7a462d9f1cfb7211b9a84592383ddf..6b2859c12ae879ba3c53590c7b60f307647b37c5 100644 (file)
@@ -2522,7 +2522,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
                        spin_unlock(&pvc->lock);
 
-       kvm_guest_enter();
+       guest_enter();
 
        srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
@@ -2570,7 +2570,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
        /* make sure updates to secondary vcpu structs are visible now */
        smp_mb();
-       kvm_guest_exit();
+       guest_exit();
 
        for (sub = 0; sub < core_info.n_subcores; ++sub)
                list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
index 8e4f64f0b7741d60e90f204c8265d1729bfa1788..6a66c5ff0827efa9d07debea802c46aa4e4c12c1 100644 (file)
@@ -914,7 +914,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
        /* We get here with MSR.EE=1 */
 
        trace_kvm_exit(exit_nr, vcpu);
-       kvm_guest_exit();
+       guest_exit();
 
        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -1531,7 +1531,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        kvmppc_clear_debug(vcpu);
 
-       /* No need for kvm_guest_exit. It's done in handle_exit.
+       /* No need for guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */
 
        /* Make sure we save the guest FPU/Altivec/VSX state */
index 4afae695899ad99373c033e53978603879bf078c..02b4672f7347ecd2ccfc0cd15f5238b8d1681f17 100644 (file)
@@ -776,7 +776,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
-       /* No need for kvm_guest_exit. It's done in handle_exit.
+       /* No need for guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */
 
        /* Switch back to user space debug context */
@@ -1012,7 +1012,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        }
 
        trace_kvm_exit(exit_nr, vcpu);
-       __kvm_guest_exit();
+       guest_exit_irqoff();
 
        local_irq_enable();
 
index 02416fea765301303c0130d3900a1f929319582f..1ac036e45ed4f8255e0fb1f05a3bfe6ce165af04 100644 (file)
@@ -119,7 +119,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
                        continue;
                }
 
-               __kvm_guest_enter();
+               guest_enter_irqoff();
                return 1;
        }
 
index 03eeeb0ded2470a3d9ea0f6df82f6308266a3c83..d42428c11794122434261abdbb578259c11f0a33 100644 (file)
@@ -2623,14 +2623,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                 * guest_enter and guest_exit should be no uaccess.
                 */
                local_irq_disable();
-               __kvm_guest_enter();
+               guest_enter_irqoff();
                __disable_cpu_timer_accounting(vcpu);
                local_irq_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                local_irq_disable();
                __enable_cpu_timer_accounting(vcpu);
-               __kvm_guest_exit();
+               guest_exit_irqoff();
                local_irq_enable();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
index 6895e7b3be123767d4829072ff3ba39b53350cac..c106488b41371b2c8cc188df7ba5e8e5d7c06f31 100644 (file)
@@ -765,13 +765,13 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        local_irq_disable();
-       kvm_guest_enter();
+       guest_enter_irqoff();
        local_irq_enable();
 
        rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
 
        local_irq_disable();
-       kvm_guest_exit();
+       guest_exit_irqoff();
        local_irq_enable();
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
index 9e50e2ad6d0829c6746784ce0031a5413329cca4..618463abeec5b2479dab5900e0a573ceddcd8a15 100644 (file)
@@ -6658,7 +6658,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        trace_kvm_entry(vcpu->vcpu_id);
        wait_lapic_expire(vcpu);
-       __kvm_guest_enter();
+       guest_enter_irqoff();
 
        if (unlikely(vcpu->arch.switch_db_regs)) {
                set_debugreg(0, 7);
@@ -6717,7 +6717,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         */
        barrier();
 
-       kvm_guest_exit();
+       guest_exit();
 
        preempt_enable();
 
index ffff405226887bad0811f305b201002d5a0115da..66b2f6159aadd312afd263811a2b7bf9efdd861f 100644 (file)
@@ -875,28 +875,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
 }
 #endif
 
-/* must be called with irqs disabled */
-static inline void __kvm_guest_enter(void)
-{
-       guest_enter_irqoff();
-}
-
-/* must be called with irqs disabled */
-static inline void __kvm_guest_exit(void)
-{
-       guest_exit_irqoff();
-}
-
-static inline void kvm_guest_enter(void)
-{
-       guest_enter();
-}
-
-static inline void kvm_guest_exit(void)
-{
-       guest_exit();
-}
-
 /*
  * search_memslots() and __gfn_to_memslot() are here because they are
  * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.