KVM: async_pf: change kvm_setup_async_pf()/kvm_arch_setup_async_pf() return type...
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d47c19718615e23f61bc4ff642b632506b5d15c7..7fd4fdb165fc1d19929385b918ed894c7b13ebca 100644
@@ -3954,33 +3954,31 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
        return true;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 {
        hva_t hva;
        struct kvm_arch_async_pf arch;
-       int rc;
 
        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
-               return 0;
+               return false;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
-               return 0;
+               return false;
        if (psw_extint_disabled(vcpu))
-               return 0;
+               return false;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
-               return 0;
+               return false;
        if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
-               return 0;
+               return false;
        if (!vcpu->arch.gmap->pfault_enabled)
-               return 0;
+               return false;
 
        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
-               return 0;
+               return false;
 
-       rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
-       return rc;
+       return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
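
Usage sketch (not part of this patch): with the bool return type, a fault-handling path can test kvm_arch_setup_async_pf() directly and fall back to a synchronous fault-in when no async page fault could be queued. The wrapper name handle_major_guest_pfault() below is a hypothetical example, not the actual kvm-s390.c call site; it assumes the existing kvm_arch_fault_in_page() helper as the synchronous fallback.

/*
 * Illustrative sketch only -- not taken from this patch. The wrapper
 * name is hypothetical; kvm_arch_fault_in_page() is assumed to be the
 * synchronous fallback used when async_pf cannot be set up.
 */
static long handle_major_guest_pfault(struct kvm_vcpu *vcpu, unsigned long gaddr)
{
	/* Try to turn the host fault into an async page fault first. */
	if (kvm_arch_setup_async_pf(vcpu))
		return 0;	/* async_pf queued, the guest can keep running */

	/* No async_pf possible: fault the page in synchronously instead. */
	return kvm_arch_fault_in_page(vcpu, gaddr, 1);
}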