Merge tag 'kvm-s390-next-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d9e6bf3d54f0f1547bd8fda5df4187e93b2a34ed..d7ff30e45589935890ad1c1f67c0f36eaa4d480e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -529,6 +529,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_CMMA_MIGRATION:
        case KVM_CAP_S390_AIS:
        case KVM_CAP_S390_AIS_MIGRATION:
+       case KVM_CAP_S390_VCPU_RESETS:
                r = 1;
                break;
        case KVM_CAP_S390_HPAGE_1M:
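
(Annotation, not part of the patch: a minimal userspace sketch of probing the new capability advertised in the hunk above. It assumes a <linux/kvm.h> new enough to define KVM_CAP_S390_VCPU_RESETS and an already-open VM file descriptor vm_fd.)

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns non-zero when the per-vcpu reset ioctls are available. */
static int have_vcpu_resets(int vm_fd)
{
	/* KVM_CHECK_EXTENSION reports a value > 0 for supported capabilities. */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_VCPU_RESETS) > 0;
}
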
@@ -2530,9 +2531,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        if (vcpu->kvm->arch.use_cmma)
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));
-
-       kvm_vcpu_uninit(vcpu);
-       kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -2541,7 +2539,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
 
        kvm_for_each_vcpu(i, vcpu, kvm)
-               kvm_arch_vcpu_destroy(vcpu);
+               kvm_vcpu_destroy(vcpu);
 
        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
@@ -2703,39 +2701,6 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
        return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
 }
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-       kvm_clear_async_pf_completion_queue(vcpu);
-       vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
-                                   KVM_SYNC_GPRS |
-                                   KVM_SYNC_ACRS |
-                                   KVM_SYNC_CRS |
-                                   KVM_SYNC_ARCH0 |
-                                   KVM_SYNC_PFAULT;
-       kvm_s390_set_prefix(vcpu, 0);
-       if (test_kvm_facility(vcpu->kvm, 64))
-               vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
-       if (test_kvm_facility(vcpu->kvm, 82))
-               vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
-       if (test_kvm_facility(vcpu->kvm, 133))
-               vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
-       if (test_kvm_facility(vcpu->kvm, 156))
-               vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
-       /* fprs can be synchronized via vrs, even if the guest has no vx. With
-        * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
-        */
-       if (MACHINE_HAS_VX)
-               vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
-       else
-               vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
-
-       if (kvm_is_ucontrol(vcpu->kvm))
-               return __kvm_ucontrol_vcpu_init(vcpu);
-
-       return 0;
-}
-
 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 {
@@ -2844,35 +2809,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 }
 
-static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
-{
-       /* this equals initial cpu reset in pop, but we don't switch to ESA */
-       vcpu->arch.sie_block->gpsw.mask = 0UL;
-       vcpu->arch.sie_block->gpsw.addr = 0UL;
-       kvm_s390_set_prefix(vcpu, 0);
-       kvm_s390_set_cpu_timer(vcpu, 0);
-       vcpu->arch.sie_block->ckc       = 0UL;
-       vcpu->arch.sie_block->todpr     = 0;
-       memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
-       vcpu->arch.sie_block->gcr[0]  = CR0_UNUSED_56 |
-                                       CR0_INTERRUPT_KEY_SUBMASK |
-                                       CR0_MEASUREMENT_ALERT_SUBMASK;
-       vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
-                                       CR14_UNUSED_33 |
-                                       CR14_EXTERNAL_DAMAGE_SUBMASK;
-       /* make sure the new fpc will be lazily loaded */
-       save_fpu_regs();
-       current->thread.fpu.fpc = 0;
-       vcpu->arch.sie_block->gbea = 1;
-       vcpu->arch.sie_block->pp = 0;
-       vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
-       vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-       kvm_clear_async_pf_completion_queue(vcpu);
-       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
-               kvm_s390_vcpu_stop(vcpu);
-       kvm_s390_clear_local_irqs(vcpu);
-}
-
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
        mutex_lock(&vcpu->kvm->lock);
@@ -2962,7 +2898,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
                vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
 }
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
 {
        int rc = 0;
 
@@ -3035,26 +2971,22 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        return rc;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-                                     unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
-       struct kvm_vcpu *vcpu;
-       struct sie_page *sie_page;
-       int rc = -EINVAL;
-
        if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
-               goto out;
-
-       rc = -ENOMEM;
+               return -EINVAL;
+       return 0;
+}
 
-       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-       if (!vcpu)
-               goto out;
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+       struct sie_page *sie_page;
+       int rc;
 
        BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
-               goto out_free_cpu;
+               return -ENOMEM;
 
        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
@@ -3063,27 +2995,59 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->arch.sie_block->mso = 0;
        vcpu->arch.sie_block->msl = sclp.hamax;
 
-       vcpu->arch.sie_block->icpua = id;
+       vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
        spin_lock_init(&vcpu->arch.local_int.lock);
-       vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
+       vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
        if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
                vcpu->arch.sie_block->gd |= GISA_FORMAT1;
        seqcount_init(&vcpu->arch.cputm_seqcount);
 
-       rc = kvm_vcpu_init(vcpu, kvm, id);
+       vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+       kvm_clear_async_pf_completion_queue(vcpu);
+       vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
+                                   KVM_SYNC_GPRS |
+                                   KVM_SYNC_ACRS |
+                                   KVM_SYNC_CRS |
+                                   KVM_SYNC_ARCH0 |
+                                   KVM_SYNC_PFAULT;
+       kvm_s390_set_prefix(vcpu, 0);
+       if (test_kvm_facility(vcpu->kvm, 64))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+       if (test_kvm_facility(vcpu->kvm, 82))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
+       if (test_kvm_facility(vcpu->kvm, 133))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
+       if (test_kvm_facility(vcpu->kvm, 156))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
+       /* fprs can be synchronized via vrs, even if the guest has no vx. With
+        * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+        */
+       if (MACHINE_HAS_VX)
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+       else
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
+
+       if (kvm_is_ucontrol(vcpu->kvm)) {
+               rc = __kvm_ucontrol_vcpu_init(vcpu);
+               if (rc)
+                       goto out_free_sie_block;
+       }
+
+       VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+                vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+       trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+
+       rc = kvm_s390_vcpu_setup(vcpu);
        if (rc)
-               goto out_free_sie_block;
-       VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
-                vcpu->arch.sie_block);
-       trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
+               goto out_ucontrol_uninit;
+       return 0;
 
-       return vcpu;
+out_ucontrol_uninit:
+       if (kvm_is_ucontrol(vcpu->kvm))
+               gmap_remove(vcpu->arch.gmap);
 out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
-out_free_cpu:
-       kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-       return ERR_PTR(rc);
+       return rc;
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
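
(Annotation, not part of the patch: with vcpu allocation moved into common code, nothing changes for userspace. The sketch below, assuming an open vm_fd and a chosen vcpu_id, only illustrates that an id rejected by the new kvm_arch_vcpu_precreate() hook fails KVM_CREATE_VCPU before any vcpu state is allocated.)

#include <err.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: create a vcpu and surface precreate failures. */
static int create_vcpu(int vm_fd, unsigned long vcpu_id)
{
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, vcpu_id);

	if (vcpu_fd < 0)
		/* EINVAL here can come from sca_can_add_vcpu() saying no */
		err(1, "KVM_CREATE_VCPU %lu", vcpu_id);
	return vcpu_fd;
}
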
@@ -3287,10 +3251,53 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
        return r;
 }
 
-static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
 {
-       kvm_s390_vcpu_initial_reset(vcpu);
-       return 0;
+       vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
+       vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+       memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
+
+       kvm_clear_async_pf_completion_queue(vcpu);
+       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+               kvm_s390_vcpu_stop(vcpu);
+       kvm_s390_clear_local_irqs(vcpu);
+}
+
+static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+{
+       /* Initial reset is a superset of the normal reset */
+       kvm_arch_vcpu_ioctl_normal_reset(vcpu);
+
+       /* this equals initial cpu reset in pop, but we don't switch to ESA */
+       vcpu->arch.sie_block->gpsw.mask = 0;
+       vcpu->arch.sie_block->gpsw.addr = 0;
+       kvm_s390_set_prefix(vcpu, 0);
+       kvm_s390_set_cpu_timer(vcpu, 0);
+       vcpu->arch.sie_block->ckc = 0;
+       vcpu->arch.sie_block->todpr = 0;
+       memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
+       vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
+       vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+       vcpu->run->s.regs.fpc = 0;
+       vcpu->arch.sie_block->gbea = 1;
+       vcpu->arch.sie_block->pp = 0;
+       vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+}
+
+static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
+{
+       struct kvm_sync_regs *regs = &vcpu->run->s.regs;
+
+       /* Clear reset is a superset of the initial reset */
+       kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+
+       memset(&regs->gprs, 0, sizeof(regs->gprs));
+       memset(&regs->vrs, 0, sizeof(regs->vrs));
+       memset(&regs->acrs, 0, sizeof(regs->acrs));
+       memset(&regs->gscb, 0, sizeof(regs->gscb));
+
+       regs->etoken = 0;
+       regs->etoken_extension = 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -4351,7 +4358,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        switch (ioctl) {
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
-               r = kvm_s390_vcpu_store_status(vcpu, arg);
+               r = kvm_s390_store_status_unloaded(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
@@ -4363,8 +4370,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
+       case KVM_S390_CLEAR_RESET:
+               r = 0;
+               kvm_arch_vcpu_ioctl_clear_reset(vcpu);
+               break;
        case KVM_S390_INITIAL_RESET:
-               r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+               r = 0;
+               kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+               break;
+       case KVM_S390_NORMAL_RESET:
+               r = 0;
+               kvm_arch_vcpu_ioctl_normal_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
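
(Annotation, not part of the patch: an assumption-laden userspace sketch of driving the reset levels added above. vcpu_fd is assumed to be an open vCPU file descriptor and the headers new enough to define the ioctls; all three resets take no payload and return 0 on success. They nest: clear is a superset of initial, which is a superset of normal, matching the handler structure earlier in this change.)

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Hypothetical helper: pick the strongest reset available.
 * KVM_S390_NORMAL_RESET and KVM_S390_CLEAR_RESET require
 * KVM_CAP_S390_VCPU_RESETS; KVM_S390_INITIAL_RESET predates it.
 */
static int reset_vcpu(int vcpu_fd, int have_vcpu_resets)
{
	if (!have_vcpu_resets)
		return ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);

	/* Clear reset additionally zeroes GPRs, VRs, ACRs, GSCB and etoken. */
	return ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);
}
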