Merge tag 'kvm-s390-next-4.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git...

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3f2884e99ed4ce461cdb6f08148968880e90747b..40d0a1a97889b04f5e0bf1beb9d509721e1d5ff8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -130,6 +130,12 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { NULL }
 };
 
+struct kvm_s390_tod_clock_ext {
+       __u8 epoch_idx;
+       __u64 tod;
+       __u8 reserved[7];
+} __packed;
+
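[Note: the new struct mirrors the 16-byte result of STORE CLOCK EXTENDED (STCKE): one epoch-index byte, the 8-byte TOD value, then the remaining reserved/programmable bytes. A minimal compile-time sketch of that layout, using standard types in place of __u8/__u64 — illustrative only, not part of the patch:

	#include <stddef.h>

	/* same shape as kvm_s390_tod_clock_ext above */
	struct tod_clock_ext_sketch {
		unsigned char epoch_idx;
		unsigned long long tod;
		unsigned char reserved[7];
	} __attribute__((packed));

	_Static_assert(sizeof(struct tod_clock_ext_sketch) == 16,
		       "STCKE stores a 16-byte value");
	_Static_assert(offsetof(struct tod_clock_ext_sketch, tod) == 1,
		       "the 64-bit TOD starts at byte 1");
]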
 /* allow nested virtualization in KVM (if enabled by user space) */
 static int nested;
 module_param(nested, int, S_IRUGO);
@@ -874,6 +880,26 @@ static int kvm_s390_vm_get_migration(struct kvm *kvm,
        return 0;
 }
 
+static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_s390_vm_tod_clock gtod;
+
+       if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+               return -EFAULT;
+
+       if (test_kvm_facility(kvm, 139))
+               kvm_s390_set_tod_clock_ext(kvm, &gtod);
+       else if (gtod.epoch_idx == 0)
+               kvm_s390_set_tod_clock(kvm, gtod.tod);
+       else
+               return -EINVAL;
+
+       VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
+               gtod.epoch_idx, gtod.tod);
+
+       return 0;
+}
+
 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 {
        u8 gtod_high;
@@ -909,6 +935,9 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
                return -EINVAL;
 
        switch (attr->attr) {
+       case KVM_S390_VM_TOD_EXT:
+               ret = kvm_s390_set_tod_ext(kvm, attr);
+               break;
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
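[Note: the new KVM_S390_VM_TOD_EXT case is reached from userspace through the VM device-attribute ioctl. A minimal sketch, assuming a valid vm_fd and the UAPI definitions from <linux/kvm.h>; the helper name is hypothetical:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* program the full 136-bit guest TOD (epoch index + TOD base) */
	static int set_guest_tod_ext(int vm_fd, __u8 epoch_idx, __u64 tod)
	{
		struct kvm_s390_vm_tod_clock gtod = {
			.epoch_idx = epoch_idx,
			.tod = tod,
		};
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_TOD,
			.attr = KVM_S390_VM_TOD_EXT,
			.addr = (__u64)(unsigned long)&gtod,
		};

		/* rejected with EINVAL if epoch_idx != 0 and facility 139
		 * is not available, per kvm_s390_set_tod_ext() above */
		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
]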
@@ -922,6 +951,43 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
        return ret;
 }
 
+static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
+                                       struct kvm_s390_vm_tod_clock *gtod)
+{
+       struct kvm_s390_tod_clock_ext htod;
+
+       preempt_disable();
+
+       get_tod_clock_ext((char *)&htod);
+
+       gtod->tod = htod.tod + kvm->arch.epoch;
+       gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
+
+       if (gtod->tod < htod.tod)
+               gtod->epoch_idx += 1;
+
+       preempt_enable();
+}
+
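[Note: the carry handling above is a two-limb addition: the 64-bit sum may wrap, and the wrap is detected by comparing the sum against one addend. The same arithmetic as a standalone sketch:

	#include <stdint.h>

	/* guest TOD = host TOD + epoch, carrying into the epoch index when
	 * the 64-bit addition wraps (the gtod->tod < htod.tod test above) */
	static void guest_tod_ext(uint8_t host_idx, uint64_t host_tod,
				  uint8_t epdx, uint64_t epoch,
				  uint8_t *guest_idx, uint64_t *guest_tod)
	{
		*guest_tod = host_tod + epoch;	/* wraps modulo 2^64 */
		*guest_idx = host_idx + epdx;
		if (*guest_tod < host_tod)	/* unsigned wrap => carry */
			*guest_idx += 1;
	}
]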
+static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       struct kvm_s390_vm_tod_clock gtod;
+
+       memset(&gtod, 0, sizeof(gtod));
+
+       if (test_kvm_facility(kvm, 139))
+               kvm_s390_get_tod_clock_ext(kvm, &gtod);
+       else
+               gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
+
+       if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
+               return -EFAULT;
+
+       VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
+               gtod.epoch_idx, gtod.tod);
+       return 0;
+}
+
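[Note: the query side is symmetric; a sketch of the userspace counterpart, again assuming a valid vm_fd and a hypothetical helper name:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* read back the 136-bit guest TOD */
	static int get_guest_tod_ext(int vm_fd, struct kvm_s390_vm_tod_clock *gtod)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_TOD,
			.attr = KVM_S390_VM_TOD_EXT,
			.addr = (__u64)(unsigned long)gtod,
		};

		/* epoch_idx stays 0 when facility 139 is not offered,
		 * per kvm_s390_get_tod_ext() above */
		return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
	}
]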
 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 {
        u8 gtod_high = 0;
@@ -954,6 +1020,9 @@ static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
                return -EINVAL;
 
        switch (attr->attr) {
+       case KVM_S390_VM_TOD_EXT:
+               ret = kvm_s390_get_tod_ext(kvm, attr);
+               break;
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
@@ -1324,7 +1393,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 {
        uint8_t *keys;
        uint64_t hva;
-       int i, r = 0;
+       int srcu_idx, i, r = 0;
 
        if (args->flags != 0)
                return -EINVAL;
@@ -1342,6 +1411,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
                return -ENOMEM;
 
        down_read(&current->mm->mmap_sem);
+       srcu_idx = srcu_read_lock(&kvm->srcu);
        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
@@ -1353,6 +1423,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
                if (r)
                        break;
        }
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
        up_read(&current->mm->mmap_sem);
 
        if (!r) {
@@ -1370,7 +1441,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 {
        uint8_t *keys;
        uint64_t hva;
-       int i, r = 0;
+       int srcu_idx, i, r = 0;
 
        if (args->flags != 0)
                return -EINVAL;
@@ -1396,6 +1467,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
                goto out;
 
        down_read(&current->mm->mmap_sem);
+       srcu_idx = srcu_read_lock(&kvm->srcu);
        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
@@ -1413,6 +1485,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
                if (r)
                        break;
        }
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
        up_read(&current->mm->mmap_sem);
 out:
        kvfree(keys);
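[Note: both skeys loops gain the same fix: gfn_to_hva() walks the memslot array, which is published under kvm->srcu, so the lookup must run inside an SRCU read-side critical section. The rule reduced to a kernel-side sketch (illustrative helper, not part of the patch):

	/* any gfn_to_hva() caller must hold the kvm->srcu read lock
	 * across the lookup and the use of the resulting hva */
	static int read_one_skey(struct kvm *kvm, gfn_t gfn, unsigned char *key)
	{
		unsigned long hva;
		int srcu_idx, r;

		srcu_idx = srcu_read_lock(&kvm->srcu);
		hva = gfn_to_hva(kvm, gfn);
		r = kvm_is_error_hva(hva) ? -EFAULT
			: get_guest_storage_key(kvm->mm, hva, key);
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		return r;
	}
]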
@@ -1501,7 +1574,7 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
                if (r < 0)
                        pgstev = 0;
                /* save the value */
-               res[i++] = (pgstev >> 24) & 0x3;
+               res[i++] = (pgstev >> 24) & 0x43;
                /*
                 * if the next bit is too far away, stop.
                 * if we reached the previous "next", find the next one
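[Note: the widened 0x43 mask is easier to read with the bit layout spelled out. After the >> 24 shift the two CMMA usage bits land in bits 0-1 and the new no-DAT bit in bit 6; values assumed from the _PGSTE_GPS_* definitions, names illustrative:

	/* per-page value reported by KVM_S390_GET_CMMA_BITS:
	 * bits 0-1: CMMA usage state (_PGSTE_GPS_USAGE_MASK >> 24)
	 * bit  6:   no-DAT           (_PGSTE_GPS_NODAT >> 24)
	 */
	#define CMMA_VALUE_USAGE	0x03
	#define CMMA_VALUE_NODAT	0x40
	#define CMMA_VALUE_MASK	(CMMA_VALUE_USAGE | CMMA_VALUE_NODAT) /* 0x43 */
]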
@@ -1579,7 +1652,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 
                pgstev = bits[i];
                pgstev = pgstev << 24;
-               mask &= _PGSTE_GPS_USAGE_MASK;
+               mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
                set_pgste_bits(kvm->mm, hva, mask, pgstev);
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -1854,8 +1927,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
 
+       /* we are always in czam mode - even on pre z14 machines */
+       set_kvm_facility(kvm->arch.model.fac_mask, 138);
+       set_kvm_facility(kvm->arch.model.fac_list, 138);
+       /* we emulate STHYI in kvm */
        set_kvm_facility(kvm->arch.model.fac_mask, 74);
        set_kvm_facility(kvm->arch.model.fac_list, 74);
+       if (MACHINE_HAS_TLB_GUEST) {
+               set_kvm_facility(kvm->arch.model.fac_mask, 147);
+               set_kvm_facility(kvm->arch.model.fac_list, 147);
+       }
 
        kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
        kvm->arch.model.ibc = sclp.ibc & 0x0fff;
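[Note: the facility numbers used here (74, 138, 147) index into the facility list MSB-first within each byte, which is the addressing that set_kvm_facility()/test_kvm_facility() encode. A minimal standalone sketch of that addressing:

	#include <stdint.h>

	/* facility bit nr lives in byte nr/8, at bit position 7 - nr%8
	 * counted from the LSB, i.e. MSB-first within the byte */
	static int fac_test(const uint8_t *fac_list, unsigned int nr)
	{
		return (fac_list[nr >> 3] >> (7 - (nr & 7))) & 1;
	}

	static void fac_set(uint8_t *fac_list, unsigned int nr)
	{
		fac_list[nr >> 3] |= 0x80 >> (nr & 7);
	}
]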
@@ -2365,6 +2446,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
                vcpu->arch.sie_block->eca |= ECA_VX;
                vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
        }
+       if (test_kvm_facility(vcpu->kvm, 139))
+               vcpu->arch.sie_block->ecd |= ECD_MEF;
+
        vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
                                        | SDNXC;
        vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
@@ -2443,6 +2527,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
+}
+
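[Note: kvm_arch_vcpu_in_kernel() keys off the PSW problem-state bit: clear means supervisor state, set means problem (user) state. A sketch with the bit value written out — PSW bit 15, i.e. 1ULL << 48, assumed from the PSW_MASK_PSTATE definition:

	#include <stdbool.h>
	#include <stdint.h>

	#define PSW_PSTATE	(1ULL << 48)	/* PSW bit 15: problem state */

	static bool psw_in_kernel(uint64_t psw_mask)
	{
		/* problem-state bit clear => supervisor ("kernel") state */
		return !(psw_mask & PSW_PSTATE);
	}
]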
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
        atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
@@ -2851,6 +2940,35 @@ retry:
        return 0;
 }
 
+void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
+                                const struct kvm_s390_vm_tod_clock *gtod)
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_s390_tod_clock_ext htod;
+       int i;
+
+       mutex_lock(&kvm->lock);
+       preempt_disable();
+
+       get_tod_clock_ext((char *)&htod);
+
+       kvm->arch.epoch = gtod->tod - htod.tod;
+       kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+
+       if (kvm->arch.epoch > gtod->tod)
+               kvm->arch.epdx -= 1;
+
+       kvm_s390_vcpu_block_all(kvm);
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+               vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
+       }
+
+       kvm_s390_vcpu_unblock_all(kvm);
+       preempt_enable();
+       mutex_unlock(&kvm->lock);
+}
+
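[Note: setting the clock is the inverse computation: the epoch is the 64-bit difference between guest and host TOD, and a wrap in that subtraction borrows from the epoch index — the mirror image of the carry in kvm_s390_get_tod_clock_ext() above. As a standalone sketch:

	#include <stdint.h>

	/* epoch = guest TOD - host TOD, borrowing from the epoch index when
	 * the 64-bit subtraction wraps (the kvm->arch.epoch > gtod->tod test) */
	static void compute_epoch(uint8_t guest_idx, uint64_t guest_tod,
				  uint8_t host_idx, uint64_t host_tod,
				  uint8_t *epdx, uint64_t *epoch)
	{
		*epoch = guest_tod - host_tod;	/* wraps modulo 2^64 */
		*epdx = guest_idx - host_idx;
		if (*epoch > guest_tod)		/* unsigned wrap => borrow */
			*epdx -= 1;
	}
]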
 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
 {
        struct kvm_vcpu *vcpu;