Merge tag 'kvm-ppc-next-4.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3764d000872ed67a5cce683293936bf433f67141..0e8c20c5eaace243850e255acf5be966c85f0a5c 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -648,9 +648,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        case KVM_CAP_PPC_HTM:
-               r = hv_enabled &&
-                   (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
-                    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
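+               /* HTM no longer requires HV: userspace HTM support is
+                * enough (PR KVM included); hv_enabled is only needed
+                * for the POWER9 TM HV-assist workaround. */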
+               r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
+                    (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
                break;
 #endif
        default:
@@ -907,6 +906,26 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
        }
 }
 
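+/*
+ * Splat a 32-bit value across every word element of the target VSR
+ * (both doublewords in the FPR-pair case); this backs the
+ * KVMPPC_VSX_COPY_WORD_LOAD_DUMP case handled in
+ * kvmppc_complete_mmio_load() below.
+ */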
+static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
+       u32 gpr)
+{
+       union kvmppc_one_reg val;
+       int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+       if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+               val.vsx32val[0] = gpr;
+               val.vsx32val[1] = gpr;
+               val.vsx32val[2] = gpr;
+               val.vsx32val[3] = gpr;
+               VCPU_VSX_VR(vcpu, index) = val.vval;
+       } else {
+               val.vsx32val[0] = gpr;
+               val.vsx32val[1] = gpr;
+               VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
+               VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+       }
+}
+
 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
        u32 gpr32)
 {
@@ -933,30 +952,110 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_ALTIVEC
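+/*
+ * Map an element index into a VMX register to the array offset used on
+ * the host side.  When guest and host disagree on endianness
+ * (kvmppc_need_byteswap()), the index is mirrored: e.g. with 4-byte
+ * elements (elts = 4), index 0 maps to offset 3.  Returns -1 for an
+ * out-of-range index.
+ */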
+static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
+               int index, int element_size)
+{
+       int offset;
+       int elts = sizeof(vector128)/element_size;
+
+       if ((index < 0) || (index >= elts))
+               return -1;
+
+       if (kvmppc_need_byteswap(vcpu))
+               offset = elts - index - 1;
+       else
+               offset = index;
+
+       return offset;
+}
+
+static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
+               int index)
+{
+       return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
+}
+
+static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
+               int index)
+{
+       return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
+}
+
+static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
+               int index)
+{
+       return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
+}
+
+static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
+               int index)
+{
+       return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
+}
+
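+/*
+ * The kvmppc_set_vmx_* helpers below read the whole VR, patch one
+ * element at the endian-corrected offset, and write the VR back, so
+ * the remaining elements are preserved.
+ */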
 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
-               u64 gpr)
+       u64 gpr)
 {
+       union kvmppc_one_reg val;
+       int offset = kvmppc_get_vmx_dword_offset(vcpu,
+                       vcpu->arch.mmio_vmx_offset);
        int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
-       u32 hi, lo;
-       u32 di;
 
-#ifdef __BIG_ENDIAN
-       hi = gpr >> 32;
-       lo = gpr & 0xffffffff;
-#else
-       lo = gpr >> 32;
-       hi = gpr & 0xffffffff;
-#endif
+       if (offset == -1)
+               return;
+
+       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vsxval[offset] = gpr;
+       VCPU_VSX_VR(vcpu, index) = val.vval;
+}
+
+static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
+       u32 gpr32)
+{
+       union kvmppc_one_reg val;
+       int offset = kvmppc_get_vmx_word_offset(vcpu,
+                       vcpu->arch.mmio_vmx_offset);
+       int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
-       di = 2 - vcpu->arch.mmio_vmx_copy_nums;         /* doubleword index */
-       if (di > 1)
+       if (offset == -1)
                return;
 
-       if (vcpu->arch.mmio_host_swabbed)
-               di = 1 - di;
+       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vsx32val[offset] = gpr32;
+       VCPU_VSX_VR(vcpu, index) = val.vval;
+}
+
+static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
+       u16 gpr16)
+{
+       union kvmppc_one_reg val;
+       int offset = kvmppc_get_vmx_hword_offset(vcpu,
+                       vcpu->arch.mmio_vmx_offset);
+       int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+       if (offset == -1)
+               return;
+
+       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vsx16val[offset] = gpr16;
+       VCPU_VSX_VR(vcpu, index) = val.vval;
+}
+
+static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
+       u8 gpr8)
+{
+       union kvmppc_one_reg val;
+       int offset = kvmppc_get_vmx_byte_offset(vcpu,
+                       vcpu->arch.mmio_vmx_offset);
+       int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+       if (offset == -1)
+               return;
 
-       VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
-       VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+       val.vval = VCPU_VSX_VR(vcpu, index);
+       val.vsx8val[offset] = gpr8;
+       VCPU_VSX_VR(vcpu, index) = val.vval;
 }
 #endif /* CONFIG_ALTIVEC */
 
@@ -1041,6 +1140,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
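+               /* flush any live guest FP state back to the vcpu struct
+                * first, so the update below is not overwritten (same
+                * pattern for VSX and VEC below) */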
+               if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+                       vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
+
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
 #ifdef CONFIG_PPC_BOOK3S
@@ -1054,18 +1156,36 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 #endif
 #ifdef CONFIG_VSX
        case KVM_MMIO_REG_VSX:
-               if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+               if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+                       vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
+
+               if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
                        kvmppc_set_vsr_dword(vcpu, gpr);
-               else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+               else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
                        kvmppc_set_vsr_word(vcpu, gpr);
-               else if (vcpu->arch.mmio_vsx_copy_type ==
+               else if (vcpu->arch.mmio_copy_type ==
                                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
                        kvmppc_set_vsr_dword_dump(vcpu, gpr);
+               else if (vcpu->arch.mmio_copy_type ==
+                               KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
+                       kvmppc_set_vsr_word_dump(vcpu, gpr);
                break;
 #endif
 #ifdef CONFIG_ALTIVEC
        case KVM_MMIO_REG_VMX:
-               kvmppc_set_vmx_dword(vcpu, gpr);
+               if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+                       vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
+
+               if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
+                       kvmppc_set_vmx_dword(vcpu, gpr);
+               else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
+                       kvmppc_set_vmx_word(vcpu, gpr);
+               else if (vcpu->arch.mmio_copy_type ==
+                               KVMPPC_VMX_COPY_HWORD)
+                       kvmppc_set_vmx_hword(vcpu, gpr);
+               else if (vcpu->arch.mmio_copy_type ==
+                               KVMPPC_VMX_COPY_BYTE)
+                       kvmppc_set_vmx_byte(vcpu, gpr);
                break;
 #endif
        default:
@@ -1228,7 +1348,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
        u32 dword_offset, word_offset;
        union kvmppc_one_reg reg;
        int vsx_offset = 0;
-       int copy_type = vcpu->arch.mmio_vsx_copy_type;
+       int copy_type = vcpu->arch.mmio_copy_type;
        int result = 0;
 
        switch (copy_type) {
@@ -1344,14 +1464,16 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_ALTIVEC
-/* handle quadword load access in two halves */
-int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
-               unsigned int rt, int is_default_endian)
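+/*
+ * Handle a VMX load as a series of MMIO transactions, 'bytes' wide
+ * each, stepping mmio_vmx_offset through the vector elements.
+ */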
+int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+               unsigned int rt, unsigned int bytes, int is_default_endian)
 {
        enum emulation_result emulated = EMULATE_DONE;
 
+       if (vcpu->arch.mmio_vmx_copy_nums > 2)
+               return EMULATE_FAIL;
+
        while (vcpu->arch.mmio_vmx_copy_nums) {
-               emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+               emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
                                is_default_endian, 0);
 
                if (emulated != EMULATE_DONE)
@@ -1359,55 +1481,127 @@ int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                vcpu->arch.paddr_accessed += run->mmio.len;
                vcpu->arch.mmio_vmx_copy_nums--;
+               vcpu->arch.mmio_vmx_offset++;
        }
 
        return emulated;
 }
 
-static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
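+/*
+ * The kvmppc_get_vmx_* helpers extract one element of the source VR
+ * for an MMIO store, returning -1 if the element offset is invalid.
+ */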
+int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
 {
-       vector128 vrs = VCPU_VSX_VR(vcpu, rs);
-       u32 di;
-       u64 w0, w1;
+       union kvmppc_one_reg reg;
+       int vmx_offset = 0;
+       int result = 0;
 
-       di = 2 - vcpu->arch.mmio_vmx_copy_nums;         /* doubleword index */
-       if (di > 1)
+       vmx_offset =
+               kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+       if (vmx_offset == -1)
                return -1;
 
-       if (vcpu->arch.mmio_host_swabbed)
-               di = 1 - di;
+       reg.vval = VCPU_VSX_VR(vcpu, index);
+       *val = reg.vsxval[vmx_offset];
 
-       w0 = vrs.u[di * 2];
-       w1 = vrs.u[di * 2 + 1];
+       return result;
+}
 
-#ifdef __BIG_ENDIAN
-       *val = (w0 << 32) | w1;
-#else
-       *val = (w1 << 32) | w0;
-#endif
-       return 0;
+int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+{
+       union kvmppc_one_reg reg;
+       int vmx_offset = 0;
+       int result = 0;
+
+       vmx_offset =
+               kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+       if (vmx_offset == -1)
+               return -1;
+
+       reg.vval = VCPU_VSX_VR(vcpu, index);
+       *val = reg.vsx32val[vmx_offset];
+
+       return result;
+}
+
+int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+{
+       union kvmppc_one_reg reg;
+       int vmx_offset = 0;
+       int result = 0;
+
+       vmx_offset =
+               kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+       if (vmx_offset == -1)
+               return -1;
+
+       reg.vval = VCPU_VSX_VR(vcpu, index);
+       *val = reg.vsx16val[vmx_offset];
+
+       return result;
 }
 
-/* handle quadword store in two halves */
-int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
-               unsigned int rs, int is_default_endian)
+int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+{
+       union kvmppc_one_reg reg;
+       int vmx_offset = 0;
+       int result = 0;
+
+       vmx_offset =
+               kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
+
+       if (vmx_offset == -1)
+               return -1;
+
+       reg.vval = VCPU_VSX_VR(vcpu, index);
+       *val = reg.vsx8val[vmx_offset];
+
+       return result;
+}
+
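+/*
+ * Handle a VMX store element by element, 'bytes' per MMIO transaction,
+ * using mmio_copy_type to pick the element size.
+ */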
+int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+               unsigned int rs, unsigned int bytes, int is_default_endian)
 {
        u64 val = 0;
+       unsigned int index = rs & KVM_MMIO_REG_MASK;
        enum emulation_result emulated = EMULATE_DONE;
 
+       if (vcpu->arch.mmio_vmx_copy_nums > 2)
+               return EMULATE_FAIL;
+
        vcpu->arch.io_gpr = rs;
 
        while (vcpu->arch.mmio_vmx_copy_nums) {
-               if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+               switch (vcpu->arch.mmio_copy_type) {
+               case KVMPPC_VMX_COPY_DWORD:
+                       if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
+                               return EMULATE_FAIL;
+
+                       break;
+               case KVMPPC_VMX_COPY_WORD:
+                       if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
+                               return EMULATE_FAIL;
+                       break;
+               case KVMPPC_VMX_COPY_HWORD:
+                       if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
+                               return EMULATE_FAIL;
+                       break;
+               case KVMPPC_VMX_COPY_BYTE:
+                       if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
+                               return EMULATE_FAIL;
+                       break;
+               default:
                        return EMULATE_FAIL;
+               }
 
-               emulated = kvmppc_handle_store(run, vcpu, val, 8,
+               emulated = kvmppc_handle_store(run, vcpu, val, bytes,
                                is_default_endian);
                if (emulated != EMULATE_DONE)
                        break;
 
                vcpu->arch.paddr_accessed += run->mmio.len;
                vcpu->arch.mmio_vmx_copy_nums--;
+               vcpu->arch.mmio_vmx_offset++;
        }
 
        return emulated;
@@ -1422,11 +1616,11 @@ static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
        vcpu->arch.paddr_accessed += run->mmio.len;
 
        if (!vcpu->mmio_is_write) {
-               emulated = kvmppc_handle_load128_by2x64(run, vcpu,
-                               vcpu->arch.io_gpr, 1);
+               emulated = kvmppc_handle_vmx_load(run, vcpu,
+                               vcpu->arch.io_gpr, run->mmio.len, 1);
        } else {
-               emulated = kvmppc_handle_store128_by2x64(run, vcpu,
-                               vcpu->arch.io_gpr, 1);
+               emulated = kvmppc_handle_vmx_store(run, vcpu,
+                               vcpu->arch.io_gpr, run->mmio.len, 1);
        }
 
        switch (emulated) {
@@ -1570,8 +1764,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
 #endif
 #ifdef CONFIG_ALTIVEC
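+               /* userspace has completed one element of the VMX access */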
-               if (vcpu->arch.mmio_vmx_copy_nums > 0)
+               if (vcpu->arch.mmio_vmx_copy_nums > 0) {
                        vcpu->arch.mmio_vmx_copy_nums--;
+                       vcpu->arch.mmio_vmx_offset++;
+               }
 
                if (vcpu->arch.mmio_vmx_copy_nums > 0) {
                        r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
@@ -1784,16 +1980,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        void __user *argp = (void __user *)arg;
        long r;
 
-       vcpu_load(vcpu);
-
        switch (ioctl) {
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+               vcpu_put(vcpu);
                break;
        }
 
@@ -1815,9 +2011,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
+               vcpu_put(vcpu);
                break;
        }
 #endif
@@ -1826,7 +2024,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        }
 
 out:
-       vcpu_put(vcpu);
        return r;
 }