KVM: arm64: Share the parts of get/set events useful to 32bit
authorJames Morse <james.morse@arm.com>
Thu, 19 Jul 2018 15:24:24 +0000 (16:24 +0100)
committerMarc Zyngier <marc.zyngier@arm.com>
Sat, 21 Jul 2018 15:02:31 +0000 (16:02 +0100)
The get/set events helpers do some work to check that the reserved
and padding fields are zero. This is useful on 32bit too.

Move this code into virt/kvm/arm/arm.c, and give the arch
code some underscores.

This is temporarily hidden behind __KVM_HAVE_VCPU_EVENTS until
32bit is wired up.

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Dongjiu Geng <gengdongjiu@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/guest.c
virt/kvm/arm/arm.c

index bc244cc6e45142035897bae2dd6d2438473a04bd..f26055f2306e1f9a479417507c2edf428c9f99de 100644 (file)
@@ -350,11 +350,11 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-                           struct kvm_vcpu_events *events);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                             struct kvm_vcpu_events *events);
 
-int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-                           struct kvm_vcpu_events *events);
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                             struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
index dd05be96d981d27e62e23759f7a5831627cc9230..725c7545e91a84cc0df674c00a42e77ec648ced1 100644 (file)
@@ -289,11 +289,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        return -EINVAL;
 }
 
-int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
-                       struct kvm_vcpu_events *events)
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                             struct kvm_vcpu_events *events)
 {
-       memset(events, 0, sizeof(*events));
-
        events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
        events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
 
@@ -303,23 +301,12 @@ int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
-                       struct kvm_vcpu_events *events)
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                             struct kvm_vcpu_events *events)
 {
-       int i;
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;
 
-       /* check whether the reserved field is zero */
-       for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
-               if (events->reserved[i])
-                       return -EINVAL;
-
-       /* check whether the pad field is zero */
-       for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
-               if (events->exception.pad[i])
-                       return -EINVAL;
-
        if (serror_pending && has_esr) {
                if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                        return -EINVAL;
index 1c72247aeb1d74602815b1539724f6894621a64c..14f8fad1c7aeccb1dbf1dcdd1d071ea75f95099a 100644 (file)
@@ -1050,6 +1050,34 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
        return ret;
 }
 
+#ifdef __KVM_HAVE_VCPU_EVENTS  /* temporary: until 32bit is wired up */
+static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                                  struct kvm_vcpu_events *events)
+{
+       memset(events, 0, sizeof(*events));
+
+       return __kvm_arm_vcpu_get_events(vcpu, events);
+}
+
+static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                                  struct kvm_vcpu_events *events)
+{
+       int i;
+
+       /* check whether the reserved field is zero */
+       for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+               if (events->reserved[i])
+                       return -EINVAL;
+
+       /* check whether the pad field is zero */
+       for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+               if (events->exception.pad[i])
+                       return -EINVAL;
+
+       return __kvm_arm_vcpu_set_events(vcpu, events);
+}
+#endif /* __KVM_HAVE_VCPU_EVENTS */
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {