Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 22 Dec 2019 18:26:59 +0000 (10:26 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 22 Dec 2019 18:26:59 +0000 (10:26 -0800)
Pull KVM fixes from Paolo Bonzini:
 "PPC:
   - Fix a bug where we try to do an ultracall on a system without an
     ultravisor

  KVM:
   - Fix uninitialised sysreg accessor
   - Fix handling of demand-paged device mappings
   - Stop spamming the console on IMPDEF sysregs
   - Relax mappings of writable memslots
   - Assorted cleanups

  MIPS:
   - Now orphan, James Hogan is stepping down

  x86:
   - MAINTAINERS change, so long Radim and thanks for all the fish
   - supported CPUID fixes for AMD machines without SPEC_CTRL"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  MAINTAINERS: remove Radim from KVM maintainers
  MAINTAINERS: Orphan KVM for MIPS
  kvm: x86: Host feature SSBD doesn't imply guest feature AMD_SSBD
  kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
  KVM: PPC: Book3S HV: Don't do ultravisor calls on systems without ultravisor
  KVM: arm/arm64: Properly handle faulting of device mappings
  KVM: arm64: Ensure 'params' is initialised when looking up sys register
  KVM: arm/arm64: Remove excessive permission check in kvm_arch_prepare_memory_region
  KVM: arm64: Don't log IMP DEF sysreg traps
  KVM: arm64: Sanely ratelimit sysreg messages
  KVM: arm/arm64: vgic: Use wrapper function to lock/unlock all vcpus in kvm_vgic_create()
  KVM: arm/arm64: vgic: Fix potential double free dist->spis in __kvm_vgic_destroy()
  KVM: arm/arm64: Get rid of unused arg in cpu_init_hyp_mode()

MAINTAINERS
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/sys_regs.h
arch/powerpc/kvm/book3s_hv.c
arch/x86/kvm/cpuid.c
virt/kvm/arm/arm.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-init.c

index 1b998c8d2e0c1b63f9616038d4fb2623b0b46e71..ffa3371bc750c673bbb6546ce13a5afe0e3301b7 100644 (file)
@@ -9041,7 +9041,6 @@ F:        include/linux/umh.h
 
 KERNEL VIRTUAL MACHINE (KVM)
 M:     Paolo Bonzini <pbonzini@redhat.com>
-M:     Radim Krčmář <rkrcmar@redhat.com>
 L:     kvm@vger.kernel.org
 W:     http://www.linux-kvm.org
 T:     git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -9076,9 +9075,9 @@ F:        virt/kvm/arm/
 F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@vger.kernel.org
-S:     Supported
+L:     kvm@vger.kernel.org
+S:     Orphan
 F:     arch/mips/include/uapi/asm/kvm*
 F:     arch/mips/include/asm/kvm*
 F:     arch/mips/kvm/
@@ -9113,7 +9112,6 @@ F:        tools/testing/selftests/kvm/*/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:     Paolo Bonzini <pbonzini@redhat.com>
-M:     Radim Krčmář <rkrcmar@redhat.com>
 R:     Sean Christopherson <sean.j.christopherson@intel.com>
 R:     Vitaly Kuznetsov <vkuznets@redhat.com>
 R:     Wanpeng Li <wanpengli@tencent.com>
index 46822afc57e00461843b56f72cd403a939f17e69..9f2165937f7d82752855ae59bb0dd32f82740134 100644 (file)
@@ -2098,9 +2098,9 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
                WARN_ON(1);
        }
 
-       kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
-               cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-       print_sys_reg_instr(params);
+       print_sys_reg_msg(params,
+                         "Unsupported guest CP%d access at: %08lx [%08lx]\n",
+                         cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
        kvm_inject_undefined(vcpu);
 }
 
@@ -2233,6 +2233,12 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
                                NULL, 0);
 }
 
+static bool is_imp_def_sys_reg(struct sys_reg_params *params)
+{
+       // See ARM DDI 0487E.a, section D12.3.2
+       return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
+}
+
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *params)
 {
@@ -2248,10 +2254,12 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 
        if (likely(r)) {
                perform_access(vcpu, params, r);
+       } else if (is_imp_def_sys_reg(params)) {
+               kvm_inject_undefined(vcpu);
        } else {
-               kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
-                       *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-               print_sys_reg_instr(params);
+               print_sys_reg_msg(params,
+                                 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+                                 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
                kvm_inject_undefined(vcpu);
        }
        return 1;
@@ -2360,8 +2368,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;
 
+       if (!index_to_params(id, &params))
+               return NULL;
+
        table = get_target_table(vcpu->arch.target, true, &num);
-       r = find_reg_by_id(id, &params, table, num);
+       r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
index 9bca0312d7982c4d1a8620ef907ea683abd6c598..5a6fc30f59894db482b667c20dd892baba0e6c7f 100644 (file)
@@ -62,11 +62,24 @@ struct sys_reg_desc {
 #define REG_HIDDEN_USER                (1 << 0) /* hidden from userspace ioctls */
 #define REG_HIDDEN_GUEST       (1 << 1) /* hidden from guest */
 
-static inline void print_sys_reg_instr(const struct sys_reg_params *p)
+static __printf(2, 3)
+inline void print_sys_reg_msg(const struct sys_reg_params *p,
+                                      char *fmt, ...)
 {
+       va_list va;
+
+       va_start(va, fmt);
        /* Look, we even formatted it for you to paste into the table! */
-       kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+       kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
+                     &(struct va_format){ fmt, &va },
                      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
+       va_end(va);
+}
+
+static inline void print_sys_reg_instr(const struct sys_reg_params *p)
+{
+       /* GCC warns on an empty format string */
+       print_sys_reg_msg(p, "%s", "");
 }
 
 static inline bool ignore_write(struct kvm_vcpu *vcpu,
index dc53578193ee00ef0cd39b43465af4569a51f5b2..6ff3f896d90816efa520657cb747be3a0ea0572d 100644 (file)
@@ -4983,7 +4983,8 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
                if (nesting_enabled(kvm))
                        kvmhv_release_all_nested(kvm);
                kvm->arch.process_table = 0;
-               uv_svm_terminate(kvm->arch.lpid);
+               if (kvm->arch.secure_guest)
+                       uv_svm_terminate(kvm->arch.lpid);
                kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
        }
 
index cfafa320a8cf75e8437153fc36310a1f5bc3d4b3..cf55629ff0ff642ea85fb5783f2fd4df678b9d7c 100644 (file)
@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
                        entry->edx |= F(SPEC_CTRL);
                if (boot_cpu_has(X86_FEATURE_STIBP))
                        entry->edx |= F(INTEL_STIBP);
-               if (boot_cpu_has(X86_FEATURE_SSBD))
+               if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->edx |= F(SPEC_CTRL_SSBD);
                /*
                 * We emulate ARCH_CAPABILITIES in software even
@@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
                        entry->ebx |= F(AMD_IBRS);
                if (boot_cpu_has(X86_FEATURE_STIBP))
                        entry->ebx |= F(AMD_STIBP);
-               if (boot_cpu_has(X86_FEATURE_SSBD))
+               if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   boot_cpu_has(X86_FEATURE_AMD_SSBD))
                        entry->ebx |= F(AMD_SSBD);
                if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        entry->ebx |= F(AMD_SSB_NO);
index 12e0280291cee9c0670f0d5c222ff6f24e4c0060..8de4daf25097d9267cf097d3be879f4372e3ff6a 100644 (file)
@@ -1352,7 +1352,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 }
 
-static void cpu_init_hyp_mode(void *dummy)
+static void cpu_init_hyp_mode(void)
 {
        phys_addr_t pgd_ptr;
        unsigned long hyp_stack_ptr;
@@ -1386,7 +1386,7 @@ static void cpu_hyp_reinit(void)
        if (is_kernel_in_hyp_mode())
                kvm_timer_init_vhe();
        else
-               cpu_init_hyp_mode(NULL);
+               cpu_init_hyp_mode();
 
        kvm_arm_init_debug();
 
index 38b4c910b6c3861609979f6292f81462ee39d595..0b32a904a1bb3a5636363803eceb34a45766cd25 100644 (file)
@@ -38,6 +38,11 @@ static unsigned long io_map_base;
 #define KVM_S2PTE_FLAG_IS_IOMAP                (1UL << 0)
 #define KVM_S2_FLAG_LOGGING_ACTIVE     (1UL << 1)
 
+static bool is_iomap(unsigned long flags)
+{
+       return flags & KVM_S2PTE_FLAG_IS_IOMAP;
+}
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        vma_pagesize = vma_kernel_pagesize(vma);
        if (logging_active ||
+           (vma->vm_flags & VM_PFNMAP) ||
            !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
                force_pte = true;
                vma_pagesize = PAGE_SIZE;
@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        writable = false;
        }
 
+       if (exec_fault && is_iomap(flags))
+               return -ENOEXEC;
+
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (writable)
                kvm_set_pfn_dirty(pfn);
 
-       if (fault_status != FSC_PERM)
+       if (fault_status != FSC_PERM && !is_iomap(flags))
                clean_dcache_guest_page(pfn, vma_pagesize);
 
        if (exec_fault)
@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
-                       kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                       ret = 1;
-                       goto out_unlock;
+                       ret = -ENOEXEC;
+                       goto out;
                }
 
                /*
@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
+out:
+       if (ret == -ENOEXEC) {
+               kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+               ret = 1;
+       }
 out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
@@ -2301,15 +2314,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                if (!vma || vma->vm_start >= reg_end)
                        break;
 
-               /*
-                * Mapping a read-only VMA is only allowed if the
-                * memory region is configured as read-only.
-                */
-               if (writable && !(vma->vm_flags & VM_WRITE)) {
-                       ret = -EPERM;
-                       break;
-               }
-
                /*
                 * Take the intersection of this VMA with the memory region
                 */
index b3c5de48064c91b3a0b32fae72590708f7783695..a963b9d766b73a75d93a500cdaf0c2cc49ba8fdc 100644 (file)
@@ -70,7 +70,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
  */
 int kvm_vgic_create(struct kvm *kvm, u32 type)
 {
-       int i, vcpu_lock_idx = -1, ret;
+       int i, ret;
        struct kvm_vcpu *vcpu;
 
        if (irqchip_in_kernel(kvm))
@@ -86,17 +86,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                !kvm_vgic_global_state.can_emulate_gicv2)
                return -ENODEV;
 
-       /*
-        * Any time a vcpu is run, vcpu_load is called which tries to grab the
-        * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-        * that no other VCPUs are run while we create the vgic.
-        */
        ret = -EBUSY;
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (!mutex_trylock(&vcpu->mutex))
-                       goto out_unlock;
-               vcpu_lock_idx = i;
-       }
+       if (!lock_all_vcpus(kvm))
+               return ret;
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->arch.has_run_once)
@@ -125,10 +117,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
-       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-               vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-               mutex_unlock(&vcpu->mutex);
-       }
+       unlock_all_vcpus(kvm);
        return ret;
 }
 
@@ -177,6 +166,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
                        break;
                default:
                        kfree(dist->spis);
+                       dist->spis = NULL;
                        return -EINVAL;
                }
        }