Merge tag 'kvm-riscv-fixes-5.19-1' of https://github.com/kvm-riscv/linux into HEAD
author Paolo Bonzini <pbonzini@redhat.com>
Thu, 9 Jun 2022 13:45:00 +0000 (09:45 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 9 Jun 2022 13:45:00 +0000 (09:45 -0400)
KVM/riscv fixes for 5.19, take #1

- Typo fix in arch/riscv/kvm/vmid.c

- Remove broken reference pattern from MAINTAINERS entry

arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
virt/kvm/kvm_main.c

diff --combined arch/x86/kvm/mmu/mmu.c
index e46771e9519112625a95a38f84d64da16f24845a,f4653688fa6db1d4bf0246e1f16188bf11612462..e826ee9138fa895dcc48ce5b7d784dd202c0c4bd
@@@ -5179,7 -5179,7 +5179,7 @@@ static void __kvm_mmu_free_obsolete_roo
                roots_to_free |= KVM_MMU_ROOT_CURRENT;
  
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 -              if (is_obsolete_root(kvm, mmu->root.hpa))
 +              if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
                        roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
        }
  
@@@ -5481,14 -5481,16 +5481,16 @@@ void kvm_mmu_invpcid_gva(struct kvm_vcp
        uint i;
  
        if (pcid == kvm_get_active_pcid(vcpu)) {
-               mmu->invlpg(vcpu, gva, mmu->root.hpa);
+               if (mmu->invlpg)
+                       mmu->invlpg(vcpu, gva, mmu->root.hpa);
                tlb_flush = true;
        }
  
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
                    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
-                       mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+                       if (mmu->invlpg)
+                               mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
                        tlb_flush = true;
                }
        }
@@@ -5676,6 -5678,7 +5678,7 @@@ static void kvm_zap_obsolete_pages(stru
  {
        struct kvm_mmu_page *sp, *node;
        int nr_zapped, batch = 0;
+       bool unstable;
  
  restart:
        list_for_each_entry_safe_reverse(sp, node,
                        goto restart;
                }
  
-               if (__kvm_mmu_prepare_zap_page(kvm, sp,
-                               &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
-                       batch += nr_zapped;
+               unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
+                               &kvm->arch.zapped_obsolete_pages, &nr_zapped);
+               batch += nr_zapped;
+               if (unstable)
                        goto restart;
-               }
        }
  
        /*
diff --combined arch/x86/kvm/svm/svm.c
index 921fcb85a9cdb9cecf0fbbcadc1f4dffc383728b,200045f71df04e522e883e34756a71eda29f5cf5..1dc02cdf69602e19934f9ac08f7cdcc2ce7c25ee
@@@ -465,24 -465,11 +465,24 @@@ static int has_svm(void
        return 1;
  }
  
 +void __svm_write_tsc_multiplier(u64 multiplier)
 +{
 +      preempt_disable();
 +
 +      if (multiplier == __this_cpu_read(current_tsc_ratio))
 +              goto out;
 +
 +      wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
 +      __this_cpu_write(current_tsc_ratio, multiplier);
 +out:
 +      preempt_enable();
 +}
 +
  static void svm_hardware_disable(void)
  {
        /* Make sure we clean up behind us */
        if (tsc_scaling)
 -              wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
 +              __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
  
        cpu_svm_disable();
  
@@@ -528,7 -515,8 +528,7 @@@ static int svm_hardware_enable(void
                 * Set the default value, even if we don't use TSC scaling
                 * to avoid having stale value in the msr
                 */
 -              wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
 -              __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
 +              __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
        }
  
  
@@@ -1011,12 -999,11 +1011,12 @@@ static void svm_write_tsc_offset(struc
        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
  }
  
 -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
 +static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
  {
 -      wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
 +      __svm_write_tsc_multiplier(multiplier);
  }
  
 +
  /* Evaluate instruction intercepts that depend on guest CPUID features. */
  static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
                                              struct vcpu_svm *svm)
@@@ -1370,14 -1357,19 +1370,14 @@@ static void svm_prepare_switch_to_guest
         */
        vmsave(__sme_page_pa(sd->save_area));
        if (sev_es_guest(vcpu->kvm)) {
-               struct vmcb_save_area *hostsa;
-               hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
+               struct sev_es_save_area *hostsa;
+               hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
  
                sev_es_prepare_switch_to_guest(hostsa);
        }
  
 -      if (tsc_scaling) {
 -              u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
 -              if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
 -                      __this_cpu_write(current_tsc_ratio, tsc_ratio);
 -                      wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
 -              }
 -      }
 +      if (tsc_scaling)
 +              __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
  
        if (likely(tsc_aux_uret_slot >= 0))
                kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
@@@ -3205,8 -3197,8 +3205,8 @@@ static void dump_vmcb(struct kvm_vcpu *
               "tr:",
               save01->tr.selector, save01->tr.attrib,
               save01->tr.limit, save01->tr.base);
-       pr_err("cpl:            %d                efer:         %016llx\n",
-               save->cpl, save->efer);
+       pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
+              save->vmpl, save->cpl, save->efer);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cr0:", save->cr0, "cr2:", save->cr2);
        pr_err("%-15s %016llx %-13s %016llx\n",
@@@ -4263,8 -4255,6 +4263,8 @@@ out
  
  static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
  {
 +      if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
 +              vcpu->arch.at_instruction_boundary = true;
  }
  
  static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
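
The svm.c hunks above fold the scattered MSR_AMD64_TSC_RATIO writes into a single __svm_write_tsc_multiplier() helper that skips the MSR write when the requested ratio matches the per-CPU cached value, with preemption disabled so the cache and the write stay on one CPU. Below is a minimal userspace C sketch of that write-if-changed pattern, not kernel code; cached_ratio, expensive_register_write() and write_tsc_multiplier() are illustrative stand-ins, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Stands in for the per-CPU current_tsc_ratio cache (per-thread here). */
static _Thread_local uint64_t cached_ratio;

/* Stands in for wrmsrl(); in the kernel this is a costly serializing MSR write. */
static void expensive_register_write(uint64_t val)
{
	printf("register write: %#llx\n", (unsigned long long)val);
}

static void write_tsc_multiplier(uint64_t multiplier)
{
	/*
	 * The kernel helper brackets this with preempt_disable()/preempt_enable()
	 * so the cached value and the register being written belong to the same CPU.
	 */
	if (multiplier == cached_ratio)
		return;				/* already programmed, skip the write */

	expensive_register_write(multiplier);
	cached_ratio = multiplier;
}

int main(void)
{
	write_tsc_multiplier(0x100000000ULL);	/* writes */
	write_tsc_multiplier(0x100000000ULL);	/* skipped: value unchanged */
	write_tsc_multiplier(0x180000000ULL);	/* writes again */
	return 0;
}
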
diff --combined arch/x86/kvm/svm/svm.h
index 29d6fd205a4946a1bca4ad7fa015111670979672,21c5460e947aaf5b54e32cdfd1a602336bce47cf..500348c1cb350871b0d996d3154a1a509662ad70
@@@ -182,7 -182,7 +182,7 @@@ struct svm_nested_state 
  
  struct vcpu_sev_es_state {
        /* SEV-ES support */
-       struct vmcb_save_area *vmsa;
+       struct sev_es_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
        bool received_first_sipi;
@@@ -590,7 -590,7 +590,7 @@@ int nested_svm_check_exception(struct v
                               bool has_error_code, u32 error_code);
  int nested_svm_exit_special(struct vcpu_svm *svm);
  void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
 -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
 +void __svm_write_tsc_multiplier(u64 multiplier);
  void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
                                       struct vmcb_control_area *control);
  void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
@@@ -655,7 -655,7 +655,7 @@@ int sev_es_string_io(struct vcpu_svm *s
  void sev_es_init_vmcb(struct vcpu_svm *svm);
  void sev_es_vcpu_reset(struct vcpu_svm *svm);
  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
- void sev_es_prepare_switch_to_guest(struct vmcb_save_area *hostsa);
+ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
  void sev_es_unmap_ghcb(struct vcpu_svm *svm);
  
  /* vmenter.S */
diff --combined arch/x86/kvm/vmx/vmx.c
index 14e01178a753c5b0c8f35a401ff6da4a059b5fb0,a07e8cd753ec55660f9142f283a7be81926b2d7d..9bd86ecccdab56d455fdcc30e7fab6039b127eee
@@@ -6219,7 -6219,7 +6219,7 @@@ static noinstr void vmx_l1d_flush(struc
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
  
        /*
-        * This code is only executed when the the flush mode is 'cond' or
+        * This code is only executed when the flush mode is 'cond' or
         * 'always'
         */
        if (static_branch_likely(&vmx_l1d_flush_cond)) {
@@@ -6547,7 -6547,6 +6547,7 @@@ static void handle_external_interrupt_i
                return;
  
        handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
 +      vcpu->arch.at_instruction_boundary = true;
  }
  
  static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
diff --combined arch/x86/kvm/x86.c
index 25a517206c4d33fa2130904f22a541d9eaa0e680,e9473c7c73903a4fba783cb2c98bf04bdd63c1d7..03fbfbbec460fab308dc3a490bf20c22d119ba39
@@@ -296,8 -296,6 +296,8 @@@ const struct _kvm_stats_desc kvm_vcpu_s
        STATS_DESC_COUNTER(VCPU, nested_run),
        STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
        STATS_DESC_COUNTER(VCPU, directed_yield_successful),
 +      STATS_DESC_COUNTER(VCPU, preemption_reported),
 +      STATS_DESC_COUNTER(VCPU, preemption_other),
        STATS_DESC_ICOUNTER(VCPU, guest_mode)
  };
  
@@@ -4627,19 -4625,6 +4627,19 @@@ static void kvm_steal_time_set_preempte
        struct kvm_memslots *slots;
        static const u8 preempted = KVM_VCPU_PREEMPTED;
  
 +      /*
 +       * The vCPU can be marked preempted if and only if the VM-Exit was on
 +       * an instruction boundary and will not trigger guest emulation of any
 +       * kind (see vcpu_run).  Vendor specific code controls (conservatively)
 +       * when this is true, for example allowing the vCPU to be marked
 +       * preempted if and only if the VM-Exit was due to a host interrupt.
 +       */
 +      if (!vcpu->arch.at_instruction_boundary) {
 +              vcpu->stat.preemption_other++;
 +              return;
 +      }
 +
 +      vcpu->stat.preemption_reported++;
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
  
@@@ -4669,21 -4654,19 +4669,21 @@@ void kvm_arch_vcpu_put(struct kvm_vcpu 
  {
        int idx;
  
 -      if (vcpu->preempted && !vcpu->arch.guest_state_protected)
 -              vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 +      if (vcpu->preempted) {
 +              if (!vcpu->arch.guest_state_protected)
 +                      vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
  
 -      /*
 -       * Take the srcu lock as memslots will be accessed to check the gfn
 -       * cache generation against the memslots generation.
 -       */
 -      idx = srcu_read_lock(&vcpu->kvm->srcu);
 -      if (kvm_xen_msr_enabled(vcpu->kvm))
 -              kvm_xen_runstate_set_preempted(vcpu);
 -      else
 -              kvm_steal_time_set_preempted(vcpu);
 -      srcu_read_unlock(&vcpu->kvm->srcu, idx);
 +              /*
 +               * Take the srcu lock as memslots will be accessed to check the gfn
 +               * cache generation against the memslots generation.
 +               */
 +              idx = srcu_read_lock(&vcpu->kvm->srcu);
 +              if (kvm_xen_msr_enabled(vcpu->kvm))
 +                      kvm_xen_runstate_set_preempted(vcpu);
 +              else
 +                      kvm_steal_time_set_preempted(vcpu);
 +              srcu_read_unlock(&vcpu->kvm->srcu, idx);
 +      }
  
        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
@@@ -10439,13 -10422,6 +10439,13 @@@ static int vcpu_run(struct kvm_vcpu *vc
        vcpu->arch.l1tf_flush_l1d = true;
  
        for (;;) {
 +              /*
 +               * If another guest vCPU requests a PV TLB flush in the middle
 +               * of instruction emulation, the rest of the emulation could
 +               * use a stale page translation. Assume that any code after
 +               * this point can start executing an instruction.
 +               */
 +              vcpu->arch.at_instruction_boundary = false;
                if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
                } else {
@@@ -11961,7 -11937,7 +11961,7 @@@ void kvm_arch_destroy_vm(struct kvm *kv
        if (current->mm == kvm->mm) {
                /*
                 * Free memory regions allocated on behalf of userspace,
-                * unless the the memory map has changed due to process exit
+                * unless the memory map has changed due to process exit
                 * or fd copying.
                 */
                mutex_lock(&kvm->slots_lock);
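
The x86.c hunks above introduce vcpu->arch.at_instruction_boundary: cleared at the top of every vcpu_run() iteration, set only by the vendor interrupt-exit paths, and consulted when recording steal time so that preemption is reported to the guest only when the exit landed on an instruction boundary. The following is a self-contained userspace C sketch of that flag-and-counters flow, not kernel code; struct vcpu, handle_exit_irqoff(), steal_time_set_preempted() and run_once() are illustrative names.

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool at_instruction_boundary;
	unsigned long preemption_reported;
	unsigned long preemption_other;
};

enum exit_reason { EXIT_HOST_INTERRUPT, EXIT_NEEDS_EMULATION };

static void handle_exit_irqoff(struct vcpu *v, enum exit_reason reason)
{
	/* Only the host-interrupt exit path marks an instruction boundary. */
	if (reason == EXIT_HOST_INTERRUPT)
		v->at_instruction_boundary = true;
}

static void steal_time_set_preempted(struct vcpu *v)
{
	/* Report preemption to the guest only on an instruction boundary. */
	if (!v->at_instruction_boundary) {
		v->preemption_other++;
		return;
	}
	v->preemption_reported++;
}

static void run_once(struct vcpu *v, enum exit_reason reason)
{
	v->at_instruction_boundary = false;	/* cleared at the top of each iteration */
	handle_exit_irqoff(v, reason);
	steal_time_set_preempted(v);		/* pretend we were scheduled out here */
}

int main(void)
{
	struct vcpu v = { 0 };

	run_once(&v, EXIT_HOST_INTERRUPT);
	run_once(&v, EXIT_NEEDS_EMULATION);
	printf("reported=%lu other=%lu\n", v.preemption_reported, v.preemption_other);
	return 0;
}
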
diff --combined virt/kvm/kvm_main.c
index f2922ba3b7a817838fd0877f0f4ad4fbb8a8e1df,64ec2222a1968eb237e45270dd0494632862a637..44c47670447aa4cc250f255ae9e5ec31ad1df377
@@@ -1561,7 -1561,7 +1561,7 @@@ static int kvm_prepare_memory_region(st
        r = kvm_arch_prepare_memory_region(kvm, old, new, change);
  
        /* Free the bitmap on failure if it was allocated above. */
-       if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
+       if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
                kvm_destroy_dirty_bitmap(new);
  
        return r;
@@@ -4300,11 -4300,8 +4300,11 @@@ static int kvm_ioctl_create_device(stru
                kvm_put_kvm_no_destroy(kvm);
                mutex_lock(&kvm->lock);
                list_del(&dev->vm_node);
 +              if (ops->release)
 +                      ops->release(dev);
                mutex_unlock(&kvm->lock);
 -              ops->destroy(dev);
 +              if (ops->destroy)
 +                      ops->destroy(dev);
                return ret;
        }
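
The one-line kvm_main.c change to kvm_prepare_memory_region() tightens the error path: the dirty bitmap must be freed whenever it was allocated for the new slot in this call, which is the case when there was no old slot at all or the old slot had no bitmap to hand over. A minimal userspace C sketch of that ownership rule follows, not kernel code; struct memslot, arch_prepare_memory_region() and prepare_memory_region() are simplified, illustrative stand-ins.

#include <errno.h>
#include <stdlib.h>

struct memslot {
	unsigned long *dirty_bitmap;
};

/* Pretend arch hook; failing it exercises the cleanup path. */
static int arch_prepare_memory_region(void)
{
	return -EINVAL;
}

static int prepare_memory_region(struct memslot *old, struct memslot *new,
				 unsigned long npages)
{
	int r;

	/* A bitmap is allocated here only if the old slot cannot donate one. */
	if (new && !new->dirty_bitmap && (!old || !old->dirty_bitmap)) {
		new->dirty_bitmap = calloc((npages + 63) / 64,
					   sizeof(*new->dirty_bitmap));
		if (!new->dirty_bitmap)
			return -ENOMEM;
	}

	r = arch_prepare_memory_region();

	/*
	 * Free the bitmap on failure only when it was allocated above.  The
	 * pre-fix condition also required old to be non-NULL, so a brand-new
	 * slot (old == NULL) that failed here leaked its freshly allocated
	 * bitmap.
	 */
	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap)) {
		free(new->dirty_bitmap);
		new->dirty_bitmap = NULL;
	}

	return r;
}

int main(void)
{
	struct memslot new_slot = { 0 };

	prepare_memory_region(NULL, &new_slot, 128);
	return 0;
}
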