Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index acf9a993dfb6b4765f554c3bed6478c26e1707cb..f56122eedffc84866395218d6006c47037b061a5 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
 __asm__(".arch_extension       virt");
 #endif
 
-DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
+
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
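
The first hunk swaps the kernel-side definition of kvm_host_data for a hyp
per-cpu declaration: nVHE EL2 code now carries its own per-cpu section, and
kvm_arm_hyp_percpu_base[] records where each CPU's private copy of that
section starts. A minimal user-space sketch of the lookup this table enables
(all demo_* names are hypothetical; in the kernel the lookup is wrapped by
helpers such as per_cpu_ptr_nvhe_sym()):

#include <stdio.h>

#define DEMO_NR_CPUS   2
#define DEMO_SECT_SIZE 32

/* Stand-in for the linked per-cpu section and one variable inside it. */
static char demo_sect_start[DEMO_SECT_SIZE];
static char *demo_var = demo_sect_start + 8;

/* One private copy per CPU, mirroring kvm_arm_hyp_percpu_base[]. */
static char demo_copies[DEMO_NR_CPUS][DEMO_SECT_SIZE];
static unsigned long demo_percpu_base[DEMO_NR_CPUS];

/* Resolve a section symbol to CPU-local storage: base[cpu] + offset. */
static void *demo_per_cpu_ptr(void *sym, int cpu)
{
	unsigned long off = (unsigned long)sym - (unsigned long)demo_sect_start;

	return (void *)(demo_percpu_base[cpu] + off);
}

int main(void)
{
	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		demo_percpu_base[cpu] = (unsigned long)demo_copies[cpu];

	*(char *)demo_per_cpu_ptr(demo_var, 1) = 42;
	printf("%d\n", *(char *)demo_per_cpu_ptr(demo_var, 1));
	return 0;
}
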
@@ -145,6 +147,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 {
        int i;
 
+       bitmap_free(kvm->arch.pmu_filter);
+
        kvm_vgic_destroy(kvm);
 
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
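
kvm_arch_destroy_vm() now also releases the VM's PMU event-filter bitmap.
The filter is only allocated if userspace installs one, and bitmap_free(),
like kfree(), accepts a NULL pointer, so the call needs no guard. A short
user-space analogue of that unconditional-teardown pattern (struct and
names hypothetical):

#include <stdlib.h>

struct demo_arch {
	unsigned long *pmu_filter;	/* NULL until a filter is installed */
};

static void demo_destroy_vm(struct demo_arch *arch)
{
	/* free(NULL) is a no-op, just like bitmap_free(NULL). */
	free(arch->pmu_filter);
	arch->pmu_filter = NULL;
}

int main(void)
{
	struct demo_arch arch = { 0 };

	demo_destroy_vm(&arch);	/* safe even though no filter was ever set */
	return 0;
}
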
@@ -286,7 +290,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
                static_branch_dec(&userspace_irqchip_in_use);
 
-       kvm_mmu_free_memory_caches(vcpu);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
        kvm_timer_vcpu_terminate(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
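
The arm64-specific kvm_mmu_free_memory_caches() wrapper is replaced by a
direct call to the generic helper on the vCPU's single page-table cache.
A simplified sketch of what such a cache free amounts to (the real
struct kvm_mmu_memory_cache lives in the common KVM code; the fields here
are reduced to the essentials):

#include <stdlib.h>

#define DEMO_CACHE_CAPACITY 40

struct demo_mmu_memory_cache {
	int nobjs;			/* pre-allocated objects still held */
	void *objects[DEMO_CACHE_CAPACITY];
};

/* Return every unused pre-allocation, like kvm_mmu_free_memory_cache(). */
static void demo_free_memory_cache(struct demo_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}

int main(void)
{
	struct demo_mmu_memory_cache mc = { 0 };

	mc.objects[mc.nobjs++] = malloc(64);
	demo_free_memory_cache(&mc);
	return 0;
}
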
 
@@ -1259,6 +1263,19 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 }
 
+static unsigned long nvhe_percpu_size(void)
+{
+       return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
+               (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
+}
+
+static unsigned long nvhe_percpu_order(void)
+{
+       unsigned long size = nvhe_percpu_size();
+
+       return size ? get_order(size) : 0;
+}
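
nvhe_percpu_size() measures the nVHE per-cpu section from its linker
symbols, and nvhe_percpu_order() turns that byte count into an allocation
order for alloc_pages()/free_pages(). The size check matters: get_order()
only returns a useful value for a non-zero size, and an empty section
should map to order 0. A standalone illustration of the rounding
(get_order() re-derived here for the demo, assuming 4 KiB pages):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assume 4 KiB pages */
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

/* Smallest order such that (1 << order) pages cover `size` bytes. */
static unsigned int demo_get_order(unsigned long size)
{
	unsigned long pages = (size + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	unsigned int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	/* 1 byte and 1 page both fit in order 0; 3 pages need order 2. */
	printf("%u %u %u\n", demo_get_order(1),
	       demo_get_order(DEMO_PAGE_SIZE),
	       demo_get_order(3 * DEMO_PAGE_SIZE));
	return 0;
}
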
+
 static int kvm_map_vectors(void)
 {
        /*
@@ -1299,6 +1316,7 @@ static void cpu_init_hyp_mode(void)
        unsigned long hyp_stack_ptr;
        unsigned long vector_ptr;
        unsigned long tpidr_el2;
+       struct arm_smccc_res res;
 
        /* Switch from the HYP stub to our own HYP init vector */
        __hyp_set_vectors(kvm_get_idmap_vector());
@@ -1308,12 +1326,13 @@ static void cpu_init_hyp_mode(void)
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
         */
-       tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-                    (unsigned long)kvm_ksym_ref(&kvm_host_data));
+       tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+                   (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
        pgd_ptr = kvm_mmu_get_httbr();
        hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
-       vector_ptr = (unsigned long)kvm_get_hyp_vector();
+       hyp_stack_ptr = kern_hyp_va(hyp_stack_ptr);
+       vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector));
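
Note what tpidr_el2 holds here: not a pointer but a delta, the distance
from the kernel-image address of the nVHE per-cpu section to this CPU's
copy of it. EL2 code can then resolve any per-cpu symbol with adr_l and
add TPIDR_EL2 to land in the right copy. vector_ptr, for its part, now
installs the host's own vector (__kvm_hyp_host_vector); the guest-entry
vector is published per CPU through kvm_hyp_vector further down. A
pointer-arithmetic sketch of the tpidr_el2 idea (names hypothetical):

#include <stdio.h>

static char demo_image_sect[64];	/* kernel-image view of the section */
static char demo_cpu_copy[64];		/* this CPU's private copy */

static unsigned long demo_tpidr_el2;

/* What cpu_init_hyp_mode() computes: copy address minus image address. */
static void demo_init(void)
{
	demo_tpidr_el2 = (unsigned long)demo_cpu_copy -
			 (unsigned long)demo_image_sect;
}

/* What EL2 does: symbol address (adr_l) plus the per-CPU delta. */
static void *demo_el2_this_cpu_ptr(void *image_sym)
{
	return (void *)((unsigned long)image_sym + demo_tpidr_el2);
}

int main(void)
{
	demo_init();
	/* Prints 1: the resolved pointer lands at offset 8 in the copy. */
	printf("%d\n", demo_el2_this_cpu_ptr(demo_image_sect + 8) ==
		       (void *)(demo_cpu_copy + 8));
	return 0;
}
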
 
        /*
         * Call initialization code, and switch to the full blown HYP code.
@@ -1322,7 +1341,9 @@ static void cpu_init_hyp_mode(void)
         * cpus_have_const_cap() wrapper.
         */
        BUG_ON(!system_capabilities_finalized());
-       __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+       arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
+                         pgd_ptr, tpidr_el2, hyp_stack_ptr, vector_ptr, &res);
+       WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
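
The open-coded __kvm_call_hyp() bootstrap becomes a regular SMCCC 1.1
fast call: KVM_HOST_SMCCC_FUNC() names __kvm_hyp_init by function ID, the
arguments travel in the standard argument registers, and res.a0 reports
success exactly as for any other SMCCC service. For orientation, a sketch
of how an SMCCC function ID is composed from the spec's bit fields
(constants re-derived here for the demo; treat the exact owner value as
an assumption):

#include <stdio.h>

#define DEMO_SMCCC_FAST_CALL	(1UL << 31)	/* fast, not yielding */
#define DEMO_SMCCC_64BIT	(1UL << 30)	/* SMC64/HVC64 convention */
#define DEMO_SMCCC_OWNER_SHIFT	24
#define DEMO_OWNER_VENDOR_HYP	6UL		/* vendor hypervisor service */

static unsigned long demo_smccc_func_id(unsigned long owner, unsigned long fn)
{
	return DEMO_SMCCC_FAST_CALL | DEMO_SMCCC_64BIT |
	       (owner << DEMO_SMCCC_OWNER_SHIFT) | (fn & 0xffff);
}

int main(void)
{
	/* A hypothetical host-call #1 in the vendor hyp service range. */
	printf("%#lx\n", demo_smccc_func_id(DEMO_OWNER_VENDOR_HYP, 1));
	return 0;
}
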
 
        /*
         * Disabling SSBD on a non-VHE system requires us to enable SSBS
@@ -1342,10 +1363,12 @@ static void cpu_hyp_reset(void)
 
 static void cpu_hyp_reinit(void)
 {
-       kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+       kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
 
        cpu_hyp_reset();
 
+       *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
+
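
This is the consumer side of the kvm_hyp_vector declaration at the top of
the file: on every CPU-up, the host computes which exception vector EL2
should use for guest entry (the choice can differ per CPU when hardened
vectors are in use) and publishes it in the hyp per-cpu slot, where the
nVHE code later reads it. A compact sketch of that publish/consume split
(array indices stand in for per-cpu addressing):

#define DEMO_NR_CPUS 4

static unsigned long demo_hyp_vector[DEMO_NR_CPUS];

/* Host side, at CPU bring-up: record the vector chosen for this CPU. */
static void demo_cpu_reinit(int cpu, unsigned long vector)
{
	demo_hyp_vector[cpu] = vector;
}

/* Hyp side, on guest entry: load whatever this CPU published. */
static unsigned long demo_el2_vector(int cpu)
{
	return demo_hyp_vector[cpu];
}

int main(void)
{
	demo_cpu_reinit(0, 0x1000);
	return demo_el2_vector(0) == 0x1000 ? 0 : 1;
}
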
        if (is_kernel_in_hyp_mode())
                kvm_timer_init_vhe();
        else
@@ -1496,8 +1519,10 @@ static void teardown_hyp_mode(void)
        int cpu;
 
        free_hyp_pgds();
-       for_each_possible_cpu(cpu)
+       for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+               free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+       }
 }
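
Teardown now frees the per-cpu regions alongside the stack pages. Two
details worth noting: free_pages() must be passed the same order the
allocation used, hence the shared nvhe_percpu_order() helper, and an
address of 0 (a CPU whose region was never allocated) is a no-op, so no
guard is needed. A user-space analogue of the paired alloc/free (C11
aligned_alloc stands in for the page allocator):

#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL

/* 2^order pages, like alloc_pages(GFP_KERNEL, order). */
static void *demo_alloc_pages(unsigned int order)
{
	return aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE << order);
}

/* Caller supplies the order; the kernel, unlike libc, does not track it. */
static void demo_free_pages(void *addr, unsigned int order)
{
	(void)order;
	free(addr);	/* free(NULL), like free_pages(0, ...), is a no-op */
}

int main(void)
{
	void *p = demo_alloc_pages(2);

	demo_free_pages(p, 2);
	return 0;
}
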
 
 /**
@@ -1530,6 +1555,24 @@ static int init_hyp_mode(void)
                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }
 
+       /*
+        * Allocate and initialize pages for Hypervisor-mode percpu regions.
+        */
+       for_each_possible_cpu(cpu) {
+               struct page *page;
+               void *page_addr;
+
+               page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
+               if (!page) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+
+               page_addr = page_address(page);
+               memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
+               kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+       }
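
Each region starts life as a byte-for-byte copy of the section image, so
every nVHE per-cpu variable begins with its link-time initial value, much
as the boot-time per-cpu setup seeds the kernel's own areas. A user-space
sketch of the allocate-and-stamp loop (failure unwinds through out_err in
the real code; here it simply returns):

#include <stdlib.h>
#include <string.h>

#define DEMO_NR_CPUS 4

/* Stand-in for the image between __per_cpu_start and __per_cpu_end. */
static const char demo_image[128] = { 1, 2, 3 };	/* initial values */

static unsigned long demo_percpu_base[DEMO_NR_CPUS];

static int demo_init_percpu(void)
{
	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++) {
		void *region = malloc(sizeof(demo_image));

		if (!region)
			return -1;	/* real code: err = -ENOMEM */
		/* Stamp the initial image, like the memcpy() above. */
		memcpy(region, demo_image, sizeof(demo_image));
		demo_percpu_base[cpu] = (unsigned long)region;
	}
	return 0;
}

int main(void)
{
	return demo_init_percpu() ? 1 : 0;
}
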
+
        /*
         * Map the Hyp-code called directly from the host
         */
@@ -1574,14 +1617,17 @@ static int init_hyp_mode(void)
                }
        }
 
+       /*
+        * Map Hyp percpu pages
+        */
        for_each_possible_cpu(cpu) {
-               kvm_host_data_t *cpu_data;
+               char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+               char *percpu_end = percpu_begin + nvhe_percpu_size();
 
-               cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
-               err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
+               err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
 
                if (err) {
-                       kvm_err("Cannot map host CPU state: %d\n", err);
+                       kvm_err("Cannot map hyp percpu region\n");
                        goto out_err;
                }
        }
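
With the regions allocated and stamped, this last loop makes them reachable
from EL2 by mapping each [begin, end) range into the hyp page tables with
PAGE_HYP (read-write, non-executable) permissions; create_hyp_mappings()
takes care of rounding the range out to page granularity. A skeleton of the
per-CPU map-or-bail pattern (demo_map_range() is a hypothetical stand-in
for create_hyp_mappings()):

#include <stdio.h>

#define DEMO_NR_CPUS	 4
#define DEMO_REGION_SIZE 4096UL

static unsigned long demo_percpu_base[DEMO_NR_CPUS];

static int demo_map_range(char *begin, char *end)
{
	(void)begin; (void)end;
	return 0;	/* pretend the mapping succeeded */
}

static int demo_map_all_percpu(void)
{
	for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++) {
		char *begin = (char *)demo_percpu_base[cpu];
		int err = demo_map_range(begin, begin + DEMO_REGION_SIZE);

		if (err) {
			fprintf(stderr, "cannot map cpu%d region\n", cpu);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	return demo_map_all_percpu();
}
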