// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif
DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		kvm->arch.return_nisv_io_abort_to_user = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
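/*
 * Example (illustrative only): userspace opts in to KVM_CAP_ARM_NISV_TO_USER
 * on the VM descriptor so that non-ISV data aborts are reported to it
 * instead of being injected back into the guest ("vm_fd" is assumed):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_NISV_TO_USER };
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */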
/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret, cpu;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
	if (!kvm->arch.last_vcpu_ran)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid.vmid_gen = 0;

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = vgic_present ?
				kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;
	return ret;
}
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_vgic_destroy(kvm);

	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
		r = 1;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	default:
		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
		break;
	}

	return r;
}
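/*
 * Example (illustrative only): userspace probes an extension with
 * KVM_CHECK_EXTENSION; 0 means unsupported, and positive values are
 * capability specific (here, the number of supported user IRQ lines):
 *
 *	int n = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_USER_IRQ);
 */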
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->arch.max_vcpus)
		return -EINVAL;

	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);

	kvm_arm_vcpu_destroy(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_is_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int *last_ran;
	kvm_host_data_t *cpu_data;

	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
	cpu_data = this_cpu_ptr(&kvm_host_data);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
		*last_ran = vcpu->vcpu_id;
	}

	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	kvm_vcpu_load_sysregs(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (single_task_running())
		vcpu_clear_wfx_traps(vcpu);
	else
		vcpu_set_wfx_traps(vcpu);

	vcpu_ptrauth_setup_lazy(vcpu);
}
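/*
 * Note on the WFx trap policy above (an interpretation, not normative):
 * trapping WFE/WFI lets KVM schedule something else while the guest spins
 * or idles, which only pays off when other tasks are runnable. With a
 * single running task, leaving WFx untrapped avoids pointless exits on an
 * otherwise idle core.
 */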
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_fp(vcpu);
	kvm_vcpu_put_sysregs(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);

	vcpu->cpu = -1;
}
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}
/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	preempt_disable();
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
	preempt_enable();
}
/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first
 * to use a VMID for the new generation, we must flush necessary caches and
 * TLBs on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}
/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @kvm: The guest that struct vmid belongs to
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
	if (!need_new_vmid_gen(vmid))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(vmid)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	vmid->vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

	smp_wmb();
	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

	spin_unlock(&kvm_vmid_lock);
}
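/*
 * Worked example of the allocator above, assuming 8-bit VMIDs
 * (kvm_get_vmid_bits() == 8): VMIDs 1..255 are handed out in order as VMs
 * first run within a generation. Incrementing past 255 wraps kvm_next_vmid
 * to 0, so the next caller starts a new generation: kvm_vmid_gen is bumped
 * (invalidating every previously assigned VMID at once, since
 * need_new_vmid_gen() now returns true for them), all guests are forced
 * out, and TLBs/icache are flushed before VMID 1 is reused.
 */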
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	vcpu->arch.has_run_once = true;

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		if (unlikely(!vgic_ready(kvm))) {
			ret = kvm_vgic_map_resources(kvm);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);

	return ret;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}
void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		swake_up_one(kvm_arch_vcpu_wq(vcpu));
	}
}
static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
				       (!vcpu->arch.pause)));

	if (vcpu->arch.power_off || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			vcpu_req_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);
	}
}
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the KVM_RUN ioctl from user space. It will
 * execute VM code in a loop until the time slice for the process is used up
 * or some emulation is needed from user space, in which case the function
 * will return with return value 0 and with the kvm_run structure filled in
 * with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (run->immediate_exit)
		return -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vmid(&vcpu->kvm->arch.vmid);

		check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		/*
		 * Exit if we have a signal pending so that we can deliver the
		 * signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * If we're using a userspace irqchip, then check if we need
		 * to tell a userspace irqchip about timer or PMU level
		 * changes and if so, exit to userspace (the actual level
		 * state gets updated in kvm_timer_update_run and
		 * kvm_pmu_update_run below).
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use)) {
			if (kvm_timer_should_notify_user(vcpu) ||
			    kvm_pmu_should_notify_user(vcpu)) {
				ret = -EINTR;
				run->exit_reason = KVM_EXIT_INTR;
			}
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_enter_irqoff();

		if (has_vhe()) {
			ret = kvm_vcpu_run_vhe(vcpu);
		} else {
			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
		}

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_hwstate(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled. Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest. We enable
		 * preemption after calling guest_exit() so that if we get
		 * preempted we make sure ticks after that are not counted as
		 * guest time.
		 */
		guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, run, ret);

		preempt_enable();

		ret = handle_exit(vcpu, run, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return ret;
}
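/*
 * Example (illustrative only) of the userspace half of the loop above,
 * where "kvm_fd" and "vcpu_fd" are assumed descriptors:
 *
 *	int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			err(1, "KVM_RUN");
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:
 *			emulate_mmio(run);	// hypothetical helper
 *			break;
 *		case KVM_EXIT_INTR:
 *			break;			// signal; just re-enter
 *		}
 *	}
 */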
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}
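/*
 * Example (illustrative only): the irq field packs type, vcpu index and
 * IRQ number exactly as decoded above. Asserting SPI 40 on an in-kernel
 * GIC from userspace ("vm_fd" is an assumed VM descriptor):
 *
 *	struct kvm_irq_level irq = {
 *		.irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) | 40,
 *		.level = 1,
 *	};
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);
 *
 * SPI numbers must be >= VGIC_NR_PRIVATE_IRQS (32), which 40 satisfies.
 */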
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i, ret;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
	}

	return ret;
}
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 */
	if (vcpu->arch.has_run_once)
		stage2_unmap_vm(vcpu->kvm);

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu_power_off(vcpu);
	else
		vcpu->arch.power_off = false;

	return 0;
}
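/*
 * Example (illustrative only): the canonical userspace sequence is to ask
 * the VM for its preferred target and feed the result straight back in:
 *
 *	struct kvm_vcpu_init init;
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * Repeating KVM_ARM_VCPU_INIT with the same target/features resets the
 * vcpu, which is how userspace implements vcpu reboot.
 */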
static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}
static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EPERM;
		if (!kvm_arm_vcpu_is_finalized(vcpu))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (kvm_arm_vcpu_get_events(vcpu, &events))
			return -EINVAL;

		if (copy_to_user(argp, &events, sizeof(events)))
			return -EFAULT;

		return 0;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		if (copy_from_user(&events, argp, sizeof(events)))
			return -EFAULT;

		return kvm_arm_vcpu_set_events(vcpu, &events);
	}
	case KVM_ARM_VCPU_FINALIZE: {
		int what;

		if (!kvm_vcpu_initialized(vcpu))
			return -ENOEXEC;

		if (get_user(what, (const int __user *)argp))
			return -EFAULT;

		return kvm_arm_vcpu_finalize(vcpu, what);
	}
	default:
		r = -EINVAL;
	}

	return r;
}
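/*
 * Example (illustrative only): KVM_GET_REG_LIST is a two-call protocol.
 * Probe with n = 0 (the ioctl fails with E2BIG but writes back the real
 * count), then retry with a suitably sized buffer:
 *
 *	struct kvm_reg_list probe = { .n = 0 };
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails, sets probe.n
 *	struct kvm_reg_list *list =
 *		malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */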
/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
{
	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_clear_dirty_log_protect(kvm, log, &flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
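/*
 * Example (illustrative only): one dirty-logging round for a memslot
 * created with KVM_MEM_LOG_DIRTY_PAGES; each bitmap bit covers one page
 * ("slot_id", "bitmap" and "vm_fd" are assumptions for the sketch):
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,		// npages / 8 bytes
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *	// walk the bitmap, copy out dirty pages, repeat
 */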
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		int ret;
		if (!vgic_present)
			return -ENXIO;
		mutex_lock(&kvm->lock);
		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		mutex_unlock(&kvm->lock);
		return ret;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
static void cpu_init_hyp_mode(void)
{
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	pgd_ptr = kvm_mmu_get_httbr();
	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)kvm_get_hyp_vector();

	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
	__cpu_init_stage2();
}
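/*
 * Note on the stack arithmetic above: the hyp stack grows downwards, so
 * the pointer handed to __cpu_init_hyp_mode is the high end of the page
 * (stack_page + PAGE_SIZE), not the base of the allocation.
 */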
static void cpu_hyp_reset(void)
{
	if (!is_kernel_in_hyp_mode())
		__hyp_reset_vectors();
}

static void cpu_hyp_reinit(void)
{
	kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);

	cpu_hyp_reset();

	if (is_kernel_in_hyp_mode())
		kvm_timer_init_vhe();
	else
		cpu_init_hyp_mode();

	kvm_arm_init_debug();

	if (vgic_present)
		kvm_vgic_init_cpu_hardware();
}
static void _kvm_arch_hardware_enable(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
	}
}

int kvm_arch_hardware_enable(void)
{
	_kvm_arch_hardware_enable(NULL);
	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	_kvm_arch_hardware_disable(NULL);
}
#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();

		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif
static int init_common_resources(void)
{
	kvm_set_ipa_limit();

	return 0;
}
static int init_subsystems(void)
{
	int err = 0;

	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

	/*
	 * Register CPU low-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		err = 0;
		break;
	default:
		goto out;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init(vgic_present);
	if (err)
		goto out;

	kvm_perf_init();
	kvm_coproc_table_init();

out:
	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

	return err;
}
static void teardown_hyp_mode(void)
{
	int cpu;

	free_hyp_pgds();
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
}
/**
 * Initializes Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
	if (err) {
		kvm_err("Cannot map bss section\n");
		goto out_err;
	}

	err = kvm_map_vectors();
	if (err) {
		kvm_err("Cannot map vectors\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
					  PAGE_HYP);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	for_each_possible_cpu(cpu) {
		kvm_host_data_t *cpu_data;

		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);

		if (err) {
			kvm_err("Cannot map host CPU state: %d\n", err);
			goto out_err;
		}
	}

	err = hyp_map_aux_data();
	if (err)
		kvm_err("Cannot map host auxiliary data: %d\n", err);

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}
static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}
bool kvm_arch_has_irq_bypass(void)
{
	return true;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
					  &irqfd->irq_entry);
}
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
				     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	kvm_arm_resume_guest(irqfd->kvm);
}
/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;
	bool in_hyp_mode;

	if (!is_hyp_mode_available()) {
		kvm_info("HYP mode not available\n");
		return -ENODEV;
	}

	in_hyp_mode = is_kernel_in_hyp_mode();

	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
		return -ENODEV;
	}

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	err = kvm_arm_init_sve();
	if (err)
		return err;

	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)
			goto out_err;
	}

	err = init_subsystems();
	if (err)
		goto out_hyp;

	if (in_hyp_mode)
		kvm_info("VHE mode initialized successfully\n");
	else
		kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_hyp:
	hyp_cpu_pm_exit();
	if (!in_hyp_mode)
		teardown_hyp_mode();
out_err:
	return err;
}
/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}
module_init(arm_init);