2 * Kernel-based Virtual Machine driver for Linux
4 * derived from drivers/kvm/kvm_main.c
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright (C) 2008 Qumranet, Inc.
8 * Copyright IBM Corporation, 2008
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Amit Shah <amit.shah@qumranet.com>
15 * Ben-Ami Yassour <benami@il.ibm.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
22 #include <linux/kvm_host.h>
27 #include "kvm_cache_regs.h"
33 #include <linux/clocksource.h>
34 #include <linux/interrupt.h>
35 #include <linux/kvm.h>
37 #include <linux/vmalloc.h>
38 #include <linux/export.h>
39 #include <linux/moduleparam.h>
40 #include <linux/mman.h>
41 #include <linux/highmem.h>
42 #include <linux/iommu.h>
43 #include <linux/intel-iommu.h>
44 #include <linux/cpufreq.h>
45 #include <linux/user-return-notifier.h>
46 #include <linux/srcu.h>
47 #include <linux/slab.h>
48 #include <linux/perf_event.h>
49 #include <linux/uaccess.h>
50 #include <linux/hash.h>
51 #include <linux/pci.h>
52 #include <linux/timekeeper_internal.h>
53 #include <linux/pvclock_gtod.h>
54 #include <linux/kvm_irqfd.h>
55 #include <linux/irqbypass.h>
56 #include <linux/sched/stat.h>
57 #include <linux/mem_encrypt.h>
59 #include <trace/events/kvm.h>
61 #include <asm/debugreg.h>
65 #include <linux/kernel_stat.h>
66 #include <asm/fpu/internal.h> /* Ugh! */
67 #include <asm/pvclock.h>
68 #include <asm/div64.h>
69 #include <asm/irq_remapping.h>
71 #define CREATE_TRACE_POINTS
74 #define MAX_IO_MSRS 256
75 #define KVM_MAX_MCE_BANKS 32
76 u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
77 EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
79 #define emul_to_vcpu(ctxt) \
80 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
83 * - enable syscall by default because it is emulated by KVM
84 * - enable LME and LMA by default on 64-bit KVM
88 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
90 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
93 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
94 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
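/*
 * Each debugfs_entries[] element below pairs the offset of a counter
 * (within struct kvm for VM_STAT, struct kvm_vcpu for VCPU_STAT) with its
 * kind, so that generic KVM code can locate and expose the statistic.
 */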
96 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
97 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
99 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
100 static void process_nmi(struct kvm_vcpu *vcpu);
101 static void enter_smm(struct kvm_vcpu *vcpu);
102 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
104 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
105 EXPORT_SYMBOL_GPL(kvm_x86_ops);
107 static bool __read_mostly ignore_msrs = false;
108 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
110 unsigned int min_timer_period_us = 500;
111 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
113 static bool __read_mostly kvmclock_periodic_sync = true;
114 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
116 bool __read_mostly kvm_has_tsc_control;
117 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
118 u32 __read_mostly kvm_max_guest_tsc_khz;
119 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
120 u8 __read_mostly kvm_tsc_scaling_ratio_frac_bits;
121 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
122 u64 __read_mostly kvm_max_tsc_scaling_ratio;
123 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
124 u64 __read_mostly kvm_default_tsc_scaling_ratio;
125 EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
127 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
128 static u32 __read_mostly tsc_tolerance_ppm = 250;
129 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
131 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
132 unsigned int __read_mostly lapic_timer_advance_ns = 0;
133 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
135 static bool __read_mostly vector_hashing = true;
136 module_param(vector_hashing, bool, S_IRUGO);
138 #define KVM_NR_SHARED_MSRS 16
140 struct kvm_shared_msrs_global {
142 u32 msrs[KVM_NR_SHARED_MSRS];
145 struct kvm_shared_msrs {
146 struct user_return_notifier urn;
148 struct kvm_shared_msr_values {
151 } values[KVM_NR_SHARED_MSRS];
154 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
155 static struct kvm_shared_msrs __percpu *shared_msrs;
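/*
 * Rough flow of the shared ("user return") MSR machinery declared above and
 * implemented further below: kvm_define_shared_msr() reserves a slot at
 * module init, kvm_shared_msr_cpu_online()/shared_msr_update() snapshot the
 * host value, kvm_set_shared_msr() installs a guest value and registers the
 * user-return notifier, and kvm_on_user_return() restores the host values
 * when the CPU next returns to userspace, so the MSRs need not be rewritten
 * on every vmexit.
 */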
157 struct kvm_stats_debugfs_item debugfs_entries[] = {
158 { "pf_fixed", VCPU_STAT(pf_fixed) },
159 { "pf_guest", VCPU_STAT(pf_guest) },
160 { "tlb_flush", VCPU_STAT(tlb_flush) },
161 { "invlpg", VCPU_STAT(invlpg) },
162 { "exits", VCPU_STAT(exits) },
163 { "io_exits", VCPU_STAT(io_exits) },
164 { "mmio_exits", VCPU_STAT(mmio_exits) },
165 { "signal_exits", VCPU_STAT(signal_exits) },
166 { "irq_window", VCPU_STAT(irq_window_exits) },
167 { "nmi_window", VCPU_STAT(nmi_window_exits) },
168 { "halt_exits", VCPU_STAT(halt_exits) },
169 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
170 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
171 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
172 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
173 { "hypercalls", VCPU_STAT(hypercalls) },
174 { "request_irq", VCPU_STAT(request_irq_exits) },
175 { "irq_exits", VCPU_STAT(irq_exits) },
176 { "host_state_reload", VCPU_STAT(host_state_reload) },
177 { "efer_reload", VCPU_STAT(efer_reload) },
178 { "fpu_reload", VCPU_STAT(fpu_reload) },
179 { "insn_emulation", VCPU_STAT(insn_emulation) },
180 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
181 { "irq_injections", VCPU_STAT(irq_injections) },
182 { "nmi_injections", VCPU_STAT(nmi_injections) },
183 { "req_event", VCPU_STAT(req_event) },
184 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
185 { "mmu_pte_write", VM_STAT(mmu_pte_write) },
186 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
187 { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
188 { "mmu_flooded", VM_STAT(mmu_flooded) },
189 { "mmu_recycled", VM_STAT(mmu_recycled) },
190 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
191 { "mmu_unsync", VM_STAT(mmu_unsync) },
192 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
193 { "largepages", VM_STAT(lpages) },
194 { "max_mmu_page_hash_collisions",
195 VM_STAT(max_mmu_page_hash_collisions) },
199 u64 __read_mostly host_xcr0;
201 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
203 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
206 for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
207 vcpu->arch.apf.gfns[i] = ~0;
210 static void kvm_on_user_return(struct user_return_notifier *urn)
213 struct kvm_shared_msrs *locals
214 = container_of(urn, struct kvm_shared_msrs, urn);
215 struct kvm_shared_msr_values *values;
219 * Disabling irqs at this point since the following code could be
220 * interrupted and executed through kvm_arch_hardware_disable()
222 local_irq_save(flags);
223 if (locals->registered) {
224 locals->registered = false;
225 user_return_notifier_unregister(urn);
227 local_irq_restore(flags);
228 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
229 values = &locals->values[slot];
230 if (values->host != values->curr) {
231 wrmsrl(shared_msrs_global.msrs[slot], values->host);
232 values->curr = values->host;
237 static void shared_msr_update(unsigned slot, u32 msr)
240 unsigned int cpu = smp_processor_id();
241 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
243 /* only read, and nobody should modify it at this time,
244 * so no lock is needed */
245 if (slot >= shared_msrs_global.nr) {
246 printk(KERN_ERR "kvm: invalid MSR slot!");
249 rdmsrl_safe(msr, &value);
250 smsr->values[slot].host = value;
251 smsr->values[slot].curr = value;
254 void kvm_define_shared_msr(unsigned slot, u32 msr)
256 BUG_ON(slot >= KVM_NR_SHARED_MSRS);
257 shared_msrs_global.msrs[slot] = msr;
258 if (slot >= shared_msrs_global.nr)
259 shared_msrs_global.nr = slot + 1;
261 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
263 static void kvm_shared_msr_cpu_online(void)
267 for (i = 0; i < shared_msrs_global.nr; ++i)
268 shared_msr_update(i, shared_msrs_global.msrs[i]);
271 int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
273 unsigned int cpu = smp_processor_id();
274 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
277 if (((value ^ smsr->values[slot].curr) & mask) == 0)
279 smsr->values[slot].curr = value;
280 err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
284 if (!smsr->registered) {
285 smsr->urn.on_user_return = kvm_on_user_return;
286 user_return_notifier_register(&smsr->urn);
287 smsr->registered = true;
291 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
293 static void drop_user_return_notifiers(void)
295 unsigned int cpu = smp_processor_id();
296 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
298 if (smsr->registered)
299 kvm_on_user_return(&smsr->urn);
302 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
304 return vcpu->arch.apic_base;
306 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
308 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
310 u64 old_state = vcpu->arch.apic_base &
311 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
312 u64 new_state = msr_info->data &
313 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
314 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
315 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
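/*
 * reserved_bits covers everything above the guest's MAXPHYADDR, bits 0-7
 * and bit 9 (0x2ff), plus the x2APIC enable bit (bit 10) when guest CPUID
 * does not advertise x2APIC.
 */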
317 if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE)
319 if (!msr_info->host_initiated &&
320 ((new_state == MSR_IA32_APICBASE_ENABLE &&
321 old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
322 (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
326 kvm_lapic_set_base(vcpu, msr_info->data);
329 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
331 asmlinkage __visible void kvm_spurious_fault(void)
333 /* Fault while not rebooting. We want the trace. */
336 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
338 #define EXCPT_BENIGN 0
339 #define EXCPT_CONTRIBUTORY 1
342 static int exception_class(int vector)
352 return EXCPT_CONTRIBUTORY;
359 #define EXCPT_FAULT 0
361 #define EXCPT_ABORT 2
362 #define EXCPT_INTERRUPT 3
364 static int exception_type(int vector)
368 if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
369 return EXCPT_INTERRUPT;
373 /* #DB is trap, as instruction watchpoints are handled elsewhere */
374 if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
377 if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
380 /* Reserved exceptions will result in fault */
384 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
385 unsigned nr, bool has_error, u32 error_code,
391 kvm_make_request(KVM_REQ_EVENT, vcpu);
393 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
395 if (has_error && !is_protmode(vcpu))
399 * On vmentry, vcpu->arch.exception.pending is only
400 * true if an event injection was blocked by
401 * nested_run_pending. In that case, however,
402 * vcpu_enter_guest requests an immediate exit,
403 * and the guest shouldn't proceed far enough to
406 WARN_ON_ONCE(vcpu->arch.exception.pending);
407 vcpu->arch.exception.injected = true;
409 vcpu->arch.exception.pending = true;
410 vcpu->arch.exception.injected = false;
412 vcpu->arch.exception.has_error_code = has_error;
413 vcpu->arch.exception.nr = nr;
414 vcpu->arch.exception.error_code = error_code;
418 /* an exception is already pending or injected; check how it combines with the new one */
419 prev_nr = vcpu->arch.exception.nr;
420 if (prev_nr == DF_VECTOR) {
421 /* triple fault -> shutdown */
422 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
425 class1 = exception_class(prev_nr);
426 class2 = exception_class(nr);
427 if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
428 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
430 * Generate double fault per SDM Table 5-5. Set
431 * exception.pending = true so that the double fault
432 * can trigger a nested vmexit.
434 vcpu->arch.exception.pending = true;
435 vcpu->arch.exception.injected = false;
436 vcpu->arch.exception.has_error_code = true;
437 vcpu->arch.exception.nr = DF_VECTOR;
438 vcpu->arch.exception.error_code = 0;
440 /* replace previous exception with a new one in the hope
441 that instruction re-execution will regenerate the lost exception */
446 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
448 kvm_multiple_exception(vcpu, nr, false, 0, false);
450 EXPORT_SYMBOL_GPL(kvm_queue_exception);
452 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
454 kvm_multiple_exception(vcpu, nr, false, 0, true);
456 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
458 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
461 kvm_inject_gp(vcpu, 0);
463 return kvm_skip_emulated_instruction(vcpu);
467 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
469 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
471 ++vcpu->stat.pf_guest;
472 vcpu->arch.exception.nested_apf =
473 is_guest_mode(vcpu) && fault->async_page_fault;
474 if (vcpu->arch.exception.nested_apf)
475 vcpu->arch.apf.nested_apf_token = fault->address;
477 vcpu->arch.cr2 = fault->address;
478 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
480 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
482 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
484 if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
485 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
487 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
489 return fault->nested_page_fault;
492 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
494 atomic_inc(&vcpu->arch.nmi_queued);
495 kvm_make_request(KVM_REQ_NMI, vcpu);
497 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
499 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
501 kvm_multiple_exception(vcpu, nr, true, error_code, false);
503 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
505 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
507 kvm_multiple_exception(vcpu, nr, true, error_code, true);
509 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
512 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
513 * a #GP and return false.
515 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
517 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
519 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
522 EXPORT_SYMBOL_GPL(kvm_require_cpl);
524 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
526 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
529 kvm_queue_exception(vcpu, UD_VECTOR);
532 EXPORT_SYMBOL_GPL(kvm_require_dr);
535 * This function is used to read from the physical memory of the currently
536 * running guest. Unlike kvm_vcpu_read_guest_page, this function can read
537 * either from guest physical memory or from the guest's nested guest physical memory.
539 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
540 gfn_t ngfn, void *data, int offset, int len,
543 struct x86_exception exception;
547 ngpa = gfn_to_gpa(ngfn);
548 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
549 if (real_gfn == UNMAPPED_GVA)
552 real_gfn = gpa_to_gfn(real_gfn);
554 return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
556 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
558 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
559 void *data, int offset, int len, u32 access)
561 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
562 data, offset, len, access);
566 * Load the PAE pdptrs. Return true if they are all valid.
568 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
570 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
571 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
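/*
 * In PAE mode, CR3 bits 11:5 give the 32-byte-aligned location of the
 * PDPT within its page; ">> 5 << 2" converts that byte offset into an
 * index of 8-byte entries (each 32-byte block holds four pdptes), which
 * is then scaled by sizeof(u64) for the guest read below.
 */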
574 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
576 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
577 offset * sizeof(u64), sizeof(pdpte),
578 PFERR_USER_MASK|PFERR_WRITE_MASK);
583 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
584 if ((pdpte[i] & PT_PRESENT_MASK) &&
586 vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
593 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
594 __set_bit(VCPU_EXREG_PDPTR,
595 (unsigned long *)&vcpu->arch.regs_avail);
596 __set_bit(VCPU_EXREG_PDPTR,
597 (unsigned long *)&vcpu->arch.regs_dirty);
602 EXPORT_SYMBOL_GPL(load_pdptrs);
604 bool pdptrs_changed(struct kvm_vcpu *vcpu)
606 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
612 if (is_long_mode(vcpu) || !is_pae(vcpu))
615 if (!test_bit(VCPU_EXREG_PDPTR,
616 (unsigned long *)&vcpu->arch.regs_avail))
619 gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
620 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
621 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
622 PFERR_USER_MASK | PFERR_WRITE_MASK);
625 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
630 EXPORT_SYMBOL_GPL(pdptrs_changed);
632 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
634 unsigned long old_cr0 = kvm_read_cr0(vcpu);
635 unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
640 if (cr0 & 0xffffffff00000000UL)
644 cr0 &= ~CR0_RESERVED_BITS;
646 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
649 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
652 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
654 if ((vcpu->arch.efer & EFER_LME)) {
659 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
664 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
669 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
672 kvm_x86_ops->set_cr0(vcpu, cr0);
674 if ((cr0 ^ old_cr0) & X86_CR0_PG) {
675 kvm_clear_async_pf_completion_queue(vcpu);
676 kvm_async_pf_hash_reset(vcpu);
679 if ((cr0 ^ old_cr0) & update_bits)
680 kvm_mmu_reset_context(vcpu);
682 if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
683 kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
684 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
685 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
689 EXPORT_SYMBOL_GPL(kvm_set_cr0);
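/*
 * LMSW only loads the low four bits of CR0 (PE, MP, EM, TS) and cannot
 * clear PE, so keep the current value of the remaining CR0 bits (and of
 * PE) and merge in the new machine status word.
 */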
691 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
693 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
695 EXPORT_SYMBOL_GPL(kvm_lmsw);
697 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
699 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
700 !vcpu->guest_xcr0_loaded) {
701 /* kvm_set_xcr() also depends on this */
702 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
703 vcpu->guest_xcr0_loaded = 1;
707 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
709 if (vcpu->guest_xcr0_loaded) {
710 if (vcpu->arch.xcr0 != host_xcr0)
711 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
712 vcpu->guest_xcr0_loaded = 0;
716 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
719 u64 old_xcr0 = vcpu->arch.xcr0;
722 /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
723 if (index != XCR_XFEATURE_ENABLED_MASK)
725 if (!(xcr0 & XFEATURE_MASK_FP))
727 if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
731 * Do not allow the guest to set bits that we do not support
732 * saving. However, xcr0 bit 0 is always set, even if the
733 * emulated CPU does not support XSAVE (see fx_init).
735 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
736 if (xcr0 & ~valid_bits)
739 if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
740 (!(xcr0 & XFEATURE_MASK_BNDCSR)))
743 if (xcr0 & XFEATURE_MASK_AVX512) {
744 if (!(xcr0 & XFEATURE_MASK_YMM))
746 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
749 vcpu->arch.xcr0 = xcr0;
751 if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
752 kvm_update_cpuid(vcpu);
756 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
758 if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
759 __kvm_set_xcr(vcpu, index, xcr)) {
760 kvm_inject_gp(vcpu, 0);
765 EXPORT_SYMBOL_GPL(kvm_set_xcr);
767 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
769 unsigned long old_cr4 = kvm_read_cr4(vcpu);
770 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
771 X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
773 if (cr4 & CR4_RESERVED_BITS)
776 if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
779 if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
782 if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
785 if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
788 if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
791 if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
794 if (is_long_mode(vcpu)) {
795 if (!(cr4 & X86_CR4_PAE))
797 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
798 && ((cr4 ^ old_cr4) & pdptr_bits)
799 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
803 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
804 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
807 /* PCID cannot be enabled when CR3[11:0] != 000H or EFER.LMA = 0 */
808 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
812 if (kvm_x86_ops->set_cr4(vcpu, cr4))
815 if (((cr4 ^ old_cr4) & pdptr_bits) ||
816 (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
817 kvm_mmu_reset_context(vcpu);
819 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
820 kvm_update_cpuid(vcpu);
824 EXPORT_SYMBOL_GPL(kvm_set_cr4);
826 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
829 cr3 &= ~CR3_PCID_INVD;
832 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
833 kvm_mmu_sync_roots(vcpu);
834 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
838 if (is_long_mode(vcpu) &&
839 (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
841 else if (is_pae(vcpu) && is_paging(vcpu) &&
842 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
845 vcpu->arch.cr3 = cr3;
846 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
847 kvm_mmu_new_cr3(vcpu);
850 EXPORT_SYMBOL_GPL(kvm_set_cr3);
852 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
854 if (cr8 & CR8_RESERVED_BITS)
856 if (lapic_in_kernel(vcpu))
857 kvm_lapic_set_tpr(vcpu, cr8);
859 vcpu->arch.cr8 = cr8;
862 EXPORT_SYMBOL_GPL(kvm_set_cr8);
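/*
 * CR8 is an architectural alias of the local APIC TPR (CR8[3:0] maps to
 * TPR[7:4]), so accesses go through the in-kernel LAPIC when present and
 * fall back to the cached arch.cr8 value otherwise.
 */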
864 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
866 if (lapic_in_kernel(vcpu))
867 return kvm_lapic_get_cr8(vcpu);
869 return vcpu->arch.cr8;
871 EXPORT_SYMBOL_GPL(kvm_get_cr8);
873 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
877 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
878 for (i = 0; i < KVM_NR_DB_REGS; i++)
879 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
880 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
884 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
886 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
887 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
890 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
894 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
895 dr7 = vcpu->arch.guest_debug_dr7;
897 dr7 = vcpu->arch.dr7;
898 kvm_x86_ops->set_dr7(vcpu, dr7);
899 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
900 if (dr7 & DR7_BP_EN_MASK)
901 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
904 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
906 u64 fixed = DR6_FIXED_1;
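/*
 * DR6 bit 16 is the RTM bit, which is only cleared for debug exceptions
 * raised inside a transactional region; for guests without RTM it is a
 * reserved bit that must read as 1.
 */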
908 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
913 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
917 vcpu->arch.db[dr] = val;
918 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
919 vcpu->arch.eff_db[dr] = val;
924 if (val & 0xffffffff00000000ULL)
926 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
927 kvm_update_dr6(vcpu);
932 if (val & 0xffffffff00000000ULL)
934 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
935 kvm_update_dr7(vcpu);
942 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
944 if (__kvm_set_dr(vcpu, dr, val)) {
945 kvm_inject_gp(vcpu, 0);
950 EXPORT_SYMBOL_GPL(kvm_set_dr);
952 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
956 *val = vcpu->arch.db[dr];
961 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
962 *val = vcpu->arch.dr6;
964 *val = kvm_x86_ops->get_dr6(vcpu);
969 *val = vcpu->arch.dr7;
974 EXPORT_SYMBOL_GPL(kvm_get_dr);
976 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
978 u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
982 err = kvm_pmu_rdpmc(vcpu, ecx, &data);
985 kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
986 kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
989 EXPORT_SYMBOL_GPL(kvm_rdpmc);
992 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
993 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
995 * This list is modified at module load time to reflect the
996 * capabilities of the host cpu. This capabilities test skips MSRs that are
997 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
998 * may depend on host virtualization features rather than host cpu features.
1001 static u32 msrs_to_save[] = {
1002 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
1004 #ifdef CONFIG_X86_64
1005 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1007 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1008 MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1011 static unsigned num_msrs_to_save;
1013 static u32 emulated_msrs[] = {
1014 MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
1015 MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
1016 HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
1017 HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
1018 HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
1019 HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
1020 HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
1022 HV_X64_MSR_VP_INDEX,
1023 HV_X64_MSR_VP_RUNTIME,
1024 HV_X64_MSR_SCONTROL,
1025 HV_X64_MSR_STIMER0_CONFIG,
1026 HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
1029 MSR_IA32_TSC_ADJUST,
1030 MSR_IA32_TSCDEADLINE,
1031 MSR_IA32_MISC_ENABLE,
1032 MSR_IA32_MCG_STATUS,
1034 MSR_IA32_MCG_EXT_CTL,
1037 MSR_MISC_FEATURES_ENABLES,
1040 static unsigned num_emulated_msrs;
1042 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1044 if (efer & efer_reserved_bits)
1047 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1050 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1055 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1057 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1059 u64 old_efer = vcpu->arch.efer;
1061 if (!kvm_valid_efer(vcpu, efer))
1065 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1069 efer |= vcpu->arch.efer & EFER_LMA;
1071 kvm_x86_ops->set_efer(vcpu, efer);
1073 /* Update reserved bits */
1074 if ((efer ^ old_efer) & EFER_NX)
1075 kvm_mmu_reset_context(vcpu);
1080 void kvm_enable_efer_bits(u64 mask)
1082 efer_reserved_bits &= ~mask;
1084 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1087 * Writes msr value into the appropriate "register".
1088 * Returns 0 on success, non-0 otherwise.
1089 * Assumes vcpu_load() was already called.
1091 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1093 switch (msr->index) {
1096 case MSR_KERNEL_GS_BASE:
1099 if (is_noncanonical_address(msr->data, vcpu))
1102 case MSR_IA32_SYSENTER_EIP:
1103 case MSR_IA32_SYSENTER_ESP:
1105 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1106 * non-canonical address is written on Intel but not on
1107 * AMD (which ignores the top 32-bits, because it does
1108 * not implement 64-bit SYSENTER).
1110 * 64-bit code should hence be able to write a non-canonical
1111 * value on AMD. Making the address canonical ensures that
1112 * vmentry does not fail on Intel after writing a non-canonical
1113 * value, and that something deterministic happens if the guest
1114 * invokes 64-bit SYSENTER.
1116 msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu));
1118 return kvm_x86_ops->set_msr(vcpu, msr);
1120 EXPORT_SYMBOL_GPL(kvm_set_msr);
1123 * Adapt set_msr() to msr_io()'s calling convention
1125 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1127 struct msr_data msr;
1131 msr.host_initiated = true;
1132 r = kvm_get_msr(vcpu, &msr);
1140 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1142 struct msr_data msr;
1146 msr.host_initiated = true;
1147 return kvm_set_msr(vcpu, &msr);
1150 #ifdef CONFIG_X86_64
1151 struct pvclock_gtod_data {
1154 struct { /* extract of a clocksource struct */
1167 static struct pvclock_gtod_data pvclock_gtod_data;
1169 static void update_pvclock_gtod(struct timekeeper *tk)
1171 struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1174 boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
1176 write_seqcount_begin(&vdata->seq);
1178 /* copy pvclock gtod data */
1179 vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
1180 vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
1181 vdata->clock.mask = tk->tkr_mono.mask;
1182 vdata->clock.mult = tk->tkr_mono.mult;
1183 vdata->clock.shift = tk->tkr_mono.shift;
1185 vdata->boot_ns = boot_ns;
1186 vdata->nsec_base = tk->tkr_mono.xtime_nsec;
1188 vdata->wall_time_sec = tk->xtime_sec;
1190 write_seqcount_end(&vdata->seq);
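/*
 * Readers such as do_monotonic_boot() and do_realtime() below use
 * read_seqcount_begin()/read_seqcount_retry() on vdata->seq, so they
 * either observe a fully consistent copy of these fields or retry.
 */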
1194 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
1197 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1198 * vcpu_enter_guest. This function is only called from
1199 * the physical CPU that is running vcpu.
1201 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1204 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1208 struct pvclock_wall_clock wc;
1209 struct timespec64 boot;
1214 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1219 ++version; /* first time write, random junk */
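/*
 * Make sure the version is odd before writing it out below: the guest
 * treats an odd version as "update in progress", and it is bumped back
 * to an even value once the wall clock payload has been written.
 */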
1223 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
1227 * The guest calculates current wall clock time by adding
1228 * system time (updated by kvm_guest_time_update below) to the
1229 * wall clock specified here. guest system time equals host
1230 * system time for us, thus we must fill in host boot time here.
1232 getboottime64(&boot);
1234 if (kvm->arch.kvmclock_offset) {
1235 struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
1236 boot = timespec64_sub(boot, ts);
1238 wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
1239 wc.nsec = boot.tv_nsec;
1240 wc.version = version;
1242 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1245 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1248 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1250 do_shl32_div32(dividend, divisor);
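/*
 * kvm_get_time_scale() below finds a (shift, multiplier) pair such that,
 * roughly, scaled_hz ~= base_hz * multiplier * 2^shift / 2^32, i.e. the
 * fixed-point form consumed by pvclock_scale_delta()/compute_guest_tsc()
 * to convert a delta counted at base_hz into one counted at scaled_hz.
 */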
1254 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
1255 s8 *pshift, u32 *pmultiplier)
1263 scaled64 = scaled_hz;
1264 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1269 tps32 = (uint32_t)tps64;
1270 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1271 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1279 *pmultiplier = div_frac(scaled64, tps32);
1281 pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
1282 __func__, base_hz, scaled_hz, shift, *pmultiplier);
1285 #ifdef CONFIG_X86_64
1286 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1289 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1290 static unsigned long max_tsc_khz;
1292 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1294 u64 v = (u64)khz * (1000000 + ppm);
1299 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1303 /* Guest TSC same frequency as host TSC? */
1305 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1309 /* TSC scaling supported? */
1310 if (!kvm_has_tsc_control) {
1311 if (user_tsc_khz > tsc_khz) {
1312 vcpu->arch.tsc_catchup = 1;
1313 vcpu->arch.tsc_always_catchup = 1;
1316 WARN(1, "user requested TSC rate below hardware speed\n");
1321 /* TSC scaling required - calculate ratio */
1322 ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
1323 user_tsc_khz, tsc_khz);
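/*
 * ratio is a binary fixed-point number with
 * kvm_tsc_scaling_ratio_frac_bits fractional bits, i.e.
 * guest_tsc = (host_tsc * ratio) >> frac_bits (see __scale_tsc() below).
 * For example, with 48 fractional bits a guest running at half the host
 * TSC frequency would get ratio = 1ULL << 47.
 */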
1325 if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
1326 WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
1331 vcpu->arch.tsc_scaling_ratio = ratio;
1335 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
1337 u32 thresh_lo, thresh_hi;
1338 int use_scaling = 0;
1340 /* tsc_khz can be zero if TSC calibration fails */
1341 if (user_tsc_khz == 0) {
1342 /* set tsc_scaling_ratio to a safe value */
1343 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1347 /* Compute a scale to convert nanoseconds in TSC cycles */
1348 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
1349 &vcpu->arch.virtual_tsc_shift,
1350 &vcpu->arch.virtual_tsc_mult);
1351 vcpu->arch.virtual_tsc_khz = user_tsc_khz;
1354 * Compute the variation in TSC rate which is acceptable
1355 * within the range of tolerance and decide if the
1356 * rate being applied is within those bounds of the hardware
1357 * rate. If so, no scaling or compensation need be done.
1359 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1360 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1361 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
1362 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
1365 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
1368 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1370 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1371 vcpu->arch.virtual_tsc_mult,
1372 vcpu->arch.virtual_tsc_shift);
1373 tsc += vcpu->arch.this_tsc_write;
1377 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1379 #ifdef CONFIG_X86_64
1381 struct kvm_arch *ka = &vcpu->kvm->arch;
1382 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1384 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1385 atomic_read(&vcpu->kvm->online_vcpus));
1388 * Once the masterclock is enabled, always perform request in
1389 * order to update it.
1391 * In order to enable masterclock, the host clocksource must be TSC
1392 * and the vcpus need to have matched TSCs. When that happens,
1393 * perform request to enable masterclock.
1395 if (ka->use_master_clock ||
1396 (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
1397 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1399 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1400 atomic_read(&vcpu->kvm->online_vcpus),
1401 ka->use_master_clock, gtod->clock.vclock_mode);
1405 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1407 u64 curr_offset = vcpu->arch.tsc_offset;
1408 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1412 * Multiply tsc by a fixed point number represented by ratio.
1414 * The most significant 64-N bits (mult) of ratio represent the
1415 * integral part of the fixed point number; the remaining N bits
1416 * (frac) represent the fractional part, ie. ratio represents a fixed
1417 * point number (mult + frac * 2^(-N)).
1419 * N equals to kvm_tsc_scaling_ratio_frac_bits.
1421 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
1423 return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
1426 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
1429 u64 ratio = vcpu->arch.tsc_scaling_ratio;
1431 if (ratio != kvm_default_tsc_scaling_ratio)
1432 _tsc = __scale_tsc(ratio, tsc);
1436 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
1438 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1442 tsc = kvm_scale_tsc(vcpu, rdtsc());
1444 return target_tsc - tsc;
1447 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1449 return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1451 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1453 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1455 kvm_x86_ops->write_tsc_offset(vcpu, offset);
1456 vcpu->arch.tsc_offset = offset;
1459 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1461 struct kvm *kvm = vcpu->kvm;
1462 u64 offset, ns, elapsed;
1463 unsigned long flags;
1465 bool already_matched;
1466 u64 data = msr->data;
1467 bool synchronizing = false;
1469 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1470 offset = kvm_compute_tsc_offset(vcpu, data);
1471 ns = ktime_get_boot_ns();
1472 elapsed = ns - kvm->arch.last_tsc_nsec;
1474 if (vcpu->arch.virtual_tsc_khz) {
1475 if (data == 0 && msr->host_initiated) {
1477 * detection of vcpu initialization -- need to sync
1478 * with other vCPUs. This particularly helps to keep
1479 * kvm_clock stable after CPU hotplug
1481 synchronizing = true;
1483 u64 tsc_exp = kvm->arch.last_tsc_write +
1484 nsec_to_cycles(vcpu, elapsed);
1485 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
1487 * Special case: TSC write with a small delta (1 second)
1488 * of virtual cycle time against real time is
1489 * interpreted as an attempt to synchronize the CPU.
1491 synchronizing = data < tsc_exp + tsc_hz &&
1492 data + tsc_hz > tsc_exp;
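/*
 * In other words, treat the write as a synchronization attempt if it
 * lands within one virtual second of where the previously synchronized
 * TSC value would have advanced to by now.
 */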
1497 * For a reliable TSC, we can match TSC offsets, and for an unstable
1498 * TSC, we add elapsed time in this computation. We could let the
1499 * compensation code attempt to catch up if we fall behind, but
1500 * it's better to try to match offsets from the beginning.
1502 if (synchronizing &&
1503 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1504 if (!check_tsc_unstable()) {
1505 offset = kvm->arch.cur_tsc_offset;
1506 pr_debug("kvm: matched tsc offset for %llu\n", data);
1508 u64 delta = nsec_to_cycles(vcpu, elapsed);
1510 offset = kvm_compute_tsc_offset(vcpu, data);
1511 pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1514 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1517 * We split periods of matched TSC writes into generations.
1518 * For each generation, we track the original measured
1519 * nanosecond time, offset, and write, so if TSCs are in
1520 * sync, we can match exact offset, and if not, we can match
1521 * exact software computation in compute_guest_tsc()
1523 * These values are tracked in kvm->arch.cur_xxx variables.
1525 kvm->arch.cur_tsc_generation++;
1526 kvm->arch.cur_tsc_nsec = ns;
1527 kvm->arch.cur_tsc_write = data;
1528 kvm->arch.cur_tsc_offset = offset;
1530 pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1531 kvm->arch.cur_tsc_generation, data);
1535 * We also track the most recent recorded KHZ, write and time to
1536 * allow the matching interval to be extended at each write.
1538 kvm->arch.last_tsc_nsec = ns;
1539 kvm->arch.last_tsc_write = data;
1540 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1542 vcpu->arch.last_guest_tsc = data;
1544 /* Keep track of which generation this VCPU has synchronized to */
1545 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1546 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1547 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1549 if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
1550 update_ia32_tsc_adjust_msr(vcpu, offset);
1552 kvm_vcpu_write_tsc_offset(vcpu, offset);
1553 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1555 spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
1557 kvm->arch.nr_vcpus_matched_tsc = 0;
1558 } else if (!already_matched) {
1559 kvm->arch.nr_vcpus_matched_tsc++;
1562 kvm_track_tsc_matching(vcpu);
1563 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1566 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1568 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1571 kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
1574 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1576 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
1577 WARN_ON(adjustment < 0);
1578 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1579 adjust_tsc_offset_guest(vcpu, adjustment);
1582 #ifdef CONFIG_X86_64
1584 static u64 read_tsc(void)
1586 u64 ret = (u64)rdtsc_ordered();
1587 u64 last = pvclock_gtod_data.clock.cycle_last;
1589 if (likely(ret >= last))
1593 * GCC likes to generate cmov here, but this branch is extremely
1594 * predictable (it's just a function of time and the likely is
1595 * very likely) and there's a data dependence, so force GCC
1596 * to generate a branch instead. I don't barrier() because
1597 * we don't actually need a barrier, and if this function
1598 * ever gets inlined it will generate worse code.
1604 static inline u64 vgettsc(u64 *cycle_now)
1607 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1609 *cycle_now = read_tsc();
1611 v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
1612 return v * gtod->clock.mult;
1615 static int do_monotonic_boot(s64 *t, u64 *cycle_now)
1617 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1623 seq = read_seqcount_begin(&gtod->seq);
1624 mode = gtod->clock.vclock_mode;
1625 ns = gtod->nsec_base;
1626 ns += vgettsc(cycle_now);
1627 ns >>= gtod->clock.shift;
1628 ns += gtod->boot_ns;
1629 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1635 static int do_realtime(struct timespec *ts, u64 *cycle_now)
1637 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1643 seq = read_seqcount_begin(&gtod->seq);
1644 mode = gtod->clock.vclock_mode;
1645 ts->tv_sec = gtod->wall_time_sec;
1646 ns = gtod->nsec_base;
1647 ns += vgettsc(cycle_now);
1648 ns >>= gtod->clock.shift;
1649 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1651 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
1657 /* returns true if host is using tsc clocksource */
1658 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
1660 /* checked again under seqlock below */
1661 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1664 return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1667 /* returns true if host is using tsc clocksource */
1668 static bool kvm_get_walltime_and_clockread(struct timespec *ts,
1671 /* checked again under seqlock below */
1672 if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1675 return do_realtime(ts, cycle_now) == VCLOCK_TSC;
1681 * Assuming a stable TSC across physical CPUs, and a stable TSC
1682 * across virtual CPUs, the following condition is possible.
1683 * Each numbered line represents an event visible to both
1684 * CPUs at the next numbered event.
1686 * "timespecX" represents host monotonic time. "tscX" represents
1689 * VCPU0 on CPU0 | VCPU1 on CPU1
1691 * 1. read timespec0,tsc0
1692 * 2. | timespec1 = timespec0 + N
1694 * 3. transition to guest | transition to guest
1695 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
1696 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
1697 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
1699 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
1702 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
1704 * - 0 < N - M => M < N
1706 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
1707 * always the case (the difference between two distinct xtime instances
1708 * might be smaller than the difference between corresponding TSC reads,
1709 * when updating the guest vcpus' pvclock areas).
1711 * To avoid that problem, do not allow visibility of distinct
1712 * system_timestamp/tsc_timestamp values simultaneously: use a master
1713 * copy of host monotonic time values. Update that master copy
1716 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1720 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1722 #ifdef CONFIG_X86_64
1723 struct kvm_arch *ka = &kvm->arch;
1725 bool host_tsc_clocksource, vcpus_matched;
1727 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1728 atomic_read(&kvm->online_vcpus));
1731 * If the host uses TSC clock, then passthrough TSC as stable
1734 host_tsc_clocksource = kvm_get_time_and_clockread(
1735 &ka->master_kernel_ns,
1736 &ka->master_cycle_now);
1738 ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1739 && !ka->backwards_tsc_observed
1740 && !ka->boot_vcpu_runs_old_kvmclock;
1742 if (ka->use_master_clock)
1743 atomic_set(&kvm_guest_has_master_clock, 1);
1745 vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1746 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
1751 void kvm_make_mclock_inprogress_request(struct kvm *kvm)
1753 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
1756 static void kvm_gen_update_masterclock(struct kvm *kvm)
1758 #ifdef CONFIG_X86_64
1760 struct kvm_vcpu *vcpu;
1761 struct kvm_arch *ka = &kvm->arch;
1763 spin_lock(&ka->pvclock_gtod_sync_lock);
1764 kvm_make_mclock_inprogress_request(kvm);
1765 /* no guest entries from this point */
1766 pvclock_update_vm_gtod_copy(kvm);
1768 kvm_for_each_vcpu(i, vcpu, kvm)
1769 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1771 /* guest entries allowed */
1772 kvm_for_each_vcpu(i, vcpu, kvm)
1773 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
1775 spin_unlock(&ka->pvclock_gtod_sync_lock);
1779 u64 get_kvmclock_ns(struct kvm *kvm)
1781 struct kvm_arch *ka = &kvm->arch;
1782 struct pvclock_vcpu_time_info hv_clock;
1785 spin_lock(&ka->pvclock_gtod_sync_lock);
1786 if (!ka->use_master_clock) {
1787 spin_unlock(&ka->pvclock_gtod_sync_lock);
1788 return ktime_get_boot_ns() + ka->kvmclock_offset;
1791 hv_clock.tsc_timestamp = ka->master_cycle_now;
1792 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1793 spin_unlock(&ka->pvclock_gtod_sync_lock);
1795 /* both __this_cpu_read() and rdtsc() should be on the same cpu */
1798 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1799 &hv_clock.tsc_shift,
1800 &hv_clock.tsc_to_system_mul);
1801 ret = __pvclock_read_cycles(&hv_clock, rdtsc());
1808 static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
1810 struct kvm_vcpu_arch *vcpu = &v->arch;
1811 struct pvclock_vcpu_time_info guest_hv_clock;
1813 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1814 &guest_hv_clock, sizeof(guest_hv_clock))))
1817 /* This VCPU is paused, but it's legal for a guest to read another
1818 * VCPU's kvmclock, so we really have to follow the specification where
1819 * it says that version is odd if data is being modified, and even after
1822 * Version field updates must be kept separate. This is because
1823 * kvm_write_guest_cached might use a "rep movs" instruction, and
1824 * writes within a string instruction are weakly ordered. So there
1825 * are three writes overall.
1827 * As a small optimization, only write the version field in the first
1828 * and third write. The vcpu->pv_time cache is still valid, because the
1829 * version field is the first in the struct.
1831 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
1833 vcpu->hv_clock.version = guest_hv_clock.version + 1;
1834 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1836 sizeof(vcpu->hv_clock.version));
1840 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1841 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1843 if (vcpu->pvclock_set_guest_stopped_request) {
1844 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
1845 vcpu->pvclock_set_guest_stopped_request = false;
1848 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
1850 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1852 sizeof(vcpu->hv_clock));
1856 vcpu->hv_clock.version++;
1857 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1859 sizeof(vcpu->hv_clock.version));
1862 static int kvm_guest_time_update(struct kvm_vcpu *v)
1864 unsigned long flags, tgt_tsc_khz;
1865 struct kvm_vcpu_arch *vcpu = &v->arch;
1866 struct kvm_arch *ka = &v->kvm->arch;
1868 u64 tsc_timestamp, host_tsc;
1870 bool use_master_clock;
1876 * If the host uses TSC clock, then passthrough TSC as stable
1879 spin_lock(&ka->pvclock_gtod_sync_lock);
1880 use_master_clock = ka->use_master_clock;
1881 if (use_master_clock) {
1882 host_tsc = ka->master_cycle_now;
1883 kernel_ns = ka->master_kernel_ns;
1885 spin_unlock(&ka->pvclock_gtod_sync_lock);
1887 /* Keep irq disabled to prevent changes to the clock */
1888 local_irq_save(flags);
1889 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1890 if (unlikely(tgt_tsc_khz == 0)) {
1891 local_irq_restore(flags);
1892 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1895 if (!use_master_clock) {
1897 kernel_ns = ktime_get_boot_ns();
1900 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
1903 * We may have to catch up the TSC to match elapsed wall clock
1904 * time for two reasons, even if kvmclock is used.
1905 * 1) CPU could have been running below the maximum TSC rate
1906 * 2) Broken TSC compensation resets the base at each VCPU
1907 * entry to avoid unknown leaps of TSC even when running
1908 * again on the same CPU. This may cause apparent elapsed
1909 * time to disappear, and the guest to stand still or run
1912 if (vcpu->tsc_catchup) {
1913 u64 tsc = compute_guest_tsc(v, kernel_ns);
1914 if (tsc > tsc_timestamp) {
1915 adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1916 tsc_timestamp = tsc;
1920 local_irq_restore(flags);
1922 /* With all the info we got, fill in the values */
1924 if (kvm_has_tsc_control)
1925 tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
1927 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
1928 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
1929 &vcpu->hv_clock.tsc_shift,
1930 &vcpu->hv_clock.tsc_to_system_mul);
1931 vcpu->hw_tsc_khz = tgt_tsc_khz;
1934 vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1935 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1936 vcpu->last_guest_tsc = tsc_timestamp;
1938 /* If the host uses TSC clocksource, then it is stable */
1940 if (use_master_clock)
1941 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
1943 vcpu->hv_clock.flags = pvclock_flags;
1945 if (vcpu->pv_time_enabled)
1946 kvm_setup_pvclock_page(v);
1947 if (v == kvm_get_vcpu(v->kvm, 0))
1948 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
1953 * kvmclock updates which are isolated to a given vcpu, such as
1954 * vcpu->cpu migration, should not allow system_timestamp from
1955 * the rest of the vcpus to remain static. Otherwise ntp frequency
1956 * correction applies to one vcpu's system_timestamp but not
1959 * So in those cases, request a kvmclock update for all vcpus.
1960 * We need to rate-limit these requests though, as they can
1961 * considerably slow guests that have a large number of vcpus.
1962 * The time for a remote vcpu to update its kvmclock is bound
1963 * by the delay we use to rate-limit the updates.
1966 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1968 static void kvmclock_update_fn(struct work_struct *work)
1971 struct delayed_work *dwork = to_delayed_work(work);
1972 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1973 kvmclock_update_work);
1974 struct kvm *kvm = container_of(ka, struct kvm, arch);
1975 struct kvm_vcpu *vcpu;
1977 kvm_for_each_vcpu(i, vcpu, kvm) {
1978 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1979 kvm_vcpu_kick(vcpu);
1983 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1985 struct kvm *kvm = v->kvm;
1987 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1988 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1989 KVMCLOCK_UPDATE_DELAY);
1992 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
1994 static void kvmclock_sync_fn(struct work_struct *work)
1996 struct delayed_work *dwork = to_delayed_work(work);
1997 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1998 kvmclock_sync_work);
1999 struct kvm *kvm = container_of(ka, struct kvm, arch);
2001 if (!kvmclock_periodic_sync)
2004 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
2005 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
2006 KVMCLOCK_SYNC_PERIOD);
2009 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2011 u64 mcg_cap = vcpu->arch.mcg_cap;
2012 unsigned bank_num = mcg_cap & 0xff;
2015 case MSR_IA32_MCG_STATUS:
2016 vcpu->arch.mcg_status = data;
2018 case MSR_IA32_MCG_CTL:
2019 if (!(mcg_cap & MCG_CTL_P))
2021 if (data != 0 && data != ~(u64)0)
2023 vcpu->arch.mcg_ctl = data;
2026 if (msr >= MSR_IA32_MC0_CTL &&
2027 msr < MSR_IA32_MCx_CTL(bank_num)) {
2028 u32 offset = msr - MSR_IA32_MC0_CTL;
2029 /* only 0 or all 1s can be written to IA32_MCi_CTL;
2030 * some Linux kernels though clear bit 10 in bank 4 to
2031 * work around a BIOS/GART TBL issue on AMD K8s, so ignore
2032 * this to avoid an uncaught #GP in the guest
2034 if ((offset & 0x3) == 0 &&
2035 data != 0 && (data | (1 << 10)) != ~(u64)0)
2037 vcpu->arch.mce_banks[offset] = data;
2045 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
2047 struct kvm *kvm = vcpu->kvm;
2048 int lm = is_long_mode(vcpu);
2049 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
2050 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
2051 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
2052 : kvm->arch.xen_hvm_config.blob_size_32;
2053 u32 page_num = data & ~PAGE_MASK;
2054 u64 page_addr = data & PAGE_MASK;
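/*
 * The written value encodes two things: the low bits select which page
 * of the hypercall blob to copy (page_num) and the page-aligned remainder
 * is the guest-physical destination (page_addr).
 */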
2059 if (page_num >= blob_size)
2062 page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
2067 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
2076 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2078 gpa_t gpa = data & ~0x3f;
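/*
 * The low bits of the MSR are control flags: bit 0 enables the mechanism,
 * bit 1 is KVM_ASYNC_PF_SEND_ALWAYS and bit 2 is
 * KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (both consumed below); masking with
 * ~0x3f leaves the 64-byte-aligned gpa of the shared data area.
 */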
2080 /* Bits 3:5 are reserved, should be zero */
2084 vcpu->arch.apf.msr_val = data;
2086 if (!(data & KVM_ASYNC_PF_ENABLED)) {
2087 kvm_clear_async_pf_completion_queue(vcpu);
2088 kvm_async_pf_hash_reset(vcpu);
2092 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2096 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2097 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
2098 kvm_async_pf_wakeup_all(vcpu);
2102 static void kvmclock_reset(struct kvm_vcpu *vcpu)
2104 vcpu->arch.pv_time_enabled = false;
2107 static void record_steal_time(struct kvm_vcpu *vcpu)
2109 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2112 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2113 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
2116 vcpu->arch.st.steal.preempted = 0;
2118 if (vcpu->arch.st.steal.version & 1)
2119 vcpu->arch.st.steal.version += 1; /* first time write, random junk */
2121 vcpu->arch.st.steal.version += 1;
2123 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2124 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
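/*
 * steal.version is now odd: the guest treats it like a seqcount and must
 * not trust the steal-time fields until it is bumped back to an even
 * value after the update below.
 */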
2128 vcpu->arch.st.steal.steal += current->sched_info.run_delay -
2129 vcpu->arch.st.last_steal;
2130 vcpu->arch.st.last_steal = current->sched_info.run_delay;
2132 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2133 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2137 vcpu->arch.st.steal.version += 1;
2139 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2140 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2143 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2146 u32 msr = msr_info->index;
2147 u64 data = msr_info->data;
2150 case MSR_AMD64_NB_CFG:
2151 case MSR_IA32_UCODE_REV:
2152 case MSR_IA32_UCODE_WRITE:
2153 case MSR_VM_HSAVE_PA:
2154 case MSR_AMD64_PATCH_LOADER:
2155 case MSR_AMD64_BU_CFG2:
2156 case MSR_AMD64_DC_CFG:
2160 return set_efer(vcpu, data);
2162 data &= ~(u64)0x40; /* ignore flush filter disable */
2163 data &= ~(u64)0x100; /* ignore ignne emulation enable */
2164 data &= ~(u64)0x8; /* ignore TLB cache disable */
2165 data &= ~(u64)0x40000; /* ignore Mc status write enable */
2167 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
2172 case MSR_FAM10H_MMIO_CONF_BASE:
2174 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
2179 case MSR_IA32_DEBUGCTLMSR:
2181 /* We support the non-activated case already */
2183 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
2184 /* Values other than LBR and BTF are vendor-specific,
2185 thus reserved and should throw a #GP */
2188 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
2191 case 0x200 ... 0x2ff:
2192 return kvm_mtrr_set_msr(vcpu, msr, data);
2193 case MSR_IA32_APICBASE:
2194 return kvm_set_apic_base(vcpu, msr_info);
2195 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2196 return kvm_x2apic_msr_write(vcpu, msr, data);
2197 case MSR_IA32_TSCDEADLINE:
2198 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2200 case MSR_IA32_TSC_ADJUST:
2201 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
2202 if (!msr_info->host_initiated) {
2203 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2204 adjust_tsc_offset_guest(vcpu, adj);
2206 vcpu->arch.ia32_tsc_adjust_msr = data;
2209 case MSR_IA32_MISC_ENABLE:
2210 vcpu->arch.ia32_misc_enable_msr = data;
2212 case MSR_IA32_SMBASE:
2213 if (!msr_info->host_initiated)
2215 vcpu->arch.smbase = data;
2217 case MSR_KVM_WALL_CLOCK_NEW:
2218 case MSR_KVM_WALL_CLOCK:
2219 vcpu->kvm->arch.wall_clock = data;
2220 kvm_write_wall_clock(vcpu->kvm, data);
2222 case MSR_KVM_SYSTEM_TIME_NEW:
2223 case MSR_KVM_SYSTEM_TIME: {
2224 struct kvm_arch *ka = &vcpu->kvm->arch;
2226 kvmclock_reset(vcpu);
2228 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
2229 bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
2231 if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2232 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2234 ka->boot_vcpu_runs_old_kvmclock = tmp;
2237 vcpu->arch.time = data;
2238 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2240 /* we verify if the enable bit is set... */
2244 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2245 &vcpu->arch.pv_time, data & ~1ULL,
2246 sizeof(struct pvclock_vcpu_time_info)))
2247 vcpu->arch.pv_time_enabled = false;
2249 vcpu->arch.pv_time_enabled = true;
2253 case MSR_KVM_ASYNC_PF_EN:
2254 if (kvm_pv_enable_async_pf(vcpu, data))
2257 case MSR_KVM_STEAL_TIME:
2259 if (unlikely(!sched_info_on()))
2262 if (data & KVM_STEAL_RESERVED_MASK)
2265 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2266 data & KVM_STEAL_VALID_BITS,
2267 sizeof(struct kvm_steal_time)))
2270 vcpu->arch.st.msr_val = data;
2272 if (!(data & KVM_MSR_ENABLED))
2275 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2278 case MSR_KVM_PV_EOI_EN:
2279 if (kvm_lapic_enable_pv_eoi(vcpu, data))
2283 case MSR_IA32_MCG_CTL:
2284 case MSR_IA32_MCG_STATUS:
2285 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2286 return set_msr_mce(vcpu, msr, data);
2288 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2289 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2290 pr = true; /* fall through */
2291 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2292 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2293 if (kvm_pmu_is_valid_msr(vcpu, msr))
2294 return kvm_pmu_set_msr(vcpu, msr_info);
2296 if (pr || data != 0)
2297 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2298 "0x%x data 0x%llx\n", msr, data);
2300 case MSR_K7_CLK_CTL:
2302 * Ignore all writes to this no longer documented MSR.
2303 * Writes are only relevant for old K7 processors,
2304 * all pre-dating SVM, but a recommended workaround from
2305 * AMD for these chips. It is possible to specify the
2306 * affected processor models on the command line, hence
2307 * the need to ignore the workaround.
2310 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2311 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2312 case HV_X64_MSR_CRASH_CTL:
2313 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2314 return kvm_hv_set_msr_common(vcpu, msr, data,
2315 msr_info->host_initiated);
2316 case MSR_IA32_BBL_CR_CTL3:
2317 /* Drop writes to this legacy MSR -- see rdmsr
2318 * counterpart for further detail.
2320 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
2322 case MSR_AMD64_OSVW_ID_LENGTH:
2323 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2325 vcpu->arch.osvw.length = data;
2327 case MSR_AMD64_OSVW_STATUS:
2328 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2330 vcpu->arch.osvw.status = data;
2332 case MSR_PLATFORM_INFO:
2333 if (!msr_info->host_initiated ||
2334 data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
2335 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
2336 cpuid_fault_enabled(vcpu)))
2338 vcpu->arch.msr_platform_info = data;
2340 case MSR_MISC_FEATURES_ENABLES:
2341 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
2342 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
2343 !supports_cpuid_fault(vcpu)))
2345 vcpu->arch.msr_misc_features_enables = data;
2348 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2349 return xen_hvm_config(vcpu, data);
2350 if (kvm_pmu_is_valid_msr(vcpu, msr))
2351 return kvm_pmu_set_msr(vcpu, msr_info);
2353 vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2357 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2364 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2368 * Reads an msr value (of 'msr_index') into 'pdata'.
2369 * Returns 0 on success, non-0 otherwise.
2370 * Assumes vcpu_load() was already called.
2372 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2374 return kvm_x86_ops->get_msr(vcpu, msr);
2376 EXPORT_SYMBOL_GPL(kvm_get_msr);
2378 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2381 u64 mcg_cap = vcpu->arch.mcg_cap;
2382 unsigned bank_num = mcg_cap & 0xff;
2385 case MSR_IA32_P5_MC_ADDR:
2386 case MSR_IA32_P5_MC_TYPE:
2389 case MSR_IA32_MCG_CAP:
2390 data = vcpu->arch.mcg_cap;
2392 case MSR_IA32_MCG_CTL:
2393 if (!(mcg_cap & MCG_CTL_P))
2395 data = vcpu->arch.mcg_ctl;
2397 case MSR_IA32_MCG_STATUS:
2398 data = vcpu->arch.mcg_status;
2401 if (msr >= MSR_IA32_MC0_CTL &&
2402 msr < MSR_IA32_MCx_CTL(bank_num)) {
2403 u32 offset = msr - MSR_IA32_MC0_CTL;
2404 data = vcpu->arch.mce_banks[offset];
2413 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2415 switch (msr_info->index) {
2416 case MSR_IA32_PLATFORM_ID:
2417 case MSR_IA32_EBL_CR_POWERON:
2418 case MSR_IA32_DEBUGCTLMSR:
2419 case MSR_IA32_LASTBRANCHFROMIP:
2420 case MSR_IA32_LASTBRANCHTOIP:
2421 case MSR_IA32_LASTINTFROMIP:
2422 case MSR_IA32_LASTINTTOIP:
2424 case MSR_K8_TSEG_ADDR:
2425 case MSR_K8_TSEG_MASK:
2427 case MSR_VM_HSAVE_PA:
2428 case MSR_K8_INT_PENDING_MSG:
2429 case MSR_AMD64_NB_CFG:
2430 case MSR_FAM10H_MMIO_CONF_BASE:
2431 case MSR_AMD64_BU_CFG2:
2432 case MSR_IA32_PERF_CTL:
2433 case MSR_AMD64_DC_CFG:
2436 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2437 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2438 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2439 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2440 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2441 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2444 case MSR_IA32_UCODE_REV:
2445 msr_info->data = 0x100000000ULL;
2448 case 0x200 ... 0x2ff:
2449 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2450 case 0xcd: /* fsb frequency */
2454 * MSR_EBC_FREQUENCY_ID
2455 * Conservative value valid for even the basic CPU models.
2456 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
2457 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
2458 * and 266MHz for model 3, or 4. Set Core Clock
2459 * Frequency to System Bus Frequency Ratio to 1 (bits
2460 * 31:24) even though these are only valid for CPU
2461 * models > 2, however guests may end up dividing or
2462 * multiplying by zero otherwise.
2464 case MSR_EBC_FREQUENCY_ID:
2465 msr_info->data = 1 << 24;
2467 case MSR_IA32_APICBASE:
2468 msr_info->data = kvm_get_apic_base(vcpu);
2470 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2471 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
2473 case MSR_IA32_TSCDEADLINE:
2474 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2476 case MSR_IA32_TSC_ADJUST:
2477 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
2479 case MSR_IA32_MISC_ENABLE:
2480 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2482 case MSR_IA32_SMBASE:
2483 if (!msr_info->host_initiated)
2485 msr_info->data = vcpu->arch.smbase;
2487 case MSR_IA32_PERF_STATUS:
2488 /* TSC increment by tick */
2489 msr_info->data = 1000ULL;
2490 /* CPU multiplier */
2491 msr_info->data |= (((uint64_t)4ULL) << 40);
2494 msr_info->data = vcpu->arch.efer;
2496 case MSR_KVM_WALL_CLOCK:
2497 case MSR_KVM_WALL_CLOCK_NEW:
2498 msr_info->data = vcpu->kvm->arch.wall_clock;
2500 case MSR_KVM_SYSTEM_TIME:
2501 case MSR_KVM_SYSTEM_TIME_NEW:
2502 msr_info->data = vcpu->arch.time;
2504 case MSR_KVM_ASYNC_PF_EN:
2505 msr_info->data = vcpu->arch.apf.msr_val;
2507 case MSR_KVM_STEAL_TIME:
2508 msr_info->data = vcpu->arch.st.msr_val;
2510 case MSR_KVM_PV_EOI_EN:
2511 msr_info->data = vcpu->arch.pv_eoi.msr_val;
2513 case MSR_IA32_P5_MC_ADDR:
2514 case MSR_IA32_P5_MC_TYPE:
2515 case MSR_IA32_MCG_CAP:
2516 case MSR_IA32_MCG_CTL:
2517 case MSR_IA32_MCG_STATUS:
2518 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2519 return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
2520 case MSR_K7_CLK_CTL:
2522 * Provide expected ramp-up count for K7. All other
2523 * are set to zero, indicating minimum divisors for
every field.
2526 * This prevents guest kernels on AMD host with CPU
2527 * type 6, model 8 and higher from exploding due to
2528 * the rdmsr failing.
2530 msr_info->data = 0x20000000;
2532 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2533 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2534 case HV_X64_MSR_CRASH_CTL:
2535 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2536 return kvm_hv_get_msr_common(vcpu,
2537 msr_info->index, &msr_info->data);
2539 case MSR_IA32_BBL_CR_CTL3:
2540 /* This legacy MSR exists but isn't fully documented in current
2541 * silicon. It is however accessed by winxp in very narrow
2542 * scenarios where it sets bit #19, itself documented as
2543 * a "reserved" bit. Best effort attempt to source coherent
2544 * read data here should the balance of the register be
2545 * interpreted by the guest:
2547 * L2 cache control register 3: 64GB range, 256KB size,
2548 * enabled, latency 0x1, configured
2550 msr_info->data = 0xbe702111;
2552 case MSR_AMD64_OSVW_ID_LENGTH:
2553 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2555 msr_info->data = vcpu->arch.osvw.length;
2557 case MSR_AMD64_OSVW_STATUS:
2558 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2560 msr_info->data = vcpu->arch.osvw.status;
2562 case MSR_PLATFORM_INFO:
2563 msr_info->data = vcpu->arch.msr_platform_info;
2565 case MSR_MISC_FEATURES_ENABLES:
2566 msr_info->data = vcpu->arch.msr_misc_features_enables;
2569 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2570 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2572 vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
2576 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
2583 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2586 * Read or write a bunch of msrs. All parameters are kernel addresses.
2588 * @return number of msrs set successfully.
2590 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2591 struct kvm_msr_entry *entries,
2592 int (*do_msr)(struct kvm_vcpu *vcpu,
2593 unsigned index, u64 *data))
2597 idx = srcu_read_lock(&vcpu->kvm->srcu);
2598 for (i = 0; i < msrs->nmsrs; ++i)
2599 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2601 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2607 * Read or write a bunch of msrs. Parameters are user addresses.
2609 * @return number of msrs set successfully.
2611 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2612 int (*do_msr)(struct kvm_vcpu *vcpu,
2613 unsigned index, u64 *data),
2616 struct kvm_msrs msrs;
2617 struct kvm_msr_entry *entries;
2622 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2626 if (msrs.nmsrs >= MAX_IO_MSRS)
2629 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2630 entries = memdup_user(user_msrs->entries, size);
2631 if (IS_ERR(entries)) {
2632 r = PTR_ERR(entries);
2636 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2641 if (writeback && copy_to_user(user_msrs->entries, entries, size))
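/*
 * Illustrative userspace sketch (not part of this file): msr_io() backs the
 * KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls.  The caller passes a struct kvm_msrs
 * header followed by nmsrs kvm_msr_entry records; the ioctl's return value is
 * the number of entries actually processed.  vcpu_fd is assumed to be an open
 * vcpu file descriptor.
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int example_get_one_msr(int vcpu_fd, __u32 index, __u64 *value)
{
	struct kvm_msrs *buf;
	int r;

	buf = calloc(1, sizeof(*buf) + sizeof(struct kvm_msr_entry));
	if (!buf)
		return -1;
	buf->nmsrs = 1;
	buf->entries[0].index = index;

	r = ioctl(vcpu_fd, KVM_GET_MSRS, buf);
	if (r == 1)
		*value = buf->entries[0].data;
	free(buf);
	return r == 1 ? 0 : -1;	/* anything else: the MSR was not read */
}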
2652 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2657 case KVM_CAP_IRQCHIP:
2659 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2660 case KVM_CAP_SET_TSS_ADDR:
2661 case KVM_CAP_EXT_CPUID:
2662 case KVM_CAP_EXT_EMUL_CPUID:
2663 case KVM_CAP_CLOCKSOURCE:
2665 case KVM_CAP_NOP_IO_DELAY:
2666 case KVM_CAP_MP_STATE:
2667 case KVM_CAP_SYNC_MMU:
2668 case KVM_CAP_USER_NMI:
2669 case KVM_CAP_REINJECT_CONTROL:
2670 case KVM_CAP_IRQ_INJECT_STATUS:
2671 case KVM_CAP_IOEVENTFD:
2672 case KVM_CAP_IOEVENTFD_NO_LENGTH:
2674 case KVM_CAP_PIT_STATE2:
2675 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2676 case KVM_CAP_XEN_HVM:
2677 case KVM_CAP_VCPU_EVENTS:
2678 case KVM_CAP_HYPERV:
2679 case KVM_CAP_HYPERV_VAPIC:
2680 case KVM_CAP_HYPERV_SPIN:
2681 case KVM_CAP_HYPERV_SYNIC:
2682 case KVM_CAP_HYPERV_SYNIC2:
2683 case KVM_CAP_HYPERV_VP_INDEX:
2684 case KVM_CAP_PCI_SEGMENT:
2685 case KVM_CAP_DEBUGREGS:
2686 case KVM_CAP_X86_ROBUST_SINGLESTEP:
2688 case KVM_CAP_ASYNC_PF:
2689 case KVM_CAP_GET_TSC_KHZ:
2690 case KVM_CAP_KVMCLOCK_CTRL:
2691 case KVM_CAP_READONLY_MEM:
2692 case KVM_CAP_HYPERV_TIME:
2693 case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2694 case KVM_CAP_TSC_DEADLINE_TIMER:
2695 case KVM_CAP_ENABLE_CAP_VM:
2696 case KVM_CAP_DISABLE_QUIRKS:
2697 case KVM_CAP_SET_BOOT_CPU_ID:
2698 case KVM_CAP_SPLIT_IRQCHIP:
2699 case KVM_CAP_IMMEDIATE_EXIT:
2702 case KVM_CAP_ADJUST_CLOCK:
2703 r = KVM_CLOCK_TSC_STABLE;
2705 case KVM_CAP_X86_GUEST_MWAIT:
2706 r = kvm_mwait_in_guest();
2708 case KVM_CAP_X86_SMM:
2709 /* SMBASE is usually relocated above 1M on modern chipsets,
2710 * and SMM handlers might indeed rely on 4G segment limits,
2711 * so do not report SMM to be available if real mode is
2712 * emulated via vm86 mode. Still, do not go to great lengths
2713 * to avoid userspace's usage of the feature, because it is a
2714 * fringe case that is not enabled except via specific settings
2715 * of the module parameters.
2717 r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2720 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2722 case KVM_CAP_NR_VCPUS:
2723 r = KVM_SOFT_MAX_VCPUS;
2725 case KVM_CAP_MAX_VCPUS:
2728 case KVM_CAP_NR_MEMSLOTS:
2729 r = KVM_USER_MEM_SLOTS;
2731 case KVM_CAP_PV_MMU: /* obsolete */
2735 r = KVM_MAX_MCE_BANKS;
2738 r = boot_cpu_has(X86_FEATURE_XSAVE);
2740 case KVM_CAP_TSC_CONTROL:
2741 r = kvm_has_tsc_control;
2743 case KVM_CAP_X2APIC_API:
2744 r = KVM_X2APIC_API_VALID_FLAGS;
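/*
 * Illustrative userspace sketch (not part of this file): the values computed
 * above are what userspace sees from the KVM_CHECK_EXTENSION ioctl, where 0
 * means the capability is absent and any other value is capability specific.
 * The query works on /dev/kvm and, with this handler, on a VM fd as well.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_has_cap(int kvm_fd, int cap)
{
	int r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);

	return r > 0;	/* e.g. cap == KVM_CAP_X86_SMM */
}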
2754 long kvm_arch_dev_ioctl(struct file *filp,
2755 unsigned int ioctl, unsigned long arg)
2757 void __user *argp = (void __user *)arg;
2761 case KVM_GET_MSR_INDEX_LIST: {
2762 struct kvm_msr_list __user *user_msr_list = argp;
2763 struct kvm_msr_list msr_list;
2767 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2770 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
2771 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2774 if (n < msr_list.nmsrs)
2777 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2778 num_msrs_to_save * sizeof(u32)))
2780 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2782 num_emulated_msrs * sizeof(u32)))
2787 case KVM_GET_SUPPORTED_CPUID:
2788 case KVM_GET_EMULATED_CPUID: {
2789 struct kvm_cpuid2 __user *cpuid_arg = argp;
2790 struct kvm_cpuid2 cpuid;
2793 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2796 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
2802 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2807 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2809 if (copy_to_user(argp, &kvm_mce_cap_supported,
2810 sizeof(kvm_mce_cap_supported)))
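/*
 * Illustrative userspace sketch (not part of this file): as the checks above
 * show, KVM_GET_MSR_INDEX_LIST writes back the required count and fails with
 * E2BIG when the caller-supplied nmsrs is too small, so the usual pattern is
 * to probe with nmsrs == 0 and retry with a properly sized buffer.
 */
#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct kvm_msr_list *example_get_msr_index_list(int kvm_fd)
{
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;

	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe) == 0)
		return NULL;			/* unexpected: empty list */
	if (errno != E2BIG)
		return NULL;

	list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return NULL;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;				/* caller frees */
}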
2822 static void wbinvd_ipi(void *garbage)
2827 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2829 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
2832 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2834 /* Address WBINVD may be executed by guest */
2835 if (need_emulate_wbinvd(vcpu)) {
2836 if (kvm_x86_ops->has_wbinvd_exit())
2837 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2838 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2839 smp_call_function_single(vcpu->cpu,
2840 wbinvd_ipi, NULL, 1);
2843 kvm_x86_ops->vcpu_load(vcpu, cpu);
2845 /* Apply any externally detected TSC adjustments (due to suspend) */
2846 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2847 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2848 vcpu->arch.tsc_offset_adjustment = 0;
2849 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2852 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2853 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2854 rdtsc() - vcpu->arch.last_host_tsc;
2856 mark_tsc_unstable("KVM discovered backwards TSC");
2858 if (check_tsc_unstable()) {
2859 u64 offset = kvm_compute_tsc_offset(vcpu,
2860 vcpu->arch.last_guest_tsc);
2861 kvm_vcpu_write_tsc_offset(vcpu, offset);
2862 vcpu->arch.tsc_catchup = 1;
2865 if (kvm_lapic_hv_timer_in_use(vcpu))
2866 kvm_lapic_restart_hv_timer(vcpu);
2869 * On a host with synchronized TSC, there is no need to update
2870 * kvmclock on vcpu->cpu migration
2872 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2873 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2874 if (vcpu->cpu != cpu)
2875 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
2879 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2882 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
2884 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2887 vcpu->arch.st.steal.preempted = 1;
2889 kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
2890 &vcpu->arch.st.steal.preempted,
2891 offsetof(struct kvm_steal_time, preempted),
2892 sizeof(vcpu->arch.st.steal.preempted));
2895 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2899 if (vcpu->preempted)
2900 vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
2903 * Disable page faults because we're in atomic context here.
2904 * kvm_write_guest_offset_cached() would call might_fault()
2905 * that relies on pagefault_disable() to tell if there's a
2906 * bug. NOTE: the write to guest memory may not go through if
2907 * during postcopy live migration or if there's heavy guest
paging.
2910 pagefault_disable();
2912 * kvm_memslots() will be called by
2913 * kvm_write_guest_offset_cached() so take the srcu lock.
2915 idx = srcu_read_lock(&vcpu->kvm->srcu);
2916 kvm_steal_time_set_preempted(vcpu);
2917 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2919 kvm_x86_ops->vcpu_put(vcpu);
2920 kvm_put_guest_fpu(vcpu);
2921 vcpu->arch.last_host_tsc = rdtsc();
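/*
 * Illustrative guest-side sketch (not part of this file): the preempted byte
 * written by kvm_steal_time_set_preempted() is what a paravirtualized
 * "is this vCPU running?" check in the guest reads, e.g. to avoid spinning on
 * a lock whose holder was scheduled out.  The helper name is hypothetical.
 */
static bool example_vcpu_was_preempted(struct kvm_steal_time *st)
{
	return READ_ONCE(st->preempted) != 0;
}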
2924 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2925 struct kvm_lapic_state *s)
2927 if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
2928 kvm_x86_ops->sync_pir_to_irr(vcpu);
2930 return kvm_apic_get_state(vcpu, s);
2933 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2934 struct kvm_lapic_state *s)
2938 r = kvm_apic_set_state(vcpu, s);
2941 update_cr8_intercept(vcpu);
2946 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
2948 return (!lapic_in_kernel(vcpu) ||
2949 kvm_apic_accept_pic_intr(vcpu));
2953 * if userspace requested an interrupt window, check that the
2954 * interrupt window is open.
2956 * No need to exit to userspace if we already have an interrupt queued.
2958 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
2960 return kvm_arch_interrupt_allowed(vcpu) &&
2961 !kvm_cpu_has_interrupt(vcpu) &&
2962 !kvm_event_needs_reinjection(vcpu) &&
2963 kvm_cpu_accept_dm_intr(vcpu);
2966 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2967 struct kvm_interrupt *irq)
2969 if (irq->irq >= KVM_NR_INTERRUPTS)
2972 if (!irqchip_in_kernel(vcpu->kvm)) {
2973 kvm_queue_interrupt(vcpu, irq->irq, false);
2974 kvm_make_request(KVM_REQ_EVENT, vcpu);
2979 * With in-kernel LAPIC, we only use this to inject EXTINT, so
2980 * fail for in-kernel 8259.
2982 if (pic_in_kernel(vcpu->kvm))
2985 if (vcpu->arch.pending_external_vector != -1)
2988 vcpu->arch.pending_external_vector = irq->irq;
2989 kvm_make_request(KVM_REQ_EVENT, vcpu);
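/*
 * Illustrative userspace sketch (not part of this file): this handler backs
 * the KVM_INTERRUPT vcpu ioctl, which a userspace interrupt controller model
 * uses to queue an external vector; as the code above shows, it is rejected
 * when the fully in-kernel PIC owns interrupt delivery.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_inject_extint(int vcpu_fd, unsigned int vector)
{
	struct kvm_interrupt irq = { .irq = vector };

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}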
2993 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2995 kvm_inject_nmi(vcpu);
3000 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
3002 kvm_make_request(KVM_REQ_SMI, vcpu);
3007 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
3008 struct kvm_tpr_access_ctl *tac)
3012 vcpu->arch.tpr_access_reporting = !!tac->enabled;
3016 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
3020 unsigned bank_num = mcg_cap & 0xff, bank;
3023 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
3025 if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
3028 vcpu->arch.mcg_cap = mcg_cap;
3029 /* Init IA32_MCG_CTL to all 1s */
3030 if (mcg_cap & MCG_CTL_P)
3031 vcpu->arch.mcg_ctl = ~(u64)0;
3032 /* Init IA32_MCi_CTL to all 1s */
3033 for (bank = 0; bank < bank_num; bank++)
3034 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
3036 if (kvm_x86_ops->setup_mce)
3037 kvm_x86_ops->setup_mce(vcpu);
3042 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
3043 struct kvm_x86_mce *mce)
3045 u64 mcg_cap = vcpu->arch.mcg_cap;
3046 unsigned bank_num = mcg_cap & 0xff;
3047 u64 *banks = vcpu->arch.mce_banks;
3049 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
3052 * if IA32_MCG_CTL is not all 1s, the uncorrected error
3053 * reporting is disabled
3055 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
3056 vcpu->arch.mcg_ctl != ~(u64)0)
3058 banks += 4 * mce->bank;
3060 * if IA32_MCi_CTL is not all 1s, the uncorrected error
3061 * reporting is disabled for the bank
3063 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
3065 if (mce->status & MCI_STATUS_UC) {
3066 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3067 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3068 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3071 if (banks[1] & MCI_STATUS_VAL)
3072 mce->status |= MCI_STATUS_OVER;
3073 banks[2] = mce->addr;
3074 banks[3] = mce->misc;
3075 vcpu->arch.mcg_status = mce->mcg_status;
3076 banks[1] = mce->status;
3077 kvm_queue_exception(vcpu, MC_VECTOR);
3078 } else if (!(banks[1] & MCI_STATUS_VAL)
3079 || !(banks[1] & MCI_STATUS_UC)) {
3080 if (banks[1] & MCI_STATUS_VAL)
3081 mce->status |= MCI_STATUS_OVER;
3082 banks[2] = mce->addr;
3083 banks[3] = mce->misc;
3084 banks[1] = mce->status;
3086 banks[1] |= MCI_STATUS_OVER;
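/*
 * Illustrative userspace sketch (not part of this file): the two helpers above
 * back the KVM_X86_SETUP_MCE and KVM_X86_SET_MCE vcpu ioctls.  Each MCE bank
 * occupies four consecutive u64 slots (CTL, STATUS, ADDR, MISC), which is why
 * the code indexes mce_banks with 4 * mce->bank.  mcg_cap is assumed to
 * combine the bits reported by KVM_X86_GET_MCE_CAP_SUPPORTED with the desired
 * bank count in bits 7:0; the status/mcg_status bit definitions below are
 * spelled out locally because they are not part of the KVM uapi headers.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

#define EX_MCI_STATUS_VAL	(1ULL << 63)	/* MCi_STATUS: valid */
#define EX_MCI_STATUS_UC	(1ULL << 61)	/* MCi_STATUS: uncorrected */
#define EX_MCI_STATUS_EN	(1ULL << 60)	/* MCi_STATUS: reporting enabled */
#define EX_MCI_STATUS_ADDRV	(1ULL << 58)	/* MCi_STATUS: MCi_ADDR valid */
#define EX_MCG_STATUS_MCIP	(1ULL << 2)	/* MCG_STATUS: MCE in progress */

static int example_inject_uc_mce(int vcpu_fd, __u64 mcg_cap, __u8 bank,
				 __u64 addr)
{
	struct kvm_x86_mce mce = {
		.status	    = EX_MCI_STATUS_VAL | EX_MCI_STATUS_UC |
			      EX_MCI_STATUS_EN | EX_MCI_STATUS_ADDRV,
		.mcg_status = EX_MCG_STATUS_MCIP,
		.bank	    = bank,
		.addr	    = addr,
	};

	if (ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap) < 0)
		return -1;
	return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}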
3090 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3091 struct kvm_vcpu_events *events)
3095 * FIXME: pass injected and pending separately. This is only
3096 * needed for nested virtualization, whose state cannot be
3097 * migrated yet. For now we can combine them.
3099 events->exception.injected =
3100 (vcpu->arch.exception.pending ||
3101 vcpu->arch.exception.injected) &&
3102 !kvm_exception_is_soft(vcpu->arch.exception.nr);
3103 events->exception.nr = vcpu->arch.exception.nr;
3104 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3105 events->exception.pad = 0;
3106 events->exception.error_code = vcpu->arch.exception.error_code;
3108 events->interrupt.injected =
3109 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
3110 events->interrupt.nr = vcpu->arch.interrupt.nr;
3111 events->interrupt.soft = 0;
3112 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
3114 events->nmi.injected = vcpu->arch.nmi_injected;
3115 events->nmi.pending = vcpu->arch.nmi_pending != 0;
3116 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3117 events->nmi.pad = 0;
3119 events->sipi_vector = 0; /* never valid when reporting to user space */
3121 events->smi.smm = is_smm(vcpu);
3122 events->smi.pending = vcpu->arch.smi_pending;
3123 events->smi.smm_inside_nmi =
3124 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
3125 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
3127 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3128 | KVM_VCPUEVENT_VALID_SHADOW
3129 | KVM_VCPUEVENT_VALID_SMM);
3130 memset(&events->reserved, 0, sizeof(events->reserved));
3133 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
3135 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3136 struct kvm_vcpu_events *events)
3138 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3139 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3140 | KVM_VCPUEVENT_VALID_SHADOW
3141 | KVM_VCPUEVENT_VALID_SMM))
3144 if (events->exception.injected &&
3145 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
3146 is_guest_mode(vcpu)))
3149 /* INITs are latched while in SMM */
3150 if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
3151 (events->smi.smm || events->smi.pending) &&
3152 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3156 vcpu->arch.exception.injected = false;
3157 vcpu->arch.exception.pending = events->exception.injected;
3158 vcpu->arch.exception.nr = events->exception.nr;
3159 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
3160 vcpu->arch.exception.error_code = events->exception.error_code;
3162 vcpu->arch.interrupt.pending = events->interrupt.injected;
3163 vcpu->arch.interrupt.nr = events->interrupt.nr;
3164 vcpu->arch.interrupt.soft = events->interrupt.soft;
3165 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
3166 kvm_x86_ops->set_interrupt_shadow(vcpu,
3167 events->interrupt.shadow);
3169 vcpu->arch.nmi_injected = events->nmi.injected;
3170 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
3171 vcpu->arch.nmi_pending = events->nmi.pending;
3172 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
3174 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3175 lapic_in_kernel(vcpu))
3176 vcpu->arch.apic->sipi_vector = events->sipi_vector;
3178 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3179 u32 hflags = vcpu->arch.hflags;
3180 if (events->smi.smm)
3181 hflags |= HF_SMM_MASK;
3183 hflags &= ~HF_SMM_MASK;
3184 kvm_set_hflags(vcpu, hflags);
3186 vcpu->arch.smi_pending = events->smi.pending;
3188 if (events->smi.smm) {
3189 if (events->smi.smm_inside_nmi)
3190 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
3192 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
3193 if (lapic_in_kernel(vcpu)) {
3194 if (events->smi.latched_init)
3195 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3197 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3202 kvm_make_request(KVM_REQ_EVENT, vcpu);
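/*
 * Illustrative userspace sketch (not part of this file): the two handlers
 * above back KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS.  On the set side
 * the flags field selects which optional sections (NMI pending, SIPI vector,
 * interrupt shadow, SMM state) the kernel actually consumes, so a caller can
 * round-trip the structure and touch only one of them.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_clear_pending_nmi(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.nmi.pending = 0;
	events.flags = KVM_VCPUEVENT_VALID_NMI_PENDING;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}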
3207 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
3208 struct kvm_debugregs *dbgregs)
3212 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3213 kvm_get_dr(vcpu, 6, &val);
3215 dbgregs->dr7 = vcpu->arch.dr7;
3217 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3220 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
3221 struct kvm_debugregs *dbgregs)
3226 if (dbgregs->dr6 & ~0xffffffffull)
3228 if (dbgregs->dr7 & ~0xffffffffull)
3231 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3232 kvm_update_dr0123(vcpu);
3233 vcpu->arch.dr6 = dbgregs->dr6;
3234 kvm_update_dr6(vcpu);
3235 vcpu->arch.dr7 = dbgregs->dr7;
3236 kvm_update_dr7(vcpu);
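/*
 * Illustrative userspace sketch (not part of this file): the pair above backs
 * KVM_GET_DEBUGREGS/KVM_SET_DEBUGREGS.  As the checks show, bits 63:32 of dr6
 * and dr7 must be zero or the set ioctl fails.  The DR7 encoding used below
 * (bit 0 = L0, R/W0 = LEN0 = 0) arms DR0 as an execution breakpoint.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_set_hw_breakpoint(int vcpu_fd, __u64 addr)
{
	struct kvm_debugregs dbg;

	if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) < 0)
		return -1;

	dbg.db[0] = addr;	/* DR0: linear address to break on */
	dbg.dr7 |= 0x1;		/* L0: locally enable breakpoint 0 */

	return ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
}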
3241 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
3243 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3245 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3246 u64 xstate_bv = xsave->header.xfeatures;
3250 * Copy legacy XSAVE area, to avoid complications with CPUID
3251 * leaves 0 and 1 in the loop below.
3253 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3256 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
3257 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3260 * Copy each region from the possibly compacted offset to the
3261 * non-compacted offset.
3263 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3265 u64 feature = valid & -valid;
3266 int index = fls64(feature) - 1;
3267 void *src = get_xsave_addr(xsave, feature);
3270 u32 size, offset, ecx, edx;
3271 cpuid_count(XSTATE_CPUID, index,
3272 &size, &offset, &ecx, &edx);
3273 if (feature == XFEATURE_MASK_PKRU)
3274 memcpy(dest + offset, &vcpu->arch.pkru,
3275 sizeof(vcpu->arch.pkru));
3277 memcpy(dest + offset, src, size);
3285 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3287 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3288 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
3292 * Copy legacy XSAVE area, to avoid complications with CPUID
3293 * leaves 0 and 1 in the loop below.
3295 memcpy(xsave, src, XSAVE_HDR_OFFSET);
3297 /* Set XSTATE_BV and possibly XCOMP_BV. */
3298 xsave->header.xfeatures = xstate_bv;
3299 if (boot_cpu_has(X86_FEATURE_XSAVES))
3300 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
3303 * Copy each region from the non-compacted offset to the
3304 * possibly compacted offset.
3306 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3308 u64 feature = valid & -valid;
3309 int index = fls64(feature) - 1;
3310 void *dest = get_xsave_addr(xsave, feature);
3313 u32 size, offset, ecx, edx;
3314 cpuid_count(XSTATE_CPUID, index,
3315 &size, &offset, &ecx, &edx);
3316 if (feature == XFEATURE_MASK_PKRU)
3317 memcpy(&vcpu->arch.pkru, src + offset,
3318 sizeof(vcpu->arch.pkru));
3320 memcpy(dest, src + offset, size);
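/*
 * Illustrative sketch (not part of this file): fill_xsave()/load_xsave()
 * translate between the host's (possibly compacted) in-kernel xsave image and
 * the non-compacted layout of struct kvm_xsave.  The non-compacted offset and
 * size of each state component come straight from CPUID leaf 0xD, as the
 * hypothetical helper below shows for the host side.
 */
static void example_xstate_offset_size(int feature_nr, u32 *offset, u32 *size)
{
	u32 eax, ebx, ecx, edx;

	/* CPUID.(EAX=0DH, ECX=feature_nr): EAX = size, EBX = offset. */
	cpuid_count(XSTATE_CPUID, feature_nr, &eax, &ebx, &ecx, &edx);
	*size = eax;
	*offset = ebx;
}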
3327 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
3328 struct kvm_xsave *guest_xsave)
3330 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3331 memset(guest_xsave, 0, sizeof(struct kvm_xsave));
3332 fill_xsave((u8 *) guest_xsave->region, vcpu);
3334 memcpy(guest_xsave->region,
3335 &vcpu->arch.guest_fpu.state.fxsave,
3336 sizeof(struct fxregs_state));
3337 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
3338 XFEATURE_MASK_FPSSE;
3342 #define XSAVE_MXCSR_OFFSET 24
3344 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
3345 struct kvm_xsave *guest_xsave)
3348 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
3349 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
3351 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3353 * Here we allow setting states that are not present in
3354 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
3355 * with old userspace.
3357 if (xstate_bv & ~kvm_supported_xcr0() ||
3358 mxcsr & ~mxcsr_feature_mask)
3360 load_xsave(vcpu, (u8 *)guest_xsave->region);
3362 if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
3363 mxcsr & ~mxcsr_feature_mask)
3365 memcpy(&vcpu->arch.guest_fpu.state.fxsave,
3366 guest_xsave->region, sizeof(struct fxregs_state));
3371 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
3372 struct kvm_xcrs *guest_xcrs)
3374 if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
3375 guest_xcrs->nr_xcrs = 0;
3379 guest_xcrs->nr_xcrs = 1;
3380 guest_xcrs->flags = 0;
3381 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3382 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3385 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3386 struct kvm_xcrs *guest_xcrs)
3390 if (!boot_cpu_has(X86_FEATURE_XSAVE))
3393 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3396 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3397 /* Only support XCR0 currently */
3398 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3399 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3400 guest_xcrs->xcrs[i].value);
3409 * kvm_set_guest_paused() indicates to the guest kernel that it has been
3410 * stopped by the hypervisor. This function will be called from the host only.
3411 * EINVAL is returned when the host attempts to set the flag for a guest that
3412 * does not support pv clocks.
3414 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
3416 if (!vcpu->arch.pv_time_enabled)
3418 vcpu->arch.pvclock_set_guest_stopped_request = true;
3419 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
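/*
 * Illustrative userspace sketch (not part of this file): kvm_set_guest_paused()
 * backs the KVM_KVMCLOCK_CTRL vcpu ioctl.  A VMM calls it on every vCPU after
 * the guest has been stopped for a long time (debugger, migration pause) so
 * the guest's soft-lockup watchdog learns the stall was host induced.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void example_mark_vcpus_paused(const int *vcpu_fds, int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		ioctl(vcpu_fds[i], KVM_KVMCLOCK_CTRL, 0);
}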
3423 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3424 struct kvm_enable_cap *cap)
3430 case KVM_CAP_HYPERV_SYNIC2:
3433 case KVM_CAP_HYPERV_SYNIC:
3434 if (!irqchip_in_kernel(vcpu->kvm))
3436 return kvm_hv_activate_synic(vcpu, cap->cap ==
3437 KVM_CAP_HYPERV_SYNIC2);
3443 long kvm_arch_vcpu_ioctl(struct file *filp,
3444 unsigned int ioctl, unsigned long arg)
3446 struct kvm_vcpu *vcpu = filp->private_data;
3447 void __user *argp = (void __user *)arg;
3450 struct kvm_lapic_state *lapic;
3451 struct kvm_xsave *xsave;
3452 struct kvm_xcrs *xcrs;
3458 case KVM_GET_LAPIC: {
3460 if (!lapic_in_kernel(vcpu))
3462 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3467 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3471 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3476 case KVM_SET_LAPIC: {
3478 if (!lapic_in_kernel(vcpu))
3480 u.lapic = memdup_user(argp, sizeof(*u.lapic));
3481 if (IS_ERR(u.lapic))
3482 return PTR_ERR(u.lapic);
3484 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3487 case KVM_INTERRUPT: {
3488 struct kvm_interrupt irq;
3491 if (copy_from_user(&irq, argp, sizeof irq))
3493 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3497 r = kvm_vcpu_ioctl_nmi(vcpu);
3501 r = kvm_vcpu_ioctl_smi(vcpu);
3504 case KVM_SET_CPUID: {
3505 struct kvm_cpuid __user *cpuid_arg = argp;
3506 struct kvm_cpuid cpuid;
3509 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3511 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3514 case KVM_SET_CPUID2: {
3515 struct kvm_cpuid2 __user *cpuid_arg = argp;
3516 struct kvm_cpuid2 cpuid;
3519 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3521 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3522 cpuid_arg->entries);
3525 case KVM_GET_CPUID2: {
3526 struct kvm_cpuid2 __user *cpuid_arg = argp;
3527 struct kvm_cpuid2 cpuid;
3530 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3532 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3533 cpuid_arg->entries);
3537 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3543 r = msr_io(vcpu, argp, do_get_msr, 1);
3546 r = msr_io(vcpu, argp, do_set_msr, 0);
3548 case KVM_TPR_ACCESS_REPORTING: {
3549 struct kvm_tpr_access_ctl tac;
3552 if (copy_from_user(&tac, argp, sizeof tac))
3554 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3558 if (copy_to_user(argp, &tac, sizeof tac))
3563 case KVM_SET_VAPIC_ADDR: {
3564 struct kvm_vapic_addr va;
3568 if (!lapic_in_kernel(vcpu))
3571 if (copy_from_user(&va, argp, sizeof va))
3573 idx = srcu_read_lock(&vcpu->kvm->srcu);
3574 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3575 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3578 case KVM_X86_SETUP_MCE: {
3582 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3584 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3587 case KVM_X86_SET_MCE: {
3588 struct kvm_x86_mce mce;
3591 if (copy_from_user(&mce, argp, sizeof mce))
3593 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3596 case KVM_GET_VCPU_EVENTS: {
3597 struct kvm_vcpu_events events;
3599 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3602 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3607 case KVM_SET_VCPU_EVENTS: {
3608 struct kvm_vcpu_events events;
3611 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3614 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3617 case KVM_GET_DEBUGREGS: {
3618 struct kvm_debugregs dbgregs;
3620 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3623 if (copy_to_user(argp, &dbgregs,
3624 sizeof(struct kvm_debugregs)))
3629 case KVM_SET_DEBUGREGS: {
3630 struct kvm_debugregs dbgregs;
3633 if (copy_from_user(&dbgregs, argp,
3634 sizeof(struct kvm_debugregs)))
3637 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3640 case KVM_GET_XSAVE: {
3641 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3646 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3649 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3654 case KVM_SET_XSAVE: {
3655 u.xsave = memdup_user(argp, sizeof(*u.xsave));
3656 if (IS_ERR(u.xsave))
3657 return PTR_ERR(u.xsave);
3659 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3662 case KVM_GET_XCRS: {
3663 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3668 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3671 if (copy_to_user(argp, u.xcrs,
3672 sizeof(struct kvm_xcrs)))
3677 case KVM_SET_XCRS: {
3678 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
3680 return PTR_ERR(u.xcrs);
3682 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3685 case KVM_SET_TSC_KHZ: {
3689 user_tsc_khz = (u32)arg;
3691 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3694 if (user_tsc_khz == 0)
3695 user_tsc_khz = tsc_khz;
3697 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
3702 case KVM_GET_TSC_KHZ: {
3703 r = vcpu->arch.virtual_tsc_khz;
3706 case KVM_KVMCLOCK_CTRL: {
3707 r = kvm_set_guest_paused(vcpu);
3710 case KVM_ENABLE_CAP: {
3711 struct kvm_enable_cap cap;
3714 if (copy_from_user(&cap, argp, sizeof(cap)))
3716 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3727 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3729 return VM_FAULT_SIGBUS;
3732 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3736 if (addr > (unsigned int)(-3 * PAGE_SIZE))
3738 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3742 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3745 kvm->arch.ept_identity_map_addr = ident_addr;
3749 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3750 u32 kvm_nr_mmu_pages)
3752 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3755 mutex_lock(&kvm->slots_lock);
3757 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3758 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3760 mutex_unlock(&kvm->slots_lock);
3764 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3766 return kvm->arch.n_max_mmu_pages;
3769 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3771 struct kvm_pic *pic = kvm->arch.vpic;
3775 switch (chip->chip_id) {
3776 case KVM_IRQCHIP_PIC_MASTER:
3777 memcpy(&chip->chip.pic, &pic->pics[0],
3778 sizeof(struct kvm_pic_state));
3780 case KVM_IRQCHIP_PIC_SLAVE:
3781 memcpy(&chip->chip.pic, &pic->pics[1],
3782 sizeof(struct kvm_pic_state));
3784 case KVM_IRQCHIP_IOAPIC:
3785 kvm_get_ioapic(kvm, &chip->chip.ioapic);
3794 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3796 struct kvm_pic *pic = kvm->arch.vpic;
3800 switch (chip->chip_id) {
3801 case KVM_IRQCHIP_PIC_MASTER:
3802 spin_lock(&pic->lock);
3803 memcpy(&pic->pics[0], &chip->chip.pic,
3804 sizeof(struct kvm_pic_state));
3805 spin_unlock(&pic->lock);
3807 case KVM_IRQCHIP_PIC_SLAVE:
3808 spin_lock(&pic->lock);
3809 memcpy(&pic->pics[1], &chip->chip.pic,
3810 sizeof(struct kvm_pic_state));
3811 spin_unlock(&pic->lock);
3813 case KVM_IRQCHIP_IOAPIC:
3814 kvm_set_ioapic(kvm, &chip->chip.ioapic);
3820 kvm_pic_update_irq(pic);
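/*
 * Illustrative userspace sketch (not part of this file): the pair above backs
 * KVM_GET_IRQCHIP/KVM_SET_IRQCHIP.  Userspace selects the unit with chip_id
 * and the kernel copies the whole state blob, which is how PIC/IOAPIC state
 * is saved and restored across migration.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_copy_ioapic_state(int src_vm_fd, int dst_vm_fd)
{
	struct kvm_irqchip chip;

	memset(&chip, 0, sizeof(chip));
	chip.chip_id = KVM_IRQCHIP_IOAPIC;

	if (ioctl(src_vm_fd, KVM_GET_IRQCHIP, &chip) < 0)
		return -1;
	return ioctl(dst_vm_fd, KVM_SET_IRQCHIP, &chip);
}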
3824 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3826 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
3828 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
3830 mutex_lock(&kps->lock);
3831 memcpy(ps, &kps->channels, sizeof(*ps));
3832 mutex_unlock(&kps->lock);
3836 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3839 struct kvm_pit *pit = kvm->arch.vpit;
3841 mutex_lock(&pit->pit_state.lock);
3842 memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
3843 for (i = 0; i < 3; i++)
3844 kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
3845 mutex_unlock(&pit->pit_state.lock);
3849 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3851 mutex_lock(&kvm->arch.vpit->pit_state.lock);