KVM: X86: Don't block vCPU if there is pending exception
arch/x86/kvm/x86.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 #include "pmu.h"
31 #include "hyperv.h"
32
33 #include <linux/clocksource.h>
34 #include <linux/interrupt.h>
35 #include <linux/kvm.h>
36 #include <linux/fs.h>
37 #include <linux/vmalloc.h>
38 #include <linux/export.h>
39 #include <linux/moduleparam.h>
40 #include <linux/mman.h>
41 #include <linux/highmem.h>
42 #include <linux/iommu.h>
43 #include <linux/intel-iommu.h>
44 #include <linux/cpufreq.h>
45 #include <linux/user-return-notifier.h>
46 #include <linux/srcu.h>
47 #include <linux/slab.h>
48 #include <linux/perf_event.h>
49 #include <linux/uaccess.h>
50 #include <linux/hash.h>
51 #include <linux/pci.h>
52 #include <linux/timekeeper_internal.h>
53 #include <linux/pvclock_gtod.h>
54 #include <linux/kvm_irqfd.h>
55 #include <linux/irqbypass.h>
56 #include <linux/sched/stat.h>
57 #include <linux/mem_encrypt.h>
58
59 #include <trace/events/kvm.h>
60
61 #include <asm/debugreg.h>
62 #include <asm/msr.h>
63 #include <asm/desc.h>
64 #include <asm/mce.h>
65 #include <linux/kernel_stat.h>
66 #include <asm/fpu/internal.h> /* Ugh! */
67 #include <asm/pvclock.h>
68 #include <asm/div64.h>
69 #include <asm/irq_remapping.h>
70
71 #define CREATE_TRACE_POINTS
72 #include "trace.h"
73
74 #define MAX_IO_MSRS 256
75 #define KVM_MAX_MCE_BANKS 32
76 u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
77 EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
78
79 #define emul_to_vcpu(ctxt) \
80         container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
81
82 /* EFER defaults:
83  * - enable syscall by default because it is emulated by KVM
84  * - enable LME and LMA by default on 64-bit KVM
85  */
86 #ifdef CONFIG_X86_64
87 static
88 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
89 #else
90 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
91 #endif
92
93 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
94 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
95
96 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
97                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
98
99 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
100 static void process_nmi(struct kvm_vcpu *vcpu);
101 static void enter_smm(struct kvm_vcpu *vcpu);
102 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
103
104 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
105 EXPORT_SYMBOL_GPL(kvm_x86_ops);
106
107 static bool __read_mostly ignore_msrs = 0;
108 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
109
110 unsigned int min_timer_period_us = 500;
111 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
112
113 static bool __read_mostly kvmclock_periodic_sync = true;
114 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
115
116 bool __read_mostly kvm_has_tsc_control;
117 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
118 u32  __read_mostly kvm_max_guest_tsc_khz;
119 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
120 u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
121 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
122 u64  __read_mostly kvm_max_tsc_scaling_ratio;
123 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
124 u64 __read_mostly kvm_default_tsc_scaling_ratio;
125 EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
126
127 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
128 static u32 __read_mostly tsc_tolerance_ppm = 250;
129 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
130
131 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
132 unsigned int __read_mostly lapic_timer_advance_ns = 0;
133 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
134
135 static bool __read_mostly vector_hashing = true;
136 module_param(vector_hashing, bool, S_IRUGO);
137
138 #define KVM_NR_SHARED_MSRS 16
139
140 struct kvm_shared_msrs_global {
141         int nr;
142         u32 msrs[KVM_NR_SHARED_MSRS];
143 };
144
145 struct kvm_shared_msrs {
146         struct user_return_notifier urn;
147         bool registered;
148         struct kvm_shared_msr_values {
149                 u64 host;
150                 u64 curr;
151         } values[KVM_NR_SHARED_MSRS];
152 };
153
154 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
155 static struct kvm_shared_msrs __percpu *shared_msrs;
156
157 struct kvm_stats_debugfs_item debugfs_entries[] = {
158         { "pf_fixed", VCPU_STAT(pf_fixed) },
159         { "pf_guest", VCPU_STAT(pf_guest) },
160         { "tlb_flush", VCPU_STAT(tlb_flush) },
161         { "invlpg", VCPU_STAT(invlpg) },
162         { "exits", VCPU_STAT(exits) },
163         { "io_exits", VCPU_STAT(io_exits) },
164         { "mmio_exits", VCPU_STAT(mmio_exits) },
165         { "signal_exits", VCPU_STAT(signal_exits) },
166         { "irq_window", VCPU_STAT(irq_window_exits) },
167         { "nmi_window", VCPU_STAT(nmi_window_exits) },
168         { "halt_exits", VCPU_STAT(halt_exits) },
169         { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
170         { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
171         { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
172         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
173         { "hypercalls", VCPU_STAT(hypercalls) },
174         { "request_irq", VCPU_STAT(request_irq_exits) },
175         { "irq_exits", VCPU_STAT(irq_exits) },
176         { "host_state_reload", VCPU_STAT(host_state_reload) },
177         { "efer_reload", VCPU_STAT(efer_reload) },
178         { "fpu_reload", VCPU_STAT(fpu_reload) },
179         { "insn_emulation", VCPU_STAT(insn_emulation) },
180         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
181         { "irq_injections", VCPU_STAT(irq_injections) },
182         { "nmi_injections", VCPU_STAT(nmi_injections) },
183         { "req_event", VCPU_STAT(req_event) },
184         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
185         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
186         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
187         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
188         { "mmu_flooded", VM_STAT(mmu_flooded) },
189         { "mmu_recycled", VM_STAT(mmu_recycled) },
190         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
191         { "mmu_unsync", VM_STAT(mmu_unsync) },
192         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
193         { "largepages", VM_STAT(lpages) },
194         { "max_mmu_page_hash_collisions",
195                 VM_STAT(max_mmu_page_hash_collisions) },
196         { NULL }
197 };
198
199 u64 __read_mostly host_xcr0;
200
201 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
202
203 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
204 {
205         int i;
206         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
207                 vcpu->arch.apf.gfns[i] = ~0;
208 }
209
210 static void kvm_on_user_return(struct user_return_notifier *urn)
211 {
212         unsigned slot;
213         struct kvm_shared_msrs *locals
214                 = container_of(urn, struct kvm_shared_msrs, urn);
215         struct kvm_shared_msr_values *values;
216         unsigned long flags;
217
218         /*
219          * Disabling irqs at this point since the following code could be
220          * interrupted and executed through kvm_arch_hardware_disable()
221          */
222         local_irq_save(flags);
223         if (locals->registered) {
224                 locals->registered = false;
225                 user_return_notifier_unregister(urn);
226         }
227         local_irq_restore(flags);
228         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
229                 values = &locals->values[slot];
230                 if (values->host != values->curr) {
231                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
232                         values->curr = values->host;
233                 }
234         }
235 }
236
237 static void shared_msr_update(unsigned slot, u32 msr)
238 {
239         u64 value;
240         unsigned int cpu = smp_processor_id();
241         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
242
243         /* This is only read, and nobody should modify it at this time,
244          * so no locking is needed. */
245         if (slot >= shared_msrs_global.nr) {
246                 printk(KERN_ERR "kvm: invalid MSR slot!");
247                 return;
248         }
249         rdmsrl_safe(msr, &value);
250         smsr->values[slot].host = value;
251         smsr->values[slot].curr = value;
252 }
253
254 void kvm_define_shared_msr(unsigned slot, u32 msr)
255 {
256         BUG_ON(slot >= KVM_NR_SHARED_MSRS);
257         shared_msrs_global.msrs[slot] = msr;
258         if (slot >= shared_msrs_global.nr)
259                 shared_msrs_global.nr = slot + 1;
260 }
261 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
262
263 static void kvm_shared_msr_cpu_online(void)
264 {
265         unsigned i;
266
267         for (i = 0; i < shared_msrs_global.nr; ++i)
268                 shared_msr_update(i, shared_msrs_global.msrs[i]);
269 }
270
271 int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
272 {
273         unsigned int cpu = smp_processor_id();
274         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
275         int err;
276
277         if (((value ^ smsr->values[slot].curr) & mask) == 0)
278                 return 0;
279         smsr->values[slot].curr = value;
280         err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
281         if (err)
282                 return 1;
283
284         if (!smsr->registered) {
285                 smsr->urn.on_user_return = kvm_on_user_return;
286                 user_return_notifier_register(&smsr->urn);
287                 smsr->registered = true;
288         }
289         return 0;
290 }
291 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
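
/*
 * A rough usage sketch of the shared-MSR machinery above (the real
 * call sites live in the vendor module, e.g. vmx.c, and may differ in
 * detail):
 *
 *	// at hardware-setup time, claim a slot for a guest-visible MSR
 *	kvm_define_shared_msr(0, MSR_STAR);
 *
 *	// when preparing to run the guest, load the guest's value; the
 *	// user-return notifier registered here restores the host value
 *	// lazily, just before the CPU returns to userspace
 *	kvm_set_shared_msr(0, guest_star, -1ull);
 *
 * kvm_on_user_return() then writes back only those MSRs whose current
 * value differs from the saved host value.
 */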
292
293 static void drop_user_return_notifiers(void)
294 {
295         unsigned int cpu = smp_processor_id();
296         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
297
298         if (smsr->registered)
299                 kvm_on_user_return(&smsr->urn);
300 }
301
302 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
303 {
304         return vcpu->arch.apic_base;
305 }
306 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
307
308 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
309 {
310         u64 old_state = vcpu->arch.apic_base &
311                 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
312         u64 new_state = msr_info->data &
313                 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
314         u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
315                 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
316
317         if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE)
318                 return 1;
319         if (!msr_info->host_initiated &&
320             ((new_state == MSR_IA32_APICBASE_ENABLE &&
321               old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
322              (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
323               old_state == 0)))
324                 return 1;
325
326         kvm_lapic_set_base(vcpu, msr_info->data);
327         return 0;
328 }
329 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
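
/*
 * Summary of the APIC mode transitions accepted above for
 * guest-initiated writes (host-initiated writes skip only the
 * transition check, not the reserved-bit check):
 *
 *	disabled <-> xAPIC <-> x2APIC	one step at a time: allowed
 *	disabled  -> x2APIC		rejected
 *	x2APIC    -> xAPIC		rejected
 *
 * Setting the x2APIC bit without the global-enable bit, or setting
 * any bit above the guest's MAXPHYADDR or in the low reserved mask,
 * is rejected as well.
 */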
330
331 asmlinkage __visible void kvm_spurious_fault(void)
332 {
333         /* Fault while not rebooting.  We want the trace. */
334         BUG();
335 }
336 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
337
338 #define EXCPT_BENIGN            0
339 #define EXCPT_CONTRIBUTORY      1
340 #define EXCPT_PF                2
341
342 static int exception_class(int vector)
343 {
344         switch (vector) {
345         case PF_VECTOR:
346                 return EXCPT_PF;
347         case DE_VECTOR:
348         case TS_VECTOR:
349         case NP_VECTOR:
350         case SS_VECTOR:
351         case GP_VECTOR:
352                 return EXCPT_CONTRIBUTORY;
353         default:
354                 break;
355         }
356         return EXCPT_BENIGN;
357 }
358
359 #define EXCPT_FAULT             0
360 #define EXCPT_TRAP              1
361 #define EXCPT_ABORT             2
362 #define EXCPT_INTERRUPT         3
363
364 static int exception_type(int vector)
365 {
366         unsigned int mask;
367
368         if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
369                 return EXCPT_INTERRUPT;
370
371         mask = 1 << vector;
372
373         /* #DB is trap, as instruction watchpoints are handled elsewhere */
374         if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
375                 return EXCPT_TRAP;
376
377         if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
378                 return EXCPT_ABORT;
379
380         /* Reserved exceptions will result in fault */
381         return EXCPT_FAULT;
382 }
383
384 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
385                 unsigned nr, bool has_error, u32 error_code,
386                 bool reinject)
387 {
388         u32 prev_nr;
389         int class1, class2;
390
391         kvm_make_request(KVM_REQ_EVENT, vcpu);
392
393         if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
394         queue:
395                 if (has_error && !is_protmode(vcpu))
396                         has_error = false;
397                 if (reinject) {
398                         /*
399                          * On vmentry, vcpu->arch.exception.pending is only
400                          * true if an event injection was blocked by
401                          * nested_run_pending.  In that case, however,
402                          * vcpu_enter_guest requests an immediate exit,
403                          * and the guest shouldn't proceed far enough to
404                          * need reinjection.
405                          */
406                         WARN_ON_ONCE(vcpu->arch.exception.pending);
407                         vcpu->arch.exception.injected = true;
408                 } else {
409                         vcpu->arch.exception.pending = true;
410                         vcpu->arch.exception.injected = false;
411                 }
412                 vcpu->arch.exception.has_error_code = has_error;
413                 vcpu->arch.exception.nr = nr;
414                 vcpu->arch.exception.error_code = error_code;
415                 return;
416         }
417
418         /* an exception is already pending or injected; merge the two */
419         prev_nr = vcpu->arch.exception.nr;
420         if (prev_nr == DF_VECTOR) {
421                 /* triple fault -> shutdown */
422                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
423                 return;
424         }
425         class1 = exception_class(prev_nr);
426         class2 = exception_class(nr);
427         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
428                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
429                 /*
430                  * Generate double fault per SDM Table 5-5.  Set
431                  * exception.pending = true so that the double fault
432                  * can trigger a nested vmexit.
433                  */
434                 vcpu->arch.exception.pending = true;
435                 vcpu->arch.exception.injected = false;
436                 vcpu->arch.exception.has_error_code = true;
437                 vcpu->arch.exception.nr = DF_VECTOR;
438                 vcpu->arch.exception.error_code = 0;
439         } else
440                 /* replace the previous exception with the new one in the
441                    hope that instruction re-execution will regenerate the
442                    lost exception */
443                 goto queue;
444 }
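
/*
 * How the merge above plays out for a few combinations (per the SDM's
 * double-fault classification mirrored in exception_class()):
 *
 *	pending #PF, new #GP	-> #DF with error code 0
 *	pending #GP, new #NP	-> #DF with error code 0
 *	pending #GP, new #PF	-> the new #PF replaces the old exception
 *	pending #DB, new #GP	-> the new #GP replaces the old exception
 *	pending #DF, new any	-> KVM_REQ_TRIPLE_FAULT (guest shutdown)
 */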
445
446 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
447 {
448         kvm_multiple_exception(vcpu, nr, false, 0, false);
449 }
450 EXPORT_SYMBOL_GPL(kvm_queue_exception);
451
452 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
453 {
454         kvm_multiple_exception(vcpu, nr, false, 0, true);
455 }
456 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
457
458 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
459 {
460         if (err)
461                 kvm_inject_gp(vcpu, 0);
462         else
463                 return kvm_skip_emulated_instruction(vcpu);
464
465         return 1;
466 }
467 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
468
469 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
470 {
471         ++vcpu->stat.pf_guest;
472         vcpu->arch.exception.nested_apf =
473                 is_guest_mode(vcpu) && fault->async_page_fault;
474         if (vcpu->arch.exception.nested_apf)
475                 vcpu->arch.apf.nested_apf_token = fault->address;
476         else
477                 vcpu->arch.cr2 = fault->address;
478         kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
479 }
480 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
481
482 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
483 {
484         if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
485                 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
486         else
487                 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
488
489         return fault->nested_page_fault;
490 }
491
492 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
493 {
494         atomic_inc(&vcpu->arch.nmi_queued);
495         kvm_make_request(KVM_REQ_NMI, vcpu);
496 }
497 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
498
499 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
500 {
501         kvm_multiple_exception(vcpu, nr, true, error_code, false);
502 }
503 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
504
505 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
506 {
507         kvm_multiple_exception(vcpu, nr, true, error_code, true);
508 }
509 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
510
511 /*
512  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
513  * a #GP and return false.
514  */
515 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
516 {
517         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
518                 return true;
519         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
520         return false;
521 }
522 EXPORT_SYMBOL_GPL(kvm_require_cpl);
523
524 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
525 {
526         if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
527                 return true;
528
529         kvm_queue_exception(vcpu, UD_VECTOR);
530         return false;
531 }
532 EXPORT_SYMBOL_GPL(kvm_require_dr);
533
534 /*
535  * This function is used to read from the physical memory of the currently
536  * running guest.  Unlike kvm_vcpu_read_guest_page, it can read from guest
537  * physical memory or from the guest's guest-physical (nested) memory.
538  */
539 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
540                             gfn_t ngfn, void *data, int offset, int len,
541                             u32 access)
542 {
543         struct x86_exception exception;
544         gfn_t real_gfn;
545         gpa_t ngpa;
546
547         ngpa     = gfn_to_gpa(ngfn);
548         real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
549         if (real_gfn == UNMAPPED_GVA)
550                 return -EFAULT;
551
552         real_gfn = gpa_to_gfn(real_gfn);
553
554         return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
555 }
556 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
557
558 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
559                                void *data, int offset, int len, u32 access)
560 {
561         return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
562                                        data, offset, len, access);
563 }
564
565 /*
566  * Load the pae pdptrs.  Return true if they are all valid.
567  */
568 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
569 {
570         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
571         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
572         int i;
573         int ret;
574         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
575
576         ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
577                                       offset * sizeof(u64), sizeof(pdpte),
578                                       PFERR_USER_MASK|PFERR_WRITE_MASK);
579         if (ret < 0) {
580                 ret = 0;
581                 goto out;
582         }
583         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
584                 if ((pdpte[i] & PT_PRESENT_MASK) &&
585                     (pdpte[i] &
586                      vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
587                         ret = 0;
588                         goto out;
589                 }
590         }
591         ret = 1;
592
593         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
594         __set_bit(VCPU_EXREG_PDPTR,
595                   (unsigned long *)&vcpu->arch.regs_avail);
596         __set_bit(VCPU_EXREG_PDPTR,
597                   (unsigned long *)&vcpu->arch.regs_dirty);
598 out:
599
600         return ret;
601 }
602 EXPORT_SYMBOL_GPL(load_pdptrs);
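
/*
 * Worked example of the PDPTE lookup in load_pdptrs(), assuming a
 * (hypothetical) PAE cr3 of 0x12345678e0:
 *
 *	pdpt_gfn = cr3 >> PAGE_SHIFT		= 0x1234567
 *	offset	 = ((cr3 & 0xfff) >> 5) << 2	= 0x11c  (u64 index)
 *	byte offset read = offset * sizeof(u64)	= 0x8e0
 *
 * i.e. the four 8-byte PDPTEs are fetched from the 32-byte-aligned
 * table that cr3 points into.
 */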
603
604 bool pdptrs_changed(struct kvm_vcpu *vcpu)
605 {
606         u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
607         bool changed = true;
608         int offset;
609         gfn_t gfn;
610         int r;
611
612         if (is_long_mode(vcpu) || !is_pae(vcpu))
613                 return false;
614
615         if (!test_bit(VCPU_EXREG_PDPTR,
616                       (unsigned long *)&vcpu->arch.regs_avail))
617                 return true;
618
619         gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
620         offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
621         r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
622                                        PFERR_USER_MASK | PFERR_WRITE_MASK);
623         if (r < 0)
624                 goto out;
625         changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
626 out:
627
628         return changed;
629 }
630 EXPORT_SYMBOL_GPL(pdptrs_changed);
631
632 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
633 {
634         unsigned long old_cr0 = kvm_read_cr0(vcpu);
635         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
636
637         cr0 |= X86_CR0_ET;
638
639 #ifdef CONFIG_X86_64
640         if (cr0 & 0xffffffff00000000UL)
641                 return 1;
642 #endif
643
644         cr0 &= ~CR0_RESERVED_BITS;
645
646         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
647                 return 1;
648
649         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
650                 return 1;
651
652         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
653 #ifdef CONFIG_X86_64
654                 if ((vcpu->arch.efer & EFER_LME)) {
655                         int cs_db, cs_l;
656
657                         if (!is_pae(vcpu))
658                                 return 1;
659                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
660                         if (cs_l)
661                                 return 1;
662                 } else
663 #endif
664                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
665                                                  kvm_read_cr3(vcpu)))
666                         return 1;
667         }
668
669         if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
670                 return 1;
671
672         kvm_x86_ops->set_cr0(vcpu, cr0);
673
674         if ((cr0 ^ old_cr0) & X86_CR0_PG) {
675                 kvm_clear_async_pf_completion_queue(vcpu);
676                 kvm_async_pf_hash_reset(vcpu);
677         }
678
679         if ((cr0 ^ old_cr0) & update_bits)
680                 kvm_mmu_reset_context(vcpu);
681
682         if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
683             kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
684             !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
685                 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
686
687         return 0;
688 }
689 EXPORT_SYMBOL_GPL(kvm_set_cr0);
690
691 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
692 {
693         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
694 }
695 EXPORT_SYMBOL_GPL(kvm_lmsw);
696
697 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
698 {
699         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
700                         !vcpu->guest_xcr0_loaded) {
701                 /* kvm_set_xcr() also depends on this */
702                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
703                 vcpu->guest_xcr0_loaded = 1;
704         }
705 }
706
707 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
708 {
709         if (vcpu->guest_xcr0_loaded) {
710                 if (vcpu->arch.xcr0 != host_xcr0)
711                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
712                 vcpu->guest_xcr0_loaded = 0;
713         }
714 }
715
716 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
717 {
718         u64 xcr0 = xcr;
719         u64 old_xcr0 = vcpu->arch.xcr0;
720         u64 valid_bits;
721
722         /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
723         if (index != XCR_XFEATURE_ENABLED_MASK)
724                 return 1;
725         if (!(xcr0 & XFEATURE_MASK_FP))
726                 return 1;
727         if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
728                 return 1;
729
730         /*
731          * Do not allow the guest to set bits that we do not support
732          * saving.  However, xcr0 bit 0 is always set, even if the
733          * emulated CPU does not support XSAVE (see fx_init).
734          */
735         valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
736         if (xcr0 & ~valid_bits)
737                 return 1;
738
739         if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
740             (!(xcr0 & XFEATURE_MASK_BNDCSR)))
741                 return 1;
742
743         if (xcr0 & XFEATURE_MASK_AVX512) {
744                 if (!(xcr0 & XFEATURE_MASK_YMM))
745                         return 1;
746                 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
747                         return 1;
748         }
749         vcpu->arch.xcr0 = xcr0;
750
751         if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
752                 kvm_update_cpuid(vcpu);
753         return 0;
754 }
755
756 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
757 {
758         if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
759             __kvm_set_xcr(vcpu, index, xcr)) {
760                 kvm_inject_gp(vcpu, 0);
761                 return 1;
762         }
763         return 0;
764 }
765 EXPORT_SYMBOL_GPL(kvm_set_xcr);
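
/*
 * The XCR0 rules enforced by __kvm_set_xcr(), with XFEATURE bits
 * x87=1, SSE=2, YMM=4 as an illustration (assuming the guest CPUID
 * exposes AVX):
 *
 *	xcr0 = 0x7 (x87|SSE|YMM)	accepted
 *	xcr0 = 0x5 (x87|YMM)		rejected: YMM without SSE
 *	xcr0 = 0x6 (SSE|YMM)		rejected: x87 bit must be set
 *
 * Bits outside guest_supported_xcr0, a BNDREGS/BNDCSR mismatch, or a
 * partial AVX-512 state mask are rejected the same way.
 */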
766
767 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
768 {
769         unsigned long old_cr4 = kvm_read_cr4(vcpu);
770         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
771                                    X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
772
773         if (cr4 & CR4_RESERVED_BITS)
774                 return 1;
775
776         if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
777                 return 1;
778
779         if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
780                 return 1;
781
782         if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
783                 return 1;
784
785         if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
786                 return 1;
787
788         if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
789                 return 1;
790
791         if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
792                 return 1;
793
794         if (is_long_mode(vcpu)) {
795                 if (!(cr4 & X86_CR4_PAE))
796                         return 1;
797         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
798                    && ((cr4 ^ old_cr4) & pdptr_bits)
799                    && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
800                                    kvm_read_cr3(vcpu)))
801                 return 1;
802
803         if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
804                 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
805                         return 1;
806
807                 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
808                 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
809                         return 1;
810         }
811
812         if (kvm_x86_ops->set_cr4(vcpu, cr4))
813                 return 1;
814
815         if (((cr4 ^ old_cr4) & pdptr_bits) ||
816             (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
817                 kvm_mmu_reset_context(vcpu);
818
819         if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
820                 kvm_update_cpuid(vcpu);
821
822         return 0;
823 }
824 EXPORT_SYMBOL_GPL(kvm_set_cr4);
825
826 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
827 {
828 #ifdef CONFIG_X86_64
829         cr3 &= ~CR3_PCID_INVD;
830 #endif
831
832         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
833                 kvm_mmu_sync_roots(vcpu);
834                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
835                 return 0;
836         }
837
838         if (is_long_mode(vcpu) &&
839             (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
840                 return 1;
841         else if (is_pae(vcpu) && is_paging(vcpu) &&
842                    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
843                 return 1;
844
845         vcpu->arch.cr3 = cr3;
846         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
847         kvm_mmu_new_cr3(vcpu);
848         return 0;
849 }
850 EXPORT_SYMBOL_GPL(kvm_set_cr3);
851
852 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
853 {
854         if (cr8 & CR8_RESERVED_BITS)
855                 return 1;
856         if (lapic_in_kernel(vcpu))
857                 kvm_lapic_set_tpr(vcpu, cr8);
858         else
859                 vcpu->arch.cr8 = cr8;
860         return 0;
861 }
862 EXPORT_SYMBOL_GPL(kvm_set_cr8);
863
864 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
865 {
866         if (lapic_in_kernel(vcpu))
867                 return kvm_lapic_get_cr8(vcpu);
868         else
869                 return vcpu->arch.cr8;
870 }
871 EXPORT_SYMBOL_GPL(kvm_get_cr8);
872
873 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
874 {
875         int i;
876
877         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
878                 for (i = 0; i < KVM_NR_DB_REGS; i++)
879                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
880                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
881         }
882 }
883
884 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
885 {
886         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
887                 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
888 }
889
890 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
891 {
892         unsigned long dr7;
893
894         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
895                 dr7 = vcpu->arch.guest_debug_dr7;
896         else
897                 dr7 = vcpu->arch.dr7;
898         kvm_x86_ops->set_dr7(vcpu, dr7);
899         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
900         if (dr7 & DR7_BP_EN_MASK)
901                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
902 }
903
904 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
905 {
906         u64 fixed = DR6_FIXED_1;
907
908         if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
909                 fixed |= DR6_RTM;
910         return fixed;
911 }
912
913 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
914 {
915         switch (dr) {
916         case 0 ... 3:
917                 vcpu->arch.db[dr] = val;
918                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
919                         vcpu->arch.eff_db[dr] = val;
920                 break;
921         case 4:
922                 /* fall through */
923         case 6:
924                 if (val & 0xffffffff00000000ULL)
925                         return -1; /* #GP */
926                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
927                 kvm_update_dr6(vcpu);
928                 break;
929         case 5:
930                 /* fall through */
931         default: /* 7 */
932                 if (val & 0xffffffff00000000ULL)
933                         return -1; /* #GP */
934                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
935                 kvm_update_dr7(vcpu);
936                 break;
937         }
938
939         return 0;
940 }
941
942 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
943 {
944         if (__kvm_set_dr(vcpu, dr, val)) {
945                 kvm_inject_gp(vcpu, 0);
946                 return 1;
947         }
948         return 0;
949 }
950 EXPORT_SYMBOL_GPL(kvm_set_dr);
951
952 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
953 {
954         switch (dr) {
955         case 0 ... 3:
956                 *val = vcpu->arch.db[dr];
957                 break;
958         case 4:
959                 /* fall through */
960         case 6:
961                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
962                         *val = vcpu->arch.dr6;
963                 else
964                         *val = kvm_x86_ops->get_dr6(vcpu);
965                 break;
966         case 5:
967                 /* fall through */
968         default: /* 7 */
969                 *val = vcpu->arch.dr7;
970                 break;
971         }
972         return 0;
973 }
974 EXPORT_SYMBOL_GPL(kvm_get_dr);
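
/*
 * DR4 and DR5 are aliases of DR6 and DR7: with CR4.DE clear they act
 * like DR6/DR7 (the "case 4"/"case 5" fall-throughs above), and with
 * CR4.DE set any access must raise #UD, which is what kvm_require_dr()
 * earlier in this file checks on behalf of the exit handlers.
 */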
975
976 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
977 {
978         u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
979         u64 data;
980         int err;
981
982         err = kvm_pmu_rdpmc(vcpu, ecx, &data);
983         if (err)
984                 return err;
985         kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
986         kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
987         return err;
988 }
989 EXPORT_SYMBOL_GPL(kvm_rdpmc);
990
991 /*
992  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
993  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
994  *
995  * This list is modified at module load time to reflect the
996  * capabilities of the host cpu. This capabilities test skips MSRs that are
997  * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
998  * may depend on host virtualization features rather than host cpu features.
999  */
1000
1001 static u32 msrs_to_save[] = {
1002         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
1003         MSR_STAR,
1004 #ifdef CONFIG_X86_64
1005         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1006 #endif
1007         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1008         MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1009 };
1010
1011 static unsigned num_msrs_to_save;
1012
1013 static u32 emulated_msrs[] = {
1014         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
1015         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
1016         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
1017         HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
1018         HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
1019         HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
1020         HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
1021         HV_X64_MSR_RESET,
1022         HV_X64_MSR_VP_INDEX,
1023         HV_X64_MSR_VP_RUNTIME,
1024         HV_X64_MSR_SCONTROL,
1025         HV_X64_MSR_STIMER0_CONFIG,
1026         HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
1027         MSR_KVM_PV_EOI_EN,
1028
1029         MSR_IA32_TSC_ADJUST,
1030         MSR_IA32_TSCDEADLINE,
1031         MSR_IA32_MISC_ENABLE,
1032         MSR_IA32_MCG_STATUS,
1033         MSR_IA32_MCG_CTL,
1034         MSR_IA32_MCG_EXT_CTL,
1035         MSR_IA32_SMBASE,
1036         MSR_PLATFORM_INFO,
1037         MSR_MISC_FEATURES_ENABLES,
1038 };
1039
1040 static unsigned num_emulated_msrs;
1041
1042 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1043 {
1044         if (efer & efer_reserved_bits)
1045                 return false;
1046
1047         if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1048                         return false;
1049
1050         if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1051                         return false;
1052
1053         return true;
1054 }
1055 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1056
1057 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1058 {
1059         u64 old_efer = vcpu->arch.efer;
1060
1061         if (!kvm_valid_efer(vcpu, efer))
1062                 return 1;
1063
1064         if (is_paging(vcpu)
1065             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1066                 return 1;
1067
1068         efer &= ~EFER_LMA;
1069         efer |= vcpu->arch.efer & EFER_LMA;
1070
1071         kvm_x86_ops->set_efer(vcpu, efer);
1072
1073         /* Update reserved bits */
1074         if ((efer ^ old_efer) & EFER_NX)
1075                 kvm_mmu_reset_context(vcpu);
1076
1077         return 0;
1078 }
1079
1080 void kvm_enable_efer_bits(u64 mask)
1081 {
1082        efer_reserved_bits &= ~mask;
1083 }
1084 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
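
/*
 * efer_reserved_bits starts out with everything except SCE (plus
 * LME/LMA on 64-bit) treated as reserved; the vendor module is
 * expected to clear additional bits for features it can virtualize.
 * A sketch (not the exact upstream call site):
 *
 *	if (boot_cpu_has(X86_FEATURE_NX))
 *		kvm_enable_efer_bits(EFER_NX);
 */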
1085
1086 /*
1087  * Writes msr value into the appropriate "register".
1088  * Returns 0 on success, non-0 otherwise.
1089  * Assumes vcpu_load() was already called.
1090  */
1091 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1092 {
1093         switch (msr->index) {
1094         case MSR_FS_BASE:
1095         case MSR_GS_BASE:
1096         case MSR_KERNEL_GS_BASE:
1097         case MSR_CSTAR:
1098         case MSR_LSTAR:
1099                 if (is_noncanonical_address(msr->data, vcpu))
1100                         return 1;
1101                 break;
1102         case MSR_IA32_SYSENTER_EIP:
1103         case MSR_IA32_SYSENTER_ESP:
1104                 /*
1105                  * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1106                  * non-canonical address is written on Intel but not on
1107                  * AMD (which ignores the top 32-bits, because it does
1108                  * not implement 64-bit SYSENTER).
1109                  *
1110                  * 64-bit code should hence be able to write a non-canonical
1111                  * value on AMD.  Making the address canonical ensures that
1112                  * vmentry does not fail on Intel after writing a non-canonical
1113                  * value, and that something deterministic happens if the guest
1114                  * invokes 64-bit SYSENTER.
1115                  */
1116                 msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu));
1117         }
1118         return kvm_x86_ops->set_msr(vcpu, msr);
1119 }
1120 EXPORT_SYMBOL_GPL(kvm_set_msr);
1121
1122 /*
1123  * Adapt kvm_get_msr() and kvm_set_msr() to msr_io()'s calling convention
1124  */
1125 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1126 {
1127         struct msr_data msr;
1128         int r;
1129
1130         msr.index = index;
1131         msr.host_initiated = true;
1132         r = kvm_get_msr(vcpu, &msr);
1133         if (r)
1134                 return r;
1135
1136         *data = msr.data;
1137         return 0;
1138 }
1139
1140 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1141 {
1142         struct msr_data msr;
1143
1144         msr.data = *data;
1145         msr.index = index;
1146         msr.host_initiated = true;
1147         return kvm_set_msr(vcpu, &msr);
1148 }
1149
1150 #ifdef CONFIG_X86_64
1151 struct pvclock_gtod_data {
1152         seqcount_t      seq;
1153
1154         struct { /* extract of a clocksource struct */
1155                 int vclock_mode;
1156                 u64     cycle_last;
1157                 u64     mask;
1158                 u32     mult;
1159                 u32     shift;
1160         } clock;
1161
1162         u64             boot_ns;
1163         u64             nsec_base;
1164         u64             wall_time_sec;
1165 };
1166
1167 static struct pvclock_gtod_data pvclock_gtod_data;
1168
1169 static void update_pvclock_gtod(struct timekeeper *tk)
1170 {
1171         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1172         u64 boot_ns;
1173
1174         boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
1175
1176         write_seqcount_begin(&vdata->seq);
1177
1178         /* copy pvclock gtod data */
1179         vdata->clock.vclock_mode        = tk->tkr_mono.clock->archdata.vclock_mode;
1180         vdata->clock.cycle_last         = tk->tkr_mono.cycle_last;
1181         vdata->clock.mask               = tk->tkr_mono.mask;
1182         vdata->clock.mult               = tk->tkr_mono.mult;
1183         vdata->clock.shift              = tk->tkr_mono.shift;
1184
1185         vdata->boot_ns                  = boot_ns;
1186         vdata->nsec_base                = tk->tkr_mono.xtime_nsec;
1187
1188         vdata->wall_time_sec            = tk->xtime_sec;
1189
1190         write_seqcount_end(&vdata->seq);
1191 }
1192 #endif
1193
1194 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
1195 {
1196         /*
1197          * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1198          * vcpu_enter_guest.  This function is only called from
1199          * the physical CPU that is running vcpu.
1200          */
1201         kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1202 }
1203
1204 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1205 {
1206         int version;
1207         int r;
1208         struct pvclock_wall_clock wc;
1209         struct timespec64 boot;
1210
1211         if (!wall_clock)
1212                 return;
1213
1214         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1215         if (r)
1216                 return;
1217
1218         if (version & 1)
1219                 ++version;  /* first time write, random junk */
1220
1221         ++version;
1222
1223         if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
1224                 return;
1225
1226         /*
1227          * The guest calculates current wall clock time by adding
1228          * system time (updated by kvm_guest_time_update below) to the
1229          * wall clock specified here.  guest system time equals host
1230          * system time for us, thus we must fill in host boot time here.
1231          */
1232         getboottime64(&boot);
1233
1234         if (kvm->arch.kvmclock_offset) {
1235                 struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
1236                 boot = timespec64_sub(boot, ts);
1237         }
1238         wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
1239         wc.nsec = boot.tv_nsec;
1240         wc.version = version;
1241
1242         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1243
1244         version++;
1245         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1246 }
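
/*
 * The version field above follows the usual pvclock protocol: it is
 * left odd while the structure is being updated and even once the
 * update is complete.  A guest typically reads the wall clock with a
 * retry loop along these lines:
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec = wc->sec; nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || version != wc->version);
 */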
1247
1248 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1249 {
1250         do_shl32_div32(dividend, divisor);
1251         return dividend;
1252 }
1253
1254 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
1255                                s8 *pshift, u32 *pmultiplier)
1256 {
1257         uint64_t scaled64;
1258         int32_t  shift = 0;
1259         uint64_t tps64;
1260         uint32_t tps32;
1261
1262         tps64 = base_hz;
1263         scaled64 = scaled_hz;
1264         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1265                 tps64 >>= 1;
1266                 shift--;
1267         }
1268
1269         tps32 = (uint32_t)tps64;
1270         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1271                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1272                         scaled64 >>= 1;
1273                 else
1274                         tps32 <<= 1;
1275                 shift++;
1276         }
1277
1278         *pshift = shift;
1279         *pmultiplier = div_frac(scaled64, tps32);
1280
1281         pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
1282                  __func__, base_hz, scaled_hz, shift, *pmultiplier);
1283 }
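
/*
 * Worked example for kvm_get_time_scale(): the call below in
 * kvm_set_tsc_khz() for a 2.5 GHz guest TSC,
 *
 *	kvm_get_time_scale(2500000 * 1000LL, NSEC_PER_SEC, &shift, &mult);
 *
 * yields shift = 2 and mult = 0xa0000000 (0.625 * 2^32), so that
 * pvclock_scale_delta() in compute_guest_tsc() turns 1e9 ns of
 * elapsed time into ((1e9 << 2) * 0.625 * 2^32) >> 32 = 2.5e9 cycles.
 */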
1284
1285 #ifdef CONFIG_X86_64
1286 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1287 #endif
1288
1289 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1290 static unsigned long max_tsc_khz;
1291
1292 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1293 {
1294         u64 v = (u64)khz * (1000000 + ppm);
1295         do_div(v, 1000000);
1296         return v;
1297 }
1298
1299 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1300 {
1301         u64 ratio;
1302
1303         /* Guest TSC same frequency as host TSC? */
1304         if (!scale) {
1305                 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1306                 return 0;
1307         }
1308
1309         /* TSC scaling supported? */
1310         if (!kvm_has_tsc_control) {
1311                 if (user_tsc_khz > tsc_khz) {
1312                         vcpu->arch.tsc_catchup = 1;
1313                         vcpu->arch.tsc_always_catchup = 1;
1314                         return 0;
1315                 } else {
1316                         WARN(1, "user requested TSC rate below hardware speed\n");
1317                         return -1;
1318                 }
1319         }
1320
1321         /* TSC scaling required  - calculate ratio */
1322         ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
1323                                 user_tsc_khz, tsc_khz);
1324
1325         if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
1326                 WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
1327                           user_tsc_khz);
1328                 return -1;
1329         }
1330
1331         vcpu->arch.tsc_scaling_ratio = ratio;
1332         return 0;
1333 }
1334
1335 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
1336 {
1337         u32 thresh_lo, thresh_hi;
1338         int use_scaling = 0;
1339
1340         /* tsc_khz can be zero if TSC calibration fails */
1341         if (user_tsc_khz == 0) {
1342                 /* set tsc_scaling_ratio to a safe value */
1343                 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1344                 return -1;
1345         }
1346
1347         /* Compute a scale to convert nanoseconds in TSC cycles */
1348         kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
1349                            &vcpu->arch.virtual_tsc_shift,
1350                            &vcpu->arch.virtual_tsc_mult);
1351         vcpu->arch.virtual_tsc_khz = user_tsc_khz;
1352
1353         /*
1354          * Compute the variation in TSC rate which is acceptable
1355          * within the range of tolerance and decide if the
1356          * rate being applied is within those bounds of the hardware
1357          * rate.  If so, no scaling or compensation needs to be done.
1358          */
1359         thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1360         thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1361         if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
1362                 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
1363                 use_scaling = 1;
1364         }
1365         return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
1366 }
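
/*
 * Worked example of the tolerance check above, assuming a tsc_khz of
 * 2,500,000 and the default tsc_tolerance_ppm of 250:
 *
 *	thresh_lo = 2500000 * (1000000 - 250) / 1000000 = 2499375 kHz
 *	thresh_hi = 2500000 * (1000000 + 250) / 1000000 = 2500625 kHz
 *
 * A requested 2,500,300 kHz guest TSC is close enough to run
 * unscaled; 2,600,000 kHz forces scaling, or catchup mode when the
 * hardware has no TSC scaling support.
 */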
1367
1368 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1369 {
1370         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1371                                       vcpu->arch.virtual_tsc_mult,
1372                                       vcpu->arch.virtual_tsc_shift);
1373         tsc += vcpu->arch.this_tsc_write;
1374         return tsc;
1375 }
1376
1377 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1378 {
1379 #ifdef CONFIG_X86_64
1380         bool vcpus_matched;
1381         struct kvm_arch *ka = &vcpu->kvm->arch;
1382         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1383
1384         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1385                          atomic_read(&vcpu->kvm->online_vcpus));
1386
1387         /*
1388          * Once the masterclock is enabled, always perform request in
1389          * order to update it.
1390          *
1391          * In order to enable masterclock, the host clocksource must be TSC
1392          * and the vcpus need to have matched TSCs.  When that happens,
1393          * perform request to enable masterclock.
1394          */
1395         if (ka->use_master_clock ||
1396             (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
1397                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1398
1399         trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1400                             atomic_read(&vcpu->kvm->online_vcpus),
1401                             ka->use_master_clock, gtod->clock.vclock_mode);
1402 #endif
1403 }
1404
1405 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1406 {
1407         u64 curr_offset = vcpu->arch.tsc_offset;
1408         vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1409 }
1410
1411 /*
1412  * Multiply tsc by a fixed point number represented by ratio.
1413  *
1414  * The most significant 64-N bits (mult) of ratio represent the
1415  * integral part of the fixed point number; the remaining N bits
1416  * (frac) represent the fractional part, i.e. ratio represents a fixed
1417  * point number (mult + frac * 2^(-N)).
1418  *
1419  * N equals kvm_tsc_scaling_ratio_frac_bits.
1420  */
1421 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
1422 {
1423         return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
1424 }
1425
1426 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
1427 {
1428         u64 _tsc = tsc;
1429         u64 ratio = vcpu->arch.tsc_scaling_ratio;
1430
1431         if (ratio != kvm_default_tsc_scaling_ratio)
1432                 _tsc = __scale_tsc(ratio, tsc);
1433
1434         return _tsc;
1435 }
1436 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
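
/*
 * Example of the fixed-point math behind kvm_scale_tsc(): to run a
 * 1.5 GHz guest TSC on a 3 GHz host with, say, 48 fractional bits
 * (kvm_tsc_scaling_ratio_frac_bits is set by the vendor module),
 * set_tsc_khz() computes
 *
 *	ratio = (1500000ULL << 48) / 3000000 = 1ULL << 47    (i.e. 0.5)
 *
 * and __scale_tsc() then returns (host_tsc * ratio) >> 48, i.e. half
 * of the elapsed host cycles.
 */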
1437
1438 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1439 {
1440         u64 tsc;
1441
1442         tsc = kvm_scale_tsc(vcpu, rdtsc());
1443
1444         return target_tsc - tsc;
1445 }
1446
1447 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1448 {
1449         return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1450 }
1451 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
1452
1453 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1454 {
1455         kvm_x86_ops->write_tsc_offset(vcpu, offset);
1456         vcpu->arch.tsc_offset = offset;
1457 }
1458
1459 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1460 {
1461         struct kvm *kvm = vcpu->kvm;
1462         u64 offset, ns, elapsed;
1463         unsigned long flags;
1464         bool matched;
1465         bool already_matched;
1466         u64 data = msr->data;
1467         bool synchronizing = false;
1468
1469         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1470         offset = kvm_compute_tsc_offset(vcpu, data);
1471         ns = ktime_get_boot_ns();
1472         elapsed = ns - kvm->arch.last_tsc_nsec;
1473
1474         if (vcpu->arch.virtual_tsc_khz) {
1475                 if (data == 0 && msr->host_initiated) {
1476                         /*
1477                          * detection of vcpu initialization -- need to sync
1478                          * with other vCPUs. This particularly helps to keep
1479                          * kvm_clock stable after CPU hotplug
1480                          */
1481                         synchronizing = true;
1482                 } else {
1483                         u64 tsc_exp = kvm->arch.last_tsc_write +
1484                                                 nsec_to_cycles(vcpu, elapsed);
1485                         u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
1486                         /*
1487                          * Special case: TSC write with a small delta (1 second)
1488                          * of virtual cycle time against real time is
1489                          * interpreted as an attempt to synchronize the CPU.
1490                          */
1491                         synchronizing = data < tsc_exp + tsc_hz &&
1492                                         data + tsc_hz > tsc_exp;
1493                 }
1494         }
1495
1496         /*
1497          * For a reliable TSC, we can match TSC offsets, and for an unstable
1498          * TSC, we add elapsed time in this computation.  We could let the
1499          * compensation code attempt to catch up if we fall behind, but
1500          * it's better to try to match offsets from the beginning.
1501          */
1502         if (synchronizing &&
1503             vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1504                 if (!check_tsc_unstable()) {
1505                         offset = kvm->arch.cur_tsc_offset;
1506                         pr_debug("kvm: matched tsc offset for %llu\n", data);
1507                 } else {
1508                         u64 delta = nsec_to_cycles(vcpu, elapsed);
1509                         data += delta;
1510                         offset = kvm_compute_tsc_offset(vcpu, data);
1511                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1512                 }
1513                 matched = true;
1514                 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1515         } else {
1516                 /*
1517                  * We split periods of matched TSC writes into generations.
1518                  * For each generation, we track the original measured
1519                  * nanosecond time, offset, and write, so if TSCs are in
1520                  * sync, we can match exact offset, and if not, we can match
1521                  * exact software computation in compute_guest_tsc()
1522                  *
1523                  * These values are tracked in kvm->arch.cur_xxx variables.
1524                  */
1525                 kvm->arch.cur_tsc_generation++;
1526                 kvm->arch.cur_tsc_nsec = ns;
1527                 kvm->arch.cur_tsc_write = data;
1528                 kvm->arch.cur_tsc_offset = offset;
1529                 matched = false;
1530                 pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1531                          kvm->arch.cur_tsc_generation, data);
1532         }
1533
1534         /*
1535          * We also track the most recent recorded KHZ, write and time to
1536          * allow the matching interval to be extended at each write.
1537          */
1538         kvm->arch.last_tsc_nsec = ns;
1539         kvm->arch.last_tsc_write = data;
1540         kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1541
1542         vcpu->arch.last_guest_tsc = data;
1543
1544         /* Keep track of which generation this VCPU has synchronized to */
1545         vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1546         vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1547         vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1548
1549         if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
1550                 update_ia32_tsc_adjust_msr(vcpu, offset);
1551
1552         kvm_vcpu_write_tsc_offset(vcpu, offset);
1553         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1554
1555         spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
1556         if (!matched) {
1557                 kvm->arch.nr_vcpus_matched_tsc = 0;
1558         } else if (!already_matched) {
1559                 kvm->arch.nr_vcpus_matched_tsc++;
1560         }
1561
1562         kvm_track_tsc_matching(vcpu);
1563         spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1564 }
1565
1566 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1567
1568 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
1569                                            s64 adjustment)
1570 {
1571         kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
1572 }
1573
1574 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
1575 {
1576         if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
1577                 WARN_ON(adjustment < 0);
1578         adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1579         adjust_tsc_offset_guest(vcpu, adjustment);
1580 }
1581
1582 #ifdef CONFIG_X86_64
1583
1584 static u64 read_tsc(void)
1585 {
1586         u64 ret = (u64)rdtsc_ordered();
1587         u64 last = pvclock_gtod_data.clock.cycle_last;
1588
1589         if (likely(ret >= last))
1590                 return ret;
1591
1592         /*
1593          * GCC likes to generate cmov here, but this branch is extremely
1594          * predictable (it's just a function of time and the likely is
1595          * very likely) and there's a data dependence, so force GCC
1596          * to generate a branch instead.  I don't barrier() because
1597          * we don't actually need a barrier, and if this function
1598          * ever gets inlined it will generate worse code.
1599          */
1600         asm volatile ("");
1601         return last;
1602 }
1603
1604 static inline u64 vgettsc(u64 *cycle_now)
1605 {
1606         long v;
1607         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1608
1609         *cycle_now = read_tsc();
1610
1611         v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
1612         return v * gtod->clock.mult;
1613 }
1614
1615 static int do_monotonic_boot(s64 *t, u64 *cycle_now)
1616 {
1617         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1618         unsigned long seq;
1619         int mode;
1620         u64 ns;
1621
1622         do {
1623                 seq = read_seqcount_begin(&gtod->seq);
1624                 mode = gtod->clock.vclock_mode;
1625                 ns = gtod->nsec_base;
1626                 ns += vgettsc(cycle_now);
1627                 ns >>= gtod->clock.shift;
1628                 ns += gtod->boot_ns;
1629         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1630         *t = ns;
1631
1632         return mode;
1633 }
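/*
 * Rough shape of what vgettsc() and do_monotonic_boot() compute above
 * (sketch only): mult and shift are chosen by the timekeeping core so
 * that mult / 2^shift is the length of one TSC cycle in nanoseconds
 * (roughly 0.4 for a 2.5 GHz TSC), and nsec_base is kept in the same
 * shifted fixed-point units, so
 *
 *	ns ~= ((nsec_base + (tsc - cycle_last) * mult) >> shift) + boot_ns
 */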
1634
1635 static int do_realtime(struct timespec *ts, u64 *cycle_now)
1636 {
1637         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1638         unsigned long seq;
1639         int mode;
1640         u64 ns;
1641
1642         do {
1643                 seq = read_seqcount_begin(&gtod->seq);
1644                 mode = gtod->clock.vclock_mode;
1645                 ts->tv_sec = gtod->wall_time_sec;
1646                 ns = gtod->nsec_base;
1647                 ns += vgettsc(cycle_now);
1648                 ns >>= gtod->clock.shift;
1649         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1650
1651         ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
1652         ts->tv_nsec = ns;
1653
1654         return mode;
1655 }
1656
1657 /* returns true if host is using tsc clocksource */
1658 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
1659 {
1660         /* checked again under seqlock below */
1661         if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1662                 return false;
1663
1664         return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1665 }
1666
1667 /* returns true if host is using tsc clocksource */
1668 static bool kvm_get_walltime_and_clockread(struct timespec *ts,
1669                                            u64 *cycle_now)
1670 {
1671         /* checked again under seqlock below */
1672         if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1673                 return false;
1674
1675         return do_realtime(ts, cycle_now) == VCLOCK_TSC;
1676 }
1677 #endif
1678
1679 /*
1680  *
1681  * Assuming a stable TSC across physical CPUs, and a stable TSC
1682  * across virtual CPUs, the following scenario is possible.
1683  * Each numbered line represents an event visible to both
1684  * CPUs at the next numbered event.
1685  *
1686  * "timespecX" represents host monotonic time. "tscX" represents
1687  * RDTSC value.
1688  *
1689  *              VCPU0 on CPU0           |       VCPU1 on CPU1
1690  *
1691  * 1.  read timespec0,tsc0
1692  * 2.                                   | timespec1 = timespec0 + N
1693  *                                      | tsc1 = tsc0 + M
1694  * 3. transition to guest               | transition to guest
1695  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
1696  * 5.                                   | ret1 = timespec1 + (rdtsc - tsc1)
1697  *                                      | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
1698  *
1699  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
1700  *
1701  *      - ret0 < ret1
1702  *      - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
1703  *              ...
1704  *      - 0 < N - M => M < N
1705  *
1706  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
1707  * always the case (the difference between two distinct xtime instances
1708  * might be smaller than the difference between corresponding TSC reads,
1709  * when updating guest vcpus pvclock areas).
1710  *
1711  * To avoid that problem, do not allow visibility of distinct
1712  * system_timestamp/tsc_timestamp values simultaneously: use a master
1713  * copy of host monotonic time values. Update that master copy
1714  * in lockstep.
1715  *
1716  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1717  *
1718  */
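/*
 * A concrete (made-up) instance of the problem described above: with a
 * 1 GHz TSC, suppose N = 1000 ns but the two TSC samples were taken
 * M = 3000 cycles (3000 ns) apart.  Then
 *
 *	ret1 = timespec0 + 1000 + (rdtsc - tsc0 - 3000) = ret0 - 2000
 *
 * and a guest reading kvmclock on VCPU1 right after VCPU0 would see
 * time jump backwards by 2 us.  The master copy maintained below avoids
 * this by never exposing two different (system_timestamp, tsc_timestamp)
 * pairs at the same time.
 */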
1719
1720 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1721 {
1722 #ifdef CONFIG_X86_64
1723         struct kvm_arch *ka = &kvm->arch;
1724         int vclock_mode;
1725         bool host_tsc_clocksource, vcpus_matched;
1726
1727         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1728                         atomic_read(&kvm->online_vcpus));
1729
1730         /*
1731          * If the host uses TSC clock, then passthrough TSC as stable
1732          * to the guest.
1733          */
1734         host_tsc_clocksource = kvm_get_time_and_clockread(
1735                                         &ka->master_kernel_ns,
1736                                         &ka->master_cycle_now);
1737
1738         ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1739                                 && !ka->backwards_tsc_observed
1740                                 && !ka->boot_vcpu_runs_old_kvmclock;
1741
1742         if (ka->use_master_clock)
1743                 atomic_set(&kvm_guest_has_master_clock, 1);
1744
1745         vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1746         trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
1747                                         vcpus_matched);
1748 #endif
1749 }
1750
1751 void kvm_make_mclock_inprogress_request(struct kvm *kvm)
1752 {
1753         kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
1754 }
1755
1756 static void kvm_gen_update_masterclock(struct kvm *kvm)
1757 {
1758 #ifdef CONFIG_X86_64
1759         int i;
1760         struct kvm_vcpu *vcpu;
1761         struct kvm_arch *ka = &kvm->arch;
1762
1763         spin_lock(&ka->pvclock_gtod_sync_lock);
1764         kvm_make_mclock_inprogress_request(kvm);
1765         /* no guest entries from this point */
1766         pvclock_update_vm_gtod_copy(kvm);
1767
1768         kvm_for_each_vcpu(i, vcpu, kvm)
1769                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1770
1771         /* guest entries allowed */
1772         kvm_for_each_vcpu(i, vcpu, kvm)
1773                 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
1774
1775         spin_unlock(&ka->pvclock_gtod_sync_lock);
1776 #endif
1777 }
1778
1779 u64 get_kvmclock_ns(struct kvm *kvm)
1780 {
1781         struct kvm_arch *ka = &kvm->arch;
1782         struct pvclock_vcpu_time_info hv_clock;
1783         u64 ret;
1784
1785         spin_lock(&ka->pvclock_gtod_sync_lock);
1786         if (!ka->use_master_clock) {
1787                 spin_unlock(&ka->pvclock_gtod_sync_lock);
1788                 return ktime_get_boot_ns() + ka->kvmclock_offset;
1789         }
1790
1791         hv_clock.tsc_timestamp = ka->master_cycle_now;
1792         hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1793         spin_unlock(&ka->pvclock_gtod_sync_lock);
1794
1795         /* both __this_cpu_read() and rdtsc() should be on the same cpu */
1796         get_cpu();
1797
1798         kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1799                            &hv_clock.tsc_shift,
1800                            &hv_clock.tsc_to_system_mul);
1801         ret = __pvclock_read_cycles(&hv_clock, rdtsc());
1802
1803         put_cpu();
1804
1805         return ret;
1806 }
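/*
 * The __pvclock_read_cycles() step above amounts to (sketch, ignoring
 * the exact fixed-point evaluation order):
 *
 *	ret ~= system_time
 *	       + (rdtsc() - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift - 32)
 *
 * i.e. the host TSC delta since master_cycle_now, scaled to nanoseconds
 * and added to the master kernel time plus kvmclock_offset.
 */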
1807
1808 static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
1809 {
1810         struct kvm_vcpu_arch *vcpu = &v->arch;
1811         struct pvclock_vcpu_time_info guest_hv_clock;
1812
1813         if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1814                 &guest_hv_clock, sizeof(guest_hv_clock))))
1815                 return;
1816
1817         /* This VCPU is paused, but it's legal for a guest to read another
1818          * VCPU's kvmclock, so we really have to follow the specification where
1819          * it says that version is odd if data is being modified, and even after
1820          * it is consistent.
1821          *
1822          * Version field updates must be kept separate.  This is because
1823          * kvm_write_guest_cached might use a "rep movs" instruction, and
1824          * writes within a string instruction are weakly ordered.  So there
1825          * are three writes overall.
1826          *
1827          * As a small optimization, only write the version field in the first
1828          * and third write.  The vcpu->pv_time cache is still valid, because the
1829          * version field is the first in the struct.
1830          */
1831         BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
1832
1833         vcpu->hv_clock.version = guest_hv_clock.version + 1;
1834         kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1835                                 &vcpu->hv_clock,
1836                                 sizeof(vcpu->hv_clock.version));
1837
1838         smp_wmb();
1839
1840         /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1841         vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1842
1843         if (vcpu->pvclock_set_guest_stopped_request) {
1844                 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
1845                 vcpu->pvclock_set_guest_stopped_request = false;
1846         }
1847
1848         trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
1849
1850         kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1851                                 &vcpu->hv_clock,
1852                                 sizeof(vcpu->hv_clock));
1853
1854         smp_wmb();
1855
1856         vcpu->hv_clock.version++;
1857         kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1858                                 &vcpu->hv_clock,
1859                                 sizeof(vcpu->hv_clock.version));
1860 }
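/*
 * For reference, the guest side of the version protocol above is
 * expected to look roughly like this (sketch only, not KVM code; "pvti"
 * stands for the guest mapping of this pvclock_vcpu_time_info page):
 *
 *	do {
 *		version = pvti->version;
 *		smp_rmb();
 *		... read tsc_timestamp, system_time, mul, shift, flags ...
 *		smp_rmb();
 *	} while ((version & 1) || version != pvti->version);
 *
 * which is why the data write above is sandwiched between the two
 * version updates.
 */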
1861
1862 static int kvm_guest_time_update(struct kvm_vcpu *v)
1863 {
1864         unsigned long flags, tgt_tsc_khz;
1865         struct kvm_vcpu_arch *vcpu = &v->arch;
1866         struct kvm_arch *ka = &v->kvm->arch;
1867         s64 kernel_ns;
1868         u64 tsc_timestamp, host_tsc;
1869         u8 pvclock_flags;
1870         bool use_master_clock;
1871
1872         kernel_ns = 0;
1873         host_tsc = 0;
1874
1875         /*
1876          * If the host uses TSC clock, then passthrough TSC as stable
1877          * to the guest.
1878          */
1879         spin_lock(&ka->pvclock_gtod_sync_lock);
1880         use_master_clock = ka->use_master_clock;
1881         if (use_master_clock) {
1882                 host_tsc = ka->master_cycle_now;
1883                 kernel_ns = ka->master_kernel_ns;
1884         }
1885         spin_unlock(&ka->pvclock_gtod_sync_lock);
1886
1887         /* Keep irq disabled to prevent changes to the clock */
1888         local_irq_save(flags);
1889         tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1890         if (unlikely(tgt_tsc_khz == 0)) {
1891                 local_irq_restore(flags);
1892                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1893                 return 1;
1894         }
1895         if (!use_master_clock) {
1896                 host_tsc = rdtsc();
1897                 kernel_ns = ktime_get_boot_ns();
1898         }
1899
1900         tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
1901
1902         /*
1903          * We may have to catch up the TSC to match elapsed wall clock
1904          * time for two reasons, even if kvmclock is used.
1905          *   1) CPU could have been running below the maximum TSC rate
1906          *   2) Broken TSC compensation resets the base at each VCPU
1907          *      entry to avoid unknown leaps of TSC even when running
1908          *      again on the same CPU.  This may cause apparent elapsed
1909          *      time to disappear, and the guest to stand still or run
1910          *      very slowly.
1911          */
1912         if (vcpu->tsc_catchup) {
1913                 u64 tsc = compute_guest_tsc(v, kernel_ns);
1914                 if (tsc > tsc_timestamp) {
1915                         adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1916                         tsc_timestamp = tsc;
1917                 }
1918         }
1919
1920         local_irq_restore(flags);
1921
1922         /* With all the info we got, fill in the values */
1923
1924         if (kvm_has_tsc_control)
1925                 tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
1926
1927         if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
1928                 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
1929                                    &vcpu->hv_clock.tsc_shift,
1930                                    &vcpu->hv_clock.tsc_to_system_mul);
1931                 vcpu->hw_tsc_khz = tgt_tsc_khz;
1932         }
1933
1934         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1935         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1936         vcpu->last_guest_tsc = tsc_timestamp;
1937
1938         /* If the host uses TSC clocksource, then it is stable */
1939         pvclock_flags = 0;
1940         if (use_master_clock)
1941                 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
1942
1943         vcpu->hv_clock.flags = pvclock_flags;
1944
1945         if (vcpu->pv_time_enabled)
1946                 kvm_setup_pvclock_page(v);
1947         if (v == kvm_get_vcpu(v->kvm, 0))
1948                 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
1949         return 0;
1950 }
1951
1952 /*
1953  * kvmclock updates which are isolated to a given vcpu, such as
1954  * vcpu->cpu migration, should not allow system_timestamp from
1955  * the rest of the vcpus to remain static. Otherwise ntp frequency
1956  * correction applies to one vcpu's system_timestamp but not
1957  * the others.
1958  *
1959  * So in those cases, request a kvmclock update for all vcpus.
1960  * We need to rate-limit these requests though, as they can
1961  * considerably slow guests that have a large number of vcpus.
1962  * The time for a remote vcpu to update its kvmclock is bound
1963  * by the delay we use to rate-limit the updates.
1964  */
1965
1966 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1967
1968 static void kvmclock_update_fn(struct work_struct *work)
1969 {
1970         int i;
1971         struct delayed_work *dwork = to_delayed_work(work);
1972         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1973                                            kvmclock_update_work);
1974         struct kvm *kvm = container_of(ka, struct kvm, arch);
1975         struct kvm_vcpu *vcpu;
1976
1977         kvm_for_each_vcpu(i, vcpu, kvm) {
1978                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1979                 kvm_vcpu_kick(vcpu);
1980         }
1981 }
1982
1983 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1984 {
1985         struct kvm *kvm = v->kvm;
1986
1987         kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1988         schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1989                                         KVMCLOCK_UPDATE_DELAY);
1990 }
1991
1992 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
1993
1994 static void kvmclock_sync_fn(struct work_struct *work)
1995 {
1996         struct delayed_work *dwork = to_delayed_work(work);
1997         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1998                                            kvmclock_sync_work);
1999         struct kvm *kvm = container_of(ka, struct kvm, arch);
2000
2001         if (!kvmclock_periodic_sync)
2002                 return;
2003
2004         schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
2005         schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
2006                                         KVMCLOCK_SYNC_PERIOD);
2007 }
2008
2009 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2010 {
2011         u64 mcg_cap = vcpu->arch.mcg_cap;
2012         unsigned bank_num = mcg_cap & 0xff;
2013
2014         switch (msr) {
2015         case MSR_IA32_MCG_STATUS:
2016                 vcpu->arch.mcg_status = data;
2017                 break;
2018         case MSR_IA32_MCG_CTL:
2019                 if (!(mcg_cap & MCG_CTL_P))
2020                         return 1;
2021                 if (data != 0 && data != ~(u64)0)
2022                         return -1;
2023                 vcpu->arch.mcg_ctl = data;
2024                 break;
2025         default:
2026                 if (msr >= MSR_IA32_MC0_CTL &&
2027                     msr < MSR_IA32_MCx_CTL(bank_num)) {
2028                         u32 offset = msr - MSR_IA32_MC0_CTL;
2029                         /* only 0 or all 1s can be written to IA32_MCi_CTL;
2030                          * some Linux kernels though clear bit 10 in bank 4 to
2031                          * work around a BIOS/GART TBL issue on AMD K8s, so ignore
2032                          * this to avoid an uncaught #GP in the guest
2033                          */
2034                         if ((offset & 0x3) == 0 &&
2035                             data != 0 && (data | (1 << 10)) != ~(u64)0)
2036                                 return -1;
2037                         vcpu->arch.mce_banks[offset] = data;
2038                         break;
2039                 }
2040                 return 1;
2041         }
2042         return 0;
2043 }
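/*
 * Worked example of the IA32_MCi_CTL check above (illustrative values):
 * a write of 0xfffffffffffffbff (all 1s except bit 10) is accepted,
 * because (data | (1 << 10)) == ~0ULL, which is exactly the Linux-on-K8
 * case noted in the comment; a write of 0x1 is refused with -1.
 */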
2044
2045 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
2046 {
2047         struct kvm *kvm = vcpu->kvm;
2048         int lm = is_long_mode(vcpu);
2049         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
2050                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
2051         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
2052                 : kvm->arch.xen_hvm_config.blob_size_32;
2053         u32 page_num = data & ~PAGE_MASK;
2054         u64 page_addr = data & PAGE_MASK;
2055         u8 *page;
2056         int r;
2057
2058         r = -E2BIG;
2059         if (page_num >= blob_size)
2060                 goto out;
2061         r = -ENOMEM;
2062         page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
2063         if (IS_ERR(page)) {
2064                 r = PTR_ERR(page);
2065                 goto out;
2066         }
2067         if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
2068                 goto out_free;
2069         r = 0;
2070 out_free:
2071         kfree(page);
2072 out:
2073         return r;
2074 }
2075
2076 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2077 {
2078         gpa_t gpa = data & ~0x3f;
2079
2080         /* Bits 3:5 are reserved; should be zero */
2081         if (data & 0x38)
2082                 return 1;
2083
2084         vcpu->arch.apf.msr_val = data;
2085
2086         if (!(data & KVM_ASYNC_PF_ENABLED)) {
2087                 kvm_clear_async_pf_completion_queue(vcpu);
2088                 kvm_async_pf_hash_reset(vcpu);
2089                 return 0;
2090         }
2091
2092         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2093                                         sizeof(u32)))
2094                 return 1;
2095
2096         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2097         vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
2098         kvm_async_pf_wakeup_all(vcpu);
2099         return 0;
2100 }
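/*
 * Layout of the MSR handled above (sketch): bit 0 (KVM_ASYNC_PF_ENABLED)
 * turns the feature on, bit 1 (KVM_ASYNC_PF_SEND_ALWAYS) allows delivery
 * while the guest runs in ring 0, bit 2 requests delivery as a #PF
 * vmexit, bits 3:5 must be zero, and bits 6+ carry the GPA of the
 * 4-byte word cached in apf.data above.  A guest enabling the feature
 * would write something like (data_gpa | KVM_ASYNC_PF_ENABLED), where
 * data_gpa is only an illustrative name here.
 */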
2101
2102 static void kvmclock_reset(struct kvm_vcpu *vcpu)
2103 {
2104         vcpu->arch.pv_time_enabled = false;
2105 }
2106
2107 static void record_steal_time(struct kvm_vcpu *vcpu)
2108 {
2109         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2110                 return;
2111
2112         if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2113                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
2114                 return;
2115
2116         vcpu->arch.st.steal.preempted = 0;
2117
2118         if (vcpu->arch.st.steal.version & 1)
2119                 vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
2120
2121         vcpu->arch.st.steal.version += 1;
2122
2123         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2124                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2125
2126         smp_wmb();
2127
2128         vcpu->arch.st.steal.steal += current->sched_info.run_delay -
2129                 vcpu->arch.st.last_steal;
2130         vcpu->arch.st.last_steal = current->sched_info.run_delay;
2131
2132         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2133                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2134
2135         smp_wmb();
2136
2137         vcpu->arch.st.steal.version += 1;
2138
2139         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2140                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2141 }
2142
2143 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2144 {
2145         bool pr = false;
2146         u32 msr = msr_info->index;
2147         u64 data = msr_info->data;
2148
2149         switch (msr) {
2150         case MSR_AMD64_NB_CFG:
2151         case MSR_IA32_UCODE_REV:
2152         case MSR_IA32_UCODE_WRITE:
2153         case MSR_VM_HSAVE_PA:
2154         case MSR_AMD64_PATCH_LOADER:
2155         case MSR_AMD64_BU_CFG2:
2156         case MSR_AMD64_DC_CFG:
2157                 break;
2158
2159         case MSR_EFER:
2160                 return set_efer(vcpu, data);
2161         case MSR_K7_HWCR:
2162                 data &= ~(u64)0x40;     /* ignore flush filter disable */
2163                 data &= ~(u64)0x100;    /* ignore IGNNE emulation enable */
2164                 data &= ~(u64)0x8;      /* ignore TLB cache disable */
2165                 data &= ~(u64)0x40000;  /* ignore MC status write enable */
2166                 if (data != 0) {
2167                         vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
2168                                     data);
2169                         return 1;
2170                 }
2171                 break;
2172         case MSR_FAM10H_MMIO_CONF_BASE:
2173                 if (data != 0) {
2174                         vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
2175                                     "0x%llx\n", data);
2176                         return 1;
2177                 }
2178                 break;
2179         case MSR_IA32_DEBUGCTLMSR:
2180                 if (!data) {
2181                         /* We support the non-activated case already */
2182                         break;
2183                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
2184                         /* Values other than LBR and BTF are vendor-specific,
2185                            thus reserved and should throw a #GP */
2186                         return 1;
2187                 }
2188                 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
2189                             __func__, data);
2190                 break;
2191         case 0x200 ... 0x2ff:
2192                 return kvm_mtrr_set_msr(vcpu, msr, data);
2193         case MSR_IA32_APICBASE:
2194                 return kvm_set_apic_base(vcpu, msr_info);
2195         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2196                 return kvm_x2apic_msr_write(vcpu, msr, data);
2197         case MSR_IA32_TSCDEADLINE:
2198                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2199                 break;
2200         case MSR_IA32_TSC_ADJUST:
2201                 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
2202                         if (!msr_info->host_initiated) {
2203                                 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2204                                 adjust_tsc_offset_guest(vcpu, adj);
2205                         }
2206                         vcpu->arch.ia32_tsc_adjust_msr = data;
2207                 }
2208                 break;
2209         case MSR_IA32_MISC_ENABLE:
2210                 vcpu->arch.ia32_misc_enable_msr = data;
2211                 break;
2212         case MSR_IA32_SMBASE:
2213                 if (!msr_info->host_initiated)
2214                         return 1;
2215                 vcpu->arch.smbase = data;
2216                 break;
2217         case MSR_KVM_WALL_CLOCK_NEW:
2218         case MSR_KVM_WALL_CLOCK:
2219                 vcpu->kvm->arch.wall_clock = data;
2220                 kvm_write_wall_clock(vcpu->kvm, data);
2221                 break;
2222         case MSR_KVM_SYSTEM_TIME_NEW:
2223         case MSR_KVM_SYSTEM_TIME: {
2224                 struct kvm_arch *ka = &vcpu->kvm->arch;
2225
2226                 kvmclock_reset(vcpu);
2227
2228                 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
2229                         bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
2230
2231                         if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2232                                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2233
2234                         ka->boot_vcpu_runs_old_kvmclock = tmp;
2235                 }
2236
2237                 vcpu->arch.time = data;
2238                 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2239
2240                 /* only set up the pv_time cache if the enable bit is set */
2241                 if (!(data & 1))
2242                         break;
2243
2244                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2245                      &vcpu->arch.pv_time, data & ~1ULL,
2246                      sizeof(struct pvclock_vcpu_time_info)))
2247                         vcpu->arch.pv_time_enabled = false;
2248                 else
2249                         vcpu->arch.pv_time_enabled = true;
2250
2251                 break;
2252         }
2253         case MSR_KVM_ASYNC_PF_EN:
2254                 if (kvm_pv_enable_async_pf(vcpu, data))
2255                         return 1;
2256                 break;
2257         case MSR_KVM_STEAL_TIME:
2258
2259                 if (unlikely(!sched_info_on()))
2260                         return 1;
2261
2262                 if (data & KVM_STEAL_RESERVED_MASK)
2263                         return 1;
2264
2265                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2266                                                 data & KVM_STEAL_VALID_BITS,
2267                                                 sizeof(struct kvm_steal_time)))
2268                         return 1;
2269
2270                 vcpu->arch.st.msr_val = data;
2271
2272                 if (!(data & KVM_MSR_ENABLED))
2273                         break;
2274
2275                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2276
2277                 break;
2278         case MSR_KVM_PV_EOI_EN:
2279                 if (kvm_lapic_enable_pv_eoi(vcpu, data))
2280                         return 1;
2281                 break;
2282
2283         case MSR_IA32_MCG_CTL:
2284         case MSR_IA32_MCG_STATUS:
2285         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2286                 return set_msr_mce(vcpu, msr, data);
2287
2288         case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2289         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2290                 pr = true; /* fall through */
2291         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2292         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2293                 if (kvm_pmu_is_valid_msr(vcpu, msr))
2294                         return kvm_pmu_set_msr(vcpu, msr_info);
2295
2296                 if (pr || data != 0)
2297                         vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2298                                     "0x%x data 0x%llx\n", msr, data);
2299                 break;
2300         case MSR_K7_CLK_CTL:
2301                 /*
2302                  * Ignore all writes to this no longer documented MSR.
2303                  * Writes are only relevant for old K7 processors, all of
2304                  * which pre-date SVM, but writing this MSR is a workaround
2305                  * recommended by AMD for those chips. Since the affected
2306                  * processor models can be specified on the command line,
2307                  * the workaround simply has to be ignored here.
2308                  */
2309                 break;
2310         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2311         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2312         case HV_X64_MSR_CRASH_CTL:
2313         case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2314                 return kvm_hv_set_msr_common(vcpu, msr, data,
2315                                              msr_info->host_initiated);
2316         case MSR_IA32_BBL_CR_CTL3:
2317                 /* Drop writes to this legacy MSR -- see rdmsr
2318                  * counterpart for further detail.
2319                  */
2320                 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
2321                 break;
2322         case MSR_AMD64_OSVW_ID_LENGTH:
2323                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2324                         return 1;
2325                 vcpu->arch.osvw.length = data;
2326                 break;
2327         case MSR_AMD64_OSVW_STATUS:
2328                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2329                         return 1;
2330                 vcpu->arch.osvw.status = data;
2331                 break;
2332         case MSR_PLATFORM_INFO:
2333                 if (!msr_info->host_initiated ||
2334                     data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
2335                     (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
2336                      cpuid_fault_enabled(vcpu)))
2337                         return 1;
2338                 vcpu->arch.msr_platform_info = data;
2339                 break;
2340         case MSR_MISC_FEATURES_ENABLES:
2341                 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
2342                     (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
2343                      !supports_cpuid_fault(vcpu)))
2344                         return 1;
2345                 vcpu->arch.msr_misc_features_enables = data;
2346                 break;
2347         default:
2348                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2349                         return xen_hvm_config(vcpu, data);
2350                 if (kvm_pmu_is_valid_msr(vcpu, msr))
2351                         return kvm_pmu_set_msr(vcpu, msr_info);
2352                 if (!ignore_msrs) {
2353                         vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2354                                     msr, data);
2355                         return 1;
2356                 } else {
2357                         vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
2358                                     msr, data);
2359                         break;
2360                 }
2361         }
2362         return 0;
2363 }
2364 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2365
2366
2367 /*
2368  * Reads the MSR at msr->index into msr->data.
2369  * Returns 0 on success, non-0 otherwise.
2370  * Assumes vcpu_load() was already called.
2371  */
2372 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2373 {
2374         return kvm_x86_ops->get_msr(vcpu, msr);
2375 }
2376 EXPORT_SYMBOL_GPL(kvm_get_msr);
2377
2378 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2379 {
2380         u64 data;
2381         u64 mcg_cap = vcpu->arch.mcg_cap;
2382         unsigned bank_num = mcg_cap & 0xff;
2383
2384         switch (msr) {
2385         case MSR_IA32_P5_MC_ADDR:
2386         case MSR_IA32_P5_MC_TYPE:
2387                 data = 0;
2388                 break;
2389         case MSR_IA32_MCG_CAP:
2390                 data = vcpu->arch.mcg_cap;
2391                 break;
2392         case MSR_IA32_MCG_CTL:
2393                 if (!(mcg_cap & MCG_CTL_P))
2394                         return 1;
2395                 data = vcpu->arch.mcg_ctl;
2396                 break;
2397         case MSR_IA32_MCG_STATUS:
2398                 data = vcpu->arch.mcg_status;
2399                 break;
2400         default:
2401                 if (msr >= MSR_IA32_MC0_CTL &&
2402                     msr < MSR_IA32_MCx_CTL(bank_num)) {
2403                         u32 offset = msr - MSR_IA32_MC0_CTL;
2404                         data = vcpu->arch.mce_banks[offset];
2405                         break;
2406                 }
2407                 return 1;
2408         }
2409         *pdata = data;
2410         return 0;
2411 }
2412
2413 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2414 {
2415         switch (msr_info->index) {
2416         case MSR_IA32_PLATFORM_ID:
2417         case MSR_IA32_EBL_CR_POWERON:
2418         case MSR_IA32_DEBUGCTLMSR:
2419         case MSR_IA32_LASTBRANCHFROMIP:
2420         case MSR_IA32_LASTBRANCHTOIP:
2421         case MSR_IA32_LASTINTFROMIP:
2422         case MSR_IA32_LASTINTTOIP:
2423         case MSR_K8_SYSCFG:
2424         case MSR_K8_TSEG_ADDR:
2425         case MSR_K8_TSEG_MASK:
2426         case MSR_K7_HWCR:
2427         case MSR_VM_HSAVE_PA:
2428         case MSR_K8_INT_PENDING_MSG:
2429         case MSR_AMD64_NB_CFG:
2430         case MSR_FAM10H_MMIO_CONF_BASE:
2431         case MSR_AMD64_BU_CFG2:
2432         case MSR_IA32_PERF_CTL:
2433         case MSR_AMD64_DC_CFG:
2434                 msr_info->data = 0;
2435                 break;
2436         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
2437         case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
2438         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
2439         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2440                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2441                         return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2442                 msr_info->data = 0;
2443                 break;
2444         case MSR_IA32_UCODE_REV:
2445                 msr_info->data = 0x100000000ULL;
2446                 break;
2447         case MSR_MTRRcap:
2448         case 0x200 ... 0x2ff:
2449                 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2450         case 0xcd: /* fsb frequency */
2451                 msr_info->data = 3;
2452                 break;
2453                 /*
2454                  * MSR_EBC_FREQUENCY_ID
2455                  * Conservative value valid for even the basic CPU models.
2456                  * Models 0,1: 000 in bits 23:21 indicating a bus speed of
2457                  * 100MHz, model 2: 000 in bits 18:16 indicating 100MHz,
2458                  * and 266MHz for models 3 and 4. Set Core Clock
2459                  * Frequency to System Bus Frequency Ratio to 1 (bits
2460                  * 31:24) even though these are only valid for CPU
2461                  * models > 2, however guests may end up dividing or
2462                  * multiplying by zero otherwise.
2463                  */
2464         case MSR_EBC_FREQUENCY_ID:
2465                 msr_info->data = 1 << 24;
2466                 break;
2467         case MSR_IA32_APICBASE:
2468                 msr_info->data = kvm_get_apic_base(vcpu);
2469                 break;
2470         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2471                 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
2472                 break;
2473         case MSR_IA32_TSCDEADLINE:
2474                 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2475                 break;
2476         case MSR_IA32_TSC_ADJUST:
2477                 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
2478                 break;
2479         case MSR_IA32_MISC_ENABLE:
2480                 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2481                 break;
2482         case MSR_IA32_SMBASE:
2483                 if (!msr_info->host_initiated)
2484                         return 1;
2485                 msr_info->data = vcpu->arch.smbase;
2486                 break;
2487         case MSR_IA32_PERF_STATUS:
2488                 /* TSC increment by tick */
2489                 msr_info->data = 1000ULL;
2490                 /* CPU multiplier */
2491                 msr_info->data |= (((uint64_t)4ULL) << 40);
2492                 break;
2493         case MSR_EFER:
2494                 msr_info->data = vcpu->arch.efer;
2495                 break;
2496         case MSR_KVM_WALL_CLOCK:
2497         case MSR_KVM_WALL_CLOCK_NEW:
2498                 msr_info->data = vcpu->kvm->arch.wall_clock;
2499                 break;
2500         case MSR_KVM_SYSTEM_TIME:
2501         case MSR_KVM_SYSTEM_TIME_NEW:
2502                 msr_info->data = vcpu->arch.time;
2503                 break;
2504         case MSR_KVM_ASYNC_PF_EN:
2505                 msr_info->data = vcpu->arch.apf.msr_val;
2506                 break;
2507         case MSR_KVM_STEAL_TIME:
2508                 msr_info->data = vcpu->arch.st.msr_val;
2509                 break;
2510         case MSR_KVM_PV_EOI_EN:
2511                 msr_info->data = vcpu->arch.pv_eoi.msr_val;
2512                 break;
2513         case MSR_IA32_P5_MC_ADDR:
2514         case MSR_IA32_P5_MC_TYPE:
2515         case MSR_IA32_MCG_CAP:
2516         case MSR_IA32_MCG_CTL:
2517         case MSR_IA32_MCG_STATUS:
2518         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2519                 return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
2520         case MSR_K7_CLK_CTL:
2521                 /*
2522                  * Provide the expected ramp-up count for K7. All other
2523                  * fields are set to zero, indicating minimum divisors for
2524                  * every field.
2525                  *
2526                  * This prevents guest kernels on AMD host with CPU
2527                  * type 6, model 8 and higher from exploding due to
2528                  * the rdmsr failing.
2529                  */
2530                 msr_info->data = 0x20000000;
2531                 break;
2532         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2533         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2534         case HV_X64_MSR_CRASH_CTL:
2535         case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2536                 return kvm_hv_get_msr_common(vcpu,
2537                                              msr_info->index, &msr_info->data);
2538                 break;
2539         case MSR_IA32_BBL_CR_CTL3:
2540                 /* This legacy MSR exists but isn't fully documented in current
2541                  * silicon.  It is however accessed by winxp in very narrow
2542                  * scenarios where it sets bit #19, itself documented as
2543                  * a "reserved" bit.  Best effort attempt to source coherent
2544                  * read data here should the balance of the register be
2545                  * interpreted by the guest:
2546                  *
2547                  * L2 cache control register 3: 64GB range, 256KB size,
2548                  * enabled, latency 0x1, configured
2549                  */
2550                 msr_info->data = 0xbe702111;
2551                 break;
2552         case MSR_AMD64_OSVW_ID_LENGTH:
2553                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2554                         return 1;
2555                 msr_info->data = vcpu->arch.osvw.length;
2556                 break;
2557         case MSR_AMD64_OSVW_STATUS:
2558                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2559                         return 1;
2560                 msr_info->data = vcpu->arch.osvw.status;
2561                 break;
2562         case MSR_PLATFORM_INFO:
2563                 msr_info->data = vcpu->arch.msr_platform_info;
2564                 break;
2565         case MSR_MISC_FEATURES_ENABLES:
2566                 msr_info->data = vcpu->arch.msr_misc_features_enables;
2567                 break;
2568         default:
2569                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2570                         return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2571                 if (!ignore_msrs) {
2572                         vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
2573                                                msr_info->index);
2574                         return 1;
2575                 } else {
2576                         vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
2577                         msr_info->data = 0;
2578                 }
2579                 break;
2580         }
2581         return 0;
2582 }
2583 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2584
2585 /*
2586  * Read or write a bunch of msrs. All parameters are kernel addresses.
2587  *
2588  * @return number of msrs processed successfully.
2589  */
2590 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2591                     struct kvm_msr_entry *entries,
2592                     int (*do_msr)(struct kvm_vcpu *vcpu,
2593                                   unsigned index, u64 *data))
2594 {
2595         int i, idx;
2596
2597         idx = srcu_read_lock(&vcpu->kvm->srcu);
2598         for (i = 0; i < msrs->nmsrs; ++i)
2599                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2600                         break;
2601         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2602
2603         return i;
2604 }
2605
2606 /*
2607  * Read or write a bunch of msrs. Parameters are user addresses.
2608  *
2609  * @return number of msrs processed successfully.
2610  */
2611 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2612                   int (*do_msr)(struct kvm_vcpu *vcpu,
2613                                 unsigned index, u64 *data),
2614                   int writeback)
2615 {
2616         struct kvm_msrs msrs;
2617         struct kvm_msr_entry *entries;
2618         int r, n;
2619         unsigned size;
2620
2621         r = -EFAULT;
2622         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2623                 goto out;
2624
2625         r = -E2BIG;
2626         if (msrs.nmsrs >= MAX_IO_MSRS)
2627                 goto out;
2628
2629         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2630         entries = memdup_user(user_msrs->entries, size);
2631         if (IS_ERR(entries)) {
2632                 r = PTR_ERR(entries);
2633                 goto out;
2634         }
2635
2636         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2637         if (r < 0)
2638                 goto out_free;
2639
2640         r = -EFAULT;
2641         if (writeback && copy_to_user(user_msrs->entries, entries, size))
2642                 goto out_free;
2643
2644         r = n;
2645
2646 out_free:
2647         kfree(entries);
2648 out:
2649         return r;
2650 }
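/*
 * __msr_io()/msr_io() above back the KVM_GET_MSRS and KVM_SET_MSRS vcpu
 * ioctls.  A userspace caller (sketch only, not part of this file) packs
 * a struct kvm_msrs header followed by nmsrs struct kvm_msr_entry items
 * and gets the number of processed entries back from the ioctl:
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } m = {
 *		.hdr.nmsrs = 1,
 *		.e[0].index = 0x10,
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 *
 * (0x10 is MSR_IA32_TSC, picked here purely as an example.)
 */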
2651
2652 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2653 {
2654         int r;
2655
2656         switch (ext) {
2657         case KVM_CAP_IRQCHIP:
2658         case KVM_CAP_HLT:
2659         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2660         case KVM_CAP_SET_TSS_ADDR:
2661         case KVM_CAP_EXT_CPUID:
2662         case KVM_CAP_EXT_EMUL_CPUID:
2663         case KVM_CAP_CLOCKSOURCE:
2664         case KVM_CAP_PIT:
2665         case KVM_CAP_NOP_IO_DELAY:
2666         case KVM_CAP_MP_STATE:
2667         case KVM_CAP_SYNC_MMU:
2668         case KVM_CAP_USER_NMI:
2669         case KVM_CAP_REINJECT_CONTROL:
2670         case KVM_CAP_IRQ_INJECT_STATUS:
2671         case KVM_CAP_IOEVENTFD:
2672         case KVM_CAP_IOEVENTFD_NO_LENGTH:
2673         case KVM_CAP_PIT2:
2674         case KVM_CAP_PIT_STATE2:
2675         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2676         case KVM_CAP_XEN_HVM:
2677         case KVM_CAP_VCPU_EVENTS:
2678         case KVM_CAP_HYPERV:
2679         case KVM_CAP_HYPERV_VAPIC:
2680         case KVM_CAP_HYPERV_SPIN:
2681         case KVM_CAP_HYPERV_SYNIC:
2682         case KVM_CAP_HYPERV_SYNIC2:
2683         case KVM_CAP_HYPERV_VP_INDEX:
2684         case KVM_CAP_PCI_SEGMENT:
2685         case KVM_CAP_DEBUGREGS:
2686         case KVM_CAP_X86_ROBUST_SINGLESTEP:
2687         case KVM_CAP_XSAVE:
2688         case KVM_CAP_ASYNC_PF:
2689         case KVM_CAP_GET_TSC_KHZ:
2690         case KVM_CAP_KVMCLOCK_CTRL:
2691         case KVM_CAP_READONLY_MEM:
2692         case KVM_CAP_HYPERV_TIME:
2693         case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2694         case KVM_CAP_TSC_DEADLINE_TIMER:
2695         case KVM_CAP_ENABLE_CAP_VM:
2696         case KVM_CAP_DISABLE_QUIRKS:
2697         case KVM_CAP_SET_BOOT_CPU_ID:
2698         case KVM_CAP_SPLIT_IRQCHIP:
2699         case KVM_CAP_IMMEDIATE_EXIT:
2700                 r = 1;
2701                 break;
2702         case KVM_CAP_ADJUST_CLOCK:
2703                 r = KVM_CLOCK_TSC_STABLE;
2704                 break;
2705         case KVM_CAP_X86_GUEST_MWAIT:
2706                 r = kvm_mwait_in_guest();
2707                 break;
2708         case KVM_CAP_X86_SMM:
2709                 /* SMBASE is usually relocated above 1M on modern chipsets,
2710                  * and SMM handlers might indeed rely on 4G segment limits,
2711                  * so do not report SMM to be available if real mode is
2712                  * emulated via vm86 mode.  Still, do not go to great lengths
2713                  * to avoid userspace's usage of the feature, because it is a
2714                  * fringe case that is not enabled except via specific settings
2715                  * of the module parameters.
2716                  */
2717                 r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
2718                 break;
2719         case KVM_CAP_VAPIC:
2720                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2721                 break;
2722         case KVM_CAP_NR_VCPUS:
2723                 r = KVM_SOFT_MAX_VCPUS;
2724                 break;
2725         case KVM_CAP_MAX_VCPUS:
2726                 r = KVM_MAX_VCPUS;
2727                 break;
2728         case KVM_CAP_NR_MEMSLOTS:
2729                 r = KVM_USER_MEM_SLOTS;
2730                 break;
2731         case KVM_CAP_PV_MMU:    /* obsolete */
2732                 r = 0;
2733                 break;
2734         case KVM_CAP_MCE:
2735                 r = KVM_MAX_MCE_BANKS;
2736                 break;
2737         case KVM_CAP_XCRS:
2738                 r = boot_cpu_has(X86_FEATURE_XSAVE);
2739                 break;
2740         case KVM_CAP_TSC_CONTROL:
2741                 r = kvm_has_tsc_control;
2742                 break;
2743         case KVM_CAP_X2APIC_API:
2744                 r = KVM_X2APIC_API_VALID_FLAGS;
2745                 break;
2746         default:
2747                 r = 0;
2748                 break;
2749         }
2750         return r;
2751
2752 }
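/*
 * Userspace probes the capabilities reported above with the
 * KVM_CHECK_EXTENSION ioctl on the /dev/kvm or VM fd (sketch only):
 *
 *	int r = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);
 *
 * A return of 0 means unsupported; a positive value is either a boolean
 * or a capability-specific constant, e.g. KVM_CLOCK_TSC_STABLE for
 * KVM_CAP_ADJUST_CLOCK as returned above.
 */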
2753
2754 long kvm_arch_dev_ioctl(struct file *filp,
2755                         unsigned int ioctl, unsigned long arg)
2756 {
2757         void __user *argp = (void __user *)arg;
2758         long r;
2759
2760         switch (ioctl) {
2761         case KVM_GET_MSR_INDEX_LIST: {
2762                 struct kvm_msr_list __user *user_msr_list = argp;
2763                 struct kvm_msr_list msr_list;
2764                 unsigned n;
2765
2766                 r = -EFAULT;
2767                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2768                         goto out;
2769                 n = msr_list.nmsrs;
2770                 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
2771                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2772                         goto out;
2773                 r = -E2BIG;
2774                 if (n < msr_list.nmsrs)
2775                         goto out;
2776                 r = -EFAULT;
2777                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2778                                  num_msrs_to_save * sizeof(u32)))
2779                         goto out;
2780                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2781                                  &emulated_msrs,
2782                                  num_emulated_msrs * sizeof(u32)))
2783                         goto out;
2784                 r = 0;
2785                 break;
2786         }
2787         case KVM_GET_SUPPORTED_CPUID:
2788         case KVM_GET_EMULATED_CPUID: {
2789                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2790                 struct kvm_cpuid2 cpuid;
2791
2792                 r = -EFAULT;
2793                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2794                         goto out;
2795
2796                 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
2797                                             ioctl);
2798                 if (r)
2799                         goto out;
2800
2801                 r = -EFAULT;
2802                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2803                         goto out;
2804                 r = 0;
2805                 break;
2806         }
2807         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2808                 r = -EFAULT;
2809                 if (copy_to_user(argp, &kvm_mce_cap_supported,
2810                                  sizeof(kvm_mce_cap_supported)))
2811                         goto out;
2812                 r = 0;
2813                 break;
2814         }
2815         default:
2816                 r = -EINVAL;
2817         }
2818 out:
2819         return r;
2820 }
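/*
 * The KVM_GET_MSR_INDEX_LIST handler above is usually driven in two
 * steps from userspace (sketch): a first call with nmsrs = 0 fails with
 * E2BIG but writes back the required count into struct kvm_msr_list,
 * after which the caller allocates room for that many __u32 indices and
 * calls again to receive the saved plus emulated MSR lists.
 */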
2821
2822 static void wbinvd_ipi(void *garbage)
2823 {
2824         wbinvd();
2825 }
2826
2827 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2828 {
2829         return kvm_arch_has_noncoherent_dma(vcpu->kvm);
2830 }
2831
2832 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2833 {
2834         /* Handle the case where WBINVD may be executed by the guest */
2835         if (need_emulate_wbinvd(vcpu)) {
2836                 if (kvm_x86_ops->has_wbinvd_exit())
2837                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2838                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2839                         smp_call_function_single(vcpu->cpu,
2840                                         wbinvd_ipi, NULL, 1);
2841         }
2842
2843         kvm_x86_ops->vcpu_load(vcpu, cpu);
2844
2845         /* Apply any externally detected TSC adjustments (due to suspend) */
2846         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2847                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2848                 vcpu->arch.tsc_offset_adjustment = 0;
2849                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2850         }
2851
2852         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2853                 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2854                                 rdtsc() - vcpu->arch.last_host_tsc;
2855                 if (tsc_delta < 0)
2856                         mark_tsc_unstable("KVM discovered backwards TSC");
2857
2858                 if (check_tsc_unstable()) {
2859                         u64 offset = kvm_compute_tsc_offset(vcpu,
2860                                                 vcpu->arch.last_guest_tsc);
2861                         kvm_vcpu_write_tsc_offset(vcpu, offset);
2862                         vcpu->arch.tsc_catchup = 1;
2863                 }
2864
2865                 if (kvm_lapic_hv_timer_in_use(vcpu))
2866                         kvm_lapic_restart_hv_timer(vcpu);
2867
2868                 /*
2869                  * On a host with synchronized TSC, there is no need to update
2870                  * kvmclock on vcpu->cpu migration
2871                  */
2872                 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2873                         kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2874                 if (vcpu->cpu != cpu)
2875                         kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
2876                 vcpu->cpu = cpu;
2877         }
2878
2879         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2880 }
2881
2882 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
2883 {
2884         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2885                 return;
2886
2887         vcpu->arch.st.steal.preempted = 1;
2888
2889         kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
2890                         &vcpu->arch.st.steal.preempted,
2891                         offsetof(struct kvm_steal_time, preempted),
2892                         sizeof(vcpu->arch.st.steal.preempted));
2893 }
2894
2895 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2896 {
2897         int idx;
2898
2899         if (vcpu->preempted)
2900                 vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
2901
2902         /*
2903          * Disable page faults because we're in atomic context here.
2904          * kvm_write_guest_offset_cached() would call might_fault()
2905          * that relies on pagefault_disable() to tell if there's a
2906          * bug. NOTE: the write to guest memory may not go through if
2907          * during postcopy live migration or if there's heavy guest
2908          * paging.
2909          */
2910         pagefault_disable();
2911         /*
2912          * kvm_memslots() will be called by
2913          * kvm_write_guest_offset_cached() so take the srcu lock.
2914          */
2915         idx = srcu_read_lock(&vcpu->kvm->srcu);
2916         kvm_steal_time_set_preempted(vcpu);
2917         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2918         pagefault_enable();
2919         kvm_x86_ops->vcpu_put(vcpu);
2920         kvm_put_guest_fpu(vcpu);
2921         vcpu->arch.last_host_tsc = rdtsc();
2922 }
2923
2924 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2925                                     struct kvm_lapic_state *s)
2926 {
2927         if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
2928                 kvm_x86_ops->sync_pir_to_irr(vcpu);
2929
2930         return kvm_apic_get_state(vcpu, s);
2931 }
2932
2933 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2934                                     struct kvm_lapic_state *s)
2935 {
2936         int r;
2937
2938         r = kvm_apic_set_state(vcpu, s);
2939         if (r)
2940                 return r;
2941         update_cr8_intercept(vcpu);
2942
2943         return 0;
2944 }
2945
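/*
 * Can userspace (the device model) deliver an interrupt to this vCPU
 * directly?  True when the LAPIC is emulated in userspace, or when the
 * in-kernel LAPIC would accept an ExtINT from the userspace PIC.
 */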
2946 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
2947 {
2948         return (!lapic_in_kernel(vcpu) ||
2949                 kvm_apic_accept_pic_intr(vcpu));
2950 }
2951
2952 /*
2953  * If userspace requested an interrupt window, check that the
2954  * interrupt window is open.
2955  *
2956  * No need to exit to userspace if we already have an interrupt queued.
2957  */
2958 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
2959 {
2960         return kvm_arch_interrupt_allowed(vcpu) &&
2961                 !kvm_cpu_has_interrupt(vcpu) &&
2962                 !kvm_event_needs_reinjection(vcpu) &&
2963                 kvm_cpu_accept_dm_intr(vcpu);
2964 }
2965
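/*
 * KVM_INTERRUPT: with no in-kernel irqchip the vector is queued
 * directly; with an in-kernel LAPIC but a userspace PIC the vector is
 * latched as a pending ExtINT; with a fully in-kernel irqchip (8259
 * emulated in the kernel) the ioctl is rejected.
 */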
2966 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2967                                     struct kvm_interrupt *irq)
2968 {
2969         if (irq->irq >= KVM_NR_INTERRUPTS)
2970                 return -EINVAL;
2971
2972         if (!irqchip_in_kernel(vcpu->kvm)) {
2973                 kvm_queue_interrupt(vcpu, irq->irq, false);
2974                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2975                 return 0;
2976         }
2977
2978         /*
2979          * With in-kernel LAPIC, we only use this to inject EXTINT, so
2980          * fail for in-kernel 8259.
2981          */
2982         if (pic_in_kernel(vcpu->kvm))
2983                 return -ENXIO;
2984
2985         if (vcpu->arch.pending_external_vector != -1)
2986                 return -EEXIST;
2987
2988         vcpu->arch.pending_external_vector = irq->irq;
2989         kvm_make_request(KVM_REQ_EVENT, vcpu);
2990         return 0;
2991 }
2992
2993 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2994 {
2995         kvm_inject_nmi(vcpu);
2996
2997         return 0;
2998 }
2999
3000 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
3001 {
3002         kvm_make_request(KVM_REQ_SMI, vcpu);
3003
3004         return 0;
3005 }
3006
3007 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
3008                                            struct kvm_tpr_access_ctl *tac)
3009 {
3010         if (tac->flags)
3011                 return -EINVAL;
3012         vcpu->arch.tpr_access_reporting = !!tac->enabled;
3013         return 0;
3014 }
3015
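/*
 * KVM_X86_SETUP_MCE: validate the requested MCE capability (bank count
 * and supported MCG_CAP bits), then start with IA32_MCG_CTL and every
 * IA32_MCi_CTL set to all 1s, i.e. all error reporting enabled.
 */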
3016 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
3017                                         u64 mcg_cap)
3018 {
3019         int r;
3020         unsigned bank_num = mcg_cap & 0xff, bank;
3021
3022         r = -EINVAL;
3023         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
3024                 goto out;
3025         if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
3026                 goto out;
3027         r = 0;
3028         vcpu->arch.mcg_cap = mcg_cap;
3029         /* Init IA32_MCG_CTL to all 1s */
3030         if (mcg_cap & MCG_CTL_P)
3031                 vcpu->arch.mcg_ctl = ~(u64)0;
3032         /* Init IA32_MCi_CTL to all 1s */
3033         for (bank = 0; bank < bank_num; bank++)
3034                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
3035
3036         if (kvm_x86_ops->setup_mce)
3037                 kvm_x86_ops->setup_mce(vcpu);
3038 out:
3039         return r;
3040 }
3041
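/*
 * KVM_X86_SET_MCE: inject a machine check supplied by userspace.
 * Uncorrected errors raise #MC, or a triple fault if a machine check
 * is already in progress (MCG_STATUS.MCIP) or CR4.MCE is clear;
 * corrected errors are only logged in the bank registers.
 */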
3042 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
3043                                       struct kvm_x86_mce *mce)
3044 {
3045         u64 mcg_cap = vcpu->arch.mcg_cap;
3046         unsigned bank_num = mcg_cap & 0xff;
3047         u64 *banks = vcpu->arch.mce_banks;
3048
3049         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
3050                 return -EINVAL;
3051         /*
3052          * If IA32_MCG_CTL is not all 1s, uncorrected error
3053          * reporting is disabled.
3054          */
3055         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
3056             vcpu->arch.mcg_ctl != ~(u64)0)
3057                 return 0;
3058         banks += 4 * mce->bank;
3059         /*
3060          * If IA32_MCi_CTL is not all 1s, uncorrected error
3061          * reporting is disabled for the bank.
3062          */
3063         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
3064                 return 0;
3065         if (mce->status & MCI_STATUS_UC) {
3066                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3067                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3068                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3069                         return 0;
3070                 }
3071                 if (banks[1] & MCI_STATUS_VAL)
3072                         mce->status |= MCI_STATUS_OVER;
3073                 banks[2] = mce->addr;
3074                 banks[3] = mce->misc;
3075                 vcpu->arch.mcg_status = mce->mcg_status;
3076                 banks[1] = mce->status;
3077                 kvm_queue_exception(vcpu, MC_VECTOR);
3078         } else if (!(banks[1] & MCI_STATUS_VAL)
3079                    || !(banks[1] & MCI_STATUS_UC)) {
3080                 if (banks[1] & MCI_STATUS_VAL)
3081                         mce->status |= MCI_STATUS_OVER;
3082                 banks[2] = mce->addr;
3083                 banks[3] = mce->misc;
3084                 banks[1] = mce->status;
3085         } else
3086                 banks[1] |= MCI_STATUS_OVER;
3087         return 0;
3088 }
3089
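/*
 * KVM_GET_VCPU_EVENTS: snapshot the pending exception, interrupt, NMI
 * and SMM state for userspace (e.g. for live migration), after first
 * collapsing any queued NMIs via process_nmi().
 */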
3090 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3091                                                struct kvm_vcpu_events *events)
3092 {
3093         process_nmi(vcpu);
3094         /*
3095          * FIXME: pass injected and pending separately.  This is only
3096          * needed for nested virtualization, whose state cannot be
3097          * migrated yet.  For now we can combine them.
3098          */
3099         events->exception.injected =
3100                 (vcpu->arch.exception.pending ||
3101                  vcpu->arch.exception.injected) &&
3102                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
3103         events->exception.nr = vcpu->arch.exception.nr;
3104         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3105         events->exception.pad = 0;
3106         events->exception.error_code = vcpu->arch.exception.error_code;
3107
3108         events->interrupt.injected =
3109                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
3110         events->interrupt.nr = vcpu->arch.interrupt.nr;
3111         events->interrupt.soft = 0;
3112         events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
3113
3114         events->nmi.injected = vcpu->arch.nmi_injected;
3115         events->nmi.pending = vcpu->arch.nmi_pending != 0;
3116         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3117         events->nmi.pad = 0;
3118
3119         events->sipi_vector = 0; /* never valid when reporting to user space */
3120
3121         events->smi.smm = is_smm(vcpu);
3122         events->smi.pending = vcpu->arch.smi_pending;
3123         events->smi.smm_inside_nmi =
3124                 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
3125         events->smi.latched_init = kvm_lapic_latched_init(vcpu);
3126
3127         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3128                          | KVM_VCPUEVENT_VALID_SHADOW
3129                          | KVM_VCPUEVENT_VALID_SMM);
3130         memset(&events->reserved, 0, sizeof(events->reserved));
3131 }
3132
3133 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
3134
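/*
 * KVM_SET_VCPU_EVENTS: restore event state saved by the getter above.
 * Rejects unknown flags and impossible combinations (vector > 31, NMI
 * as an exception, an exception injected while in guest mode, or
 * SMM/pending SMI while the vCPU is waiting for SIPI after an INIT),
 * then rebuilds the exception/interrupt/NMI state and the SMM hflags.
 */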
3135 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3136                                               struct kvm_vcpu_events *events)
3137 {
3138         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3139                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3140                               | KVM_VCPUEVENT_VALID_SHADOW
3141                               | KVM_VCPUEVENT_VALID_SMM))
3142                 return -EINVAL;
3143
3144         if (events->exception.injected &&
3145             (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
3146              is_guest_mode(vcpu)))
3147                 return -EINVAL;
3148
3149         /* INITs are latched while in SMM */
3150         if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
3151             (events->smi.smm || events->smi.pending) &&
3152             vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3153                 return -EINVAL;
3154
3155         process_nmi(vcpu);
3156         vcpu->arch.exception.injected = false;
3157         vcpu->arch.exception.pending = events->exception.injected;
3158         vcpu->arch.exception.nr = events->exception.nr;
3159         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
3160         vcpu->arch.exception.error_code = events->exception.error_code;
3161
3162         vcpu->arch.interrupt.pending = events->interrupt.injected;
3163         vcpu->arch.interrupt.nr = events->interrupt.nr;
3164         vcpu->arch.interrupt.soft = events->interrupt.soft;
3165         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
3166                 kvm_x86_ops->set_interrupt_shadow(vcpu,
3167                                                   events->interrupt.shadow);
3168
3169         vcpu->arch.nmi_injected = events->nmi.injected;
3170         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
3171                 vcpu->arch.nmi_pending = events->nmi.pending;
3172         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
3173
3174         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3175             lapic_in_kernel(vcpu))
3176                 vcpu->arch.apic->sipi_vector = events->sipi_vector;
3177
3178         if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3179                 u32 hflags = vcpu->arch.hflags;
3180                 if (events->smi.smm)
3181                         hflags |= HF_SMM_MASK;
3182                 else
3183                         hflags &= ~HF_SMM_MASK;
3184                 kvm_set_hflags(vcpu, hflags);
3185
3186                 vcpu->arch.smi_pending = events->smi.pending;
3187
3188                 if (events->smi.smm) {
3189                         if (events->smi.smm_inside_nmi)
3190                                 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
3191                         else
3192                                 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
3193                         if (lapic_in_kernel(vcpu)) {
3194                                 if (events->smi.latched_init)
3195                                         set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3196                                 else
3197                                         clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
3198                         }
3199                 }
3200         }
3201
3202         kvm_make_request(KVM_REQ_EVENT, vcpu);
3203
3204         return 0;
3205 }
3206
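/*
 * KVM_GET_DEBUGREGS / KVM_SET_DEBUGREGS: transfer DR0-DR3, DR6 and DR7.
 * The setter rejects values with bits set above bit 31, since the upper
 * halves of DR6 and DR7 are reserved.
 */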
3207 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
3208                                              struct kvm_debugregs *dbgregs)
3209 {
3210         unsigned long val;
3211
3212         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3213         kvm_get_dr(vcpu, 6, &val);
3214         dbgregs->dr6 = val;
3215         dbgregs->dr7 = vcpu->arch.dr7;
3216         dbgregs->flags = 0;
3217         memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3218 }
3219
3220 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
3221                                             struct kvm_debugregs *dbgregs)
3222 {
3223         if (dbgregs->flags)
3224                 return -EINVAL;
3225
3226         if (dbgregs->dr6 & ~0xffffffffull)
3227                 return -EINVAL;
3228         if (dbgregs->dr7 & ~0xffffffffull)
3229                 return -EINVAL;
3230
3231         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3232         kvm_update_dr0123(vcpu);
3233         vcpu->arch.dr6 = dbgregs->dr6;
3234         kvm_update_dr6(vcpu);
3235         vcpu->arch.dr7 = dbgregs->dr7;
3236         kvm_update_dr7(vcpu);
3237
3238         return 0;
3239 }
3240
3241 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
3242
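/*
 * fill_xsave() converts the guest FPU state from the kernel's xsave
 * image (which may use the compacted XSAVES layout) into the
 * non-compacted layout that the KVM_GET_XSAVE ABI expects, using the
 * per-feature offsets reported by CPUID leaf 0xD.  PKRU is taken from
 * vcpu->arch.pkru rather than from the xsave buffer.
 */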
3243 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3244 {
3245         struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3246         u64 xstate_bv = xsave->header.xfeatures;
3247         u64 valid;
3248
3249         /*
3250          * Copy legacy XSAVE area, to avoid complications with CPUID
3251          * leaves 0 and 1 in the loop below.
3252          */
3253         memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3254
3255         /* Set XSTATE_BV */
3256         xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
3257         *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3258
3259         /*
3260          * Copy each region from the possibly compacted offset to the
3261          * non-compacted offset.
3262          */
3263         valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3264         while (valid) {
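                /*
                 * Each pass isolates the lowest set feature bit and its
                 * index, e.g. valid = 0x240 (bits 6 and 9) yields
                 * feature = 0x40 and index = 6 on the first iteration.
                 */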
3265                 u64 feature = valid & -valid;
3266                 int index = fls64(feature) - 1;
3267                 void *src = get_xsave_addr(xsave, feature);
3268
3269                 if (src) {
3270                         u32 size, offset, ecx, edx;
3271                         cpuid_count(XSTATE_CPUID, index,
3272                                     &size, &offset, &ecx, &edx);
3273                         if (feature == XFEATURE_MASK_PKRU)
3274                                 memcpy(dest + offset, &vcpu->arch.pkru,
3275                                        sizeof(vcpu->arch.pkru));
3276                         else
3277                                 memcpy(dest + offset, src, size);
3278
3279                 }
3280
3281                 valid -= feature;
3282         }
3283 }
3284
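/*
 * load_xsave() is the inverse of fill_xsave(): it copies state supplied
 * by userspace in the non-compacted KVM_SET_XSAVE layout back into the
 * kernel's (possibly compacted) xsave image, again walking the feature
 * bits via the CPUID leaf 0xD offsets.
 */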
3285 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3286 {
3287         struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3288         u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
3289         u64 valid;
3290
3291         /*
3292          * Copy legacy XSAVE area, to avoid complications with CPUID
3293          * leaves 0 and 1 in the loop below.
3294          */
3295         memcpy(xsave, src, XSAVE_HDR_OFFSET);
3296
3297         /* Set XSTATE_BV and possibly XCOMP_BV.  */
3298         xsave->header.xfeatures = xstate_bv;
3299         if (boot_cpu_has(X86_FEATURE_XSAVES))
3300                 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
3301
3302         /*
3303          * Copy each region from the non-compacted offset to the
3304          * possibly compacted offset.
3305          */
3306         valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3307         while (valid) {
3308                 u64 feature = valid & -valid;
3309                 int index = fls64(feature) - 1;
3310                 void *dest = get_xsave_addr(xsave, feature);
3311
3312                 if (dest) {
3313                         u32 size, offset, ecx, edx;
3314                         cpuid_count(XSTATE_CPUID, index,
3315                                     &size, &offset, &ecx, &edx);
3316                         if (feature == XFEATURE_MASK_PKRU)
3317                                 memcpy(&vcpu->arch.pkru, src + offset,
3318                                        sizeof(vcpu->arch.pkru));
3319                         else
3320                                 memcpy(dest, src + offset, size);
3321                 }