arch/x86/kvm/svm/svm.c
1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3 #include <linux/kvm_host.h>
4
5 #include "irq.h"
6 #include "mmu.h"
7 #include "kvm_cache_regs.h"
8 #include "x86.h"
9 #include "smm.h"
10 #include "cpuid.h"
11 #include "pmu.h"
12
13 #include <linux/module.h>
14 #include <linux/mod_devicetable.h>
15 #include <linux/kernel.h>
16 #include <linux/vmalloc.h>
17 #include <linux/highmem.h>
18 #include <linux/amd-iommu.h>
19 #include <linux/sched.h>
20 #include <linux/trace_events.h>
21 #include <linux/slab.h>
22 #include <linux/hashtable.h>
23 #include <linux/objtool.h>
24 #include <linux/psp-sev.h>
25 #include <linux/file.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/rwsem.h>
29 #include <linux/cc_platform.h>
30 #include <linux/smp.h>
31
32 #include <asm/apic.h>
33 #include <asm/perf_event.h>
34 #include <asm/tlbflush.h>
35 #include <asm/desc.h>
36 #include <asm/debugreg.h>
37 #include <asm/kvm_para.h>
38 #include <asm/irq_remapping.h>
39 #include <asm/spec-ctrl.h>
40 #include <asm/cpu_device_id.h>
41 #include <asm/traps.h>
42 #include <asm/reboot.h>
43 #include <asm/fpu/api.h>
44
45 #include <trace/events/ipi.h>
46
47 #include "trace.h"
48
49 #include "svm.h"
50 #include "svm_ops.h"
51
52 #include "kvm_onhyperv.h"
53 #include "svm_onhyperv.h"
54
55 MODULE_AUTHOR("Qumranet");
56 MODULE_LICENSE("GPL");
57
58 #ifdef MODULE
59 static const struct x86_cpu_id svm_cpu_id[] = {
60         X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
61         {}
62 };
63 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
64 #endif
65
66 #define SEG_TYPE_LDT 2
67 #define SEG_TYPE_BUSY_TSS16 3
68
69 static bool erratum_383_found __read_mostly;
70
71 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
72
73 /*
74  * Set osvw_len to a higher value when updated Revision Guides
75  * are published and we know what the new status bits are.
76  */
77 static uint64_t osvw_len = 4, osvw_status;
78
79 static DEFINE_PER_CPU(u64, current_tsc_ratio);
80
81 #define X2APIC_MSR(x)   (APIC_BASE_MSR + (x >> 4))
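/*
 * For example, APIC_ID lives at offset 0x20 in the xAPIC register space, so
 * X2APIC_MSR(APIC_ID) resolves to 0x800 + (0x20 >> 4) = 0x802, the
 * architectural x2APIC ID MSR (each 16-byte xAPIC register maps to one MSR).
 */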
82
83 static const struct svm_direct_access_msrs {
84         u32 index;   /* Index of the MSR */
85         bool always; /* True if intercept is initially cleared */
86 } direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
87         { .index = MSR_STAR,                            .always = true  },
88         { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
89         { .index = MSR_IA32_SYSENTER_EIP,               .always = false },
90         { .index = MSR_IA32_SYSENTER_ESP,               .always = false },
91 #ifdef CONFIG_X86_64
92         { .index = MSR_GS_BASE,                         .always = true  },
93         { .index = MSR_FS_BASE,                         .always = true  },
94         { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
95         { .index = MSR_LSTAR,                           .always = true  },
96         { .index = MSR_CSTAR,                           .always = true  },
97         { .index = MSR_SYSCALL_MASK,                    .always = true  },
98 #endif
99         { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
100         { .index = MSR_IA32_PRED_CMD,                   .always = false },
101         { .index = MSR_IA32_FLUSH_CMD,                  .always = false },
102         { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
103         { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
104         { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
105         { .index = MSR_IA32_LASTINTTOIP,                .always = false },
106         { .index = MSR_IA32_XSS,                        .always = false },
107         { .index = MSR_EFER,                            .always = false },
108         { .index = MSR_IA32_CR_PAT,                     .always = false },
109         { .index = MSR_AMD64_SEV_ES_GHCB,               .always = true  },
110         { .index = MSR_TSC_AUX,                         .always = false },
111         { .index = X2APIC_MSR(APIC_ID),                 .always = false },
112         { .index = X2APIC_MSR(APIC_LVR),                .always = false },
113         { .index = X2APIC_MSR(APIC_TASKPRI),            .always = false },
114         { .index = X2APIC_MSR(APIC_ARBPRI),             .always = false },
115         { .index = X2APIC_MSR(APIC_PROCPRI),            .always = false },
116         { .index = X2APIC_MSR(APIC_EOI),                .always = false },
117         { .index = X2APIC_MSR(APIC_RRR),                .always = false },
118         { .index = X2APIC_MSR(APIC_LDR),                .always = false },
119         { .index = X2APIC_MSR(APIC_DFR),                .always = false },
120         { .index = X2APIC_MSR(APIC_SPIV),               .always = false },
121         { .index = X2APIC_MSR(APIC_ISR),                .always = false },
122         { .index = X2APIC_MSR(APIC_TMR),                .always = false },
123         { .index = X2APIC_MSR(APIC_IRR),                .always = false },
124         { .index = X2APIC_MSR(APIC_ESR),                .always = false },
125         { .index = X2APIC_MSR(APIC_ICR),                .always = false },
126         { .index = X2APIC_MSR(APIC_ICR2),               .always = false },
127
128         /*
129          * Note:
130          * AMD does not virtualize the APIC TSC-deadline timer mode, but it is
131          * emulated by KVM. Setting bit 18 of the APIC LVTT (0x832) register
132          * causes the AVIC hardware to generate a #GP fault. Therefore, always
133          * intercept MSR 0x832 and do not set up a direct_access_msrs entry.
134          */
135         { .index = X2APIC_MSR(APIC_LVTTHMR),            .always = false },
136         { .index = X2APIC_MSR(APIC_LVTPC),              .always = false },
137         { .index = X2APIC_MSR(APIC_LVT0),               .always = false },
138         { .index = X2APIC_MSR(APIC_LVT1),               .always = false },
139         { .index = X2APIC_MSR(APIC_LVTERR),             .always = false },
140         { .index = X2APIC_MSR(APIC_TMICT),              .always = false },
141         { .index = X2APIC_MSR(APIC_TMCCT),              .always = false },
142         { .index = X2APIC_MSR(APIC_TDCR),               .always = false },
143         { .index = MSR_INVALID,                         .always = false },
144 };
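/*
 * MSRs with .always == true have both their read and write intercepts
 * cleared as soon as the per-vCPU MSR permission map is initialized (see
 * svm_vcpu_init_msrpm() below); all other entries start out intercepted and
 * are only opened up when the relevant feature is enabled for the guest.
 */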
145
146 /*
147  * These two parameters configure the controls for Pause-Loop Exiting:
148  * pause_filter_count: On processors that support Pause filtering (indicated
149  *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
150  *      count value. On VMRUN this value is loaded into an internal counter.
151  *      Each time a pause instruction is executed, this counter is decremented
152  *      until it reaches zero, at which time a #VMEXIT is generated if pause
153  *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
154  *      Intercept Filtering for more details.
155  *      This also indicates whether the PLE logic is enabled.
156  *
157  * pause_filter_thresh: In addition, some processor families support advanced
158  *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
159  *      upper bound on the amount of time a guest is allowed to execute in a
160  *      pause loop. In this mode, a 16-bit pause filter threshold field is
161  *      added to the VMCB. The threshold value is a cycle count that is used
162  *      to reset the pause counter. As with simple pause filtering, VMRUN
163  *      loads the pause count value from the VMCB into an internal counter.
164  *      Then, on each pause instruction the hardware checks the elapsed number
165  *      of cycles since the most recent pause instruction against the pause
166  *      filter threshold. If the elapsed cycle count is greater than the pause
167  *      filter threshold, the internal pause count is reloaded from the VMCB
168  *      and execution continues. If the elapsed cycle count is less than the
169  *      pause filter threshold, the internal pause count is decremented. If
170  *      the count value is less than zero and PAUSE intercept is enabled, a
171  *      #VMEXIT is triggered. If advanced pause filtering is supported and the
172  *      pause filter threshold field is set to zero, the filter operates in
173  *      the simpler, count-only mode.
174  */
175
176 static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
177 module_param(pause_filter_thresh, ushort, 0444);
178
179 static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
180 module_param(pause_filter_count, ushort, 0444);
181
182 /* Default doubles per-vcpu window every exit. */
183 static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
184 module_param(pause_filter_count_grow, ushort, 0444);
185
186 /* Default resets per-vcpu window every exit to pause_filter_count. */
187 static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
188 module_param(pause_filter_count_shrink, ushort, 0444);
189
190 /* Default is to compute the maximum so we can never overflow. */
191 static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
192 module_param(pause_filter_count_max, ushort, 0444);
193
194 /*
195  * Use nested page tables by default.  Note, NPT may get forced off by
196  * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
197  */
198 bool npt_enabled = true;
199 module_param_named(npt, npt_enabled, bool, 0444);
200
201 /* allow nested virtualization in KVM/SVM */
202 static int nested = true;
203 module_param(nested, int, 0444);
204
205 /* enable/disable Next RIP Save */
206 int nrips = true;
207 module_param(nrips, int, 0444);
208
209 /* enable/disable Virtual VMLOAD VMSAVE */
210 static int vls = true;
211 module_param(vls, int, 0444);
212
213 /* enable/disable Virtual GIF */
214 int vgif = true;
215 module_param(vgif, int, 0444);
216
217 /* enable/disable LBR virtualization */
218 static int lbrv = true;
219 module_param(lbrv, int, 0444);
220
221 static int tsc_scaling = true;
222 module_param(tsc_scaling, int, 0444);
223
224 /*
225  * Enable / disable AVIC.  Because the defaults for APICv support
226  * differ between VMX and SVM, we cannot use module_param_named.
227  */
228 static bool avic;
229 module_param(avic, bool, 0444);
230
231 bool __read_mostly dump_invalid_vmcb;
232 module_param(dump_invalid_vmcb, bool, 0644);
233
234
235 bool intercept_smi = true;
236 module_param(intercept_smi, bool, 0444);
237
238 bool vnmi = true;
239 module_param(vnmi, bool, 0444);
240
241 static bool svm_gp_erratum_intercept = true;
242
243 static u8 rsm_ins_bytes[] = "\x0f\xaa";
244
245 static unsigned long iopm_base;
246
247 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
248
249 /*
250  * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
251  * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
252  *
253  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
254  * defer the restoration of TSC_AUX until the CPU returns to userspace.
255  */
256 static int tsc_aux_uret_slot __read_mostly = -1;
257
258 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
259
260 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
261 #define MSRS_RANGE_SIZE 2048
262 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
263
264 u32 svm_msrpm_offset(u32 msr)
265 {
266         u32 offset;
267         int i;
268
269         for (i = 0; i < NUM_MSR_MAPS; i++) {
270                 if (msr < msrpm_ranges[i] ||
271                     msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
272                         continue;
273
274                 offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
275                 offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
276
277                 /* Now we have the u8 offset - but need the u32 offset */
278                 return offset / 4;
279         }
280
281         /* MSR not in any range */
282         return MSR_INVALID;
283 }
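/*
 * Worked example: MSR_STAR (0xc0000081) falls in the second range, whose
 * permission bits start at byte 2048 of the MSRPM:
 *
 *   svm_msrpm_offset(MSR_STAR)
 *     = ((0xc0000081 - 0xc0000000) / 4 + 1 * 2048) / 4
 *     = (0x20 + 2048) / 4
 *     = 520   (the u32 word holding STAR's read/write permission bits)
 */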
284
285 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);
286
287 static int get_npt_level(void)
288 {
289 #ifdef CONFIG_X86_64
290         return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
291 #else
292         return PT32E_ROOT_LEVEL;
293 #endif
294 }
295
296 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
297 {
298         struct vcpu_svm *svm = to_svm(vcpu);
299         u64 old_efer = vcpu->arch.efer;
300         vcpu->arch.efer = efer;
301
302         if (!npt_enabled) {
303                 /* Shadow paging assumes NX to be available.  */
304                 efer |= EFER_NX;
305
306                 if (!(efer & EFER_LMA))
307                         efer &= ~EFER_LME;
308         }
309
310         if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
311                 if (!(efer & EFER_SVME)) {
312                         svm_leave_nested(vcpu);
313                         svm_set_gif(svm, true);
314                         /* #GP intercept is still needed for vmware backdoor */
315                         if (!enable_vmware_backdoor)
316                                 clr_exception_intercept(svm, GP_VECTOR);
317
318                         /*
319                          * Free the nested guest state, unless we are in SMM.
320                          * In this case we will return to the nested guest
321                          * as soon as we leave SMM.
322                          */
323                         if (!is_smm(vcpu))
324                                 svm_free_nested(svm);
325
326                 } else {
327                         int ret = svm_allocate_nested(svm);
328
329                         if (ret) {
330                                 vcpu->arch.efer = old_efer;
331                                 return ret;
332                         }
333
334                         /*
335                          * Never intercept #GP for SEV guests, KVM can't
336                          * decrypt guest memory to workaround the erratum.
337                          */
338                         if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
339                                 set_exception_intercept(svm, GP_VECTOR);
340                 }
341         }
342
343         svm->vmcb->save.efer = efer | EFER_SVME;
344         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
345         return 0;
346 }
347
348 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
349 {
350         struct vcpu_svm *svm = to_svm(vcpu);
351         u32 ret = 0;
352
353         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
354                 ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
355         return ret;
356 }
357
358 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
359 {
360         struct vcpu_svm *svm = to_svm(vcpu);
361
362         if (mask == 0)
363                 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
364         else
365                 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
366
367 }
368
369 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
370                                            bool commit_side_effects)
371 {
372         struct vcpu_svm *svm = to_svm(vcpu);
373         unsigned long old_rflags;
374
375         /*
376          * SEV-ES does not expose the next RIP. The RIP update is controlled by
377          * the type of exit and the #VC handler in the guest.
378          */
379         if (sev_es_guest(vcpu->kvm))
380                 goto done;
381
382         if (nrips && svm->vmcb->control.next_rip != 0) {
383                 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
384                 svm->next_rip = svm->vmcb->control.next_rip;
385         }
386
387         if (!svm->next_rip) {
388                 if (unlikely(!commit_side_effects))
389                         old_rflags = svm->vmcb->save.rflags;
390
391                 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
392                         return 0;
393
394                 if (unlikely(!commit_side_effects))
395                         svm->vmcb->save.rflags = old_rflags;
396         } else {
397                 kvm_rip_write(vcpu, svm->next_rip);
398         }
399
400 done:
401         if (likely(commit_side_effects))
402                 svm_set_interrupt_shadow(vcpu, 0);
403
404         return 1;
405 }
406
407 static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
408 {
409         return __svm_skip_emulated_instruction(vcpu, true);
410 }
411
412 static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
413 {
414         unsigned long rip, old_rip = kvm_rip_read(vcpu);
415         struct vcpu_svm *svm = to_svm(vcpu);
416
417         /*
418          * Due to architectural shortcomings, the CPU doesn't always provide
419          * NextRIP, e.g. if KVM intercepted an exception that occurred while
420          * the CPU was vectoring an INTO/INT3 in the guest.  Temporarily skip
421          * the instruction even if NextRIP is supported to acquire the next
422          * RIP so that it can be shoved into the NextRIP field, otherwise
423          * hardware will fail to advance guest RIP during event injection.
424          * Drop the exception/interrupt if emulation fails and effectively
425          * retry the instruction, it's the least awful option.  If NRIPS is
426          * in use, the skip must not commit any side effects such as clearing
427          * the interrupt shadow or RFLAGS.RF.
428          */
429         if (!__svm_skip_emulated_instruction(vcpu, !nrips))
430                 return -EIO;
431
432         rip = kvm_rip_read(vcpu);
433
434         /*
435          * Save the injection information, even when using next_rip, as the
436          * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
437          * doesn't complete due to a VM-Exit occurring while the CPU is
438          * vectoring the event.   Decoding the instruction isn't guaranteed to
439          * work as there may be no backing instruction, e.g. if the event is
440          * being injected by L1 for L2, or if the guest is patching INT3 into
441          * a different instruction.
442          */
443         svm->soft_int_injected = true;
444         svm->soft_int_csbase = svm->vmcb->save.cs.base;
445         svm->soft_int_old_rip = old_rip;
446         svm->soft_int_next_rip = rip;
447
448         if (nrips)
449                 kvm_rip_write(vcpu, old_rip);
450
451         if (static_cpu_has(X86_FEATURE_NRIPS))
452                 svm->vmcb->control.next_rip = rip;
453
454         return 0;
455 }
456
457 static void svm_inject_exception(struct kvm_vcpu *vcpu)
458 {
459         struct kvm_queued_exception *ex = &vcpu->arch.exception;
460         struct vcpu_svm *svm = to_svm(vcpu);
461
462         kvm_deliver_exception_payload(vcpu, ex);
463
464         if (kvm_exception_is_soft(ex->vector) &&
465             svm_update_soft_interrupt_rip(vcpu))
466                 return;
467
468         svm->vmcb->control.event_inj = ex->vector
469                 | SVM_EVTINJ_VALID
470                 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
471                 | SVM_EVTINJ_TYPE_EXEPT;
472         svm->vmcb->control.event_inj_err = ex->error_code;
473 }
474
475 static void svm_init_erratum_383(void)
476 {
477         u32 low, high;
478         int err;
479         u64 val;
480
481         if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
482                 return;
483
484         /* Use _safe variants to not break nested virtualization */
485         val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
486         if (err)
487                 return;
488
489         val |= (1ULL << 47);
490
491         low  = lower_32_bits(val);
492         high = upper_32_bits(val);
493
494         native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
495
496         erratum_383_found = true;
497 }
498
499 static void svm_init_osvw(struct kvm_vcpu *vcpu)
500 {
501         /*
502          * Guests should see errata 400 and 415 as fixed (assuming that
503          * HLT and IO instructions are intercepted).
504          */
505         vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
506         vcpu->arch.osvw.status = osvw_status & ~(6ULL);
507
508         /*
509          * By increasing VCPU's osvw.length to 3 we are telling the guest that
510          * all osvw.status bits inside that length, including bit 0 (which is
511          * reserved for erratum 298), are valid. However, if host processor's
512          * osvw_len is 0 then osvw_status[0] carries no information. We need to
513          * be conservative here and therefore we tell the guest that erratum 298
514          * is present (because we really don't know).
515          */
516         if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
517                 vcpu->arch.osvw.status |= 1;
518 }
519
520 static bool __kvm_is_svm_supported(void)
521 {
522         int cpu = smp_processor_id();
523         struct cpuinfo_x86 *c = &cpu_data(cpu);
524
525         if (c->x86_vendor != X86_VENDOR_AMD &&
526             c->x86_vendor != X86_VENDOR_HYGON) {
527                 pr_err("CPU %d isn't AMD or Hygon\n", cpu);
528                 return false;
529         }
530
531         if (!cpu_has(c, X86_FEATURE_SVM)) {
532                 pr_err("SVM not supported by CPU %d\n", cpu);
533                 return false;
534         }
535
536         if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
537                 pr_info("KVM is unsupported when running as an SEV guest\n");
538                 return false;
539         }
540
541         return true;
542 }
543
544 static bool kvm_is_svm_supported(void)
545 {
546         bool supported;
547
548         migrate_disable();
549         supported = __kvm_is_svm_supported();
550         migrate_enable();
551
552         return supported;
553 }
554
555 static int svm_check_processor_compat(void)
556 {
557         if (!__kvm_is_svm_supported())
558                 return -EIO;
559
560         return 0;
561 }
562
563 static void __svm_write_tsc_multiplier(u64 multiplier)
564 {
565         if (multiplier == __this_cpu_read(current_tsc_ratio))
566                 return;
567
568         wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
569         __this_cpu_write(current_tsc_ratio, multiplier);
570 }
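/*
 * MSR_AMD64_TSC_RATIO is a fixed-point value with 8 integer bits (39:32) and
 * 32 fractional bits (31:0).  SVM_TSC_RATIO_DEFAULT therefore encodes a
 * ratio of 1.0; as an example, presenting the guest a TSC running at 1.5x
 * the host frequency would use a multiplier of 0x0000000180000000.
 */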
571
572 static inline void kvm_cpu_svm_disable(void)
573 {
574         uint64_t efer;
575
576         wrmsrl(MSR_VM_HSAVE_PA, 0);
577         rdmsrl(MSR_EFER, efer);
578         if (efer & EFER_SVME) {
579                 /*
580                  * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
581                  * NMI aren't blocked.
582                  */
583                 stgi();
584                 wrmsrl(MSR_EFER, efer & ~EFER_SVME);
585         }
586 }
587
588 static void svm_emergency_disable(void)
589 {
590         kvm_rebooting = true;
591
592         kvm_cpu_svm_disable();
593 }
594
595 static void svm_hardware_disable(void)
596 {
597         /* Make sure we clean up behind us */
598         if (tsc_scaling)
599                 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
600
601         kvm_cpu_svm_disable();
602
603         amd_pmu_disable_virt();
604 }
605
606 static int svm_hardware_enable(void)
607 {
608
609         struct svm_cpu_data *sd;
610         uint64_t efer;
611         int me = raw_smp_processor_id();
612
613         rdmsrl(MSR_EFER, efer);
614         if (efer & EFER_SVME)
615                 return -EBUSY;
616
617         sd = per_cpu_ptr(&svm_data, me);
618         sd->asid_generation = 1;
619         sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
620         sd->next_asid = sd->max_asid + 1;
621         sd->min_asid = max_sev_asid + 1;
622
623         wrmsrl(MSR_EFER, efer | EFER_SVME);
624
625         wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
626
627         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
628                 /*
629                  * Set the default value, even if we don't use TSC scaling,
630                  * to avoid having a stale value in the MSR.
631                  */
632                 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
633         }
634
635
636         /*
637          * Get OSVW bits.
638          *
639          * Note that it is possible to have a system with mixed processor
640          * revisions and therefore different OSVW bits. If bits are not the same
641          * on different processors then choose the worst case (i.e. if erratum
642          * is present on one processor and not on another then assume that the
643          * erratum is present everywhere).
644          */
645         if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
646                 uint64_t len, status = 0;
647                 int err;
648
649                 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
650                 if (!err)
651                         status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
652                                                       &err);
653
654                 if (err)
655                         osvw_status = osvw_len = 0;
656                 else {
657                         if (len < osvw_len)
658                                 osvw_len = len;
659                         osvw_status |= status;
660                         osvw_status &= (1ULL << osvw_len) - 1;
661                 }
662         } else
663                 osvw_status = osvw_len = 0;
664
665         svm_init_erratum_383();
666
667         amd_pmu_enable_virt();
668
669         /*
670          * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
671          * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
672          * Since Linux does not change the value of TSC_AUX once set, prime the
673          * TSC_AUX field now to avoid a RDMSR on every vCPU run.
674          */
675         if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
676                 struct sev_es_save_area *hostsa;
677                 u32 __maybe_unused msr_hi;
678
679                 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
680
681                 rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
682         }
683
684         return 0;
685 }
686
687 static void svm_cpu_uninit(int cpu)
688 {
689         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
690
691         if (!sd->save_area)
692                 return;
693
694         kfree(sd->sev_vmcbs);
695         __free_page(sd->save_area);
696         sd->save_area_pa = 0;
697         sd->save_area = NULL;
698 }
699
700 static int svm_cpu_init(int cpu)
701 {
702         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
703         int ret = -ENOMEM;
704
705         memset(sd, 0, sizeof(struct svm_cpu_data));
706         sd->save_area = snp_safe_alloc_page(NULL);
707         if (!sd->save_area)
708                 return ret;
709
710         ret = sev_cpu_init(sd);
711         if (ret)
712                 goto free_save_area;
713
714         sd->save_area_pa = __sme_page_pa(sd->save_area);
715         return 0;
716
717 free_save_area:
718         __free_page(sd->save_area);
719         sd->save_area = NULL;
720         return ret;
721
722 }
723
724 static void set_dr_intercepts(struct vcpu_svm *svm)
725 {
726         struct vmcb *vmcb = svm->vmcb01.ptr;
727
728         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
729         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
730         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
731         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
732         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
733         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
734         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
735         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
736         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
737         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
738         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
739         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
740         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
741         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
742         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
743         vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
744
745         recalc_intercepts(svm);
746 }
747
748 static void clr_dr_intercepts(struct vcpu_svm *svm)
749 {
750         struct vmcb *vmcb = svm->vmcb01.ptr;
751
752         vmcb->control.intercepts[INTERCEPT_DR] = 0;
753
754         recalc_intercepts(svm);
755 }
756
757 static int direct_access_msr_slot(u32 msr)
758 {
759         u32 i;
760
761         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
762                 if (direct_access_msrs[i].index == msr)
763                         return i;
764
765         return -ENOENT;
766 }
767
768 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
769                                      int write)
770 {
771         struct vcpu_svm *svm = to_svm(vcpu);
772         int slot = direct_access_msr_slot(msr);
773
774         if (slot == -ENOENT)
775                 return;
776
777         /* Set the shadow bitmaps to the desired intercept states */
778         if (read)
779                 set_bit(slot, svm->shadow_msr_intercept.read);
780         else
781                 clear_bit(slot, svm->shadow_msr_intercept.read);
782
783         if (write)
784                 set_bit(slot, svm->shadow_msr_intercept.write);
785         else
786                 clear_bit(slot, svm->shadow_msr_intercept.write);
787 }
788
789 static bool valid_msr_intercept(u32 index)
790 {
791         return direct_access_msr_slot(index) != -ENOENT;
792 }
793
794 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
795 {
796         u8 bit_write;
797         unsigned long tmp;
798         u32 offset;
799         u32 *msrpm;
800
801         /*
802          * For non-nested case:
803          * If the L01 MSR bitmap does not intercept the MSR, then we need to
804          * save it.
805          *
806          * For nested case:
807          * If the L02 MSR bitmap does not intercept the MSR, then we need to
808          * save it.
809          */
810         msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
811                                       to_svm(vcpu)->msrpm;
812
813         offset    = svm_msrpm_offset(msr);
814         bit_write = 2 * (msr & 0x0f) + 1;
815         tmp       = msrpm[offset];
816
817         BUG_ON(offset == MSR_INVALID);
818
819         return test_bit(bit_write, &tmp);
820 }
821
822 static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
823                                         u32 msr, int read, int write)
824 {
825         struct vcpu_svm *svm = to_svm(vcpu);
826         u8 bit_read, bit_write;
827         unsigned long tmp;
828         u32 offset;
829
830         /*
831          * If this warning triggers, extend the direct_access_msrs list at
832          * the beginning of the file.
833          */
834         WARN_ON(!valid_msr_intercept(msr));
835
836         /* Force MSRs disallowed by the MSR filter to be intercepted */
837         if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
838                 read = 0;
839
840         if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
841                 write = 0;
842
843         offset    = svm_msrpm_offset(msr);
844         bit_read  = 2 * (msr & 0x0f);
845         bit_write = 2 * (msr & 0x0f) + 1;
846         tmp       = msrpm[offset];
847
848         BUG_ON(offset == MSR_INVALID);
849
850         read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
851         write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
852
853         msrpm[offset] = tmp;
854
855         svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
856         svm->nested.force_msr_bitmap_recalc = true;
857 }
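/*
 * Bit layout example: each MSR owns two consecutive permission bits (read,
 * then write), so one u32 covers 16 MSRs.  For MSR_IA32_SPEC_CTRL (0x48),
 * svm_msrpm_offset() returns u32 index 4, and since 0x48 & 0x0f == 8 the
 * read and write bits are bits 16 and 17 of that word.  A set bit means the
 * access is intercepted; a cleared bit lets the guest access pass through.
 */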
858
859 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
860                           int read, int write)
861 {
862         set_shadow_msr_intercept(vcpu, msr, read, write);
863         set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
864 }
865
866 u32 *svm_vcpu_alloc_msrpm(void)
867 {
868         unsigned int order = get_order(MSRPM_SIZE);
869         struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
870         u32 *msrpm;
871
872         if (!pages)
873                 return NULL;
874
875         msrpm = page_address(pages);
876         memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
877
878         return msrpm;
879 }
880
881 void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
882 {
883         int i;
884
885         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
886                 if (!direct_access_msrs[i].always)
887                         continue;
888                 set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
889         }
890 }
891
892 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
893 {
894         int i;
895
896         if (intercept == svm->x2avic_msrs_intercepted)
897                 return;
898
899         if (!x2avic_enabled)
900                 return;
901
902         for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
903                 int index = direct_access_msrs[i].index;
904
905                 if ((index < APIC_BASE_MSR) ||
906                     (index > APIC_BASE_MSR + 0xff))
907                         continue;
908                 set_msr_interception(&svm->vcpu, svm->msrpm, index,
909                                      !intercept, !intercept);
910         }
911
912         svm->x2avic_msrs_intercepted = intercept;
913 }
914
915 void svm_vcpu_free_msrpm(u32 *msrpm)
916 {
917         __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
918 }
919
920 static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
921 {
922         struct vcpu_svm *svm = to_svm(vcpu);
923         u32 i;
924
925         /*
926          * Set intercept permissions for all direct access MSRs again. They
927          * will automatically get filtered through the MSR filter, so we are
928          * back in sync after this.
929          */
930         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
931                 u32 msr = direct_access_msrs[i].index;
932                 u32 read = test_bit(i, svm->shadow_msr_intercept.read);
933                 u32 write = test_bit(i, svm->shadow_msr_intercept.write);
934
935                 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
936         }
937 }
938
939 static void add_msr_offset(u32 offset)
940 {
941         int i;
942
943         for (i = 0; i < MSRPM_OFFSETS; ++i) {
944
945                 /* Offset already in list? */
946                 if (msrpm_offsets[i] == offset)
947                         return;
948
949                 /* Slot used by another offset? */
950                 if (msrpm_offsets[i] != MSR_INVALID)
951                         continue;
952
953                 /* Add offset to list */
954                 msrpm_offsets[i] = offset;
955
956                 return;
957         }
958
959         /*
960          * If this BUG triggers, the msrpm_offsets table has overflowed. Just
961          * increase MSRPM_OFFSETS in this case.
962          */
963         BUG();
964 }
965
966 static void init_msrpm_offsets(void)
967 {
968         int i;
969
970         memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
971
972         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
973                 u32 offset;
974
975                 offset = svm_msrpm_offset(direct_access_msrs[i].index);
976                 BUG_ON(offset == MSR_INVALID);
977
978                 add_msr_offset(offset);
979         }
980 }
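/*
 * The resulting msrpm_offsets[] array is the set of distinct u32 words in
 * the permission map that cover the MSRs above.  The nested code only needs
 * to walk these offsets, rather than the whole permission map, when it
 * merges L1's MSR permission map with KVM's own on nested VMRUN.
 */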
981
982 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
983 {
984         to_vmcb->save.dbgctl            = from_vmcb->save.dbgctl;
985         to_vmcb->save.br_from           = from_vmcb->save.br_from;
986         to_vmcb->save.br_to             = from_vmcb->save.br_to;
987         to_vmcb->save.last_excp_from    = from_vmcb->save.last_excp_from;
988         to_vmcb->save.last_excp_to      = from_vmcb->save.last_excp_to;
989
990         vmcb_mark_dirty(to_vmcb, VMCB_LBR);
991 }
992
993 static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
994 {
995         struct vcpu_svm *svm = to_svm(vcpu);
996
997         svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
998         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
999         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
1000         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
1001         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
1002
1003         /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
1004         if (is_guest_mode(vcpu))
1005                 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
1006 }
1007
1008 static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
1009 {
1010         struct vcpu_svm *svm = to_svm(vcpu);
1011
1012         svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
1013         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
1014         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
1015         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
1016         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
1017
1018         /*
1019          * Move the LBR msrs back to the vmcb01 to avoid copying them
1020          * on nested guest entries.
1021          */
1022         if (is_guest_mode(vcpu))
1023                 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
1024 }
1025
1026 static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
1027 {
1028         /*
1029          * If LBR virtualization is disabled, the LBR MSRs are always kept in
1030          * vmcb01.  If LBR virtualization is enabled and L1 is running VMs of
1031          * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
1032          */
1033         return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
1034                                                                    svm->vmcb01.ptr;
1035 }
1036
1037 void svm_update_lbrv(struct kvm_vcpu *vcpu)
1038 {
1039         struct vcpu_svm *svm = to_svm(vcpu);
1040         bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
1041         bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
1042                             (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1043                             (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
1044
1045         if (enable_lbrv == current_enable_lbrv)
1046                 return;
1047
1048         if (enable_lbrv)
1049                 svm_enable_lbrv(vcpu);
1050         else
1051                 svm_disable_lbrv(vcpu);
1052 }
1053
1054 void disable_nmi_singlestep(struct vcpu_svm *svm)
1055 {
1056         svm->nmi_singlestep = false;
1057
1058         if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
1059                 /* Clear our flags if they were not set by the guest */
1060                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1061                         svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1062                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1063                         svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1064         }
1065 }
1066
1067 static void grow_ple_window(struct kvm_vcpu *vcpu)
1068 {
1069         struct vcpu_svm *svm = to_svm(vcpu);
1070         struct vmcb_control_area *control = &svm->vmcb->control;
1071         int old = control->pause_filter_count;
1072
1073         if (kvm_pause_in_guest(vcpu->kvm))
1074                 return;
1075
1076         control->pause_filter_count = __grow_ple_window(old,
1077                                                         pause_filter_count,
1078                                                         pause_filter_count_grow,
1079                                                         pause_filter_count_max);
1080
1081         if (control->pause_filter_count != old) {
1082                 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1083                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1084                                             control->pause_filter_count, old);
1085         }
1086 }
1087
1088 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1089 {
1090         struct vcpu_svm *svm = to_svm(vcpu);
1091         struct vmcb_control_area *control = &svm->vmcb->control;
1092         int old = control->pause_filter_count;
1093
1094         if (kvm_pause_in_guest(vcpu->kvm))
1095                 return;
1096
1097         control->pause_filter_count =
1098                                 __shrink_ple_window(old,
1099                                                     pause_filter_count,
1100                                                     pause_filter_count_shrink,
1101                                                     pause_filter_count);
1102         if (control->pause_filter_count != old) {
1103                 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1104                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1105                                             control->pause_filter_count, old);
1106         }
1107 }
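/*
 * With the default module parameters this matches the comments above those
 * parameters: growing doubles the per-vCPU window (count -> 2*count ->
 * 4*count, clamped to pause_filter_count_max), while shrinking resets the
 * window back to pause_filter_count.
 */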
1108
1109 static void svm_hardware_unsetup(void)
1110 {
1111         int cpu;
1112
1113         sev_hardware_unsetup();
1114
1115         for_each_possible_cpu(cpu)
1116                 svm_cpu_uninit(cpu);
1117
1118         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
1119         get_order(IOPM_SIZE));
1120         iopm_base = 0;
1121 }
1122
1123 static void init_seg(struct vmcb_seg *seg)
1124 {
1125         seg->selector = 0;
1126         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
1127                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
1128         seg->limit = 0xffff;
1129         seg->base = 0;
1130 }
1131
1132 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1133 {
1134         seg->selector = 0;
1135         seg->attrib = SVM_SELECTOR_P_MASK | type;
1136         seg->limit = 0xffff;
1137         seg->base = 0;
1138 }
1139
1140 static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1141 {
1142         struct vcpu_svm *svm = to_svm(vcpu);
1143
1144         return svm->nested.ctl.tsc_offset;
1145 }
1146
1147 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1148 {
1149         struct vcpu_svm *svm = to_svm(vcpu);
1150
1151         return svm->tsc_ratio_msr;
1152 }
1153
1154 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
1155 {
1156         struct vcpu_svm *svm = to_svm(vcpu);
1157
1158         svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
1159         svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
1160         vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1161 }
1162
1163 void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1164 {
1165         preempt_disable();
1166         if (to_svm(vcpu)->guest_state_loaded)
1167                 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1168         preempt_enable();
1169 }
1170
1171 /* Evaluate instruction intercepts that depend on guest CPUID features. */
1172 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
1173                                               struct vcpu_svm *svm)
1174 {
1175         /*
1176          * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1177          * roots, or if INVPCID is disabled in the guest to inject #UD.
1178          */
1179         if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
1180                 if (!npt_enabled ||
1181                     !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
1182                         svm_set_intercept(svm, INTERCEPT_INVPCID);
1183                 else
1184                         svm_clr_intercept(svm, INTERCEPT_INVPCID);
1185         }
1186
1187         if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
1188                 if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
1189                         svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1190                 else
1191                         svm_set_intercept(svm, INTERCEPT_RDTSCP);
1192         }
1193 }
1194
1195 static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
1196 {
1197         struct vcpu_svm *svm = to_svm(vcpu);
1198
1199         if (guest_cpuid_is_intel(vcpu)) {
1200                 /*
1201                  * We must intercept SYSENTER_EIP and SYSENTER_ESP
1202                  * accesses because the processor only stores 32 bits.
1203                  * For the same reason we cannot use virtual VMLOAD/VMSAVE.
1204                  */
1205                 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1206                 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1207                 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1208
1209                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
1210                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
1211         } else {
1212                 /*
1213                  * If hardware supports Virtual VMLOAD VMSAVE then enable it
1214                  * in VMCB and clear intercepts to avoid #VMEXIT.
1215                  */
1216                 if (vls) {
1217                         svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1218                         svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1219                         svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1220                 }
1221                 /* No need to intercept these MSRs */
1222                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
1223                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
1224         }
1225 }
1226
1227 static void init_vmcb(struct kvm_vcpu *vcpu)
1228 {
1229         struct vcpu_svm *svm = to_svm(vcpu);
1230         struct vmcb *vmcb = svm->vmcb01.ptr;
1231         struct vmcb_control_area *control = &vmcb->control;
1232         struct vmcb_save_area *save = &vmcb->save;
1233
1234         svm_set_intercept(svm, INTERCEPT_CR0_READ);
1235         svm_set_intercept(svm, INTERCEPT_CR3_READ);
1236         svm_set_intercept(svm, INTERCEPT_CR4_READ);
1237         svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1238         svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1239         svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
1240         if (!kvm_vcpu_apicv_active(vcpu))
1241                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
1242
1243         set_dr_intercepts(svm);
1244
1245         set_exception_intercept(svm, PF_VECTOR);
1246         set_exception_intercept(svm, UD_VECTOR);
1247         set_exception_intercept(svm, MC_VECTOR);
1248         set_exception_intercept(svm, AC_VECTOR);
1249         set_exception_intercept(svm, DB_VECTOR);
1250         /*
1251          * Guest access to VMware backdoor ports could legitimately
1252          * trigger #GP because of TSS I/O permission bitmap.
1253          * We intercept those #GP and allow access to them anyway
1254          * as VMware does.
1255          */
1256         if (enable_vmware_backdoor)
1257                 set_exception_intercept(svm, GP_VECTOR);
1258
1259         svm_set_intercept(svm, INTERCEPT_INTR);
1260         svm_set_intercept(svm, INTERCEPT_NMI);
1261
1262         if (intercept_smi)
1263                 svm_set_intercept(svm, INTERCEPT_SMI);
1264
1265         svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1266         svm_set_intercept(svm, INTERCEPT_RDPMC);
1267         svm_set_intercept(svm, INTERCEPT_CPUID);
1268         svm_set_intercept(svm, INTERCEPT_INVD);
1269         svm_set_intercept(svm, INTERCEPT_INVLPG);
1270         svm_set_intercept(svm, INTERCEPT_INVLPGA);
1271         svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1272         svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1273         svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1274         svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1275         svm_set_intercept(svm, INTERCEPT_VMRUN);
1276         svm_set_intercept(svm, INTERCEPT_VMMCALL);
1277         svm_set_intercept(svm, INTERCEPT_VMLOAD);
1278         svm_set_intercept(svm, INTERCEPT_VMSAVE);
1279         svm_set_intercept(svm, INTERCEPT_STGI);
1280         svm_set_intercept(svm, INTERCEPT_CLGI);
1281         svm_set_intercept(svm, INTERCEPT_SKINIT);
1282         svm_set_intercept(svm, INTERCEPT_WBINVD);
1283         svm_set_intercept(svm, INTERCEPT_XSETBV);
1284         svm_set_intercept(svm, INTERCEPT_RDPRU);
1285         svm_set_intercept(svm, INTERCEPT_RSM);
1286
1287         if (!kvm_mwait_in_guest(vcpu->kvm)) {
1288                 svm_set_intercept(svm, INTERCEPT_MONITOR);
1289                 svm_set_intercept(svm, INTERCEPT_MWAIT);
1290         }
1291
1292         if (!kvm_hlt_in_guest(vcpu->kvm))
1293                 svm_set_intercept(svm, INTERCEPT_HLT);
1294
1295         control->iopm_base_pa = __sme_set(iopm_base);
1296         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1297         control->int_ctl = V_INTR_MASKING_MASK;
1298
1299         init_seg(&save->es);
1300         init_seg(&save->ss);
1301         init_seg(&save->ds);
1302         init_seg(&save->fs);
1303         init_seg(&save->gs);
1304
1305         save->cs.selector = 0xf000;
1306         save->cs.base = 0xffff0000;
1307         /* Executable/Readable Code Segment */
1308         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1309                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1310         save->cs.limit = 0xffff;
1311
1312         save->gdtr.base = 0;
1313         save->gdtr.limit = 0xffff;
1314         save->idtr.base = 0;
1315         save->idtr.limit = 0xffff;
1316
1317         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1318         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1319
1320         if (npt_enabled) {
1321                 /* Setup VMCB for Nested Paging */
1322                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1323                 svm_clr_intercept(svm, INTERCEPT_INVLPG);
1324                 clr_exception_intercept(svm, PF_VECTOR);
1325                 svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1326                 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1327                 save->g_pat = vcpu->arch.pat;
1328                 save->cr3 = 0;
1329         }
1330         svm->current_vmcb->asid_generation = 0;
1331         svm->asid = 0;
1332
1333         svm->nested.vmcb12_gpa = INVALID_GPA;
1334         svm->nested.last_vmcb12_gpa = INVALID_GPA;
1335
1336         if (!kvm_pause_in_guest(vcpu->kvm)) {
1337                 control->pause_filter_count = pause_filter_count;
1338                 if (pause_filter_thresh)
1339                         control->pause_filter_thresh = pause_filter_thresh;
1340                 svm_set_intercept(svm, INTERCEPT_PAUSE);
1341         } else {
1342                 svm_clr_intercept(svm, INTERCEPT_PAUSE);
1343         }
1344
1345         svm_recalc_instruction_intercepts(vcpu, svm);
1346
1347         /*
1348          * If the host supports V_SPEC_CTRL then disable the interception
1349          * of MSR_IA32_SPEC_CTRL.
1350          */
1351         if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
1352                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
1353
1354         if (kvm_vcpu_apicv_active(vcpu))
1355                 avic_init_vmcb(svm, vmcb);
1356
1357         if (vnmi)
1358                 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
1359
1360         if (vgif) {
1361                 svm_clr_intercept(svm, INTERCEPT_STGI);
1362                 svm_clr_intercept(svm, INTERCEPT_CLGI);
1363                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1364         }
1365
1366         if (sev_guest(vcpu->kvm))
1367                 sev_init_vmcb(svm);
1368
1369         svm_hv_init_vmcb(vmcb);
1370         init_vmcb_after_set_cpuid(vcpu);
1371
1372         vmcb_mark_all_dirty(vmcb);
1373
1374         enable_gif(svm);
1375 }
1376
1377 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1378 {
1379         struct vcpu_svm *svm = to_svm(vcpu);
1380
1381         svm_vcpu_init_msrpm(vcpu, svm->msrpm);
1382
1383         svm_init_osvw(vcpu);
1384         vcpu->arch.microcode_version = 0x01000065;
1385         svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
1386
1387         svm->nmi_masked = false;
1388         svm->awaiting_iret_completion = false;
1389
1390         if (sev_es_guest(vcpu->kvm))
1391                 sev_es_vcpu_reset(svm);
1392 }
1393
1394 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1395 {
1396         struct vcpu_svm *svm = to_svm(vcpu);
1397
1398         svm->spec_ctrl = 0;
1399         svm->virt_spec_ctrl = 0;
1400
1401         init_vmcb(vcpu);
1402
1403         if (!init_event)
1404                 __svm_vcpu_reset(vcpu);
1405 }
1406
1407 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1408 {
1409         svm->current_vmcb = target_vmcb;
1410         svm->vmcb = target_vmcb->ptr;
1411 }
1412
1413 static int svm_vcpu_create(struct kvm_vcpu *vcpu)
1414 {
1415         struct vcpu_svm *svm;
1416         struct page *vmcb01_page;
1417         struct page *vmsa_page = NULL;
1418         int err;
1419
1420         BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1421         svm = to_svm(vcpu);
1422
1423         err = -ENOMEM;
1424         vmcb01_page = snp_safe_alloc_page(vcpu);
1425         if (!vmcb01_page)
1426                 goto out;
1427
1428         if (sev_es_guest(vcpu->kvm)) {
1429                 /*
1430                  * SEV-ES guests require a separate VMSA page used to contain
1431                  * the encrypted register state of the guest.
1432                  */
1433                 vmsa_page = snp_safe_alloc_page(vcpu);
1434                 if (!vmsa_page)
1435                         goto error_free_vmcb_page;
1436
1437                 /*
1438                  * SEV-ES guests maintain an encrypted version of their FPU
1439                  * state which is restored and saved on VMRUN and VMEXIT.
1440                  * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
1441                  * do xsave/xrstor on it.
1442                  */
1443                 fpstate_set_confidential(&vcpu->arch.guest_fpu);
1444         }
1445
1446         err = avic_init_vcpu(svm);
1447         if (err)
1448                 goto error_free_vmsa_page;
1449
1450         svm->msrpm = svm_vcpu_alloc_msrpm();
1451         if (!svm->msrpm) {
1452                 err = -ENOMEM;
1453                 goto error_free_vmsa_page;
1454         }
1455
1456         svm->x2avic_msrs_intercepted = true;
1457
1458         svm->vmcb01.ptr = page_address(vmcb01_page);
1459         svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
1460         svm_switch_vmcb(svm, &svm->vmcb01);
1461
1462         if (vmsa_page)
1463                 svm->sev_es.vmsa = page_address(vmsa_page);
1464
1465         svm->guest_state_loaded = false;
1466
1467         return 0;
1468
1469 error_free_vmsa_page:
1470         if (vmsa_page)
1471                 __free_page(vmsa_page);
1472 error_free_vmcb_page:
1473         __free_page(vmcb01_page);
1474 out:
1475         return err;
1476 }
1477
1478 static void svm_clear_current_vmcb(struct vmcb *vmcb)
1479 {
1480         int i;
1481
1482         for_each_online_cpu(i)
1483                 cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
1484 }
1485
1486 static void svm_vcpu_free(struct kvm_vcpu *vcpu)
1487 {
1488         struct vcpu_svm *svm = to_svm(vcpu);
1489
1490         /*
1491          * The vmcb page can be recycled, causing a false negative in
1492          * svm_vcpu_load(). So, ensure that no logical CPU has this
1493          * vmcb page recorded as its current vmcb.
1494          */
1495         svm_clear_current_vmcb(svm->vmcb);
1496
1497         svm_leave_nested(vcpu);
1498         svm_free_nested(svm);
1499
1500         sev_free_vcpu(vcpu);
1501
1502         __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
1503         __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
1504 }
1505
1506 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1507 {
1508         struct vcpu_svm *svm = to_svm(vcpu);
1509         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
1510
1511         if (sev_es_guest(vcpu->kvm))
1512                 sev_es_unmap_ghcb(svm);
1513
1514         if (svm->guest_state_loaded)
1515                 return;
1516
1517         /*
1518          * Save additional host state that will be restored on VMEXIT (sev-es)
1519          * or subsequent vmload of host save area.
1520          */
1521         vmsave(sd->save_area_pa);
1522         if (sev_es_guest(vcpu->kvm)) {
1523                 struct sev_es_save_area *hostsa;
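                /*
                 * The SEV-ES host state appears to live at offset 0x400 of
                 * the per-CPU save area page, after the legacy VMCB save
                 * area filled in by the VMSAVE above.  (Layout assumption
                 * inferred from how the offset is used here.)
                 */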
1524                 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
1525
1526                 sev_es_prepare_switch_to_guest(hostsa);
1527         }
1528
1529         if (tsc_scaling)
1530                 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1531
1532         /*
1533          * TSC_AUX is always virtualized for SEV-ES guests when the feature is
1534          * available. The user return MSR support is not required in this case
1535          * because TSC_AUX is restored on #VMEXIT from the host save area
1536          * (which has been initialized in svm_hardware_enable()).
1537          */
1538         if (likely(tsc_aux_uret_slot >= 0) &&
1539             (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
1540                 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
1541
1542         svm->guest_state_loaded = true;
1543 }
1544
1545 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1546 {
1547         to_svm(vcpu)->guest_state_loaded = false;
1548 }
1549
1550 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1551 {
1552         struct vcpu_svm *svm = to_svm(vcpu);
1553         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
1554
1555         if (sd->current_vmcb != svm->vmcb) {
1556                 sd->current_vmcb = svm->vmcb;
1557
1558                 if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
1559                         indirect_branch_prediction_barrier();
1560         }
1561         if (kvm_vcpu_apicv_active(vcpu))
1562                 avic_vcpu_load(vcpu, cpu);
1563 }
1564
1565 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1566 {
1567         if (kvm_vcpu_apicv_active(vcpu))
1568                 avic_vcpu_put(vcpu);
1569
1570         svm_prepare_host_switch(vcpu);
1571
1572         ++vcpu->stat.host_state_reload;
1573 }
1574
1575 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1576 {
1577         struct vcpu_svm *svm = to_svm(vcpu);
1578         unsigned long rflags = svm->vmcb->save.rflags;
1579
1580         if (svm->nmi_singlestep) {
1581                 /* Hide our flags if they were not set by the guest */
1582                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1583                         rflags &= ~X86_EFLAGS_TF;
1584                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1585                         rflags &= ~X86_EFLAGS_RF;
1586         }
1587         return rflags;
1588 }
1589
1590 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1591 {
1592         if (to_svm(vcpu)->nmi_singlestep)
1593                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1594
1595         /*
1596          * Any change of EFLAGS.VM is accompanied by a reload of SS
1597          * (caused by either a task switch or an inter-privilege IRET),
1598          * so we do not need to update the CPL here.
1599          */
1600         to_svm(vcpu)->vmcb->save.rflags = rflags;
1601 }
1602
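/*
 * For SEV-ES guests the real RFLAGS is encrypted and not visible to KVM,
 * so interrupt-blocking state is taken from the int_state field mirrored
 * into the VMCB instead of RFLAGS.IF.
 */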
1603 static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
1604 {
1605         struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1606
1607         return sev_es_guest(vcpu->kvm)
1608                 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1609                 : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
1610 }
1611
1612 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1613 {
1614         kvm_register_mark_available(vcpu, reg);
1615
1616         switch (reg) {
1617         case VCPU_EXREG_PDPTR:
1618                 /*
1619                  * When !npt_enabled, mmu->pdptrs[] is already available since
1620                  * it is always updated per SDM when moving to CRs.
1621                  */
1622                 if (npt_enabled)
1623                         load_pdptrs(vcpu, kvm_read_cr3(vcpu));
1624                 break;
1625         default:
1626                 KVM_BUG_ON(1, vcpu->kvm);
1627         }
1628 }
1629
1630 static void svm_set_vintr(struct vcpu_svm *svm)
1631 {
1632         struct vmcb_control_area *control;
1633
1634         /*
1635          * The VINTR-related fields programmed below are ignored when AVIC is enabled
1636          */
1637         WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
1638
1639         svm_set_intercept(svm, INTERCEPT_VINTR);
1640
1641         /*
1642          * Recalculating intercepts may have cleared the VINTR intercept.  If
1643          * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
1644          * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
1645          * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
1646          * interrupts will never be unblocked while L2 is running.
1647          */
1648         if (!svm_is_intercept(svm, INTERCEPT_VINTR))
1649                 return;
1650
1651         /*
1652          * This is just a dummy VINTR to actually cause a vmexit to happen.
1653          * Actual injection of virtual interrupts happens through EVENTINJ.
1654          */
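        /*
         * The priority is set to the maximum (0xf), presumably so the dummy
         * V_IRQ is not masked by the guest's V_TPR and the VINTR intercept
         * fires as soon as interrupts are unblocked.
         */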
1655         control = &svm->vmcb->control;
1656         control->int_vector = 0x0;
1657         control->int_ctl &= ~V_INTR_PRIO_MASK;
1658         control->int_ctl |= V_IRQ_MASK |
1659                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1660         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1661 }
1662
1663 static void svm_clear_vintr(struct vcpu_svm *svm)
1664 {
1665         svm_clr_intercept(svm, INTERCEPT_VINTR);
1666
1667         /* Drop int_ctl fields related to VINTR injection.  */
1668         svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1669         if (is_guest_mode(&svm->vcpu)) {
1670                 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1671
1672                 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1673                         (svm->nested.ctl.int_ctl & V_TPR_MASK));
1674
1675                 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1676                         V_IRQ_INJECTION_BITS_MASK;
1677
1678                 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1679         }
1680
1681         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1682 }
1683
1684 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1685 {
1686         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1687         struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
1688
1689         switch (seg) {
1690         case VCPU_SREG_CS: return &save->cs;
1691         case VCPU_SREG_DS: return &save->ds;
1692         case VCPU_SREG_ES: return &save->es;
1693         case VCPU_SREG_FS: return &save01->fs;
1694         case VCPU_SREG_GS: return &save01->gs;
1695         case VCPU_SREG_SS: return &save->ss;
1696         case VCPU_SREG_TR: return &save01->tr;
1697         case VCPU_SREG_LDTR: return &save01->ldtr;
1698         }
1699         BUG();
1700         return NULL;
1701 }
1702
1703 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1704 {
1705         struct vmcb_seg *s = svm_seg(vcpu, seg);
1706
1707         return s->base;
1708 }
1709
1710 static void svm_get_segment(struct kvm_vcpu *vcpu,
1711                             struct kvm_segment *var, int seg)
1712 {
1713         struct vmcb_seg *s = svm_seg(vcpu, seg);
1714
1715         var->base = s->base;
1716         var->limit = s->limit;
1717         var->selector = s->selector;
1718         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1719         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1720         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1721         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1722         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1723         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1724         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1725
1726         /*
1727          * AMD CPUs circa 2014 track the G bit for all segments except CS.
1728          * However, the SVM spec states that the G bit is not observed by the
1729          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1730          * So let's synthesize a legal G bit for all segments, this helps
1731          * running KVM nested. It also helps cross-vendor migration, because
1732          * Intel's vmentry has a check on the 'G' bit.
1733          */
1734         var->g = s->limit > 0xfffff;
1735
1736         /*
1737          * AMD's VMCB does not have an explicit unusable field, so for
1738          * cross-vendor migration purposes derive it from the present bit.
1739          */
1740         var->unusable = !var->present;
1741
1742         switch (seg) {
1743         case VCPU_SREG_TR:
1744                 /*
1745                  * Work around a bug where the busy flag in the tr selector
1746                  * isn't exposed
1747                  */
1748                 var->type |= 0x2;
1749                 break;
1750         case VCPU_SREG_DS:
1751         case VCPU_SREG_ES:
1752         case VCPU_SREG_FS:
1753         case VCPU_SREG_GS:
1754                 /*
1755                  * The accessed bit must always be set in the segment
1756                  * descriptor cache: even if it is cleared in the in-memory
1757                  * descriptor, the cached copy remains 1. Since Intel's
1758                  * VM-entry checks enforce this, set it here to support
1759                  * cross-vendor migration.
1760                  */
1761                 if (!var->unusable)
1762                         var->type |= 0x1;
1763                 break;
1764         case VCPU_SREG_SS:
1765                 /*
1766                  * On AMD CPUs sometimes the DB bit in the segment
1767                  * descriptor is left as 1, although the whole segment has
1768                  * been made unusable. Clear it here to pass an Intel VMX
1769                  * entry check when cross vendor migrating.
1770                  */
1771                 if (var->unusable)
1772                         var->db = 0;
1773                 /* This is symmetric with svm_set_segment() */
1774                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1775                 break;
1776         }
1777 }
1778
1779 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1780 {
1781         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1782
1783         return save->cpl;
1784 }
1785
1786 static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1787 {
1788         struct kvm_segment cs;
1789
1790         svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
1791         *db = cs.db;
1792         *l = cs.l;
1793 }
1794
1795 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1796 {
1797         struct vcpu_svm *svm = to_svm(vcpu);
1798
1799         dt->size = svm->vmcb->save.idtr.limit;
1800         dt->address = svm->vmcb->save.idtr.base;
1801 }
1802
1803 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1804 {
1805         struct vcpu_svm *svm = to_svm(vcpu);
1806
1807         svm->vmcb->save.idtr.limit = dt->size;
1808         svm->vmcb->save.idtr.base = dt->address;
1809         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1810 }
1811
1812 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1813 {
1814         struct vcpu_svm *svm = to_svm(vcpu);
1815
1816         dt->size = svm->vmcb->save.gdtr.limit;
1817         dt->address = svm->vmcb->save.gdtr.base;
1818 }
1819
1820 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1821 {
1822         struct vcpu_svm *svm = to_svm(vcpu);
1823
1824         svm->vmcb->save.gdtr.limit = dt->size;
1825         svm->vmcb->save.gdtr.base = dt->address;
1826         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1827 }
1828
1829 static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1830 {
1831         struct vcpu_svm *svm = to_svm(vcpu);
1832
1833         /*
1834          * For guests that don't set guest_state_protected, the cr3 update is
1835          * handled via kvm_mmu_load() while entering the guest. For guests
1836          * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1837          * VMCB save area now, since the save area will become the initial
1838          * contents of the VMSA, and future VMCB save area updates won't be
1839          * seen.
1840          */
1841         if (sev_es_guest(vcpu->kvm)) {
1842                 svm->vmcb->save.cr3 = cr3;
1843                 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1844         }
1845 }
1846
1847 static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1848 {
1849         return true;
1850 }
1851
1852 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1853 {
1854         struct vcpu_svm *svm = to_svm(vcpu);
1855         u64 hcr0 = cr0;
1856         bool old_paging = is_paging(vcpu);
1857
1858 #ifdef CONFIG_X86_64
1859         if (vcpu->arch.efer & EFER_LME) {
1860                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1861                         vcpu->arch.efer |= EFER_LMA;
1862                         if (!vcpu->arch.guest_state_protected)
1863                                 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1864                 }
1865
1866                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1867                         vcpu->arch.efer &= ~EFER_LMA;
1868                         if (!vcpu->arch.guest_state_protected)
1869                                 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1870                 }
1871         }
1872 #endif
1873         vcpu->arch.cr0 = cr0;
1874
1875         if (!npt_enabled) {
1876                 hcr0 |= X86_CR0_PG | X86_CR0_WP;
1877                 if (old_paging != is_paging(vcpu))
1878                         svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
1879         }
1880
1881         /*
1882          * Re-enable caching here because the QEMU BIOS does not
1883          * do it; otherwise there is a noticeable delay at
1884          * reboot.
1885          */
1886         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1887                 hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1888
1889         svm->vmcb->save.cr0 = hcr0;
1890         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1891
1892         /*
1893          * SEV-ES guests must always keep the CR intercepts cleared. CR
1894          * tracking is done using the CR write traps.
1895          */
1896         if (sev_es_guest(vcpu->kvm))
1897                 return;
1898
1899         if (hcr0 == cr0) {
1900                 /* Selective CR0 write remains on.  */
1901                 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1902                 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1903         } else {
1904                 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1905                 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1906         }
1907 }
1908
1909 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1910 {
1911         return true;
1912 }
1913
1914 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1915 {
1916         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1917         unsigned long old_cr4 = vcpu->arch.cr4;
1918
1919         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1920                 svm_flush_tlb_current(vcpu);
1921
1922         vcpu->arch.cr4 = cr4;
1923         if (!npt_enabled) {
1924                 cr4 |= X86_CR4_PAE;
1925
1926                 if (!is_paging(vcpu))
1927                         cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1928         }
1929         cr4 |= host_cr4_mce;
1930         to_svm(vcpu)->vmcb->save.cr4 = cr4;
1931         vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1932
1933         if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1934                 kvm_update_cpuid_runtime(vcpu);
1935 }
1936
1937 static void svm_set_segment(struct kvm_vcpu *vcpu,
1938                             struct kvm_segment *var, int seg)
1939 {
1940         struct vcpu_svm *svm = to_svm(vcpu);
1941         struct vmcb_seg *s = svm_seg(vcpu, seg);
1942
1943         s->base = var->base;
1944         s->limit = var->limit;
1945         s->selector = var->selector;
1946         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1947         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1948         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1949         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1950         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1951         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1952         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1953         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1954
1955         /*
1956          * This is always accurate, except if SYSRET returned to a segment
1957          * with SS.DPL != 3.  Intel does not have this quirk, and always
1958          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1959          * would entail passing the CPL to userspace and back.
1960          */
1961         if (seg == VCPU_SREG_SS)
1962                 /* This is symmetric with svm_get_segment() */
1963                 svm->vmcb->save.cpl = (var->dpl & 3);
1964
1965         vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1966 }
1967
1968 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1969 {
1970         struct vcpu_svm *svm = to_svm(vcpu);
1971
1972         clr_exception_intercept(svm, BP_VECTOR);
1973
1974         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1975                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1976                         set_exception_intercept(svm, BP_VECTOR);
1977         }
1978 }
1979
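/*
 * Hand out the next ASID from the per-CPU pool.  If the pool is exhausted,
 * bump the generation, restart from min_asid and ask the CPU to flush all
 * ASIDs on the next VMRUN so stale TLB entries from recycled ASIDs cannot
 * be reused.
 */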
1980 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1981 {
1982         if (sd->next_asid > sd->max_asid) {
1983                 ++sd->asid_generation;
1984                 sd->next_asid = sd->min_asid;
1985                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1986                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1987         }
1988
1989         svm->current_vmcb->asid_generation = sd->asid_generation;
1990         svm->asid = sd->next_asid++;
1991 }
1992
1993 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
1994 {
1995         struct vmcb *vmcb = svm->vmcb;
1996
1997         if (svm->vcpu.arch.guest_state_protected)
1998                 return;
1999
2000         if (unlikely(value != vmcb->save.dr6)) {
2001                 vmcb->save.dr6 = value;
2002                 vmcb_mark_dirty(vmcb, VMCB_DR);
2003         }
2004 }
2005
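/*
 * Re-read the debug registers after the guest has been allowed to run with
 * the DR intercepts disabled (see dr_interception()), then re-arm the
 * intercepts so future accesses exit again.
 */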
2006 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2007 {
2008         struct vcpu_svm *svm = to_svm(vcpu);
2009
2010         if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm)))
2011                 return;
2012
2013         get_debugreg(vcpu->arch.db[0], 0);
2014         get_debugreg(vcpu->arch.db[1], 1);
2015         get_debugreg(vcpu->arch.db[2], 2);
2016         get_debugreg(vcpu->arch.db[3], 3);
2017         /*
2018          * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
2019          * because db_interception might need it.  We can do it before vmentry.
2020          */
2021         vcpu->arch.dr6 = svm->vmcb->save.dr6;
2022         vcpu->arch.dr7 = svm->vmcb->save.dr7;
2023         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2024         set_dr_intercepts(svm);
2025 }
2026
2027 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2028 {
2029         struct vcpu_svm *svm = to_svm(vcpu);
2030
2031         if (vcpu->arch.guest_state_protected)
2032                 return;
2033
2034         svm->vmcb->save.dr7 = value;
2035         vmcb_mark_dirty(svm->vmcb, VMCB_DR);
2036 }
2037
2038 static int pf_interception(struct kvm_vcpu *vcpu)
2039 {
2040         struct vcpu_svm *svm = to_svm(vcpu);
2041
2042         u64 fault_address = svm->vmcb->control.exit_info_2;
2043         u64 error_code = svm->vmcb->control.exit_info_1;
2044
2045         return kvm_handle_page_fault(vcpu, error_code, fault_address,
2046                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2047                         svm->vmcb->control.insn_bytes : NULL,
2048                         svm->vmcb->control.insn_len);
2049 }
2050
2051 static int npf_interception(struct kvm_vcpu *vcpu)
2052 {
2053         struct vcpu_svm *svm = to_svm(vcpu);
2054
2055         u64 fault_address = svm->vmcb->control.exit_info_2;
2056         u64 error_code = svm->vmcb->control.exit_info_1;
2057
2058         trace_kvm_page_fault(vcpu, fault_address, error_code);
2059         return kvm_mmu_page_fault(vcpu, fault_address, error_code,
2060                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2061                         svm->vmcb->control.insn_bytes : NULL,
2062                         svm->vmcb->control.insn_len);
2063 }
2064
2065 static int db_interception(struct kvm_vcpu *vcpu)
2066 {
2067         struct kvm_run *kvm_run = vcpu->run;
2068         struct vcpu_svm *svm = to_svm(vcpu);
2069
2070         if (!(vcpu->guest_debug &
2071               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2072                 !svm->nmi_singlestep) {
2073                 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
2074                 kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
2075                 return 1;
2076         }
2077
2078         if (svm->nmi_singlestep) {
2079                 disable_nmi_singlestep(svm);
2080                 /* Make sure we check for pending NMIs upon entry */
2081                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2082         }
2083
2084         if (vcpu->guest_debug &
2085             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2086                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2087                 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2088                 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2089                 kvm_run->debug.arch.pc =
2090                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2091                 kvm_run->debug.arch.exception = DB_VECTOR;
2092                 return 0;
2093         }
2094
2095         return 1;
2096 }
2097
2098 static int bp_interception(struct kvm_vcpu *vcpu)
2099 {
2100         struct vcpu_svm *svm = to_svm(vcpu);
2101         struct kvm_run *kvm_run = vcpu->run;
2102
2103         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2104         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2105         kvm_run->debug.arch.exception = BP_VECTOR;
2106         return 0;
2107 }
2108
2109 static int ud_interception(struct kvm_vcpu *vcpu)
2110 {
2111         return handle_ud(vcpu);
2112 }
2113
2114 static int ac_interception(struct kvm_vcpu *vcpu)
2115 {
2116         kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
2117         return 1;
2118 }
2119
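/*
 * Check whether an #MC matches the signature of erratum 383 (a specific
 * MC0_STATUS value) and, if so, clear the MCi_STATUS registers and flush
 * the TLB to evict the multi-match entries the erratum can create.
 */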
2120 static bool is_erratum_383(void)
2121 {
2122         int err, i;
2123         u64 value;
2124
2125         if (!erratum_383_found)
2126                 return false;
2127
2128         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2129         if (err)
2130                 return false;
2131
2132         /* Bit 62 may or may not be set for this mce */
2133         value &= ~(1ULL << 62);
2134
2135         if (value != 0xb600000000010015ULL)
2136                 return false;
2137
2138         /* Clear MCi_STATUS registers */
2139         for (i = 0; i < 6; ++i)
2140                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2141
2142         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2143         if (!err) {
2144                 u32 low, high;
2145
2146                 value &= ~(1ULL << 2);
2147                 low    = lower_32_bits(value);
2148                 high   = upper_32_bits(value);
2149
2150                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2151         }
2152
2153         /* Flush tlb to evict multi-match entries */
2154         __flush_tlb_all();
2155
2156         return true;
2157 }
2158
2159 static void svm_handle_mce(struct kvm_vcpu *vcpu)
2160 {
2161         if (is_erratum_383()) {
2162                 /*
2163                  * Erratum 383 triggered. Guest state is corrupt so kill the
2164                  * guest.
2165                  */
2166                 pr_err("Guest triggered AMD Erratum 383\n");
2167
2168                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2169
2170                 return;
2171         }
2172
2173         /*
2174          * On an #MC intercept the MCE handler is not called automatically in
2175          * the host. So do it by hand here.
2176          */
2177         kvm_machine_check();
2178 }
2179
2180 static int mc_interception(struct kvm_vcpu *vcpu)
2181 {
2182         return 1;
2183 }
2184
2185 static int shutdown_interception(struct kvm_vcpu *vcpu)
2186 {
2187         struct kvm_run *kvm_run = vcpu->run;
2188         struct vcpu_svm *svm = to_svm(vcpu);
2189
2190
2191         /*
2192          * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
2193          * the VMCB in a known good state.  Unfortunately, KVM doesn't have
2194          * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2195          * userspace.  At a platform view, INIT is acceptable behavior as
2196          * there exist bare metal platforms that automatically INIT the CPU
2197          * in response to shutdown.
2198          *
2199          * The VM save area for SEV-ES guests has already been encrypted so it
2200          * cannot be reinitialized, i.e. synthesizing INIT is futile.
2201          */
2202         if (!sev_es_guest(vcpu->kvm)) {
2203                 clear_page(svm->vmcb);
2204                 kvm_vcpu_reset(vcpu, true);
2205         }
2206
2207         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2208         return 0;
2209 }
2210
2211 static int io_interception(struct kvm_vcpu *vcpu)
2212 {
2213         struct vcpu_svm *svm = to_svm(vcpu);
2214         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2215         int size, in, string;
2216         unsigned port;
2217
2218         ++vcpu->stat.io_exits;
2219         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2220         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2221         port = io_info >> 16;
2222         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2223
2224         if (string) {
2225                 if (sev_es_guest(vcpu->kvm))
2226                         return sev_es_string_io(svm, size, port, in);
2227                 else
2228                         return kvm_emulate_instruction(vcpu, 0);
2229         }
2230
2231         svm->next_rip = svm->vmcb->control.exit_info_2;
2232
2233         return kvm_fast_pio(vcpu, size, port, in);
2234 }
2235
2236 static int nmi_interception(struct kvm_vcpu *vcpu)
2237 {
2238         return 1;
2239 }
2240
2241 static int smi_interception(struct kvm_vcpu *vcpu)
2242 {
2243         return 1;
2244 }
2245
2246 static int intr_interception(struct kvm_vcpu *vcpu)
2247 {
2248         ++vcpu->stat.irq_exits;
2249         return 1;
2250 }
2251
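/*
 * Emulate VMLOAD/VMSAVE executed by the guest: map the VMCB at the guest
 * physical address in RAX and copy the vmload/vmsave state between it and
 * the current VMCB.
 */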
2252 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2253 {
2254         struct vcpu_svm *svm = to_svm(vcpu);
2255         struct vmcb *vmcb12;
2256         struct kvm_host_map map;
2257         int ret;
2258
2259         if (nested_svm_check_permissions(vcpu))
2260                 return 1;
2261
2262         ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2263         if (ret) {
2264                 if (ret == -EINVAL)
2265                         kvm_inject_gp(vcpu, 0);
2266                 return 1;
2267         }
2268
2269         vmcb12 = map.hva;
2270
2271         ret = kvm_skip_emulated_instruction(vcpu);
2272
2273         if (vmload) {
2274                 svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2275                 svm->sysenter_eip_hi = 0;
2276                 svm->sysenter_esp_hi = 0;
2277         } else {
2278                 svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2279         }
2280
2281         kvm_vcpu_unmap(vcpu, &map, true);
2282
2283         return ret;
2284 }
2285
2286 static int vmload_interception(struct kvm_vcpu *vcpu)
2287 {
2288         return vmload_vmsave_interception(vcpu, true);
2289 }
2290
2291 static int vmsave_interception(struct kvm_vcpu *vcpu)
2292 {
2293         return vmload_vmsave_interception(vcpu, false);
2294 }
2295
2296 static int vmrun_interception(struct kvm_vcpu *vcpu)
2297 {
2298         if (nested_svm_check_permissions(vcpu))
2299                 return 1;
2300
2301         return nested_svm_vmrun(vcpu);
2302 }
2303
2304 enum {
2305         NONE_SVM_INSTR,
2306         SVM_INSTR_VMRUN,
2307         SVM_INSTR_VMLOAD,
2308         SVM_INSTR_VMSAVE,
2309 };
2310
2311 /* Return NONE_SVM_INSTR if it's not an SVM instruction, otherwise the decoded SVM_INSTR_* value */
2312 static int svm_instr_opcode(struct kvm_vcpu *vcpu)
2313 {
2314         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2315
2316         if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2317                 return NONE_SVM_INSTR;
2318
2319         switch (ctxt->modrm) {
2320         case 0xd8: /* VMRUN */
2321                 return SVM_INSTR_VMRUN;
2322         case 0xda: /* VMLOAD */
2323                 return SVM_INSTR_VMLOAD;
2324         case 0xdb: /* VMSAVE */
2325                 return SVM_INSTR_VMSAVE;
2326         default:
2327                 break;
2328         }
2329
2330         return NONE_SVM_INSTR;
2331 }
2332
2333 static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
2334 {
2335         const int guest_mode_exit_codes[] = {
2336                 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
2337                 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
2338                 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
2339         };
2340         int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
2341                 [SVM_INSTR_VMRUN] = vmrun_interception,
2342                 [SVM_INSTR_VMLOAD] = vmload_interception,
2343                 [SVM_INSTR_VMSAVE] = vmsave_interception,
2344         };
2345         struct vcpu_svm *svm = to_svm(vcpu);
2346         int ret;
2347
2348         if (is_guest_mode(vcpu)) {
2349                 /* Returns '1' or -errno on failure, '0' on success. */
2350                 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
2351                 if (ret)
2352                         return ret;
2353                 return 1;
2354         }
2355         return svm_instr_handlers[opcode](vcpu);
2356 }
2357
2358 /*
2359  * #GP handling code. Note that #GP can be triggered in the following two
2360  * cases:
2361  *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2362  *      some AMD CPUs when the address in EAX falls in a reserved memory
2363  *      region (e.g. SMM memory on the host).
2364  *   2) VMware backdoor
2365  */
2366 static int gp_interception(struct kvm_vcpu *vcpu)
2367 {
2368         struct vcpu_svm *svm = to_svm(vcpu);
2369         u32 error_code = svm->vmcb->control.exit_info_1;
2370         int opcode;
2371
2372         /* Both #GP cases have zero error_code */
2373         if (error_code)
2374                 goto reinject;
2375
2376         /* Decode the instruction for usage later */
2377         if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2378                 goto reinject;
2379
2380         opcode = svm_instr_opcode(vcpu);
2381
2382         if (opcode == NONE_SVM_INSTR) {
2383                 if (!enable_vmware_backdoor)
2384                         goto reinject;
2385
2386                 /*
2387                  * VMware backdoor emulation on #GP interception only handles
2388                  * IN{S}, OUT{S}, and RDPMC.
2389                  */
2390                 if (!is_guest_mode(vcpu))
2391                         return kvm_emulate_instruction(vcpu,
2392                                 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2393         } else {
2394                 /* All SVM instructions expect page aligned RAX */
2395                 if (svm->vmcb->save.rax & ~PAGE_MASK)
2396                         goto reinject;
2397
2398                 return emulate_svm_instr(vcpu, opcode);
2399         }
2400
2401 reinject:
2402         kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2403         return 1;
2404 }
2405
2406 void svm_set_gif(struct vcpu_svm *svm, bool value)
2407 {
2408         if (value) {
2409                 /*
2410                  * If VGIF is enabled, the STGI intercept is only added to
2411                  * detect the opening of the SMI/NMI window; remove it now.
2412                  * Likewise, clear the VINTR intercept, we will set it
2413                  * again while processing KVM_REQ_EVENT if needed.
2414                  */
2415                 if (vgif)
2416                         svm_clr_intercept(svm, INTERCEPT_STGI);
2417                 if (svm_is_intercept(svm, INTERCEPT_VINTR))
2418                         svm_clear_vintr(svm);
2419
2420                 enable_gif(svm);
2421                 if (svm->vcpu.arch.smi_pending ||
2422                     svm->vcpu.arch.nmi_pending ||
2423                     kvm_cpu_has_injectable_intr(&svm->vcpu) ||
2424                     kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
2425                         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2426         } else {
2427                 disable_gif(svm);
2428
2429                 /*
2430                  * After a CLGI no interrupts should come.  But if vGIF is
2431                  * in use, we still rely on the VINTR intercept (rather than
2432                  * STGI) to detect an open interrupt window.
2433                  */
2434                 if (!vgif)
2435                         svm_clear_vintr(svm);
2436         }
2437 }
2438
2439 static int stgi_interception(struct kvm_vcpu *vcpu)
2440 {
2441         int ret;
2442
2443         if (nested_svm_check_permissions(vcpu))
2444                 return 1;
2445
2446         ret = kvm_skip_emulated_instruction(vcpu);
2447         svm_set_gif(to_svm(vcpu), true);
2448         return ret;
2449 }
2450
2451 static int clgi_interception(struct kvm_vcpu *vcpu)
2452 {
2453         int ret;
2454
2455         if (nested_svm_check_permissions(vcpu))
2456                 return 1;
2457
2458         ret = kvm_skip_emulated_instruction(vcpu);
2459         svm_set_gif(to_svm(vcpu), false);
2460         return ret;
2461 }
2462
2463 static int invlpga_interception(struct kvm_vcpu *vcpu)
2464 {
2465         gva_t gva = kvm_rax_read(vcpu);
2466         u32 asid = kvm_rcx_read(vcpu);
2467
2468         /* FIXME: Handle an address size prefix. */
2469         if (!is_long_mode(vcpu))
2470                 gva = (u32)gva;
2471
2472         trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2473
2474         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2475         kvm_mmu_invlpg(vcpu, gva);
2476
2477         return kvm_skip_emulated_instruction(vcpu);
2478 }
2479
2480 static int skinit_interception(struct kvm_vcpu *vcpu)
2481 {
2482         trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2483
2484         kvm_queue_exception(vcpu, UD_VECTOR);
2485         return 1;
2486 }
2487
2488 static int task_switch_interception(struct kvm_vcpu *vcpu)
2489 {
2490         struct vcpu_svm *svm = to_svm(vcpu);
2491         u16 tss_selector;
2492         int reason;
2493         int int_type = svm->vmcb->control.exit_int_info &
2494                 SVM_EXITINTINFO_TYPE_MASK;
2495         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2496         uint32_t type =
2497                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2498         uint32_t idt_v =
2499                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2500         bool has_error_code = false;
2501         u32 error_code = 0;
2502
2503         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2504
2505         if (svm->vmcb->control.exit_info_2 &
2506             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2507                 reason = TASK_SWITCH_IRET;
2508         else if (svm->vmcb->control.exit_info_2 &
2509                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2510                 reason = TASK_SWITCH_JMP;
2511         else if (idt_v)
2512                 reason = TASK_SWITCH_GATE;
2513         else
2514                 reason = TASK_SWITCH_CALL;
2515
2516         if (reason == TASK_SWITCH_GATE) {
2517                 switch (type) {
2518                 case SVM_EXITINTINFO_TYPE_NMI:
2519                         vcpu->arch.nmi_injected = false;
2520                         break;
2521                 case SVM_EXITINTINFO_TYPE_EXEPT:
2522                         if (svm->vmcb->control.exit_info_2 &
2523                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2524                                 has_error_code = true;
2525                                 error_code =
2526                                         (u32)svm->vmcb->control.exit_info_2;
2527                         }
2528                         kvm_clear_exception_queue(vcpu);
2529                         break;
2530                 case SVM_EXITINTINFO_TYPE_INTR:
2531                 case SVM_EXITINTINFO_TYPE_SOFT:
2532                         kvm_clear_interrupt_queue(vcpu);
2533                         break;
2534                 default:
2535                         break;
2536                 }
2537         }
2538
2539         if (reason != TASK_SWITCH_GATE ||
2540             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2541             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2542              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2543                 if (!svm_skip_emulated_instruction(vcpu))
2544                         return 0;
2545         }
2546
2547         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2548                 int_vec = -1;
2549
2550         return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2551                                has_error_code, error_code);
2552 }
2553
2554 static void svm_clr_iret_intercept(struct vcpu_svm *svm)
2555 {
2556         if (!sev_es_guest(svm->vcpu.kvm))
2557                 svm_clr_intercept(svm, INTERCEPT_IRET);
2558 }
2559
2560 static void svm_set_iret_intercept(struct vcpu_svm *svm)
2561 {
2562         if (!sev_es_guest(svm->vcpu.kvm))
2563                 svm_set_intercept(svm, INTERCEPT_IRET);
2564 }
2565
2566 static int iret_interception(struct kvm_vcpu *vcpu)
2567 {
2568         struct vcpu_svm *svm = to_svm(vcpu);
2569
2570         WARN_ON_ONCE(sev_es_guest(vcpu->kvm));
2571
2572         ++vcpu->stat.nmi_window_exits;
2573         svm->awaiting_iret_completion = true;
2574
2575         svm_clr_iret_intercept(svm);
2576         svm->nmi_iret_rip = kvm_rip_read(vcpu);
2577
2578         kvm_make_request(KVM_REQ_EVENT, vcpu);
2579         return 1;
2580 }
2581
2582 static int invlpg_interception(struct kvm_vcpu *vcpu)
2583 {
2584         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2585                 return kvm_emulate_instruction(vcpu, 0);
2586
2587         kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2588         return kvm_skip_emulated_instruction(vcpu);
2589 }
2590
2591 static int emulate_on_interception(struct kvm_vcpu *vcpu)
2592 {
2593         return kvm_emulate_instruction(vcpu, 0);
2594 }
2595
2596 static int rsm_interception(struct kvm_vcpu *vcpu)
2597 {
2598         return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2599 }
2600
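/*
 * For a nested guest, decide whether a CR0 write should be reflected to L1
 * as a selective CR0 write exit: ignore changes to the bits covered by
 * SVM_CR0_SELECTIVE_MASK and, if anything else changes, let
 * nested_svm_exit_handled() decide whether L1 intercepts it.
 */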
2601 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2602                                             unsigned long val)
2603 {
2604         struct vcpu_svm *svm = to_svm(vcpu);
2605         unsigned long cr0 = vcpu->arch.cr0;
2606         bool ret = false;
2607
2608         if (!is_guest_mode(vcpu) ||
2609             (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2610                 return false;
2611
2612         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2613         val &= ~SVM_CR0_SELECTIVE_MASK;
2614
2615         if (cr0 ^ val) {
2616                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2617                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2618         }
2619
2620         return ret;
2621 }
2622
2623 #define CR_VALID (1ULL << 63)
2624
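/*
 * Handle CR accesses using decode assists: bit 63 of exit_info_1 (CR_VALID)
 * indicates the exit came from a MOV CRx instruction whose GPR operand is
 * reported in the low bits; otherwise fall back to instruction emulation.
 * Exit codes for MOV-to-CR are offset by 16 from the MOV-from-CR codes.
 */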
2625 static int cr_interception(struct kvm_vcpu *vcpu)
2626 {
2627         struct vcpu_svm *svm = to_svm(vcpu);
2628         int reg, cr;
2629         unsigned long val;
2630         int err;
2631
2632         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2633                 return emulate_on_interception(vcpu);
2634
2635         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2636                 return emulate_on_interception(vcpu);
2637
2638         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2639         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2640                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2641         else
2642                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2643
2644         err = 0;
2645         if (cr >= 16) { /* mov to cr */
2646                 cr -= 16;
2647                 val = kvm_register_read(vcpu, reg);
2648                 trace_kvm_cr_write(cr, val);
2649                 switch (cr) {
2650                 case 0:
2651                         if (!check_selective_cr0_intercepted(vcpu, val))
2652                                 err = kvm_set_cr0(vcpu, val);
2653                         else
2654                                 return 1;
2655
2656                         break;
2657                 case 3:
2658                         err = kvm_set_cr3(vcpu, val);
2659                         break;
2660                 case 4:
2661                         err = kvm_set_cr4(vcpu, val);
2662                         break;
2663                 case 8:
2664                         err = kvm_set_cr8(vcpu, val);
2665                         break;
2666                 default:
2667                         WARN(1, "unhandled write to CR%d", cr);
2668                         kvm_queue_exception(vcpu, UD_VECTOR);
2669                         return 1;
2670                 }
2671         } else { /* mov from cr */
2672                 switch (cr) {
2673                 case 0:
2674                         val = kvm_read_cr0(vcpu);
2675                         break;
2676                 case 2:
2677                         val = vcpu->arch.cr2;
2678                         break;
2679                 case 3:
2680                         val = kvm_read_cr3(vcpu);
2681                         break;
2682                 case 4:
2683                         val = kvm_read_cr4(vcpu);
2684                         break;
2685                 case 8:
2686                         val = kvm_get_cr8(vcpu);
2687                         break;
2688                 default:
2689                         WARN(1, "unhandled read from CR%d", cr);
2690                         kvm_queue_exception(vcpu, UD_VECTOR);
2691                         return 1;
2692                 }
2693                 kvm_register_write(vcpu, reg, val);
2694                 trace_kvm_cr_read(cr, val);
2695         }
2696         return kvm_complete_insn_gp(vcpu, err);
2697 }
2698
2699 static int cr_trap(struct kvm_vcpu *vcpu)
2700 {
2701         struct vcpu_svm *svm = to_svm(vcpu);
2702         unsigned long old_value, new_value;
2703         unsigned int cr;
2704         int ret = 0;
2705
2706         new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2707
2708         cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2709         switch (cr) {
2710         case 0:
2711                 old_value = kvm_read_cr0(vcpu);
2712                 svm_set_cr0(vcpu, new_value);
2713
2714                 kvm_post_set_cr0(vcpu, old_value, new_value);
2715                 break;
2716         case 4:
2717                 old_value = kvm_read_cr4(vcpu);
2718                 svm_set_cr4(vcpu, new_value);
2719
2720                 kvm_post_set_cr4(vcpu, old_value, new_value);
2721                 break;
2722         case 8:
2723                 ret = kvm_set_cr8(vcpu, new_value);
2724                 break;
2725         default:
2726                 WARN(1, "unhandled CR%d write trap", cr);
2727                 kvm_queue_exception(vcpu, UD_VECTOR);
2728                 return 1;
2729         }
2730
2731         return kvm_complete_insn_gp(vcpu, ret);
2732 }
2733
2734 static int dr_interception(struct kvm_vcpu *vcpu)
2735 {
2736         struct vcpu_svm *svm = to_svm(vcpu);
2737         int reg, dr;
2738         unsigned long val;
2739         int err = 0;
2740
2741         /*
2742          * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT
2743          * for DR7 write only. KVM cannot change DR7 (always swapped as type 'A') so return early.
2744          */
2745         if (sev_es_guest(vcpu->kvm))
2746                 return 1;
2747
2748         if (vcpu->guest_debug == 0) {
2749                 /*
2750                  * No more DR vmexits; force a reload of the debug registers
2751                  * and reenter on this instruction.  The next vmexit will
2752                  * retrieve the full state of the debug registers.
2753                  */
2754                 clr_dr_intercepts(svm);
2755                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2756                 return 1;
2757         }
2758
2759         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2760                 return emulate_on_interception(vcpu);
2761
2762         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2763         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2764         if (dr >= 16) { /* mov to DRn  */
2765                 dr -= 16;
2766                 val = kvm_register_read(vcpu, reg);
2767                 err = kvm_set_dr(vcpu, dr, val);
2768         } else {
2769                 kvm_get_dr(vcpu, dr, &val);
2770                 kvm_register_write(vcpu, reg, val);
2771         }
2772
2773         return kvm_complete_insn_gp(vcpu, err);
2774 }
2775
2776 static int cr8_write_interception(struct kvm_vcpu *vcpu)
2777 {
2778         int r;
2779
2780         u8 cr8_prev = kvm_get_cr8(vcpu);
2781         /* instruction emulation calls kvm_set_cr8() */
2782         r = cr_interception(vcpu);
2783         if (lapic_in_kernel(vcpu))
2784                 return r;
2785         if (cr8_prev <= kvm_get_cr8(vcpu))
2786                 return r;
2787         vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2788         return 0;
2789 }
2790
2791 static int efer_trap(struct kvm_vcpu *vcpu)
2792 {
2793         struct msr_data msr_info;
2794         int ret;
2795
2796         /*
2797          * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2798          * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2799          * whether the guest has X86_FEATURE_SVM - this avoids a failure if
2800          * the guest doesn't have X86_FEATURE_SVM.
2801          */
2802         msr_info.host_initiated = false;
2803         msr_info.index = MSR_EFER;
2804         msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2805         ret = kvm_set_msr_common(vcpu, &msr_info);
2806
2807         return kvm_complete_insn_gp(vcpu, ret);
2808 }
2809
2810 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2811 {
2812         msr->data = 0;
2813
2814         switch (msr->index) {
2815         case MSR_AMD64_DE_CFG:
2816                 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
2817                         msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
2818                 break;
2819         default:
2820                 return KVM_MSR_RET_INVALID;
2821         }
2822
2823         return 0;
2824 }
2825
2826 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2827 {
2828         struct vcpu_svm *svm = to_svm(vcpu);
2829
2830         switch (msr_info->index) {
2831         case MSR_AMD64_TSC_RATIO:
2832                 if (!msr_info->host_initiated &&
2833                     !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
2834                         return 1;
2835                 msr_info->data = svm->tsc_ratio_msr;
2836                 break;
2837         case MSR_STAR:
2838                 msr_info->data = svm->vmcb01.ptr->save.star;
2839                 break;
2840 #ifdef CONFIG_X86_64
2841         case MSR_LSTAR:
2842                 msr_info->data = svm->vmcb01.ptr->save.lstar;
2843                 break;
2844         case MSR_CSTAR:
2845                 msr_info->data = svm->vmcb01.ptr->save.cstar;
2846                 break;
2847         case MSR_KERNEL_GS_BASE:
2848                 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2849                 break;
2850         case MSR_SYSCALL_MASK:
2851                 msr_info->data = svm->vmcb01.ptr->save.sfmask;
2852                 break;
2853 #endif
2854         case MSR_IA32_SYSENTER_CS:
2855                 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2856                 break;
2857         case MSR_IA32_SYSENTER_EIP:
2858                 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2859                 if (guest_cpuid_is_intel(vcpu))
2860                         msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2861                 break;
2862         case MSR_IA32_SYSENTER_ESP:
2863                 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2864                 if (guest_cpuid_is_intel(vcpu))
2865                         msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2866                 break;
2867         case MSR_TSC_AUX:
2868                 msr_info->data = svm->tsc_aux;
2869                 break;
2870         case MSR_IA32_DEBUGCTLMSR:
2871                 msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
2872                 break;
2873         case MSR_IA32_LASTBRANCHFROMIP:
2874                 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
2875                 break;
2876         case MSR_IA32_LASTBRANCHTOIP:
2877                 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
2878                 break;
2879         case MSR_IA32_LASTINTFROMIP:
2880                 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
2881                 break;
2882         case MSR_IA32_LASTINTTOIP:
2883                 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
2884                 break;
2885         case MSR_VM_HSAVE_PA:
2886                 msr_info->data = svm->nested.hsave_msr;
2887                 break;
2888         case MSR_VM_CR:
2889                 msr_info->data = svm->nested.vm_cr_msr;
2890                 break;
2891         case MSR_IA32_SPEC_CTRL:
2892                 if (!msr_info->host_initiated &&
2893                     !guest_has_spec_ctrl_msr(vcpu))
2894                         return 1;
2895
2896                 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2897                         msr_info->data = svm->vmcb->save.spec_ctrl;
2898                 else
2899                         msr_info->data = svm->spec_ctrl;
2900                 break;
2901         case MSR_AMD64_VIRT_SPEC_CTRL:
2902                 if (!msr_info->host_initiated &&
2903                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2904                         return 1;
2905
2906                 msr_info->data = svm->virt_spec_ctrl;
2907                 break;
2908         case MSR_F15H_IC_CFG: {
2909
2910                 int family, model;
2911
2912                 family = guest_cpuid_family(vcpu);
2913                 model  = guest_cpuid_model(vcpu);
2914
2915                 if (family < 0 || model < 0)
2916                         return kvm_get_msr_common(vcpu, msr_info);
2917
2918                 msr_info->data = 0;
2919
2920                 if (family == 0x15 &&
2921                     (model >= 0x2 && model < 0x20))
2922                         msr_info->data = 0x1E;
2923                 }
2924                 break;
2925         case MSR_AMD64_DE_CFG:
2926                 msr_info->data = svm->msr_decfg;
2927                 break;
2928         default:
2929                 return kvm_get_msr_common(vcpu, msr_info);
2930         }
2931         return 0;
2932 }
2933
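/*
 * Completion callback for emulated MSR accesses.  For SEV-ES guests a
 * failed access cannot be handled by injecting #GP directly; instead the
 * error is reported back through the GHCB as a #GP event for the guest's
 * #VC handler.
 */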
2934 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2935 {
2936         struct vcpu_svm *svm = to_svm(vcpu);
2937         if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2938                 return kvm_complete_insn_gp(vcpu, err);
2939
2940         ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
2941         ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
2942                                 X86_TRAP_GP |
2943                                 SVM_EVTINJ_TYPE_EXEPT |
2944                                 SVM_EVTINJ_VALID);
2945         return 1;
2946 }
2947
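/*
 * Emulate writes to MSR_VM_CR.  Once SVM_DIS is set, the LOCK and DIS bits
 * become read-only, and disabling SVM is rejected while EFER.SVME is set.
 */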
2948 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2949 {
2950         struct vcpu_svm *svm = to_svm(vcpu);
2951         int svm_dis, chg_mask;
2952
2953         if (data & ~SVM_VM_CR_VALID_MASK)
2954                 return 1;
2955
2956         chg_mask = SVM_VM_CR_VALID_MASK;
2957
2958         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2959                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2960
2961         svm->nested.vm_cr_msr &= ~chg_mask;
2962         svm->nested.vm_cr_msr |= (data & chg_mask);
2963
2964         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2965
2966         /* Check for an attempt to disable SVM while EFER.SVME is set */
2967         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2968                 return 1;
2969
2970         return 0;
2971 }
2972
2973 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2974 {
2975         struct vcpu_svm *svm = to_svm(vcpu);
2976         int ret = 0;
2977
2978         u32 ecx = msr->index;
2979         u64 data = msr->data;
2980         switch (ecx) {
2981         case MSR_AMD64_TSC_RATIO:
2982
2983                 if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {
2984
2985                         if (!msr->host_initiated)
2986                                 return 1;
2987                         /*
2988                          * In case TSC scaling is not enabled, always
2989                          * leave this MSR at the default value.
2990                          *
2991                          * Due to a bug in QEMU 6.2.0, it tries to set
2992                          * this MSR to 0 when TSC scaling is not enabled.
2993                          * Ignore that value as well.
2994                          */
2995                         if (data != 0 && data != svm->tsc_ratio_msr)
2996                                 return 1;
2997                         break;
2998                 }
2999
3000                 if (data & SVM_TSC_RATIO_RSVD)
3001                         return 1;
3002
3003                 svm->tsc_ratio_msr = data;
3004
3005                 if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
3006                     is_guest_mode(vcpu))
3007                         nested_svm_update_tsc_ratio_msr(vcpu);
3008
3009                 break;
3010         case MSR_IA32_CR_PAT:
3011                 ret = kvm_set_msr_common(vcpu, msr);
3012                 if (ret)
3013                         break;
3014
3015                 svm->vmcb01.ptr->save.g_pat = data;
3016                 if (is_guest_mode(vcpu))
3017                         nested_vmcb02_compute_g_pat(svm);
3018                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
3019                 break;
3020         case MSR_IA32_SPEC_CTRL:
3021                 if (!msr->host_initiated &&
3022                     !guest_has_spec_ctrl_msr(vcpu))
3023                         return 1;
3024
3025                 if (kvm_spec_ctrl_test_value(data))
3026                         return 1;
3027
3028                 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
3029                         svm->vmcb->save.spec_ctrl = data;
3030                 else
3031                         svm->spec_ctrl = data;
3032                 if (!data)
3033                         break;
3034
3035                 /*
3036                  * For non-nested:
3037                  * When it's written (to non-zero) for the first time, pass
3038                  * it through.
3039                  *
3040                  * For nested:
3041                  * The handling of the MSR bitmap for L2 guests is done in
3042                  * nested_svm_vmrun_msrpm.
3043                  * We update the L1 MSR bit as well since it will end up
3044                  * touching the MSR anyway now.
3045                  */
3046                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
3047                 break;
3048         case MSR_AMD64_VIRT_SPEC_CTRL:
3049                 if (!msr->host_initiated &&
3050                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
3051                         return 1;
3052
3053                 if (data & ~SPEC_CTRL_SSBD)
3054                         return 1;
3055
3056                 svm->virt_spec_ctrl = data;
3057                 break;
3058         case MSR_STAR:
3059                 svm->vmcb01.ptr->save.star = data;
3060                 break;
3061 #ifdef CONFIG_X86_64
3062         case MSR_LSTAR:
3063                 svm->vmcb01.ptr->save.lstar = data;
3064                 break;
3065         case MSR_CSTAR:
3066                 svm->vmcb01.ptr->save.cstar = data;
3067                 break;
3068         case MSR_KERNEL_GS_BASE:
3069                 svm->vmcb01.ptr->save.kernel_gs_base = data;
3070                 break;
3071         case MSR_SYSCALL_MASK:
3072                 svm->vmcb01.ptr->save.sfmask = data;
3073                 break;
3074 #endif
3075         case MSR_IA32_SYSENTER_CS:
3076                 svm->vmcb01.ptr->save.sysenter_cs = data;
3077                 break;
3078         case MSR_IA32_SYSENTER_EIP:
3079                 svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
3080                 /*
3081                  * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
3082                  * when we spoof an Intel vendor ID (for cross-vendor migration).
3083                  * In that case we use this intercept to track the high
3084                  * 32-bit part of these MSRs to support Intel's
3085                  * implementation of SYSENTER/SYSEXIT.
3086                  */
3087                 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
3088                 break;
3089         case MSR_IA32_SYSENTER_ESP:
3090                 svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
3091                 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
3092                 break;
3093         case MSR_TSC_AUX:
3094                 /*
3095                  * TSC_AUX is always virtualized for SEV-ES guests when the
3096                  * feature is available. The user return MSR support is not
3097                  * required in this case because TSC_AUX is restored on #VMEXIT
3098                  * from the host save area (which has been initialized in
3099                  * svm_hardware_enable()).
3100                  */
3101                 if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
3102                         break;
3103
3104                 /*
3105                  * TSC_AUX is usually changed only during boot and never read
3106                  * directly.  Intercept TSC_AUX instead of exposing it to the
3107                  * guest via direct_access_msrs, and switch it via user return.
3108                  */
3109                 preempt_disable();
3110                 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
3111                 preempt_enable();
3112                 if (ret)
3113                         break;
3114
3115                 svm->tsc_aux = data;
3116                 break;
3117         case MSR_IA32_DEBUGCTLMSR:
3118                 if (!lbrv) {
3119                         kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3120                         break;
3121                 }
3122                 if (data & DEBUGCTL_RESERVED_BITS)
3123                         return 1;
3124
3125                 svm_get_lbr_vmcb(svm)->save.dbgctl = data;
3126                 svm_update_lbrv(vcpu);
3127                 break;
3128         case MSR_VM_HSAVE_PA:
3129                 /*
3130                  * Old kernels did not validate the value written to
3131                  * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
3132                  * value to allow live migrating buggy or malicious guests
3133                  * originating from those kernels.
3134                  */
3135                 if (!msr->host_initiated && !page_address_valid(vcpu, data))
3136                         return 1;
3137
3138                 svm->nested.hsave_msr = data & PAGE_MASK;
3139                 break;
3140         case MSR_VM_CR:
3141                 return svm_set_vm_cr(vcpu, data);
3142         case MSR_VM_IGNNE:
3143                 kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3144                 break;
3145         case MSR_AMD64_DE_CFG: {
3146                 struct kvm_msr_entry msr_entry;
3147
3148                 msr_entry.index = msr->index;
3149                 if (svm_get_msr_feature(&msr_entry))
3150                         return 1;
3151
3152                 /* Check the supported bits */
3153                 if (data & ~msr_entry.data)
3154                         return 1;
3155
3156                 /* Don't allow the guest to change a bit, #GP */
3157                 if (!msr->host_initiated && (data ^ msr_entry.data))
3158                         return 1;
3159
3160                 svm->msr_decfg = data;
3161                 break;
3162         }
3163         default:
3164                 return kvm_set_msr_common(vcpu, msr);
3165         }
3166         return ret;
3167 }
3168
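     /* For MSR intercepts, exit_info_1 is non-zero for WRMSR and zero for RDMSR. */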
3169 static int msr_interception(struct kvm_vcpu *vcpu)
3170 {
3171         if (to_svm(vcpu)->vmcb->control.exit_info_1)
3172                 return kvm_emulate_wrmsr(vcpu);
3173         else
3174                 return kvm_emulate_rdmsr(vcpu);
3175 }
3176
3177 static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3178 {
3179         kvm_make_request(KVM_REQ_EVENT, vcpu);
3180         svm_clear_vintr(to_svm(vcpu));
3181
3182         /*
3183          * If not running nested, the only reason to end up here with AVIC
3184          * is an ExtINT: AVIC was temporarily disabled in order to request
3185          * the IRQ window, and it has to be re-enabled now.
3186          *
3187          * If running nested, still remove the VM-wide AVIC inhibit to
3188          * support the case in which the interrupt window was requested
3189          * when the vCPU was not running nested.
3190          *
3191          * vCPUs that are still running nested keep their AVIC inhibited
3192          * via the per-vCPU AVIC inhibition, so this does not affect them.
3193          */
3194         kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3195
3196         ++vcpu->stat.irq_window_exits;
3197         return 1;
3198 }
3199
3200 static int pause_interception(struct kvm_vcpu *vcpu)
3201 {
3202         bool in_kernel;
3203         /*
3204          * CPL is not made available for an SEV-ES guest, therefore
3205          * vcpu->arch.preempted_in_kernel can never be true.  Just
3206          * set in_kernel to false as well.
3207          */
3208         in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
3209
3210         grow_ple_window(vcpu);
3211
3212         kvm_vcpu_on_spin(vcpu, in_kernel);
3213         return kvm_skip_emulated_instruction(vcpu);
3214 }
3215
3216 static int invpcid_interception(struct kvm_vcpu *vcpu)
3217 {
3218         struct vcpu_svm *svm = to_svm(vcpu);
3219         unsigned long type;
3220         gva_t gva;
3221
3222         if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
3223                 kvm_queue_exception(vcpu, UD_VECTOR);
3224                 return 1;
3225         }
3226
3227         /*
3228          * For an INVPCID intercept:
3229          * EXITINFO1 provides the linear address of the memory operand.
3230          * EXITINFO2 provides the contents of the register operand.
3231          */
3232         type = svm->vmcb->control.exit_info_2;
3233         gva = svm->vmcb->control.exit_info_1;
3234
3235         return kvm_handle_invpcid(vcpu, type, gva);
3236 }
3237
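     /*
      * Exit handler dispatch table, indexed directly by the hardware exit code.
      * For example, a guest CPUID intercept arrives as SVM_EXIT_CPUID and is
      * dispatched as svm_exit_handlers[SVM_EXIT_CPUID](vcpu), i.e.
      * kvm_emulate_cpuid(vcpu).  Exit codes outside the table, or entries left
      * NULL, are rejected by svm_check_exit_valid() and reported through
      * svm_handle_invalid_exit().
      */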
3238 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3239         [SVM_EXIT_READ_CR0]                     = cr_interception,
3240         [SVM_EXIT_READ_CR3]                     = cr_interception,
3241         [SVM_EXIT_READ_CR4]                     = cr_interception,
3242         [SVM_EXIT_READ_CR8]                     = cr_interception,
3243         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
3244         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
3245         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
3246         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
3247         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
3248         [SVM_EXIT_READ_DR0]                     = dr_interception,
3249         [SVM_EXIT_READ_DR1]                     = dr_interception,
3250         [SVM_EXIT_READ_DR2]                     = dr_interception,
3251         [SVM_EXIT_READ_DR3]                     = dr_interception,
3252         [SVM_EXIT_READ_DR4]                     = dr_interception,
3253         [SVM_EXIT_READ_DR5]                     = dr_interception,
3254         [SVM_EXIT_READ_DR6]                     = dr_interception,
3255         [SVM_EXIT_READ_DR7]                     = dr_interception,
3256         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
3257         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
3258         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
3259         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
3260         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
3261         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
3262         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
3263         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
3264         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
3265         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
3266         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
3267         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
3268         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
3269         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
3270         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
3271         [SVM_EXIT_INTR]                         = intr_interception,
3272         [SVM_EXIT_NMI]                          = nmi_interception,
3273         [SVM_EXIT_SMI]                          = smi_interception,
3274         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
3275         [SVM_EXIT_RDPMC]                        = kvm_emulate_rdpmc,
3276         [SVM_EXIT_CPUID]                        = kvm_emulate_cpuid,
3277         [SVM_EXIT_IRET]                         = iret_interception,
3278         [SVM_EXIT_INVD]                         = kvm_emulate_invd,
3279         [SVM_EXIT_PAUSE]                        = pause_interception,
3280         [SVM_EXIT_HLT]                          = kvm_emulate_halt,
3281         [SVM_EXIT_INVLPG]                       = invlpg_interception,
3282         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
3283         [SVM_EXIT_IOIO]                         = io_interception,
3284         [SVM_EXIT_MSR]                          = msr_interception,
3285         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
3286         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
3287         [SVM_EXIT_VMRUN]                        = vmrun_interception,
3288         [SVM_EXIT_VMMCALL]                      = kvm_emulate_hypercall,
3289         [SVM_EXIT_VMLOAD]                       = vmload_interception,
3290         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
3291         [SVM_EXIT_STGI]                         = stgi_interception,
3292         [SVM_EXIT_CLGI]                         = clgi_interception,
3293         [SVM_EXIT_SKINIT]                       = skinit_interception,
3294         [SVM_EXIT_RDTSCP]                       = kvm_handle_invalid_op,
3295         [SVM_EXIT_WBINVD]                       = kvm_emulate_wbinvd,
3296         [SVM_EXIT_MONITOR]                      = kvm_emulate_monitor,
3297         [SVM_EXIT_MWAIT]                        = kvm_emulate_mwait,
3298         [SVM_EXIT_XSETBV]                       = kvm_emulate_xsetbv,
3299         [SVM_EXIT_RDPRU]                        = kvm_handle_invalid_op,
3300         [SVM_EXIT_EFER_WRITE_TRAP]              = efer_trap,
3301         [SVM_EXIT_CR0_WRITE_TRAP]               = cr_trap,
3302         [SVM_EXIT_CR4_WRITE_TRAP]               = cr_trap,
3303         [SVM_EXIT_CR8_WRITE_TRAP]               = cr_trap,
3304         [SVM_EXIT_INVPCID]                      = invpcid_interception,
3305         [SVM_EXIT_NPF]                          = npf_interception,
3306         [SVM_EXIT_RSM]                          = rsm_interception,
3307         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
3308         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
3309         [SVM_EXIT_VMGEXIT]                      = sev_handle_vmgexit,
3310 };
3311
3312 static void dump_vmcb(struct kvm_vcpu *vcpu)
3313 {
3314         struct vcpu_svm *svm = to_svm(vcpu);
3315         struct vmcb_control_area *control = &svm->vmcb->control;
3316         struct vmcb_save_area *save = &svm->vmcb->save;
3317         struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3318
3319         if (!dump_invalid_vmcb) {
3320                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3321                 return;
3322         }
3323
3324         pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
3325                svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3326         pr_err("VMCB Control Area:\n");
3327         pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3328         pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3329         pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3330         pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3331         pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3332         pr_err("%-20s%08x %08x\n", "intercepts:",
3333                control->intercepts[INTERCEPT_WORD3],
3334                control->intercepts[INTERCEPT_WORD4]);
3335         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3336         pr_err("%-20s%d\n", "pause filter threshold:",
3337                control->pause_filter_thresh);
3338         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3339         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3340         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3341         pr_err("%-20s%d\n", "asid:", control->asid);
3342         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3343         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3344         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3345         pr_err("%-20s%08x\n", "int_state:", control->int_state);
3346         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3347         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3348         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3349         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3350         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3351         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3352         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3353         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3354         pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3355         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3356         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3357         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
3358         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3359         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3360         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3361         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3362         pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3363         pr_err("VMCB State Save Area:\n");
3364         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3365                "es:",
3366                save->es.selector, save->es.attrib,
3367                save->es.limit, save->es.base);
3368         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3369                "cs:",
3370                save->cs.selector, save->cs.attrib,
3371                save->cs.limit, save->cs.base);
3372         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3373                "ss:",
3374                save->ss.selector, save->ss.attrib,
3375                save->ss.limit, save->ss.base);
3376         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3377                "ds:",
3378                save->ds.selector, save->ds.attrib,
3379                save->ds.limit, save->ds.base);
3380         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3381                "fs:",
3382                save01->fs.selector, save01->fs.attrib,
3383                save01->fs.limit, save01->fs.base);
3384         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3385                "gs:",
3386                save01->gs.selector, save01->gs.attrib,
3387                save01->gs.limit, save01->gs.base);
3388         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3389                "gdtr:",
3390                save->gdtr.selector, save->gdtr.attrib,
3391                save->gdtr.limit, save->gdtr.base);
3392         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3393                "ldtr:",
3394                save01->ldtr.selector, save01->ldtr.attrib,
3395                save01->ldtr.limit, save01->ldtr.base);
3396         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3397                "idtr:",
3398                save->idtr.selector, save->idtr.attrib,
3399                save->idtr.limit, save->idtr.base);
3400         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3401                "tr:",
3402                save01->tr.selector, save01->tr.attrib,
3403                save01->tr.limit, save01->tr.base);
3404         pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
3405                save->vmpl, save->cpl, save->efer);
3406         pr_err("%-15s %016llx %-13s %016llx\n",
3407                "cr0:", save->cr0, "cr2:", save->cr2);
3408         pr_err("%-15s %016llx %-13s %016llx\n",
3409                "cr3:", save->cr3, "cr4:", save->cr4);
3410         pr_err("%-15s %016llx %-13s %016llx\n",
3411                "dr6:", save->dr6, "dr7:", save->dr7);
3412         pr_err("%-15s %016llx %-13s %016llx\n",
3413                "rip:", save->rip, "rflags:", save->rflags);
3414         pr_err("%-15s %016llx %-13s %016llx\n",
3415                "rsp:", save->rsp, "rax:", save->rax);
3416         pr_err("%-15s %016llx %-13s %016llx\n",
3417                "star:", save01->star, "lstar:", save01->lstar);
3418         pr_err("%-15s %016llx %-13s %016llx\n",
3419                "cstar:", save01->cstar, "sfmask:", save01->sfmask);
3420         pr_err("%-15s %016llx %-13s %016llx\n",
3421                "kernel_gs_base:", save01->kernel_gs_base,
3422                "sysenter_cs:", save01->sysenter_cs);
3423         pr_err("%-15s %016llx %-13s %016llx\n",
3424                "sysenter_esp:", save01->sysenter_esp,
3425                "sysenter_eip:", save01->sysenter_eip);
3426         pr_err("%-15s %016llx %-13s %016llx\n",
3427                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3428         pr_err("%-15s %016llx %-13s %016llx\n",
3429                "br_from:", save->br_from, "br_to:", save->br_to);
3430         pr_err("%-15s %016llx %-13s %016llx\n",
3431                "excp_from:", save->last_excp_from,
3432                "excp_to:", save->last_excp_to);
3433 }
3434
3435 static bool svm_check_exit_valid(u64 exit_code)
3436 {
3437         return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
3438                 svm_exit_handlers[exit_code]);
3439 }
3440
3441 static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
3442 {
3443         vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
3444         dump_vmcb(vcpu);
3445         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3446         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3447         vcpu->run->internal.ndata = 2;
3448         vcpu->run->internal.data[0] = exit_code;
3449         vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
3450         return 0;
3451 }
3452
3453 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
3454 {
3455         if (!svm_check_exit_valid(exit_code))
3456                 return svm_handle_invalid_exit(vcpu, exit_code);
3457
3458 #ifdef CONFIG_MITIGATION_RETPOLINE
3459         if (exit_code == SVM_EXIT_MSR)
3460                 return msr_interception(vcpu);
3461         else if (exit_code == SVM_EXIT_VINTR)
3462                 return interrupt_window_interception(vcpu);
3463         else if (exit_code == SVM_EXIT_INTR)
3464                 return intr_interception(vcpu);
3465         else if (exit_code == SVM_EXIT_HLT)
3466                 return kvm_emulate_halt(vcpu);
3467         else if (exit_code == SVM_EXIT_NPF)
3468                 return npf_interception(vcpu);
3469 #endif
3470         return svm_exit_handlers[exit_code](vcpu);
3471 }
3472
3473 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3474                               u64 *info1, u64 *info2,
3475                               u32 *intr_info, u32 *error_code)
3476 {
3477         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3478
3479         *reason = control->exit_code;
3480         *info1 = control->exit_info_1;
3481         *info2 = control->exit_info_2;
3482         *intr_info = control->exit_int_info;
3483         if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3484             (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3485                 *error_code = control->exit_int_info_err;
3486         else
3487                 *error_code = 0;
3488 }
3489
3490 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3491 {
3492         struct vcpu_svm *svm = to_svm(vcpu);
3493         struct kvm_run *kvm_run = vcpu->run;
3494         u32 exit_code = svm->vmcb->control.exit_code;
3495
3496         /* SEV-ES guests must use the CR write traps to track CR registers. */
3497         if (!sev_es_guest(vcpu->kvm)) {
3498                 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3499                         vcpu->arch.cr0 = svm->vmcb->save.cr0;
3500                 if (npt_enabled)
3501                         vcpu->arch.cr3 = svm->vmcb->save.cr3;
3502         }
3503
3504         if (is_guest_mode(vcpu)) {
3505                 int vmexit;
3506
3507                 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3508
3509                 vmexit = nested_svm_exit_special(svm);
3510
3511                 if (vmexit == NESTED_EXIT_CONTINUE)
3512                         vmexit = nested_svm_exit_handled(svm);
3513
3514                 if (vmexit == NESTED_EXIT_DONE)
3515                         return 1;
3516         }
3517
3518         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3519                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3520                 kvm_run->fail_entry.hardware_entry_failure_reason
3521                         = svm->vmcb->control.exit_code;
3522                 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3523                 dump_vmcb(vcpu);
3524                 return 0;
3525         }
3526
3527         if (exit_fastpath != EXIT_FASTPATH_NONE)
3528                 return 1;
3529
3530         return svm_invoke_exit_handler(vcpu, exit_code);
3531 }
3532
3533 static void pre_svm_run(struct kvm_vcpu *vcpu)
3534 {
3535         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
3536         struct vcpu_svm *svm = to_svm(vcpu);
3537
3538         /*
3539          * If the previous vmrun of the vmcb occurred on a different physical
3540          * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
3541          * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3542          */
3543         if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3544                 svm->current_vmcb->asid_generation = 0;
3545                 vmcb_mark_all_dirty(svm->vmcb);
3546                 svm->current_vmcb->cpu = vcpu->cpu;
3547         }
3548
3549         if (sev_guest(vcpu->kvm))
3550                 return pre_sev_run(svm, vcpu->cpu);
3551
3552         /* FIXME: handle wraparound of asid_generation */
3553         if (svm->current_vmcb->asid_generation != sd->asid_generation)
3554                 new_asid(svm, sd);
3555 }
3556
3557 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3558 {
3559         struct vcpu_svm *svm = to_svm(vcpu);
3560
3561         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3562
3563         if (svm->nmi_l1_to_l2)
3564                 return;
3565
3566         /*
3567          * No need to manually track NMI masking when vNMI is enabled, hardware
3568          * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
3569          * case where software directly injects an NMI.
3570          */
3571         if (!is_vnmi_enabled(svm)) {
3572                 svm->nmi_masked = true;
3573                 svm_set_iret_intercept(svm);
3574         }
3575         ++vcpu->stat.nmi_injections;
3576 }
3577
3578 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
3579 {
3580         struct vcpu_svm *svm = to_svm(vcpu);
3581
3582         if (!is_vnmi_enabled(svm))
3583                 return false;
3584
3585         return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3586 }
3587
3588 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
3589 {
3590         struct vcpu_svm *svm = to_svm(vcpu);
3591
3592         if (!is_vnmi_enabled(svm))
3593                 return false;
3594
3595         if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3596                 return false;
3597
3598         svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3599         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3600
3601         /*
3602          * Because the pending NMI is serviced by hardware, KVM can't know when
3603          * the NMI is "injected", but for all intents and purposes, passing the
3604          * NMI off to hardware counts as injection.
3605          */
3606         ++vcpu->stat.nmi_injections;
3607
3608         return true;
3609 }
3610
3611 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3612 {
3613         struct vcpu_svm *svm = to_svm(vcpu);
3614         u32 type;
3615
3616         if (vcpu->arch.interrupt.soft) {
3617                 if (svm_update_soft_interrupt_rip(vcpu))
3618                         return;
3619
3620                 type = SVM_EVTINJ_TYPE_SOFT;
3621         } else {
3622                 type = SVM_EVTINJ_TYPE_INTR;
3623         }
3624
3625         trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
3626                            vcpu->arch.interrupt.soft, reinjected);
3627         ++vcpu->stat.irq_injections;
3628
3629         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3630                                        SVM_EVTINJ_VALID | type;
3631 }
3632
3633 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3634                                      int trig_mode, int vector)
3635 {
3636         /*
3637          * apic->apicv_active must be read after vcpu->mode.
3638          * Pairs with smp_store_release in vcpu_enter_guest.
3639          */
3640         bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3641
3642         /* Note, this is called iff the local APIC is in-kernel. */
3643         if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3644                 /* Process the interrupt via kvm_check_and_inject_events(). */
3645                 kvm_make_request(KVM_REQ_EVENT, vcpu);
3646                 kvm_vcpu_kick(vcpu);
3647                 return;
3648         }
3649
3650         trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3651         if (in_guest_mode) {
3652                 /*
3653                  * Signal the doorbell to tell hardware to inject the IRQ.  If
3654                  * the vCPU exits the guest before the doorbell chimes, hardware
3655                  * will automatically process AVIC interrupts at the next VMRUN.
3656                  */
3657                 avic_ring_doorbell(vcpu);
3658         } else {
3659                 /*
3660                  * Wake the vCPU if it was blocking.  KVM will then detect the
3661                  * pending IRQ when checking if the vCPU has a wake event.
3662                  */
3663                 kvm_vcpu_wake_up(vcpu);
3664         }
3665 }
3666
3667 static void svm_deliver_interrupt(struct kvm_lapic *apic,  int delivery_mode,
3668                                   int trig_mode, int vector)
3669 {
3670         kvm_lapic_set_irr(vector, apic);
3671
3672         /*
3673          * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
3674          * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3675          * the read of guest_mode.  This guarantees that either VMRUN will see
3676          * and process the new vIRR entry, or that svm_complete_interrupt_delivery
3677          * will signal the doorbell if the CPU has already entered the guest.
3678          */
3679         smp_mb__after_atomic();
3680         svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3681 }
3682
3683 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3684 {
3685         struct vcpu_svm *svm = to_svm(vcpu);
3686
3687         /*
3688          * SEV-ES guests must always keep the CR intercepts cleared. CR
3689          * tracking is done using the CR write traps.
3690          */
3691         if (sev_es_guest(vcpu->kvm))
3692                 return;
3693
3694         if (nested_svm_virtualize_tpr(vcpu))
3695                 return;
3696
3697         svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3698
3699         if (irr == -1)
3700                 return;
3701
3702         if (tpr >= irr)
3703                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3704 }
3705
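     /*
      * With vNMI, NMI blocking is tracked by hardware in int_ctl
      * (V_NMI_BLOCKING_MASK); otherwise KVM tracks it in svm->nmi_masked and
      * uses the IRET intercept to detect when the guest unmasks NMIs.
      */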
3706 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3707 {
3708         struct vcpu_svm *svm = to_svm(vcpu);
3709
3710         if (is_vnmi_enabled(svm))
3711                 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3712         else
3713                 return svm->nmi_masked;
3714 }
3715
3716 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3717 {
3718         struct vcpu_svm *svm = to_svm(vcpu);
3719
3720         if (is_vnmi_enabled(svm)) {
3721                 if (masked)
3722                         svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3723                 else
3724                         svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
3725
3726         } else {
3727                 svm->nmi_masked = masked;
3728                 if (masked)
3729                         svm_set_iret_intercept(svm);
3730                 else
3731                         svm_clr_iret_intercept(svm);
3732         }
3733 }
3734
3735 bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3736 {
3737         struct vcpu_svm *svm = to_svm(vcpu);
3738         struct vmcb *vmcb = svm->vmcb;
3739
3740         if (!gif_set(svm))
3741                 return true;
3742
3743         if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3744                 return false;
3745
3746         if (svm_get_nmi_mask(vcpu))
3747                 return true;
3748
3749         return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3750 }
3751
3752 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3753 {
3754         struct vcpu_svm *svm = to_svm(vcpu);
3755         if (svm->nested.nested_run_pending)
3756                 return -EBUSY;
3757
3758         if (svm_nmi_blocked(vcpu))
3759                 return 0;
3760
3761         /* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
3762         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3763                 return -EBUSY;
3764         return 1;
3765 }
3766
3767 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3768 {
3769         struct vcpu_svm *svm = to_svm(vcpu);
3770         struct vmcb *vmcb = svm->vmcb;
3771
3772         if (!gif_set(svm))
3773                 return true;
3774
3775         if (is_guest_mode(vcpu)) {
3776                 /* As long as interrupts are being delivered...  */
3777                 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3778                     ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3779                     : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3780                         return true;
3781
3782                 /* ... vmexits aren't blocked by the interrupt shadow  */
3783                 if (nested_exit_on_intr(svm))
3784                         return false;
3785         } else {
3786                 if (!svm_get_if_flag(vcpu))
3787                         return true;
3788         }
3789
3790         return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3791 }
3792
3793 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3794 {
3795         struct vcpu_svm *svm = to_svm(vcpu);
3796
3797         if (svm->nested.nested_run_pending)
3798                 return -EBUSY;
3799
3800         if (svm_interrupt_blocked(vcpu))
3801                 return 0;
3802
3803         /*
3804          * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3805          * e.g. if the IRQ arrived asynchronously after checking nested events.
3806          */
3807         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
3808                 return -EBUSY;
3809
3810         return 1;
3811 }
3812
3813 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
3814 {
3815         struct vcpu_svm *svm = to_svm(vcpu);
3816
3817         /*
3818          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3819          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
3820          * get that intercept, this function will be called again though and
3821          * we'll get the vintr intercept. However, if the vGIF feature is
3822          * enabled, the STGI interception will not occur. Enable the irq
3823          * window under the assumption that the hardware will set the GIF.
3824          */
3825         if (vgif || gif_set(svm)) {
3826                 /*
3827                  * The IRQ window is not needed when AVIC is enabled,
3828                  * unless there is a pending ExtINT, which cannot be injected
3829                  * via AVIC. In that case, KVM needs to temporarily disable AVIC
3830                  * and fall back to injecting the IRQ via V_IRQ.
3831                  *
3832                  * If running nested, AVIC is already locally inhibited
3833                  * on this vCPU, therefore there is no need to request
3834                  * the VM wide AVIC inhibition.
3835                  */
3836                 if (!is_guest_mode(vcpu))
3837                         kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3838
3839                 svm_set_vintr(svm);
3840         }
3841 }
3842
3843 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
3844 {
3845         struct vcpu_svm *svm = to_svm(vcpu);
3846
3847         /*
3848          * KVM should never request an NMI window when vNMI is enabled, as KVM
3849          * allows at most one to-be-injected NMI and one pending NMI, i.e. if
3850          * two NMIs arrive simultaneously, KVM will inject one and set
3851          * V_NMI_PENDING for the other.  WARN, but continue with the standard
3852          * single-step approach to try and salvage the pending NMI.
3853          */
3854         WARN_ON_ONCE(is_vnmi_enabled(svm));
3855
3856         if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
3857                 return; /* IRET will cause a vm exit */
3858
3859         /*
3860          * SEV-ES guests are responsible for signaling when a vCPU is ready to
3861          * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
3862          * KVM can't intercept and single-step IRET to detect when NMIs are
3863          * unblocked (architecturally speaking).  See SVM_VMGEXIT_NMI_COMPLETE.
3864          *
3865          * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
3866          * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
3867          * supported NAEs in the GHCB protocol.
3868          */
3869         if (sev_es_guest(vcpu->kvm))
3870                 return;
3871
3872         if (!gif_set(svm)) {
3873                 if (vgif)
3874                         svm_set_intercept(svm, INTERCEPT_STGI);
3875                 return; /* STGI will cause a vm exit */
3876         }
3877
3878         /*
3879          * Something prevents the NMI from being injected.  Single-step over
3880          * the possible problem (IRET, exception injection, or interrupt shadow).
3881          */
3882         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
3883         svm->nmi_singlestep = true;
3884         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3885 }
3886
3887 static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
3888 {
3889         struct vcpu_svm *svm = to_svm(vcpu);
3890
3891         /*
3892          * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
3893          * A TLB flush for the current ASID flushes both "host" and "guest" TLB
3894          * entries, and thus is a superset of Hyper-V's fine grained flushing.
3895          */
3896         kvm_hv_vcpu_purge_flush_tlb(vcpu);
3897
3898         /*
3899          * Flush only the current ASID even if the TLB flush was invoked via
3900          * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
3901          * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
3902          * unconditionally does a TLB flush on both nested VM-Enter and nested
3903          * VM-Exit (via kvm_mmu_reset_context()).
3904          */
3905         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3906                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3907         else
3908                 svm->current_vmcb->asid_generation--;
3909 }
3910
3911 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
3912 {
3913         hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
3914
3915         /*
3916          * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
3917          * flush the NPT mappings via hypercall as flushing the ASID only
3918          * affects virtual to physical mappings, it does not invalidate guest
3919          * physical to host physical mappings.
3920          */
3921         if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
3922                 hyperv_flush_guest_mapping(root_tdp);
3923
3924         svm_flush_tlb_asid(vcpu);
3925 }
3926
3927 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
3928 {
3929         /*
3930          * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
3931          * flushes should be routed to hv_flush_remote_tlbs() without requesting
3932          * a "regular" remote flush.  Reaching this point means either there's
3933          * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
3934          * which might be fatal to the guest.  Yell, but try to recover.
3935          */
3936         if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
3937                 hv_flush_remote_tlbs(vcpu->kvm);
3938
3939         svm_flush_tlb_asid(vcpu);
3940 }
3941
3942 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3943 {
3944         struct vcpu_svm *svm = to_svm(vcpu);
3945
3946         invlpga(gva, svm->vmcb->control.asid);
3947 }
3948
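     /*
      * After a VM-Exit, propagate the guest's V_TPR (which the guest may have
      * updated without a CR8 write intercept) back into the emulated local APIC.
      */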
3949 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3950 {
3951         struct vcpu_svm *svm = to_svm(vcpu);
3952
3953         if (nested_svm_virtualize_tpr(vcpu))
3954                 return;
3955
3956         if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
3957                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3958                 kvm_set_cr8(vcpu, cr8);
3959         }
3960 }
3961
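     /*
      * Before VMRUN, mirror the emulated local APIC's task priority (CR8) into
      * V_TPR so the guest observes the current priority.
      */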
3962 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3963 {
3964         struct vcpu_svm *svm = to_svm(vcpu);
3965         u64 cr8;
3966
3967         if (nested_svm_virtualize_tpr(vcpu) ||
3968             kvm_vcpu_apicv_active(vcpu))
3969                 return;
3970
3971         cr8 = kvm_get_cr8(vcpu);
3972         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3973         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3974 }
3975
3976 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
3977                                         int type)
3978 {
3979         bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
3980         bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
3981         struct vcpu_svm *svm = to_svm(vcpu);
3982
3983         /*
3984          * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
3985          * associated with the original soft exception/interrupt.  next_rip is
3986          * cleared on all exits that can occur while vectoring an event, so KVM
3987          * needs to manually set next_rip for re-injection.  Unlike the !nrips
3988          * case below, this needs to be done if and only if KVM is re-injecting
3989          * the same event, i.e. if the event is a soft exception/interrupt,
3990          * otherwise next_rip is unused on VMRUN.
3991          */
3992         if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
3993             kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
3994                 svm->vmcb->control.next_rip = svm->soft_int_next_rip;
3995         /*
3996          * If NRIPS isn't enabled, KVM must manually advance RIP prior to
3997          * injecting the soft exception/interrupt.  That advancement needs to
3998          * be unwound if vectoring didn't complete.  Note, the new event may
3999          * not be the injected event, e.g. if KVM injected an INTn, the INTn
4000          * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
4001          * be the reported vectored event, but RIP still needs to be unwound.
4002          */
4003         else if (!nrips && (is_soft || is_exception) &&
4004                  kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
4005                 kvm_rip_write(vcpu, svm->soft_int_old_rip);
4006 }
4007
4008 static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
4009 {
4010         struct vcpu_svm *svm = to_svm(vcpu);
4011         u8 vector;
4012         int type;
4013         u32 exitintinfo = svm->vmcb->control.exit_int_info;
4014         bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
4015         bool soft_int_injected = svm->soft_int_injected;
4016
4017         svm->nmi_l1_to_l2 = false;
4018         svm->soft_int_injected = false;
4019
4020         /*
4021          * If we've made progress since setting awaiting_iret_completion, we've
4022          * executed an IRET and can allow NMI injection.
4023          */
4024         if (svm->awaiting_iret_completion &&
4025             kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
4026                 svm->awaiting_iret_completion = false;
4027                 svm->nmi_masked = false;
4028                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4029         }
4030
4031         vcpu->arch.nmi_injected = false;
4032         kvm_clear_exception_queue(vcpu);
4033         kvm_clear_interrupt_queue(vcpu);
4034
4035         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
4036                 return;
4037
4038         kvm_make_request(KVM_REQ_EVENT, vcpu);
4039
4040         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
4041         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
4042
4043         if (soft_int_injected)
4044                 svm_complete_soft_interrupt(vcpu, vector, type);
4045
4046         switch (type) {
4047         case SVM_EXITINTINFO_TYPE_NMI:
4048                 vcpu->arch.nmi_injected = true;
4049                 svm->nmi_l1_to_l2 = nmi_l1_to_l2;
4050                 break;
4051         case SVM_EXITINTINFO_TYPE_EXEPT:
4052                 /*
4053                  * Never re-inject a #VC exception.
4054                  */
4055                 if (vector == X86_TRAP_VC)
4056                         break;
4057
4058                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
4059                         u32 err = svm->vmcb->control.exit_int_info_err;
4060                         kvm_requeue_exception_e(vcpu, vector, err);
4061
4062                 } else
4063                         kvm_requeue_exception(vcpu, vector);
4064                 break;
4065         case SVM_EXITINTINFO_TYPE_INTR:
4066                 kvm_queue_interrupt(vcpu, vector, false);
4067                 break;
4068         case SVM_EXITINTINFO_TYPE_SOFT:
4069                 kvm_queue_interrupt(vcpu, vector, true);
4070                 break;
4071         default:
4072                 break;
4073         }
4074
4075 }
4076
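     /*
      * Cancel a not-yet-delivered event injection by copying event_inj into the
      * exit_int_info fields, so that svm_complete_interrupts() re-queues the
      * event for the next VMRUN.
      */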
4077 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
4078 {
4079         struct vcpu_svm *svm = to_svm(vcpu);
4080         struct vmcb_control_area *control = &svm->vmcb->control;
4081
4082         control->exit_int_info = control->event_inj;
4083         control->exit_int_info_err = control->event_inj_err;
4084         control->event_inj = 0;
4085         svm_complete_interrupts(vcpu);
4086 }
4087
4088 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
4089 {
4090         return 1;
4091 }
4092
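     /* Only WRMSR exits (SVM_EXIT_MSR with exit_info_1 != 0) take the fastpath. */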
4093 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
4094 {
4095         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
4096             to_svm(vcpu)->vmcb->control.exit_info_1)
4097                 return handle_fastpath_set_msr_irqoff(vcpu);
4098
4099         return EXIT_FASTPATH_NONE;
4100 }
4101
4102 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
4103 {
4104         struct vcpu_svm *svm = to_svm(vcpu);
4105
4106         guest_state_enter_irqoff();
4107
4108         amd_clear_divider();
4109
4110         if (sev_es_guest(vcpu->kvm))
4111                 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
4112         else
4113                 __svm_vcpu_run(svm, spec_ctrl_intercepted);
4114
4115         guest_state_exit_irqoff();
4116 }
4117
4118 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
4119 {
4120         struct vcpu_svm *svm = to_svm(vcpu);
4121         bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
4122
4123         trace_kvm_entry(vcpu);
4124
4125         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4126         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4127         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4128
4129         /*
4130          * Disable singlestep if we're injecting an interrupt/exception.
4131          * We don't want our modified rflags to be pushed on the stack where
4132          * we might not be able to easily reset them if we disabled NMI
4133          * singlestep later.
4134          */
4135         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4136                 /*
4137                  * Event injection happens before external interrupts cause a
4138                  * vmexit and interrupts are disabled here, so smp_send_reschedule
4139                  * is enough to force an immediate vmexit.
4140                  */
4141                 disable_nmi_singlestep(svm);
4142                 smp_send_reschedule(vcpu->cpu);
4143         }
4144
4145         pre_svm_run(vcpu);
4146
4147         sync_lapic_to_cr8(vcpu);
4148
4149         if (unlikely(svm->asid != svm->vmcb->control.asid)) {
4150                 svm->vmcb->control.asid = svm->asid;
4151                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
4152         }
4153         svm->vmcb->save.cr2 = vcpu->arch.cr2;
4154
4155         svm_hv_update_vp_id(svm->vmcb, vcpu);
4156
4157         /*
4158          * Run with all-zero DR6 unless needed, so that we can get the exact cause
4159          * of a #DB.
4160          */
4161         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
4162                 svm_set_dr6(svm, vcpu->arch.dr6);
4163         else
4164                 svm_set_dr6(svm, DR6_ACTIVE_LOW);
4165
4166         clgi();
4167         kvm_load_guest_xsave_state(vcpu);
4168
4169         kvm_wait_lapic_expire(vcpu);
4170
4171         /*
4172          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
4173          * it's non-zero. Since vmentry is serialising on affected CPUs, there
4174          * is no need to worry about the conditional branch over the wrmsr
4175          * being speculatively taken.
4176          */
4177         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4178                 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
4179
4180         svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
4181
4182         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4183                 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
4184
4185         if (!sev_es_guest(vcpu->kvm)) {
4186                 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4187                 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4188                 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4189                 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4190         }
4191         vcpu->arch.regs_dirty = 0;
4192
4193         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4194                 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
4195
4196         kvm_load_host_xsave_state(vcpu);
4197         stgi();
4198
4199         /* Any pending NMI will happen here */
4200
4201         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4202                 kvm_after_interrupt(vcpu);
4203
4204         sync_cr8_to_lapic(vcpu);
4205
4206         svm->next_rip = 0;
4207         if (is_guest_mode(vcpu)) {
4208                 nested_sync_control_from_vmcb02(svm);
4209
4210                 /* Track VMRUNs that have made it past consistency checking */
4211                 if (svm->nested.nested_run_pending &&
4212                     svm->vmcb->control.exit_code != SVM_EXIT_ERR)
4213                         ++vcpu->stat.nested_run;
4214
4215                 svm->nested.nested_run_pending = 0;
4216         }
4217
4218         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4219         vmcb_mark_all_clean(svm->vmcb);
4220
4221         /* if exit due to PF check for async PF */
4222         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4223                 vcpu->arch.apf.host_apf_flags =
4224                         kvm_read_and_reset_apf_flags();
4225
4226         vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
4227
4228         /*
4229          * We need to handle MC intercepts here before the vcpu has a chance to
4230          * change the physical CPU.
4231          */
4232         if (unlikely(svm->vmcb->control.exit_code ==
4233                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
4234                 svm_handle_mce(vcpu);
4235
4236         trace_kvm_exit(vcpu, KVM_ISA_SVM);
4237
4238         svm_complete_interrupts(vcpu);
4239
4240         if (is_guest_mode(vcpu))
4241                 return EXIT_FASTPATH_NONE;
4242
4243         return svm_exit_handlers_fastpath(vcpu);
4244 }
4245
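     /*
      * Load the MMU root: with NPT, the root HPA programs nested_cr3 and the
      * guest's own CR3 is written unmodified to the VMCB; without NPT, the
      * root HPA itself becomes the shadow CR3 (including the active PCID for
      * 64-bit paging).
      */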
4246 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
4247                              int root_level)
4248 {
4249         struct vcpu_svm *svm = to_svm(vcpu);
4250         unsigned long cr3;
4251
4252         if (npt_enabled) {
4253                 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4254                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4255
4256                 hv_track_root_tdp(vcpu, root_hpa);
4257
4258                 cr3 = vcpu->arch.cr3;
4259         } else if (root_level >= PT64_ROOT_4LEVEL) {
4260                 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
4261         } else {
4262                 /* PCID in the guest should be impossible with a 32-bit MMU. */
4263                 WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4264                 cr3 = root_hpa;
4265         }
4266
4267         svm->vmcb->save.cr3 = cr3;
4268         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4269 }
4270
4271 static void
4272 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4273 {
4274         /*
4275          * Patch in the VMMCALL instruction (opcode 0f 01 d9):
4276          */
4277         hypercall[0] = 0x0f;
4278         hypercall[1] = 0x01;
4279         hypercall[2] = 0xd9;
4280 }
4281
4282 /*
4283  * The kvm parameter can be NULL (module initialization, or invocation before
4284  * VM creation). Be sure to check the kvm parameter before using it.
4285  */
4286 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
4287 {
4288         switch (index) {
4289         case MSR_IA32_MCG_EXT_CTL:
4290         case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
4291                 return false;
4292         case MSR_IA32_SMBASE:
4293                 if (!IS_ENABLED(CONFIG_KVM_SMM))
4294                         return false;
4295                 /* SEV-ES guests do not support SMM, so report false */
4296                 if (kvm && sev_es_guest(kvm))
4297                         return false;
4298                 break;
4299         default:
4300                 break;
4301         }
4302
4303         return true;
4304 }
4305
4306 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
4307 {
4308         struct vcpu_svm *svm = to_svm(vcpu);
4309
4310         /*
4311          * SVM doesn't provide a way to disable just XSAVES in the guest; KVM
4312          * can only disable all variants of XSAVE by disallowing CR4.OSXSAVE from
4313          * being set.  As a result, if the host has XSAVE and XSAVES, and the
4314          * guest has XSAVE enabled, the guest can execute XSAVES without
4315          * faulting.  Treat XSAVES as enabled in this case regardless of
4316          * whether it's advertised to the guest so that KVM context switches
4317          * XSS on VM-Enter/VM-Exit.  Failure to do so would effectively give
4318          * the guest read/write access to the host's XSS.
4319          */
4320         if (boot_cpu_has(X86_FEATURE_XSAVE) &&
4321             boot_cpu_has(X86_FEATURE_XSAVES) &&
4322             guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
4323                 kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);
4324
4325         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
4326         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
4327         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
4328
4329         /*
4330          * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
4331          * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
4332          * SVM on Intel is bonkers and extremely unlikely to work).
4333          */
4334         if (!guest_cpuid_is_intel(vcpu))
4335                 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
4336
4337         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
4338         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
4339         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
4340         kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);
4341
4342         svm_recalc_instruction_intercepts(vcpu, svm);
4343
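        /*
         * Recompute interception of the PRED_CMD and FLUSH_CMD MSRs, as the
         * guest's access to them may have changed along with its CPUID.
         */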
4344         if (boot_cpu_has(X86_FEATURE_IBPB))
4345                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
4346                                      !!guest_has_pred_cmd_msr(vcpu));
4347
4348         if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
4349                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
4350                                      !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
4351
4352         if (sev_guest(vcpu->kvm))
4353                 sev_vcpu_after_set_cpuid(svm);
4354
4355         init_vmcb_after_set_cpuid(vcpu);
4356 }
4357
4358 static bool svm_has_wbinvd_exit(void)
4359 {
4360         return true;
4361 }
4362
4363 #define PRE_EX(exit)  { .exit_code = (exit), \
4364                         .stage = X86_ICPT_PRE_EXCEPT, }
4365 #define POST_EX(exit) { .exit_code = (exit), \
4366                         .stage = X86_ICPT_POST_EXCEPT, }
4367 #define POST_MEM(exit) { .exit_code = (exit), \
4368                         .stage = X86_ICPT_POST_MEMACCESS, }
4369
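/*
 * Map the emulator's x86_intercept codes to the equivalent SVM exit codes,
 * along with the emulation stage at which L1's (vmcb12) intercepts must be
 * checked.  Used by svm_check_intercept() when emulating on behalf of L2.
 */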
4370 static const struct __x86_intercept {
4371         u32 exit_code;
4372         enum x86_intercept_stage stage;
4373 } x86_intercept_map[] = {
4374         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
4375         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
4376         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
4377         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
4378         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
4379         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
4380         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
4381         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
4382         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
4383         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
4384         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
4385         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
4386         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
4387         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
4388         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
4389         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
4390         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
4391         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
4392         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
4393         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
4394         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
4395         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
4396         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
4397         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
4398         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
4399         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
4400         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
4401         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
4402         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
4403         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
4404         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
4405         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
4406         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
4407         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
4408         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
4409         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
4410         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
4411         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
4412         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
4413         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
4414         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
4415         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
4416         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
4417         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
4418         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
4419         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
4420         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
4421 };
4422
4423 #undef PRE_EX
4424 #undef POST_EX
4425 #undef POST_MEM
4426
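/*
 * Invoked by the emulator when emulating an instruction on behalf of L2:
 * build the exit information L1 would observe and ask the nested code
 * whether L1 intercepts the instruction; if so, the nested #VMEXIT is
 * performed and X86EMUL_INTERCEPTED is returned to abort emulation.
 */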
4427 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4428                                struct x86_instruction_info *info,
4429                                enum x86_intercept_stage stage,
4430                                struct x86_exception *exception)
4431 {
4432         struct vcpu_svm *svm = to_svm(vcpu);
4433         int vmexit, ret = X86EMUL_CONTINUE;
4434         struct __x86_intercept icpt_info;
4435         struct vmcb *vmcb = svm->vmcb;
4436
4437         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4438                 goto out;
4439
4440         icpt_info = x86_intercept_map[info->intercept];
4441
4442         if (stage != icpt_info.stage)
4443                 goto out;
4444
4445         switch (icpt_info.exit_code) {
4446         case SVM_EXIT_READ_CR0:
4447                 if (info->intercept == x86_intercept_cr_read)
4448                         icpt_info.exit_code += info->modrm_reg;
4449                 break;
4450         case SVM_EXIT_WRITE_CR0: {
4451                 unsigned long cr0, val;
4452
4453                 if (info->intercept == x86_intercept_cr_write)
4454                         icpt_info.exit_code += info->modrm_reg;
4455
4456                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4457                     info->intercept == x86_intercept_clts)
4458                         break;
4459
4460                 if (!(vmcb12_is_intercept(&svm->nested.ctl,
4461                                         INTERCEPT_SELECTIVE_CR0)))
4462                         break;
4463
4464                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4465                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
4466
4467                 if (info->intercept == x86_intercept_lmsw) {
4468                         cr0 &= 0xfUL;
4469                         val &= 0xfUL;
4470                         /* lmsw can't clear PE - catch this here */
4471                         if (cr0 & X86_CR0_PE)
4472                                 val |= X86_CR0_PE;
4473                 }
4474
4475                 if (cr0 ^ val)
4476                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4477
4478                 break;
4479         }
4480         case SVM_EXIT_READ_DR0:
4481         case SVM_EXIT_WRITE_DR0:
4482                 icpt_info.exit_code += info->modrm_reg;
4483                 break;
4484         case SVM_EXIT_MSR:
4485                 if (info->intercept == x86_intercept_wrmsr)
4486                         vmcb->control.exit_info_1 = 1;
4487                 else
4488                         vmcb->control.exit_info_1 = 0;
4489                 break;
4490         case SVM_EXIT_PAUSE:
4491                 /*
4492                  * This intercept only fires for NOP, but PAUSE is encoded
4493                  * as REP NOP, so check for the REP prefix here.
4494                  */
4495                 if (info->rep_prefix != REPE_PREFIX)
4496                         goto out;
4497                 break;
4498         case SVM_EXIT_IOIO: {
4499                 u64 exit_info;
4500                 u32 bytes;
4501
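                /*
                 * Assemble exit_info_1 in the hardware IOIO EXITINFO1 format
                 * (per the APM): port number in bits 31:16, TYPE (bit 0,
                 * 1 = IN), STR (bit 2), REP (bit 3), operand size in bits
                 * 6:4 and address size in bits 9:7, so the nested code can
                 * consult L1's I/O permission bitmap.
                 */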
4502                 if (info->intercept == x86_intercept_in ||
4503                     info->intercept == x86_intercept_ins) {
4504                         exit_info = ((info->src_val & 0xffff) << 16) |
4505                                 SVM_IOIO_TYPE_MASK;
4506                         bytes = info->dst_bytes;
4507                 } else {
4508                         exit_info = (info->dst_val & 0xffff) << 16;
4509                         bytes = info->src_bytes;
4510                 }
4511
4512                 if (info->intercept == x86_intercept_outs ||
4513                     info->intercept == x86_intercept_ins)
4514                         exit_info |= SVM_IOIO_STR_MASK;
4515
4516                 if (info->rep_prefix)
4517                         exit_info |= SVM_IOIO_REP_MASK;
4518
4519                 bytes = min(bytes, 4u);
4520
4521                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4522
4523                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4524
4525                 vmcb->control.exit_info_1 = exit_info;
4526                 vmcb->control.exit_info_2 = info->next_rip;
4527
4528                 break;
4529         }
4530         default:
4531                 break;
4532         }
4533
4534         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4535         if (static_cpu_has(X86_FEATURE_NRIPS))
4536                 vmcb->control.next_rip  = info->next_rip;
4537         vmcb->control.exit_code = icpt_info.exit_code;
4538         vmexit = nested_svm_exit_handled(svm);
4539
4540         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4541                                            : X86EMUL_CONTINUE;
4542
4543 out:
4544         return ret;
4545 }
4546
4547 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
4548 {
4549         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
4550                 vcpu->arch.at_instruction_boundary = true;
4551 }
4552
4553 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
4554 {
4555         if (!kvm_pause_in_guest(vcpu->kvm))
4556                 shrink_ple_window(vcpu);
4557 }
4558
4559 static void svm_setup_mce(struct kvm_vcpu *vcpu)
4560 {
4561         /* [63:9] are reserved. */
4562         vcpu->arch.mcg_cap &= 0x1ff;
4563 }
4564
4565 #ifdef CONFIG_KVM_SMM
4566 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
4567 {
4568         struct vcpu_svm *svm = to_svm(vcpu);
4569
4570         /* Per APM Vol.2 15.22.2 "Response to SMI" */
4571         if (!gif_set(svm))
4572                 return true;
4573
4574         return is_smm(vcpu);
4575 }
4576
4577 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4578 {
4579         struct vcpu_svm *svm = to_svm(vcpu);
4580         if (svm->nested.nested_run_pending)
4581                 return -EBUSY;
4582
4583         if (svm_smi_blocked(vcpu))
4584                 return 0;
4585
4586         /* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
4587         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4588                 return -EBUSY;
4589
4590         return 1;
4591 }
4592
4593 static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
4594 {
4595         struct vcpu_svm *svm = to_svm(vcpu);
4596         struct kvm_host_map map_save;
4597         int ret;
4598
4599         if (!is_guest_mode(vcpu))
4600                 return 0;
4601
4602         /*
4603          * 32-bit SMRAM format doesn't preserve EFER and SVM state.  Userspace is
4604          * responsible for ensuring nested SVM and SMIs are mutually exclusive.
4605          */
4606
4607         if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4608                 return 1;
4609
4610         smram->smram64.svm_guest_flag = 1;
4611         smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
4612
4613         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4614         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4615         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4616
4617         ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
4618         if (ret)
4619                 return ret;
4620
4621         /*
4622          * KVM uses VMCB01 to store L1 host state while L2 runs but
4623          * VMCB01 is going to be used during SMM and thus the state will
4624          * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
4625          * area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that the
4626          * format of the area is identical to the guest save area, offset
4627          * by 0x400 (matching the offset of 'struct vmcb_save_area'
4628          * within 'struct vmcb'). Note: HSAVE area may also be used by
4629          * L1 hypervisor to save additional host context (e.g. KVM does
4630          * that, see svm_prepare_switch_to_guest()) which must be
4631          * preserved.
4632          */
4633         if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4634                 return 1;
4635
4636         BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4637
4638         svm_copy_vmrun_state(map_save.hva + 0x400,
4639                              &svm->vmcb01.ptr->save);
4640
4641         kvm_vcpu_unmap(vcpu, &map_save, true);
4642         return 0;
4643 }
4644
4645 static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
4646 {
4647         struct vcpu_svm *svm = to_svm(vcpu);
4648         struct kvm_host_map map, map_save;
4649         struct vmcb *vmcb12;
4650         int ret;
4651
4652         const struct kvm_smram_state_64 *smram64 = &smram->smram64;
4653
4654         if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4655                 return 0;
4656
4657         /* Non-zero if SMI arrived while vCPU was in guest mode. */
4658         if (!smram64->svm_guest_flag)
4659                 return 0;
4660
4661         if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4662                 return 1;
4663
4664         if (!(smram64->efer & EFER_SVME))
4665                 return 1;
4666
4667         if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
4668                 return 1;
4669
4670         ret = 1;
4671         if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4672                 goto unmap_map;
4673
4674         if (svm_allocate_nested(svm))
4675                 goto unmap_save;
4676
4677         /*
4678          * Restore L1 host state from L1 HSAVE area as VMCB01 was
4679          * used during SMM (see svm_enter_smm())
4680          */
4681
4682         svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
4683
4684         /*
4685          * Enter the nested guest now
4686          */
4687
4688         vmcb_mark_all_dirty(svm->vmcb01.ptr);
4689
4690         vmcb12 = map.hva;
4691         nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
4692         nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
4693         ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
4694
4695         if (ret)
4696                 goto unmap_save;
4697
4698         svm->nested.nested_run_pending = 1;
4699
4700 unmap_save:
4701         kvm_vcpu_unmap(vcpu, &map_save, true);
4702 unmap_map:
4703         kvm_vcpu_unmap(vcpu, &map, true);
4704         return ret;
4705 }
4706
4707 static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
4708 {
4709         struct vcpu_svm *svm = to_svm(vcpu);
4710
4711         if (!gif_set(svm)) {
4712                 if (vgif)
4713                         svm_set_intercept(svm, INTERCEPT_STGI);
4714                 /* STGI will cause a vm exit */
4715         } else {
4716                 /* We must be in SMM; RSM will cause a vmexit anyway.  */
4717         }
4718 }
4719 #endif
4720
4721 static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
4722                                          void *insn, int insn_len)
4723 {
4724         bool smep, smap, is_user;
4725         u64 error_code;
4726
4727         /* Emulation is always possible when KVM has access to all guest state. */
4728         if (!sev_guest(vcpu->kvm))
4729                 return X86EMUL_CONTINUE;
4730
4731         /* #UD and #GP should never be intercepted for SEV guests. */
4732         WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
4733                                   EMULTYPE_TRAP_UD_FORCED |
4734                                   EMULTYPE_VMWARE_GP));
4735
4736         /*
4737          * Emulation is impossible for SEV-ES guests as KVM doesn't have access
4738          * to guest register state.
4739          */
4740         if (sev_es_guest(vcpu->kvm))
4741                 return X86EMUL_RETRY_INSTR;
4742
4743         /*
4744          * Emulation is possible if the instruction is already decoded, e.g.
4745          * when completing I/O after returning from userspace.
4746          */
4747         if (emul_type & EMULTYPE_NO_DECODE)
4748                 return X86EMUL_CONTINUE;
4749
4750         /*
4751          * Emulation is possible for SEV guests if and only if a prefilled
4752          * buffer containing the bytes of the intercepted instruction is
4753          * available. SEV guest memory is encrypted with a guest-specific key
4754          * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
4755          * decode garbage.
4756          *
4757          * If KVM is NOT trying to simply skip an instruction, inject #UD if
4758          * KVM reached this point without an instruction buffer.  In practice,
4759          * this path should never be hit by a well-behaved guest, e.g. KVM
4760          * doesn't intercept #UD or #GP for SEV guests, but this path is still
4761          * theoretically reachable, e.g. via unaccelerated fault-like AVIC
4762          * access, and needs to be handled by KVM to avoid putting the guest
4763          * into an infinite loop.   Injecting #UD is somewhat arbitrary, but
4764          * it's the least awful option given the lack of insight into the guest.
4765          *
4766          * If KVM is trying to skip an instruction, simply resume the guest.
4767          * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
4768          * will attempt to re-inject the INT3/INTO and skip the instruction.
4769          * In that scenario, retrying the INT3/INTO and hoping the guest will
4770          * make forward progress is the only option that has a chance of
4771          * success (and in practice it will work the vast majority of the time).
4772          */
4773         if (unlikely(!insn)) {
4774                 if (emul_type & EMULTYPE_SKIP)
4775                         return X86EMUL_UNHANDLEABLE;
4776
4777                 kvm_queue_exception(vcpu, UD_VECTOR);
4778                 return X86EMUL_PROPAGATE_FAULT;
4779         }
4780
4781         /*
4782          * Emulate for SEV guests if the insn buffer is not empty.  The buffer
4783          * will be empty if the DecodeAssist microcode cannot fetch bytes for
4784          * the faulting instruction because the code fetch itself faulted, e.g.
4785          * the guest attempted to fetch from emulated MMIO or a guest page
4786          * table used to translate CS:RIP resides in emulated MMIO.
4787          */
4788         if (likely(insn_len))
4789                 return X86EMUL_CONTINUE;
4790
4791         /*
4792          * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
4793          *
4794          * Errata:
4795          * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
4796          * possible that CPU microcode implementing DecodeAssist will fail to
4797          * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
4798          * be '0'.  This happens because microcode reads CS:RIP using a _data_
4799          * load uop with CPL=0 privileges.  If the load hits an SMAP #PF, ucode
4800          * gives up and does not fill the instruction bytes buffer.
4801          *
4802          * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
4803          * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
4804          * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
4805          * GuestIntrBytes field of the VMCB.
4806          *
4807          * This does _not_ mean that the erratum has been encountered, as the
4808          * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
4809          * #PF, e.g. if the guest attempted to execute from emulated MMIO and
4810          * encountered a reserved/not-present #PF.
4811          *
4812          * To hit the erratum, the following conditions must be true:
4813          *    1. CR4.SMAP=1 (obviously).
4814          *    2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
4815          *       have been hit as the guest would have encountered an SMEP
4816          *       violation #PF, not a #NPF.
4817          *    3. The #NPF is not due to a code fetch, in which case failure to
4818          *       retrieve the instruction bytes is legitimate (see above).
4819          *
4820          * In addition, don't apply the erratum workaround if the #NPF occurred
4821          * while translating guest page tables (see below).
4822          */
4823         error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
4824         if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
4825                 goto resume_guest;
4826
4827         smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
4828         smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
4829         is_user = svm_get_cpl(vcpu) == 3;
4830         if (smap && (!smep || is_user)) {
4831                 pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
4832
4833                 /*
4834                  * If the fault occurred in userspace, arbitrarily inject #GP
4835                  * to avoid killing the guest and to hopefully avoid confusing
4836                  * the guest kernel too much, e.g. injecting #PF would not be
4837                  * coherent with respect to the guest's page tables.  Request
4838                  * triple fault if the fault occurred in the kernel as there's
4839                  * no fault that KVM can inject without confusing the guest.
4840                  * In practice, the triple fault is moot as no sane SEV kernel
4841                  * will execute from user memory while also running with SMAP=1.
4842                  */
4843                 if (is_user)
4844                         kvm_inject_gp(vcpu, 0);
4845                 else
4846                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4847                 return X86EMUL_PROPAGATE_FAULT;
4848         }
4849
4850 resume_guest:
4851         /*
4852          * If the erratum was not hit, simply resume the guest and let it fault
4853          * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
4854          * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
4855          * userspace will kill the guest, and letting the emulator read garbage
4856          * will yield random behavior and potentially corrupt the guest.
4857          *
4858          * Simply resuming the guest is technically not a violation of the SEV
4859          * architecture.  AMD's APM states that all code fetches and page table
4860          * accesses for SEV guests are encrypted, regardless of the C-Bit.  The
4861          * APM also states that encrypted accesses to MMIO are "ignored", but
4862          * doesn't explicitly define "ignored", i.e. doing nothing and letting
4863          * the guest spin is technically "ignoring" the access.
4864          */
4865         return X86EMUL_RETRY_INSTR;
4866 }
4867
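/* Per the APM, INIT (like SMI and NMI) is held pending while GIF is clear. */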
4868 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
4869 {
4870         struct vcpu_svm *svm = to_svm(vcpu);
4871
4872         return !gif_set(svm);
4873 }
4874
4875 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4876 {
4877         if (!sev_es_guest(vcpu->kvm))
4878                 return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
4879
4880         sev_vcpu_deliver_sipi_vector(vcpu, vector);
4881 }
4882
4883 static void svm_vm_destroy(struct kvm *kvm)
4884 {
4885         avic_vm_destroy(kvm);
4886         sev_vm_destroy(kvm);
4887 }
4888
4889 static int svm_vm_init(struct kvm *kvm)
4890 {
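        /*
         * If PAUSE filtering is unusable, e.g. either module param is zero or
         * the feature is unsupported, don't intercept PAUSE at all and let it
         * execute natively in the guest.
         */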
4891         if (!pause_filter_count || !pause_filter_thresh)
4892                 kvm->arch.pause_in_guest = true;
4893
4894         if (enable_apicv) {
4895                 int ret = avic_vm_init(kvm);
4896                 if (ret)
4897                         return ret;
4898         }
4899
4900         return 0;
4901 }
4902
4903 static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
4904 {
4905         struct page *page = snp_safe_alloc_page(vcpu);
4906
4907         if (!page)
4908                 return NULL;
4909
4910         return page_address(page);
4911 }
4912
4913 static struct kvm_x86_ops svm_x86_ops __initdata = {
4914         .name = KBUILD_MODNAME,
4915
4916         .check_processor_compatibility = svm_check_processor_compat,
4917
4918         .hardware_unsetup = svm_hardware_unsetup,
4919         .hardware_enable = svm_hardware_enable,
4920         .hardware_disable = svm_hardware_disable,
4921         .has_emulated_msr = svm_has_emulated_msr,
4922
4923         .vcpu_create = svm_vcpu_create,
4924         .vcpu_free = svm_vcpu_free,
4925         .vcpu_reset = svm_vcpu_reset,
4926
4927         .vm_size = sizeof(struct kvm_svm),
4928         .vm_init = svm_vm_init,
4929         .vm_destroy = svm_vm_destroy,
4930
4931         .prepare_switch_to_guest = svm_prepare_switch_to_guest,
4932         .vcpu_load = svm_vcpu_load,
4933         .vcpu_put = svm_vcpu_put,
4934         .vcpu_blocking = avic_vcpu_blocking,
4935         .vcpu_unblocking = avic_vcpu_unblocking,
4936
4937         .update_exception_bitmap = svm_update_exception_bitmap,
4938         .get_msr_feature = svm_get_msr_feature,
4939         .get_msr = svm_get_msr,
4940         .set_msr = svm_set_msr,
4941         .get_segment_base = svm_get_segment_base,
4942         .get_segment = svm_get_segment,
4943         .set_segment = svm_set_segment,
4944         .get_cpl = svm_get_cpl,
4945         .get_cs_db_l_bits = svm_get_cs_db_l_bits,
4946         .is_valid_cr0 = svm_is_valid_cr0,
4947         .set_cr0 = svm_set_cr0,
4948         .post_set_cr3 = sev_post_set_cr3,
4949         .is_valid_cr4 = svm_is_valid_cr4,
4950         .set_cr4 = svm_set_cr4,
4951         .set_efer = svm_set_efer,
4952         .get_idt = svm_get_idt,
4953         .set_idt = svm_set_idt,
4954         .get_gdt = svm_get_gdt,
4955         .set_gdt = svm_set_gdt,
4956         .set_dr7 = svm_set_dr7,
4957         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4958         .cache_reg = svm_cache_reg,
4959         .get_rflags = svm_get_rflags,
4960         .set_rflags = svm_set_rflags,
4961         .get_if_flag = svm_get_if_flag,
4962
4963         .flush_tlb_all = svm_flush_tlb_all,
4964         .flush_tlb_current = svm_flush_tlb_current,
4965         .flush_tlb_gva = svm_flush_tlb_gva,
4966         .flush_tlb_guest = svm_flush_tlb_asid,
4967
4968         .vcpu_pre_run = svm_vcpu_pre_run,
4969         .vcpu_run = svm_vcpu_run,
4970         .handle_exit = svm_handle_exit,
4971         .skip_emulated_instruction = svm_skip_emulated_instruction,
4972         .update_emulated_instruction = NULL,
4973         .set_interrupt_shadow = svm_set_interrupt_shadow,
4974         .get_interrupt_shadow = svm_get_interrupt_shadow,
4975         .patch_hypercall = svm_patch_hypercall,
4976         .inject_irq = svm_inject_irq,
4977         .inject_nmi = svm_inject_nmi,
4978         .is_vnmi_pending = svm_is_vnmi_pending,
4979         .set_vnmi_pending = svm_set_vnmi_pending,
4980         .inject_exception = svm_inject_exception,
4981         .cancel_injection = svm_cancel_injection,
4982         .interrupt_allowed = svm_interrupt_allowed,
4983         .nmi_allowed = svm_nmi_allowed,
4984         .get_nmi_mask = svm_get_nmi_mask,
4985         .set_nmi_mask = svm_set_nmi_mask,
4986         .enable_nmi_window = svm_enable_nmi_window,
4987         .enable_irq_window = svm_enable_irq_window,
4988         .update_cr8_intercept = svm_update_cr8_intercept,
4989         .set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
4990         .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
4991         .apicv_post_state_restore = avic_apicv_post_state_restore,
4992         .required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
4993
4994         .get_exit_info = svm_get_exit_info,
4995
4996         .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
4997
4998         .has_wbinvd_exit = svm_has_wbinvd_exit,
4999
5000         .get_l2_tsc_offset = svm_get_l2_tsc_offset,
5001         .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
5002         .write_tsc_offset = svm_write_tsc_offset,
5003         .write_tsc_multiplier = svm_write_tsc_multiplier,
5004
5005         .load_mmu_pgd = svm_load_mmu_pgd,
5006
5007         .check_intercept = svm_check_intercept,
5008         .handle_exit_irqoff = svm_handle_exit_irqoff,
5009
5010         .request_immediate_exit = __kvm_request_immediate_exit,
5011
5012         .sched_in = svm_sched_in,
5013
5014         .nested_ops = &svm_nested_ops,
5015
5016         .deliver_interrupt = svm_deliver_interrupt,
5017         .pi_update_irte = avic_pi_update_irte,
5018         .setup_mce = svm_setup_mce,
5019
5020 #ifdef CONFIG_KVM_SMM
5021         .smi_allowed = svm_smi_allowed,
5022         .enter_smm = svm_enter_smm,
5023         .leave_smm = svm_leave_smm,
5024         .enable_smi_window = svm_enable_smi_window,
5025 #endif
5026
5027         .mem_enc_ioctl = sev_mem_enc_ioctl,
5028         .mem_enc_register_region = sev_mem_enc_register_region,
5029         .mem_enc_unregister_region = sev_mem_enc_unregister_region,
5030         .guest_memory_reclaimed = sev_guest_memory_reclaimed,
5031
5032         .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
5033         .vm_move_enc_context_from = sev_vm_move_enc_context_from,
5034
5035         .check_emulate_instruction = svm_check_emulate_instruction,
5036
5037         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
5038
5039         .msr_filter_changed = svm_msr_filter_changed,
5040         .complete_emulated_msr = svm_complete_emulated_msr,
5041
5042         .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
5043         .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
5044         .alloc_apic_backing_page = svm_alloc_apic_backing_page,
5045 };
5046
5047 /*
5048  * The default MMIO mask is a single bit (excluding the present bit),
5049  * which could conflict with the memory encryption bit. Check for
5050  * memory encryption support and override the default MMIO mask if
5051  * memory encryption is enabled.
5052  */
5053 static __init void svm_adjust_mmio_mask(void)
5054 {
5055         unsigned int enc_bit, mask_bit;
5056         u64 msr, mask;
5057
5058         /* If there is no memory encryption support, use existing mask */
5059         if (cpuid_eax(0x80000000) < 0x8000001f)
5060                 return;
5061
5062         /* If memory encryption is not enabled, use existing mask */
5063         rdmsrl(MSR_AMD64_SYSCFG, msr);
5064         if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
5065                 return;
5066
5067         enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
5068         mask_bit = boot_cpu_data.x86_phys_bits;
5069
5070         /* Increment the mask bit if it is the same as the encryption bit */
5071         if (enc_bit == mask_bit)
5072                 mask_bit++;
5073
5074         /*
5075          * If the mask bit location is below 52, then some bits above the
5076          * physical addressing limit will always be reserved, so use the
5077          * rsvd_bits() function to generate the mask. This mask, along with
5078          * the present bit, will be used to generate a page fault with
5079          * PFERR.RSVD = 1.
5080          *
5081          * If the mask bit location is 52 (or above), then clear the mask.
5082          */
5083         mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
5084
5085         kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
5086 }
5087
5088 static __init void svm_set_cpu_caps(void)
5089 {
5090         kvm_set_cpu_caps();
5091
5092         kvm_caps.supported_perf_cap = 0;
5093         kvm_caps.supported_xss = 0;
5094
5095         /* CPUID 0x80000001 and 0x8000000A (SVM features) */
5096         if (nested) {
5097                 kvm_cpu_cap_set(X86_FEATURE_SVM);
5098                 kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
5099
5100                 /*
5101                  * KVM currently flushes TLBs on *every* nested SVM transition,
5102                  * and so for all intents and purposes KVM supports flushing by
5103                  * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
5104                  */
5105                 kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);
5106
5107                 if (nrips)
5108                         kvm_cpu_cap_set(X86_FEATURE_NRIPS);
5109
5110                 if (npt_enabled)
5111                         kvm_cpu_cap_set(X86_FEATURE_NPT);
5112
5113                 if (tsc_scaling)
5114                         kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
5115
5116                 if (vls)
5117                         kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
5118                 if (lbrv)
5119                         kvm_cpu_cap_set(X86_FEATURE_LBRV);
5120
5121                 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
5122                         kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
5123
5124                 if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
5125                         kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
5126
5127                 if (vgif)
5128                         kvm_cpu_cap_set(X86_FEATURE_VGIF);
5129
5130                 if (vnmi)
5131                         kvm_cpu_cap_set(X86_FEATURE_VNMI);
5132
5133                 /* Nested VM can receive #VMEXIT instead of triggering #GP */
5134                 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
5135         }
5136
5137         /* CPUID 0x80000008 */
5138         if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5139             boot_cpu_has(X86_FEATURE_AMD_SSBD))
5140                 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
5141
5142         if (enable_pmu) {
5143                 /*
5144                  * Enumerate support for PERFCTR_CORE if and only if KVM has
5145                  * access to enough counters to virtualize "core" support,
5146                  * otherwise limit vPMU support to the legacy number of counters.
5147                  */
5148                 if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
5149                         kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
5150                                                           kvm_pmu_cap.num_counters_gp);
5151                 else
5152                         kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
5153
5154                 if (kvm_pmu_cap.version != 2 ||
5155                     !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
5156                         kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
5157         }
5158
5159         /* CPUID 0x8000001F (SME/SEV features) */
5160         sev_set_cpu_caps();
5161 }
5162
5163 static __init int svm_hardware_setup(void)
5164 {
5165         int cpu;
5166         struct page *iopm_pages;
5167         void *iopm_va;
5168         int r;
5169         unsigned int order = get_order(IOPM_SIZE);
5170
5171         /*
5172          * NX is required for shadow paging and for NPT if the NX huge pages
5173          * mitigation is enabled.
5174          */
5175         if (!boot_cpu_has(X86_FEATURE_NX)) {
5176                 pr_err_ratelimited("NX (Execute Disable) not supported\n");
5177                 return -EOPNOTSUPP;
5178         }
5179         kvm_enable_efer_bits(EFER_NX);
5180
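        /*
         * Allocate the I/O permissions map (IOPM) and set all bits so that
         * every port I/O access is intercepted by default; iopm_base is the
         * physical address programmed into each VMCB.
         */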
5181         iopm_pages = alloc_pages(GFP_KERNEL, order);
5182
5183         if (!iopm_pages)
5184                 return -ENOMEM;
5185
5186         iopm_va = page_address(iopm_pages);
5187         memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
5188         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
5189
5190         init_msrpm_offsets();
5191
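        /* AMD CPUs don't support MPX, so mask off its XSAVE components. */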
5192         kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
5193                                      XFEATURE_MASK_BNDCSR);
5194
5195         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
5196                 kvm_enable_efer_bits(EFER_FFXSR);
5197
5198         if (tsc_scaling) {
5199                 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
5200                         tsc_scaling = false;
5201                 } else {
5202                         pr_info("TSC scaling supported\n");
5203                         kvm_caps.has_tsc_control = true;
5204                 }
5205         }
5206         kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
5207         kvm_caps.tsc_scaling_ratio_frac_bits = 32;
5208
5209         tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
5210
5211         if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
5212                 kvm_enable_efer_bits(EFER_AUTOIBRS);
5213
5214         /* Check for pause filtering support */
5215         if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
5216                 pause_filter_count = 0;
5217                 pause_filter_thresh = 0;
5218         } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
5219                 pause_filter_thresh = 0;
5220         }
5221
5222         if (nested) {
5223                 pr_info("Nested Virtualization enabled\n");
5224                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
5225         }
5226
5227         /*
5228          * KVM's MMU doesn't support using 2-level paging for itself, and thus
5229          * NPT isn't supported if the host is using 2-level paging since host
5230          * CR4 is unchanged on VMRUN.
5231          */
5232         if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
5233                 npt_enabled = false;
5234
5235         if (!boot_cpu_has(X86_FEATURE_NPT))
5236                 npt_enabled = false;
5237
5238         /* Force VM NPT level equal to the host's paging level */
5239         kvm_configure_mmu(npt_enabled, get_npt_level(),
5240                           get_npt_level(), PG_LEVEL_1G);
5241         pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
5242
5243         /* Setup shadow_me_value and shadow_me_mask */
5244         kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
5245
5246         svm_adjust_mmio_mask();
5247
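        /*
         * NRIPS provides the next RIP on #VMEXIT, sparing KVM from decoding
         * the instruction in order to skip it.
         */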
5248         nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
5249
5250         /*
5251          * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
5252          * may be modified by svm_adjust_mmio_mask()), as well as nrips.
5253          */
5254         sev_hardware_setup();
5255
5256         svm_hv_hardware_setup();
5257
5258         for_each_possible_cpu(cpu) {
5259                 r = svm_cpu_init(cpu);
5260                 if (r)
5261                         goto err;
5262         }
5263
5264         enable_apicv = avic = avic && avic_hardware_setup();
5265
5266         if (!enable_apicv) {
5267                 svm_x86_ops.vcpu_blocking = NULL;
5268                 svm_x86_ops.vcpu_unblocking = NULL;
5269                 svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
5270         } else if (!x2avic_enabled) {
5271                 svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
5272         }
5273
5274         if (vls) {
5275                 if (!npt_enabled ||
5276                     !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
5277                     !IS_ENABLED(CONFIG_X86_64)) {
5278                         vls = false;
5279                 } else {
5280                         pr_info("Virtual VMLOAD VMSAVE supported\n");
5281                 }
5282         }
5283
5284         if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
5285                 svm_gp_erratum_intercept = false;
5286
5287         if (vgif) {
5288                 if (!boot_cpu_has(X86_FEATURE_VGIF))
5289                         vgif = false;
5290                 else
5291                         pr_info("Virtual GIF supported\n");
5292         }
5293
5294         vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
5295         if (vnmi)
5296                 pr_info("Virtual NMI enabled\n");
5297
5298         if (!vnmi) {
5299                 svm_x86_ops.is_vnmi_pending = NULL;
5300                 svm_x86_ops.set_vnmi_pending = NULL;
5301         }
5302
5303
5304         if (lbrv) {
5305                 if (!boot_cpu_has(X86_FEATURE_LBRV))
5306                         lbrv = false;
5307                 else
5308                         pr_info("LBR virtualization supported\n");
5309         }
5310
5311         if (!enable_pmu)
5312                 pr_info("PMU virtualization is disabled\n");
5313
5314         svm_set_cpu_caps();
5315
5316         /*
5317          * It seems that on AMD processors the PTE's accessed bit is
5318          * set by the CPU hardware before the #NPF vmexit. This is not
5319          * the expected behaviour and our tests fail because of it.
5320          * As a workaround, disable support for
5321          * GUEST_MAXPHYADDR < HOST_MAXPHYADDR when NPT is enabled.
5322          * In this case userspace can query the
5323          * KVM_CAP_SMALLER_MAXPHYADDR extension to find out whether the
5324          * support is present and decide how to handle it.
5325          *
5326          * If future AMD CPU models change the behaviour described
5327          * above, this variable can be changed accordingly.
5328          */
5329         allow_smaller_maxphyaddr = !npt_enabled;
5330
5331         return 0;
5332
5333 err:
5334         svm_hardware_unsetup();
5335         return r;
5336 }
5337
5338
5339 static struct kvm_x86_init_ops svm_init_ops __initdata = {
5340         .hardware_setup = svm_hardware_setup,
5341
5342         .runtime_ops = &svm_x86_ops,
5343         .pmu_ops = &amd_pmu_ops,
5344 };
5345
5346 static void __svm_exit(void)
5347 {
5348         kvm_x86_vendor_exit();
5349
5350         cpu_emergency_unregister_virt_callback(svm_emergency_disable);
5351 }
5352
5353 static int __init svm_init(void)
5354 {
5355         int r;
5356
5357         __unused_size_checks();
5358
5359         if (!kvm_is_svm_supported())
5360                 return -EOPNOTSUPP;
5361
5362         r = kvm_x86_vendor_init(&svm_init_ops);
5363         if (r)
5364                 return r;
5365
5366         cpu_emergency_register_virt_callback(svm_emergency_disable);
5367
5368         /*
5369          * Common KVM initialization _must_ come last, after this, /dev/kvm is
5370          * exposed to userspace!
5371          */
5372         r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
5373                      THIS_MODULE);
5374         if (r)
5375                 goto err_kvm_init;
5376
5377         return 0;
5378
5379 err_kvm_init:
5380         __svm_exit();
5381         return r;
5382 }
5383
5384 static void __exit svm_exit(void)
5385 {
5386         kvm_exit();
5387         __svm_exit();
5388 }
5389
5390 module_init(svm_init)
5391 module_exit(svm_exit)