// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
#include <asm/debugreg.h>

#include "mmu.h"
#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, SEV feature is not supported and APIs in
 * this file are not used but this file still gets compiled into the KVM AMD
 * module.
 *
 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
 * misc_res_type {} defined in linux/misc_cgroup.h.
 *
 * Below macros allow compilation to succeed.
 */
#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);

/* enable/disable SEV-ES DebugSwap support */
static bool sev_es_debug_swap_enabled = false;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#define sev_es_debug_swap_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
{
	int ret, asid, error = 0;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}
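
/*
 * Illustration of the recycle step above (hypothetical values, not from the
 * source): with nr_asids = 8, suppose that before a successful DF_FLUSH
 *
 *	sev_asid_bitmap         = 0b00101101	(ASIDs currently allocated)
 *	sev_reclaim_asid_bitmap = 0b00001001	(ASIDs freed, not yet flushed)
 *
 * bitmap_xor() then clears exactly the reclaimed bits from the allocation
 * bitmap, giving sev_asid_bitmap = 0b00100100, and bitmap_zero() empties the
 * reclaim bitmap, so the flushed ASIDs are available to sev_asid_new() again.
 */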

static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	return misc_cg_try_charge(type, sev->misc_cg, 1);
}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
}

static int sev_asid_new(struct kvm_sev_info *sev)
{
	int asid, min_asid, max_asid, ret;
	bool retry = true;

	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = sev_misc_cg_try_charge(sev);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 1 : min_sev_asid;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return asid;
e_uncharge:
	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}
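
/*
 * Illustration of the ASID split above (hypothetical numbers): if the
 * platform reports min_sev_asid = 100 and max_sev_asid = 509, SEV-ES guests
 * allocate from ASIDs 1..99 while plain SEV guests allocate from 100..509,
 * which is exactly the [min_asid, max_asid] window handed to
 * find_next_zero_bit() above.
 */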

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu_ptr(&svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}

static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_platform_init_args init_args = {0};
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	sev->active = true;
	sev->es_active = argp->id == KVM_SEV_ES_INIT;
	asid = sev_asid_new(sev);
	if (asid < 0)
		goto e_no_asid;
	sev->asid = asid;

	init_args.probe = false;
	ret = sev_platform_init(&init_args);
	if (ret)
		goto e_free;

	INIT_LIST_HEAD(&sev->regions_list);
	INIT_LIST_HEAD(&sev->mirror_vms);

	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);

	return 0;

e_free:
	argp->error = init_args.error;
	sev_asid_free(sev);
	sev->asid = 0;
e_no_asid:
	sev->es_active = false;
	sev->active = false;
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	/* activate ASID on the given handle */
	activate.handle = handle;
	activate.asid   = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}
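
/*
 * A minimal userspace sketch of how a VMM might drive this handler
 * (illustrative only, not from this file; assumes vm_fd and a /dev/sev fd
 * are already open, error handling omitted):
 *
 *	struct kvm_sev_launch_start ls = { .policy = 0 };
 *	struct kvm_sev_cmd cmd = {
 *		.id	= KVM_SEV_LAUNCH_START,
 *		.data	= (__u64)(uintptr_t)&ls,
 *		.sev_fd	= sev_fd,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *	// on success, ls.handle holds the firmware handle bound above and
 *	// cmd.error carries the SEV firmware status code on failure.
 */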

static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}
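
/*
 * Illustration of the page-count math in sev_pin_memory() (hypothetical
 * values): the count is derived from the first and last page frames touched,
 * not from ulen alone.  E.g. uaddr = 0x1fff with ulen = 2 spans two pages
 * (first = 0x1, last = 0x2), so npages = 2 even though only two bytes were
 * requested.
 */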

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	unsigned long i;
	uint8_t *page_virtual;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_local_page(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_local(page_virtual);
		cond_resched();
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}
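
/*
 * Illustration (hypothetical frames): if the pinned pages sit at contiguous
 * system physical frames {5, 6, 7} followed by frame 9, then
 * get_num_contig_pages(0, ...) returns 3 and get_num_contig_pages(3, ...)
 * returns 1, so a caller can cover the buffer with two firmware commands
 * instead of four.
 */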

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}
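
/*
 * Illustration of the chunking math above (hypothetical values): with
 * vaddr = 0x1010 and size = 0x3000, the first pass computes offset = 0x10;
 * if four contiguous pages were pinned, len = min(4 * 4096 - 0x10, 0x3000)
 * = 0x3000, so the whole buffer is encrypted with a single LAUNCH_UPDATE_DATA
 * starting at __sme_page_pa(inpages[0]) + 0x10.
 */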

static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct sev_es_save_area *save = svm->sev_es.vmsa;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];

	save->r8  = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9  = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];

	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss  = svm->vcpu.arch.ia32_xss;
	save->dr6  = svm->vcpu.arch.dr6;

	if (sev_es_debug_swap_enabled) {
		save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
		pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
			     "This will not work starting with Linux 6.10\n");
	}

	pr_debug("Virtual Machine Save Area (VMSA):\n");
	print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);

	return 0;
}

static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{
	struct sev_data_launch_update_vmsa vmsa;
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	if (vcpu->guest_debug) {
		pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");
		return -EINVAL;
	}

	/* Perform some pre-encryption checks against the VMSA */
	ret = sev_es_sync_vmsa(svm);
	if (ret)
		return ret;

	/*
	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
	 * the VMSA memory content (i.e. it will write the same memory region
	 * with the guest's key), so invalidate it first.
	 */
	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

	vmsa.reserved = 0;
	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
	vmsa.address = __sme_pa(svm->sev_es.vmsa);
	vmsa.len = PAGE_SIZE;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
	if (ret)
		return ret;

	vcpu->arch.guest_state_protected = true;
	return 0;
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = mutex_lock_killable(&vcpu->mutex);
		if (ret)
			return ret;

		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

		mutex_unlock(&vcpu->mutex);
		if (ret)
			return ret;
	}

	return 0;
}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If we only queried the blob length, the FW responded with the
	 * expected data (the required length) and there is nothing to copy.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}
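
/*
 * Illustration of the 16-byte alignment fixup above (hypothetical values):
 * for src_paddr = 0x1009 and sz = 8, offset = 9, the source rounds down to
 * 0x1000 and the length rounds up to round_up(8 + 9, 16) = 32, so the
 * firmware sees a fully 16-byte-aligned request that still covers the
 * caller's range.
 */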

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr,     16) ||
	    !IS_ALIGNED(size,      16)) {
		tpage = (void *)alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is a kernel buffer then use memcpy(), otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     (void __user *)dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region; let's
	 * verify that userspace memory pages are contiguous before we issue
	 * the command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data.guest_address = __sme_page_pa(pages[0]) + offset;
	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_unpin_memory;
	}

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If we only queried the report length, the FW responded with the
	 * expected data (the required length) and there is nothing to copy.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}
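
/*
 * A hedged sketch of the two-call pattern this helper supports (illustrative
 * only; fds assumed open, error handling omitted): userspace first issues
 * SEND_START with session_len = 0 to learn the blob size, then repeats the
 * ioctl with a buffer of the reported size:
 *
 *	struct kvm_sev_send_start ss = {0};
 *	struct kvm_sev_cmd cmd = { .id = KVM_SEV_SEND_START,
 *				   .data = (__u64)(uintptr_t)&ss,
 *				   .sev_fd = sev_fd };
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);  // ss.session_len now set
 *	ss.session_uaddr = (__u64)(uintptr_t)malloc(ss.session_len);
 *	// fill in pdh_cert_*, plat_certs_*, amd_certs_* and call again
 */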

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
							     &params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	/* populate the FW SEND_START field with system physical address */
	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free_amd_cert;
	}

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

	return ret;
}

static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	if (ret)
		goto e_free_trans_data;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free_trans_data;
	}

	/* Copy packet header to userspace. */
	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			 params.hdr_len))
		ret = -EFAULT;

e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_cancel data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}

static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;
	void *session_data;
	void *pdh_data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* Get parameter from the userspace */
	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))
		return -EFAULT;

	/* some sanity checks */
	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)
		return -EINVAL;

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);
		goto e_free_pdh;
	}

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
			      error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data,
			 &params, sizeof(struct kvm_sev_receive_start))) {
		ret = -EFAULT;
		sev_unbind_asid(kvm, start.handle);
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_data);
e_free_pdh:
	kfree(pdh_data);

	return ret;
}

static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_receive_update_data params;
	struct sev_data_receive_update_data data;
	void *hdr = NULL, *trans = NULL;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_update_data)))
		return -EFAULT;

	if (!params.hdr_uaddr || !params.hdr_len ||
	    !params.guest_uaddr || !params.guest_len ||
	    !params.trans_uaddr || !params.trans_len)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
		return -EINVAL;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto e_free_hdr;
	}

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans);
	data.trans_len = params.trans_len;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 1);
	if (IS_ERR(guest_page)) {
		ret = PTR_ERR(guest_page);
		goto e_free_trans;
	}

	/*
	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
	 * encrypts the written data with the guest's key, and the cache may
	 * contain dirty, unencrypted data.
	 */
	sev_clflush_pages(guest_page, n);

	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
			    &argp->error);

	sev_unpin_memory(kvm, guest_page, n);

e_free_trans:
	kfree(trans);
e_free_hdr:
	kfree(hdr);

	return ret;
}

static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}

static bool is_cmd_allowed_from_mirror(u32 cmd_id)
{
	/*
	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
	 * on active mirror VMs. Also allow the debugging and status commands.
	 */
	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
	    cmd_id == KVM_SEV_DBG_ENCRYPT)
		return true;

	return false;
}

static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
	int r = -EBUSY;

	if (dst_kvm == src_kvm)
		return -EINVAL;

	/*
	 * Bail if these VMs are already involved in a migration to avoid
	 * deadlock between two VMs trying to migrate to/from each other.
	 */
	if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
		return -EBUSY;

	if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
		goto release_dst;

	r = -EINTR;
	if (mutex_lock_killable(&dst_kvm->lock))
		goto release_src;
	if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
		goto unlock_dst;
	return 0;

unlock_dst:
	mutex_unlock(&dst_kvm->lock);
release_src:
	atomic_set_release(&src_sev->migration_in_progress, 0);
release_dst:
	atomic_set_release(&dst_sev->migration_in_progress, 0);
	return r;
}

static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;

	mutex_unlock(&dst_kvm->lock);
	mutex_unlock(&src_kvm->lock);
	atomic_set_release(&dst_sev->migration_in_progress, 0);
	atomic_set_release(&src_sev->migration_in_progress, 0);
}

/* vCPU mutex subclasses. */
enum sev_migration_role {
	SEV_MIGRATION_SOURCE = 0,
	SEV_MIGRATION_TARGET,
	SEV_NR_MIGRATION_ROLES,
};

static int sev_lock_vcpus_for_migration(struct kvm *kvm,
					enum sev_migration_role role)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable_nested(&vcpu->mutex, role))
			goto out_unlock;

#ifdef CONFIG_PROVE_LOCKING
		if (!i)
			/*
			 * Reset the role to one that avoids colliding with
			 * the role used for the first vcpu mutex.
			 */
			role = SEV_NR_MIGRATION_ROLES;
		else
			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
#endif
	}

	return 0;

out_unlock:

	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;

#ifdef CONFIG_PROVE_LOCKING
		if (j)
			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
#endif

		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}

static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	bool first = true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (first)
			first = false;
		else
			mutex_acquire(&vcpu->mutex.dep_map,
				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);

		mutex_unlock(&vcpu->mutex);
	}
}

static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
	struct kvm_vcpu *dst_vcpu, *src_vcpu;
	struct vcpu_svm *dst_svm, *src_svm;
	struct kvm_sev_info *mirror;
	unsigned long i;

	dst->active = true;
	dst->asid = src->asid;
	dst->handle = src->handle;
	dst->pages_locked = src->pages_locked;
	dst->enc_context_owner = src->enc_context_owner;
	dst->es_active = src->es_active;

	src->asid = 0;
	src->active = false;
	src->handle = 0;
	src->pages_locked = 0;
	src->enc_context_owner = NULL;
	src->es_active = false;

	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);

	/*
	 * If this VM has mirrors, "transfer" each mirror's refcount of the
	 * source to the destination (this KVM). The caller holds a reference
	 * to the source, so there's no danger of use-after-free.
	 */
	list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
	list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) {
		kvm_get_kvm(dst_kvm);
		kvm_put_kvm(src_kvm);
		mirror->enc_context_owner = dst_kvm;
	}

	/*
	 * If this VM is a mirror, remove the old mirror from the owners list
	 * and add the new mirror to the list.
	 */
	if (is_mirroring_enc_context(dst_kvm)) {
		struct kvm_sev_info *owner_sev_info =
			&to_kvm_svm(dst->enc_context_owner)->sev_info;

		list_del(&src->mirror_entry);
		list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
	}

	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
		dst_svm = to_svm(dst_vcpu);

		sev_init_vmcb(dst_svm);

		if (!dst->es_active)
			continue;

		/*
		 * Note, the source is not required to have the same number of
		 * vCPUs as the destination when migrating a vanilla SEV VM.
		 */
		src_vcpu = kvm_get_vcpu(src_kvm, i);
		src_svm = to_svm(src_vcpu);

		/*
		 * Transfer VMSA and GHCB state to the destination. Nullify and
		 * clear source fields as appropriate, the state now belongs to
		 * the destination.
		 */
		memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
		dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
		dst_vcpu->arch.guest_state_protected = true;

		memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
		src_vcpu->arch.guest_state_protected = false;
	}
}

static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
{
	struct kvm_vcpu *src_vcpu;
	unsigned long i;

	if (!sev_es_guest(src))
		return 0;

	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
		return -EINVAL;

	kvm_for_each_vcpu(i, src_vcpu, src) {
		if (!src_vcpu->arch.guest_state_protected)
			return -EINVAL;
	}

	return 0;
}

int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_info *src_sev, *cg_cleanup_sev;
	struct fd f = fdget(source_fd);
	struct kvm *source_kvm;
	bool charged = false;
	int ret;

	if (!f.file)
		return -EBADF;

	if (!file_is_kvm(f.file)) {
		ret = -EBADF;
		goto out_fput;
	}

	source_kvm = f.file->private_data;
	ret = sev_lock_two_vms(kvm, source_kvm);
	if (ret)
		goto out_fput;

	if (sev_guest(kvm) || !sev_guest(source_kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	src_sev = &to_kvm_svm(source_kvm)->sev_info;

	dst_sev->misc_cg = get_current_misc_cg();
	cg_cleanup_sev = dst_sev;
	if (dst_sev->misc_cg != src_sev->misc_cg) {
		ret = sev_misc_cg_try_charge(dst_sev);
		if (ret)
			goto out_dst_cgroup;
		charged = true;
	}

	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
	if (ret)
		goto out_dst_cgroup;
	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
	if (ret)
		goto out_dst_vcpu;

	ret = sev_check_source_vcpus(kvm, source_kvm);
	if (ret)
		goto out_source_vcpu;

	sev_migrate_from(kvm, source_kvm);
	kvm_vm_dead(source_kvm);
	cg_cleanup_sev = src_sev;
	ret = 0;

out_source_vcpu:
	sev_unlock_vcpus_for_migration(source_kvm);
out_dst_vcpu:
	sev_unlock_vcpus_for_migration(kvm);
out_dst_cgroup:
	/* Operates on the source on success, on the destination on failure. */
	if (charged)
		sev_misc_cg_uncharge(cg_cleanup_sev);
	put_misc_cg(cg_cleanup_sev->misc_cg);
	cg_cleanup_sev->misc_cg = NULL;
out_unlock:
	sev_unlock_two_vms(kvm, source_kvm);
out_fput:
	fdput(f);
	return ret;
}

int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!sev_enabled)
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	/* Only the enc_context_owner handles some memory enc operations. */
	if (is_mirroring_enc_context(kvm) &&
	    !is_cmd_allowed_from_mirror(sev_cmd.id)) {
		r = -EINVAL;
		goto out;
	}

	switch (sev_cmd.id) {
	case KVM_SEV_ES_INIT:
		if (!sev_es_enabled) {
			r = -ENOTTY;
			goto out;
		}
		fallthrough;
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_VMSA:
		r = sev_launch_update_vmsa(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	case KVM_SEV_GET_ATTESTATION_REPORT:
		r = sev_get_attestation_report(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_START:
		r = sev_send_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_UPDATE_DATA:
		r = sev_send_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_FINISH:
		r = sev_send_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_CANCEL:
		r = sev_send_cancel(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_START:
		r = sev_receive_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_UPDATE_DATA:
		r = sev_receive_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_FINISH:
		r = sev_receive_finish(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
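
/*
 * All of the cases above arrive via the KVM_MEMORY_ENCRYPT_OP vm ioctl.
 * A minimal, illustrative status query from userspace (not from this file;
 * fds assumed open):
 *
 *	struct kvm_sev_guest_status st = {0};
 *	struct kvm_sev_cmd cmd = { .id = KVM_SEV_GUEST_STATUS,
 *				   .data = (__u64)(uintptr_t)&st,
 *				   .sev_fd = sev_fd };
 *	if (!ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
 *		printf("policy=%#x state=%u\n", st.policy, st.state);
 *	// cmd.error carries the SEV firmware status code on failure
 */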

int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	mutex_lock(&kvm->lock);
	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		mutex_unlock(&kvm->lock);
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Let's make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * correct C-bit. Note, this must be done before dropping kvm->lock,
	 * as region and its array of pages can be freed by a different task
	 * once kvm->lock is released.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}
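
/*
 * Userspace registers guest RAM before launch via the
 * KVM_MEMORY_ENCRYPT_REG_REGION vm ioctl; a hedged sketch (illustrative
 * only, guest_ram being a hypothetical mmap()ed buffer):
 *
 *	struct kvm_enc_region region = {
 *		.addr = (__u64)(uintptr_t)guest_ram,
 *		.size = guest_ram_size,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
 */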

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{
	struct fd f = fdget(source_fd);
	struct kvm *source_kvm;
	struct kvm_sev_info *source_sev, *mirror_sev;
	int ret;

	if (!f.file)
		return -EBADF;

	if (!file_is_kvm(f.file)) {
		ret = -EBADF;
		goto e_source_fput;
	}

	source_kvm = f.file->private_data;
	ret = sev_lock_two_vms(kvm, source_kvm);
	if (ret)
		goto e_source_fput;

	/*
	 * Mirrors of mirrors should work, but let's not get silly. Also
	 * disallow out-of-band SEV/SEV-ES init if the target is already an
	 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
	 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
	 */
	if (sev_guest(kvm) || !sev_guest(source_kvm) ||
	    is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
		ret = -EINVAL;
		goto e_unlock;
	}

	/*
	 * The mirror kvm holds an enc_context_owner ref so its asid can't
	 * disappear until we're done with it
	 */
	source_sev = &to_kvm_svm(source_kvm)->sev_info;
	kvm_get_kvm(source_kvm);
	mirror_sev = &to_kvm_svm(kvm)->sev_info;
	list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);

	/* Set enc_context_owner and copy its encryption context over */
	mirror_sev->enc_context_owner = source_kvm;
	mirror_sev->active = true;
	mirror_sev->asid = source_sev->asid;
	mirror_sev->fd = source_sev->fd;
	mirror_sev->es_active = source_sev->es_active;
	mirror_sev->handle = source_sev->handle;
	INIT_LIST_HEAD(&mirror_sev->regions_list);
	INIT_LIST_HEAD(&mirror_sev->mirror_vms);
	ret = 0;

	/*
	 * Do not copy ap_jump_table. Since the mirror does not share the same
	 * KVM contexts as the original, and they may have different
	 * memory-views.
	 */

e_unlock:
	sev_unlock_two_vms(kvm, source_kvm);
e_source_fput:
	fdput(f);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	WARN_ON(!list_empty(&sev->mirror_vms));

	/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
	if (is_mirroring_enc_context(kvm)) {
		struct kvm *owner_kvm = sev->enc_context_owner;

		mutex_lock(&owner_kvm->lock);
		list_del(&sev->mirror_entry);
		mutex_unlock(&owner_kvm->lock);
		kvm_put_kvm(owner_kvm);
		return;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then let's unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev);
}

void __init sev_set_cpu_caps(void)
{
	if (!sev_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV);
	if (!sev_es_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
}

void __init sev_hardware_setup(void)
{
#ifdef CONFIG_KVM_AMD_SEV
	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
	bool sev_es_supported = false;
	bool sev_supported = false;

	if (!sev_enabled || !npt_enabled || !nrips)
		goto out;

	/*
	 * SEV must obviously be supported in hardware. Sanity check that the
	 * CPU supports decode assists, which is mandatory for SEV guests to
	 * support instruction emulation. Ditto for flushing by ASID, as SEV
	 * guests are bound to a single ASID, i.e. KVM can't rotate to a new
	 * ASID to effect a TLB flush.
	 */
	if (!boot_cpu_has(X86_FEATURE_SEV) ||
	    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) ||
	    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_FLUSHBYASID)))
		goto out;

	/* Retrieve SEV CPUID information */
	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* Set encryption bit location for SEV-ES guests */
	sev_enc_bit = ebx & 0x3f;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = ecx;
	if (!max_sev_asid)
		goto out;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = edx;
	sev_me_mask = 1UL << (ebx & 0x3f);
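
	/*
	 * Illustration of the CPUID 0x8000001F decode above (hypothetical
	 * values): EBX[5:0] = 51 puts the C-bit at physical address bit 51,
	 * so sev_me_mask = 1UL << 51; ECX = 509 caps max_sev_asid; EDX = 100
	 * means ASIDs 1..99 are reserved for SEV-ES and 100..509 serve plain
	 * SEV guests.
	 */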

	/*
	 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
	 * even though it's never used, so that the bitmap is indexed by the
	 * actual ASID.
	 */
	nr_asids = max_sev_asid + 1;
	sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_asid_bitmap)
		goto out;

	sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap) {
		bitmap_free(sev_asid_bitmap);
		sev_asid_bitmap = NULL;
		goto out;
	}

	sev_asid_count = max_sev_asid - min_sev_asid + 1;
	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
	sev_supported = true;

	/* SEV-ES support requested? */
	if (!sev_es_enabled)
		goto out;

	/*
	 * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
	 * instruction stream, i.e. can't emulate in response to a #NPF and
	 * instead relies on #NPF(RSVD) being reflected into the guest as #VC
	 * (the guest can then do a #VMGEXIT to request MMIO emulation).
	 */
	if (!enable_mmio_caching)
		goto out;

	/* Does the CPU support SEV-ES? */
	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
		goto out;

	/* Has the system been allocated ASIDs for SEV-ES? */
	if (min_sev_asid == 1)
		goto out;

	sev_es_asid_count = min_sev_asid - 1;
	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
	sev_es_supported = true;

out:
	if (boot_cpu_has(X86_FEATURE_SEV))
		pr_info("SEV %s (ASIDs %u - %u)\n",
			sev_supported ? "enabled" : "disabled",
			min_sev_asid, max_sev_asid);
	if (boot_cpu_has(X86_FEATURE_SEV_ES))
		pr_info("SEV-ES %s (ASIDs %u - %u)\n",
			sev_es_supported ? "enabled" : "disabled",
			min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);

	sev_enabled = sev_supported;
	sev_es_enabled = sev_es_supported;
	if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
	    !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
		sev_es_debug_swap_enabled = false;
#endif
}

void sev_hardware_unsetup(void)
{
	if (!sev_enabled)
		return;

	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
	sev_flush_asids(1, max_sev_asid);

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
	misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
}

int sev_cpu_init(struct svm_cpu_data *sd)
{
	if (!sev_enabled)
		return 0;

	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
	if (!sd->sev_vmcbs)
		return -ENOMEM;

	return 0;
}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
	int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;

	/*
	 * Note! The address must be a kernel address, as regular page walk
	 * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
	 * address is non-deterministic and unsafe. This function deliberately
	 * takes a pointer to deter passing in a user address.
	 */
	unsigned long addr = (unsigned long)va;

	/*
	 * If CPU enforced cache coherency for encrypted mappings of the
	 * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
	 * flush is still needed in order to work properly with DMA devices.
	 */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
		clflush_cache_range(va, PAGE_SIZE);
		return;
	}

	/*
	 * VM Page Flush takes a host virtual address and a guest ASID. Fall
	 * back to WBINVD if this faults so as not to make any problems worse
	 * by leaving stale encrypted data in the cache.
	 */
	if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
		goto do_wbinvd;

	return;

do_wbinvd:
	wbinvd_on_all_cpus();
}
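
/*
 * Note on the "addr | asid" composition above: MSR_AMD64_VM_PAGE_FLUSH
 * encodes the guest ASID in its low bits and the 4KB-aligned virtual address
 * in the upper bits, so OR-ing a page-aligned kernel address with a small
 * ASID yields a well-formed MSR value.
 */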

void sev_guest_memory_reclaimed(struct kvm *kvm)
{
	if (!sev_guest(kvm))
		return;

	wbinvd_on_all_cpus();
}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;

	if (!sev_es_guest(vcpu->kvm))
		return;

	svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);

	__free_page(virt_to_page(svm->sev_es.vmsa));

	if (svm->sev_es.ghcb_sa_free)
		kvfree(svm->sev_es.ghcb_sa);
}

static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->sev_es.ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->sev_es.ghcb;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be returned:
	 *   GPRs RAX, RBX, RCX, RDX
	 *
	 * Copy their values, even if they may not have been written during the
	 * VM-Exit. It's the guest's responsibility to not consume random data.
	 */
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}
2427 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
2429 struct vmcb_control_area *control = &svm->vmcb->control;
2430 struct kvm_vcpu *vcpu = &svm->vcpu;
2431 struct ghcb *ghcb = svm->sev_es.ghcb;
2435 * The GHCB protocol so far allows for the following data
2437 * GPRs RAX, RBX, RCX, RDX
2441 * VMMCALL allows the guest to provide extra registers. KVM also
2442 * expects RSI for hypercalls, so include that, too.
2444 * Copy their values to the appropriate location if supplied.
2446 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));

	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);

	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);

	if (kvm_ghcb_xcr0_is_valid(svm)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
		kvm_update_cpuid_runtime(vcpu);
	}

	/* Copy the GHCB exit information into the VMCB fields */
	exit_code = ghcb_get_sw_exit_code(ghcb);
	control->exit_code = lower_32_bits(exit_code);
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);

	/* Clear the valid entries fields */
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
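/*
 * Reassemble the 64-bit SW_EXITCODE from the two 32-bit VMCB fields that
 * sev_es_sync_from_ghcb() split it into.
 */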
static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u64 exit_code;
	u64 reason = GHCB_ERR_NOT_SUPPORTED;

	/*
	 * Retrieve the exit code now even though it may not be marked valid
	 * as it could help with debugging.
	 */
	exit_code = kvm_ghcb_get_sw_exit_code(control);

	/* Only GHCB Usage code 0 is supported */
	if (svm->sev_es.ghcb->ghcb_usage) {
		reason = GHCB_ERR_INVALID_USAGE;
		goto vmgexit_err;
	}

	reason = GHCB_ERR_MISSING_INPUT;

	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
		goto vmgexit_err;
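	/*
	 * Each NAE event below requires a specific set of GHCB fields to have
	 * been marked valid by the guest; missing input is reported back via
	 * the GHCB error protocol rather than terminating the guest.
	 */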
	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		break;
	case SVM_EXIT_WRITE_DR7:
		if (!kvm_ghcb_rax_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSC:
		break;
	case SVM_EXIT_RDPMC:
		if (!kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_CPUID:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
			if (!kvm_ghcb_xcr0_is_valid(svm))
				goto vmgexit_err;
		break;
	case SVM_EXIT_INVD:
		break;
	case SVM_EXIT_IOIO:
		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
			if (!kvm_ghcb_sw_scratch_is_valid(svm))
				goto vmgexit_err;
		} else {
			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
				if (!kvm_ghcb_rax_is_valid(svm))
					goto vmgexit_err;
		}
		break;
	case SVM_EXIT_MSR:
		if (!kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		if (control->exit_info_1) {
			if (!kvm_ghcb_rax_is_valid(svm) ||
			    !kvm_ghcb_rdx_is_valid(svm))
				goto vmgexit_err;
		}
		break;
	case SVM_EXIT_VMMCALL:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_cpl_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSCP:
		break;
	case SVM_EXIT_WBINVD:
		break;
	case SVM_EXIT_MONITOR:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm) ||
		    !kvm_ghcb_rdx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_MWAIT:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!kvm_ghcb_sw_scratch_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
	case SVM_VMGEXIT_AP_HLT_LOOP:
	case SVM_VMGEXIT_AP_JUMP_TABLE:
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		break;
	default:
		reason = GHCB_ERR_INVALID_EVENT;
		goto vmgexit_err;
	}

	return 0;

vmgexit_err:
	if (reason == GHCB_ERR_INVALID_USAGE) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
			    svm->sev_es.ghcb->ghcb_usage);
	} else if (reason == GHCB_ERR_INVALID_EVENT) {
		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
			    exit_code);
	} else {
		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
			    exit_code);
		dump_ghcb(svm);
	}

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);

	/* Resume the guest to "return" the error code. */
	return 1;
}
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
	if (!svm->sev_es.ghcb)
		return;

	if (svm->sev_es.ghcb_sa_free) {
		/*
		 * The scratch area lives outside the GHCB, so there is a
		 * buffer that, depending on the operation performed, may
		 * need to be synced, then freed.
		 */
		if (svm->sev_es.ghcb_sa_sync) {
			kvm_write_guest(svm->vcpu.kvm,
					svm->sev_es.sw_scratch,
					svm->sev_es.ghcb_sa,
					svm->sev_es.ghcb_sa_len);
			svm->sev_es.ghcb_sa_sync = false;
		}

		kvfree(svm->sev_es.ghcb_sa);
		svm->sev_es.ghcb_sa = NULL;
		svm->sev_es.ghcb_sa_free = false;
	}

	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);

	sev_es_sync_to_ghcb(svm);

	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
	svm->sev_es.ghcb = NULL;
}
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}
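/*
 * Bound the size of guest-supplied scratch areas that must be bounced through
 * a kernel buffer: "len" below comes from the guest-writable GHCB, so cap
 * what a misbehaving guest can make KVM allocate.
 */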
#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;
	void *scratch_va;

	scratch_gpa_beg = svm->sev_es.sw_scratch;
	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");
		goto e_scratch;
	}

	scratch_gpa_end = scratch_gpa_beg + len;
	if (scratch_gpa_end < scratch_gpa_beg) {
		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
		       len, scratch_gpa_beg);
		goto e_scratch;
	}

	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
		/* Scratch area begins within GHCB */
		ghcb_scratch_beg = control->ghcb_gpa +
				   offsetof(struct ghcb, shared_buffer);
		ghcb_scratch_end = control->ghcb_gpa +
				   offsetof(struct ghcb, reserved_0xff0);

		/*
		 * If the scratch area begins within the GHCB, it must be
		 * completely contained in the GHCB shared buffer area.
		 */
		if (scratch_gpa_beg < ghcb_scratch_beg ||
		    scratch_gpa_end > ghcb_scratch_end) {
			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
			       scratch_gpa_beg, scratch_gpa_end);
			goto e_scratch;
		}

		scratch_va = (void *)svm->sev_es.ghcb;
		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
	} else {
		/*
		 * The guest memory must be read into a kernel buffer, so
		 * limit the size
		 */
		if (len > GHCB_SCRATCH_AREA_LIMIT) {
			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
			       len, GHCB_SCRATCH_AREA_LIMIT);
			goto e_scratch;
		}
		scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
		if (!scratch_va)
			return -ENOMEM;

		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
			/* Unable to copy scratch area from guest */
			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");

			kvfree(scratch_va);
			return -EFAULT;
		}

		/*
		 * The scratch area is outside the GHCB. The operation will
		 * dictate whether the buffer needs to be synced before running
		 * the vCPU next time (i.e. a read was requested so the data
		 * must be written back to the guest memory).
		 */
		svm->sev_es.ghcb_sa_sync = sync;
		svm->sev_es.ghcb_sa_free = true;
	}

	svm->sev_es.ghcb_sa = scratch_va;
	svm->sev_es.ghcb_sa_len = len;

	return 0;

e_scratch:
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);

	return 1;
}
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
			      unsigned int pos)
{
	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{
	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{
	svm->vmcb->control.ghcb_gpa = value;
}
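/*
 * Handle the GHCB "MSR protocol": requests small enough to be encoded
 * directly in the GHCB MSR, e.g. SEV info, CPUID and termination requests
 * issued before the guest has registered a full GHCB page.
 */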
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u64 ghcb_info;
	int ret = 1;

	ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;

	trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
					     control->ghcb_gpa);

	switch (ghcb_info) {
	case GHCB_MSR_SEV_INFO_REQ:
		set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
						    GHCB_VERSION_MIN,
						    sev_enc_bit));
		break;
	case GHCB_MSR_CPUID_REQ: {
		u64 cpuid_fn, cpuid_reg, cpuid_value;

		cpuid_fn = get_ghcb_msr_bits(svm,
					     GHCB_MSR_CPUID_FUNC_MASK,
					     GHCB_MSR_CPUID_FUNC_POS);

		/* Initialize the registers needed by the CPUID intercept */
		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
		vcpu->arch.regs[VCPU_REGS_RCX] = 0;

		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
		if (!ret) {
			/* Error, keep GHCB MSR value as-is */
			break;
		}

		cpuid_reg = get_ghcb_msr_bits(svm,
					      GHCB_MSR_CPUID_REG_MASK,
					      GHCB_MSR_CPUID_REG_POS);
		if (cpuid_reg == 0)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
		else if (cpuid_reg == 1)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
		else if (cpuid_reg == 2)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
		else
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];

		set_ghcb_msr_bits(svm, cpuid_value,
				  GHCB_MSR_CPUID_VALUE_MASK,
				  GHCB_MSR_CPUID_VALUE_POS);

		set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
				  GHCB_MSR_INFO_MASK,
				  GHCB_MSR_INFO_POS);
		break;
	}
	case GHCB_MSR_TERM_REQ: {
		u64 reason_set, reason_code;

		reason_set = get_ghcb_msr_bits(svm,
					       GHCB_MSR_TERM_REASON_SET_MASK,
					       GHCB_MSR_TERM_REASON_SET_POS);
		reason_code = get_ghcb_msr_bits(svm,
						GHCB_MSR_TERM_REASON_MASK,
						GHCB_MSR_TERM_REASON_POS);
		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
			reason_set, reason_code);

		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
		vcpu->run->system_event.ndata = 1;
		vcpu->run->system_event.data[0] = control->ghcb_gpa;

		return 0;
	}
	default:
		/* Error, keep GHCB MSR value as-is */
		break;
	}

	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
					    control->ghcb_gpa, ret);

	return ret;
}
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;
	int ret;

	/* Validate the GHCB */
	ghcb_gpa = control->ghcb_gpa;
	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
		return sev_handle_vmgexit_msr_protocol(svm);

	if (!ghcb_gpa) {
		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");

		/* Without a GHCB, just return right back to the guest */
		return 1;
	}

	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
		/* Unable to map GHCB from guest */
		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
			    ghcb_gpa);

		/* Without a GHCB, just return right back to the guest */
		return 1;
	}

	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;

	trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);

	sev_es_sync_from_ghcb(svm);
	ret = sev_es_validate_vmgexit(svm);
	if (ret)
		return ret;
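	/*
	 * Report success by default; the individual event handlers below
	 * overwrite these fields on failure.
	 */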
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);

	exit_code = kvm_ghcb_get_sw_exit_code(control);
	switch (exit_code) {
	case SVM_VMGEXIT_MMIO_READ:
		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
		if (ret)
			break;

		ret = kvm_sev_es_mmio_read(vcpu,
					   control->exit_info_1,
					   control->exit_info_2,
					   svm->sev_es.ghcb_sa);
		break;
	case SVM_VMGEXIT_MMIO_WRITE:
		ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
		if (ret)
			break;

		ret = kvm_sev_es_mmio_write(vcpu,
					    control->exit_info_1,
					    control->exit_info_2,
					    svm->sev_es.ghcb_sa);
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
		++vcpu->stat.nmi_window_exits;
		svm->nmi_masked = false;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		ret = 1;
		break;
	case SVM_VMGEXIT_AP_HLT_LOOP:
		ret = kvm_emulate_ap_reset_hold(vcpu);
		break;
	case SVM_VMGEXIT_AP_JUMP_TABLE: {
		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;

		switch (control->exit_info_1) {
		case 0:
			/* Set AP jump table address */
			sev->ap_jump_table = control->exit_info_2;
			break;
		case 1:
			/* Get AP jump table address */
			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
			break;
		default:
			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
			       control->exit_info_1);
			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
		}

		ret = 1;
		break;
	}
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		vcpu_unimpl(vcpu,
			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
			    control->exit_info_1, control->exit_info_2);
		ret = -EINVAL;
		break;
	default:
		ret = svm_invoke_exit_handler(vcpu, exit_code);
	}

	return ret;
}
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{
	int count, bytes, r;

	if (svm->vmcb->control.exit_info_2 > INT_MAX)
		return -EINVAL;

	count = svm->vmcb->control.exit_info_2;
	if (unlikely(check_mul_overflow(count, size, &bytes)))
		return -EINVAL;

	r = setup_vmgexit_scratch(svm, in, bytes);
	if (r)
		return r;

	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
				    count, in);
}
static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
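	/*
	 * If hardware virtualizes TSC_AUX for SEV-ES (V_TSC_AUX), the guest's
	 * TSC_AUX is swapped as part of the encrypted guest state, so
	 * MSR_TSC_AUX can be passed through whenever the guest can actually
	 * consume it, i.e. when it has RDTSCP or RDPID.
	 */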
	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
		bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
				 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);

		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
	}
	/*
	 * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
	 * the host/guest supports its use.
	 *
	 * guest_can_use() checks a number of requirements on the host/guest to
	 * ensure that MSR_IA32_XSS is available, but it might report true even
	 * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
	 * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
	 * to further check that the guest CPUID actually supports
	 * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
	 * guests will still get intercepted and caught in the normal
	 * kvm_emulate_rdmsr()/kvm_emulate_wrmsr() paths.
	 */
	if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
	else
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
}
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct kvm_cpuid_entry2 *best;

	/* For SEV guests, the memory encryption bit is not reserved in CR3. */
	best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
	if (best)
		vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));

	if (sev_es_guest(svm->vcpu.kvm))
		sev_es_vcpu_after_set_cpuid(svm);
}
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;
	struct kvm_vcpu *vcpu = &svm->vcpu;
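	/*
	 * SEV-ES guests require LBR virtualization to be enabled along with
	 * the SEV-ES enable bit; the LBR-related MSRs are saved/restored as
	 * part of the encrypted guest state rather than by KVM.
	 */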
	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	/*
	 * An SEV-ES guest requires a VMSA area that is separate from the
	 * VMCB page. Do not include the encryption mask on the VMSA physical
	 * address since hardware will access it using the guest key. Note,
	 * the VMSA will be NULL if this vCPU is the destination for intrahost
	 * migration, and will be copied later.
	 */
	if (svm->sev_es.vmsa)
		svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
	/* Can't intercept CR register access, HV can't modify CR registers */
	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
	svm_clr_intercept(svm, INTERCEPT_CR4_READ);
	svm_clr_intercept(svm, INTERCEPT_CR8_READ);
	svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);

	/* Track EFER/CR register changes */
	svm_set_intercept(svm, TRAP_EFER_WRITE);
	svm_set_intercept(svm, TRAP_CR0_WRITE);
	svm_set_intercept(svm, TRAP_CR4_WRITE);
	svm_set_intercept(svm, TRAP_CR8_WRITE);
	vmcb->control.intercepts[INTERCEPT_DR] = 0;
	if (!sev_es_debug_swap_enabled) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
		recalc_intercepts(svm);
	} else {
		/*
		 * Disable #DB intercept iff DebugSwap is enabled. KVM doesn't
		 * allow debugging SEV-ES guests, and enables DebugSwap iff
		 * NO_NESTED_DATA_BP is supported, so there's no reason to
		 * intercept #DB when DebugSwap is enabled. For simplicity
		 * with respect to guest debug, intercept #DB for other VMs
		 * even if NO_NESTED_DATA_BP is supported, i.e. even if the
		 * guest can't DoS the CPU with infinite #DB vectoring.
		 */
		clr_exception_intercept(svm, DB_VECTOR);
	}
	/* Can't intercept XSETBV, HV can't modify XCR0 directly */
	svm_clr_intercept(svm, INTERCEPT_XSETBV);

	/* Clear intercepts on selected MSRs */
	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
void sev_init_vmcb(struct vcpu_svm *svm)
{
	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
	clr_exception_intercept(svm, UD_VECTOR);

	/*
	 * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
	 * KVM can't decrypt guest memory to decode the faulting instruction.
	 */
	clr_exception_intercept(svm, GP_VECTOR);

	if (sev_es_guest(svm->vcpu.kvm))
		sev_es_init_vmcb(svm);
}
void sev_es_vcpu_reset(struct vcpu_svm *svm)
{
	/*
	 * Set the GHCB MSR value as per the GHCB specification when emulating
	 * vCPU RESET for an SEV-ES guest.
	 */
	set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
					    GHCB_VERSION_MIN,
					    sev_enc_bit));
}
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
{
	/*
	 * All host state for SEV-ES guests is categorized into three swap types
	 * based on how it is handled by hardware during a world switch:
	 *
	 * A: VMRUN:   Host state saved in the host save area
	 *    VMEXIT:  Host state loaded from the host save area
	 *
	 * B: VMRUN:   Host state _NOT_ saved in the host save area
	 *    VMEXIT:  Host state loaded from the host save area
	 *
	 * C: VMRUN:   Host state _NOT_ saved in the host save area
	 *    VMEXIT:  Host state initialized to default(reset) values
	 *
	 * Manually save type-B state, i.e. state that is loaded by VMEXIT but
	 * isn't saved by VMRUN, that isn't already saved by VMSAVE (performed
	 * by common SVM code).
	 */
	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	hostsa->pkru = read_pkru();
	hostsa->xss = host_xss;
	/*
	 * If DebugSwap is enabled, debug registers are loaded but NOT saved by
	 * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
	 * saves and loads debug registers (Type-A).
	 */
	if (sev_es_debug_swap_enabled) {
		hostsa->dr0 = native_get_debugreg(0);
		hostsa->dr1 = native_get_debugreg(1);
		hostsa->dr2 = native_get_debugreg(2);
		hostsa->dr3 = native_get_debugreg(3);
		hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
		hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
		hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
		hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
	}
}
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* First SIPI: Use the values as initially set by the VMM */
	if (!svm->sev_es.received_first_sipi) {
		svm->sev_es.received_first_sipi = true;
		return;
	}

	/*
	 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
	 * non-zero value.
	 */
	if (!svm->sev_es.ghcb)
		return;

	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
{
	unsigned long pfn;
	struct page *p;

	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
		return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	/*
	 * Allocate an SNP-safe page to workaround the SNP erratum where
	 * the CPU will incorrectly signal an RMP violation #PF if a
	 * hugepage (2MB or 1GB) collides with the RMP entry of a
	 * 2MB-aligned VMCB, VMSA, or AVIC backing page.
	 *
	 * Allocate one extra page, choose a page which is not
	 * 2MB-aligned, and free the other.
	 */
	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	split_page(p, 1);
	pfn = page_to_pfn(p);
	if (IS_ALIGNED(pfn, PTRS_PER_PMD))
		__free_page(p++);
	else
		__free_page(p + 1);

	return p;
}