KVM: selftests: Play nice with huge pages when getting PTEs/GPAs
author Sean Christopherson <seanjc@google.com>
Thu, 6 Oct 2022 00:45:12 +0000 (00:45 +0000)
committer Sean Christopherson <seanjc@google.com>
Thu, 17 Nov 2022 00:58:56 +0000 (16:58 -0800)
Play nice with huge pages when getting PTEs and translating GVAs to GPAs;
there's no reason to disallow using huge pages in selftests.  Use
PG_LEVEL_NONE to indicate that the caller doesn't care about the mapping
level and just wants to get the pte+level.
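
A minimal usage sketch of the two helpers after this change (illustrative
only, not part of the patch; "vm" and "gva" below are placeholders for a
test's VM handle and guest virtual address):

	/*
	 * Sketch only: fetch the leaf PTE for 'gva' without asserting a
	 * particular mapping level; 'level' is filled in with the level at
	 * which the leaf PTE was found.
	 */
	int level = PG_LEVEL_NONE;
	uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);

	/* The original helper still asserts a 4-KiB mapping. */
	uint64_t *pte_4k = vm_get_page_table_entry(vm, gva);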

Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006004512.666529-8-seanjc@google.com
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/x86_64/processor.c

index 9676a346475801197cf9c144c3801ea06f76d621..e000e35c948fa10e9aa7e779f3001567b6c0c575 100644
@@ -172,11 +172,16 @@ struct kvm_x86_cpu_feature {
 #define PTE_GLOBAL_MASK         BIT_ULL(8)
 #define PTE_NX_MASK             BIT_ULL(63)
 
+#define PHYSICAL_PAGE_MASK      GENMASK_ULL(51, 12)
+
 #define PAGE_SHIFT             12
 #define PAGE_SIZE              (1ULL << PAGE_SHIFT)
-#define PAGE_MASK              (~(PAGE_SIZE-1))
+#define PAGE_MASK              (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)
+
+#define HUGEPAGE_SHIFT(x)      (PAGE_SHIFT + (((x) - 1) * 9))
+#define HUGEPAGE_SIZE(x)       (1UL << HUGEPAGE_SHIFT(x))
+#define HUGEPAGE_MASK(x)       (~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)
 
-#define PHYSICAL_PAGE_MASK      GENMASK_ULL(51, 12)
 #define PTE_GET_PA(pte)                ((pte) & PHYSICAL_PAGE_MASK)
 #define PTE_GET_PFN(pte)        (PTE_GET_PA(pte) >> PAGE_SHIFT)
 
@@ -828,6 +833,8 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
 
 bool kvm_is_tdp_enabled(void);
 
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+                                   int *level);
 uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
 
 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
index 053f64191122b8e171bf9f02b2158e8be2974d71..efa20d0f99271c4ca760180c261b868aae6e785d 100644
@@ -245,10 +245,26 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
        }
 }
 
-uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
+static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
+{
+       if (*pte & PTE_LARGE_MASK) {
+               TEST_ASSERT(*level == PG_LEVEL_NONE ||
+                           *level == current_level,
+                           "Unexpected hugepage at level %d\n", current_level);
+               *level = current_level;
+       }
+
+       return *level == current_level;
+}
+
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+                                   int *level)
 {
        uint64_t *pml4e, *pdpe, *pde;
 
+       TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
+                   "Invalid PG_LEVEL_* '%d'", *level);
+
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
@@ -263,18 +279,27 @@ uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
                "Canonical check failed.  The virtual address is invalid.");
 
        pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
+       if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
+               return pml4e;
 
        pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
-       TEST_ASSERT(!(*pdpe & PTE_LARGE_MASK),
-               "Expected pdpe to map a pde not a 1-GByte page.");
+       if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
+               return pdpe;
 
        pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
-       TEST_ASSERT(!(*pde & PTE_LARGE_MASK),
-               "Expected pde to map a pte not a 2-MByte page.");
+       if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
+               return pde;
 
        return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
 }
 
+uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
+{
+       int level = PG_LEVEL_4K;
+
+       return __vm_get_page_table_entry(vm, vaddr, &level);
+}
+
 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
        uint64_t *pml4e, *pml4e_start;
@@ -458,11 +483,17 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
 
 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
-       uint64_t *pte = vm_get_page_table_entry(vm, gva);
+       int level = PG_LEVEL_NONE;
+       uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
 
        TEST_ASSERT(*pte & PTE_PRESENT_MASK,
                    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
-       return PTE_GET_PA(*pte) | (gva & ~PAGE_MASK);
+
+       /*
+        * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
+        * address bits to be zero.
+        */
+       return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
 }
 
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
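
For reference, a quick sanity sketch of the new hugepage macros and how
addr_arch_gva2gpa() uses them (illustrative only, not part of the patch;
it assumes the selftests' pg_level enum values PG_LEVEL_4K == 1,
PG_LEVEL_2M == 2, PG_LEVEL_1G == 3):

	/*
	 * Illustrative arithmetic, assuming the pg_level values above:
	 *   HUGEPAGE_SHIFT(PG_LEVEL_4K) = 12 + 0 * 9 = 12  ->  4 KiB
	 *   HUGEPAGE_SHIFT(PG_LEVEL_2M) = 12 + 1 * 9 = 21  ->  2 MiB
	 *   HUGEPAGE_SHIFT(PG_LEVEL_1G) = 12 + 2 * 9 = 30  ->  1 GiB
	 *
	 * addr_arch_gva2gpa() then keeps the low HUGEPAGE_SHIFT(level) bits
	 * of the GVA as the offset into the (huge)page, e.g. for a 2-MiB
	 * mapping:
	 */
	vm_paddr_t gpa = PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(PG_LEVEL_2M));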