tools/testing/selftests/kvm/lib/aarch64/processor.c
// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include <linux/bitfield.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

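/* Round v up to the VM's page size. */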
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
        return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

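/*
 * Index of the GVA's entry at each translation level. Descriptors are
 * eight bytes, so each level below the top resolves (page_shift - 3)
 * bits of the VA; the top level absorbs whatever va_bits remain.
 */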
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

        return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        TEST_ASSERT(vm->pgtable_levels == 4,
                "Mode %d does not have 4 page table levels", vm->mode);

        return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        TEST_ASSERT(vm->pgtable_levels >= 3,
                "Mode %d does not have >= 3 page table levels", vm->mode);

        return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
        return (gva >> vm->page_shift) & mask;
}

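/*
 * Build/parse a page table descriptor. With the 64K granule the output
 * address can be 52 bits wide, in which case PA[51:48] live in
 * descriptor bits [15:12] rather than extending the PA field upward.
 */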
static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
        uint64_t pte;

        pte = pa & GENMASK(47, vm->page_shift);
        if (vm->page_shift == 16)
                pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
        pte |= attrs;

        return pte;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
{
        uint64_t pa;

        pa = pte & GENMASK(47, vm->page_shift);
        if (vm->page_shift == 16)
                pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;

        return pa;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
        unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
        return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
        return 1 << (vm->page_shift - 3);
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
        size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

        if (vm->pgd_created)
                return;

        vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
                                     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
                                     vm->memslots[MEM_REGION_PT]);
        vm->pgd_created = true;
}

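/*
 * Map one page: walk from the PGD, allocating intermediate table pages
 * as needed (descriptor value 3 = valid table), then install the leaf
 * descriptor with AttrIndx, the Access Flag, and the valid/page bits.
 */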
static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                         uint64_t flags)
{
        uint8_t attr_idx = flags & 7;
        uint64_t *ptep;

        TEST_ASSERT((vaddr % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (vaddr >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx", vaddr);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
        if (!*ptep)
                *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);

        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
                if (!*ptep)
                        *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
                if (!*ptep)
                        *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
                break;
        default:
                TEST_FAIL("Page table levels must be 2, 3, or 4");
        }

        *ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3); /* AttrIndx | AF | valid page */
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
        uint64_t attr_idx = MT_NORMAL;

        _virt_pg_map(vm, vaddr, paddr, attr_idx);
}

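/*
 * Return a host pointer to the leaf descriptor mapping @gva, failing
 * the test if any level of the walk is still unallocated.
 */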
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t *ptep;

        if (!vm->pgd_created)
                goto unmapped_gva;

        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
        if (!*ptep)
                goto unmapped_gva;

        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
                if (!*ptep)
                        goto unmapped_gva;
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
                if (!*ptep)
                        goto unmapped_gva;
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
                if (!*ptep)
                        goto unmapped_gva;
                break;
        default:
                TEST_FAIL("Page table levels must be 2, 3, or 4");
        }

        return ptep;

unmapped_gva:
        TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
        exit(EXIT_FAILURE);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t *ptep = virt_get_pte_hva(vm, gva);

        return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
        static const char * const type[] = { "", "pud", "pmd", "pte" };
        uint64_t pte, *ptep;

        if (level == 4)
                return;

        for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
                ptep = addr_gpa2hva(vm, pte);
                if (!*ptep)
                        continue;
                fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
                pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
        }
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        int level = 4 - (vm->pgtable_levels - 1);
        uint64_t pgd, *ptep;

        if (!vm->pgd_created)
                return;

        for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
                ptep = addr_gpa2hva(vm, pgd);
                if (!*ptep)
                        continue;
                fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
                pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
        }
}

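/*
 * Initialize a vCPU for stage-1 translation: pick the preferred target
 * if none was given, enable FP/ASIMD, then program SCTLR_EL1, TCR_EL1,
 * MAIR_EL1 and TTBR0_EL1 to match the VM's granule, VA and IPA sizes.
 */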
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
        struct kvm_vcpu_init default_init = { .target = -1, };
        struct kvm_vm *vm = vcpu->vm;
        uint64_t sctlr_el1, tcr_el1, ttbr0_el1;

        if (!init)
                init = &default_init;

        if (init->target == -1) {
                struct kvm_vcpu_init preferred;
                vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
                init->target = preferred.target;
        }

        vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

        /*
         * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
         * registers, which the variable argument list macros do.
         */
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

        vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
        vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

        /* Configure base granule size */
        switch (vm->mode) {
        case VM_MODE_P52V48_4K:
                TEST_FAIL("AArch64 does not support 4K sized pages "
                          "with 52-bit physical address ranges");
        case VM_MODE_PXXV48_4K:
                TEST_FAIL("AArch64 does not support 4K sized pages "
                          "with ANY-bit physical address ranges");
        case VM_MODE_P52V48_64K:
        case VM_MODE_P48V48_64K:
        case VM_MODE_P40V48_64K:
        case VM_MODE_P36V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                break;
        case VM_MODE_P48V48_16K:
        case VM_MODE_P40V48_16K:
        case VM_MODE_P36V48_16K:
        case VM_MODE_P36V47_16K:
                tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
                break;
        case VM_MODE_P48V48_4K:
        case VM_MODE_P40V48_4K:
        case VM_MODE_P36V48_4K:
                tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
                break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }

        ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);

        /* Configure output size */
        switch (vm->mode) {
        case VM_MODE_P52V48_64K:
                tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
                ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
                break;
        case VM_MODE_P48V48_4K:
        case VM_MODE_P48V48_16K:
        case VM_MODE_P48V48_64K:
                tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
                break;
        case VM_MODE_P40V48_4K:
        case VM_MODE_P40V48_16K:
        case VM_MODE_P40V48_64K:
                tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
                break;
        case VM_MODE_P36V48_4K:
        case VM_MODE_P36V48_16K:
        case VM_MODE_P36V48_64K:
        case VM_MODE_P36V47_16K:
                tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
                break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }

        sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12); /* M | C | I */
        /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
        tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
        tcr_el1 |= (64 - vm->va_bits); /* T0SZ */

        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
        uint64_t pstate, pc;

        vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
        vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);

        fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
                indent, "", pstate, pc);
}

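/*
 * Create a vCPU, give it a stack in guest memory (SP starts at the top
 * of the allocation and grows down) and point PC at guest_code.
 */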
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                  struct kvm_vcpu_init *init, void *guest_code)
{
        size_t stack_size;
        uint64_t stack_vaddr;
        struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

        stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
                                             vm->page_size;
        stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
                                       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
                                       MEM_REGION_DATA);

        aarch64_vcpu_setup(vcpu, init);

        vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
        vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

        return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                  void *guest_code)
{
        return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
        va_list ap;
        int i;

        TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
                    "  num: %u\n", num);

        va_start(ap, num);

        for (i = 0; i < num; i++) {
                vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
                             va_arg(ap, uint64_t));
        }

        va_end(ap);
}

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
        ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
        while (1)
                ;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
        struct ucall uc;

        if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
                return;

        if (uc.args[2]) /* valid_ec */ {
                assert(VECTOR_IS_SYNC(uc.args[0]));
                TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
                          uc.args[0], uc.args[1]);
        } else {
                assert(!VECTOR_IS_SYNC(uc.args[0]));
                TEST_FAIL("Unexpected exception (vector:0x%lx)",
                          uc.args[0]);
        }
}

struct handlers {
        handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
        extern char vectors;

        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

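/*
 * Guest-side exception dispatch: synchronous vectors are demultiplexed
 * by the exception class read from ESR_EL1, everything else uses EC 0.
 * Falls back to an UNHANDLED ucall when no handler is registered.
 */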
void route_exception(struct ex_regs *regs, int vector)
{
        struct handlers *handlers = (struct handlers *)exception_handlers;
        bool valid_ec;
        int ec = 0;

        switch (vector) {
        case VECTOR_SYNC_CURRENT:
        case VECTOR_SYNC_LOWER_64:
                ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
                valid_ec = true;
                break;
        case VECTOR_IRQ_CURRENT:
        case VECTOR_IRQ_LOWER_64:
        case VECTOR_FIQ_CURRENT:
        case VECTOR_FIQ_LOWER_64:
        case VECTOR_ERROR_CURRENT:
        case VECTOR_ERROR_LOWER_64:
                ec = 0;
                valid_ec = false;
                break;
        default:
                valid_ec = false;
                goto unexpected_exception;
        }

        if (handlers && handlers->exception_handlers[vector][ec])
                return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
        kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

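/*
 * Allocate the handler table in guest memory and store its GVA in the
 * guest's copy of the exception_handlers global, written through the
 * host mapping of that address.
 */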
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
        vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
                                        vm->page_size, MEM_REGION_DATA);

        *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
                         void (*handler)(struct ex_regs *))
{
        struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

        assert(VECTOR_IS_SYNC(vector));
        assert(vector < VECTOR_NUM);
        assert(ec < ESR_EC_NUM);
        handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
                         void (*handler)(struct ex_regs *))
{
        struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

        assert(!VECTOR_IS_SYNC(vector));
        assert(vector < VECTOR_NUM);
        handlers->exception_handlers[vector][0] = handler;
}

uint32_t guest_get_vcpuid(void)
{
        return read_sysreg(tpidr_el1);
}

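/*
 * Probe the supported granules by creating a scratch VM/vCPU and reading
 * ID_AA64MMFR0_EL1: TGRAN4 == 0xf means 4K is unsupported, TGRAN64 == 0
 * means 64K is supported, and a nonzero TGRAN16 means 16K is supported.
 */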
void aarch64_get_supported_page_sizes(uint32_t ipa,
                                      bool *ps4k, bool *ps16k, bool *ps64k)
{
        struct kvm_vcpu_init preferred_init;
        int kvm_fd, vm_fd, vcpu_fd, err;
        uint64_t val;
        struct kvm_one_reg reg = {
                .id     = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
                .addr   = (uint64_t)&val,
        };

        kvm_fd = open_kvm_dev_path_or_exit();
        vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
        TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

        vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
        TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

        err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
        TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
        err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
        TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

        err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, err));

        *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val) != 0xf;
        *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val) == 0;
        *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val) != 0;

        close(vcpu_fd);
        close(vm_fd);
        close(kvm_fd);
}

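/*
 * SMCCC helper: the function ID goes in W0, the seven arguments in
 * X1-X7, and the results come back in X0-X3.
 */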
#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,     \
                     arg6, res)                                                 \
        asm volatile("mov   w0, %w[function_id]\n"                              \
                     "mov   x1, %[arg0]\n"                                      \
                     "mov   x2, %[arg1]\n"                                      \
                     "mov   x3, %[arg2]\n"                                      \
                     "mov   x4, %[arg3]\n"                                      \
                     "mov   x5, %[arg4]\n"                                      \
                     "mov   x6, %[arg5]\n"                                      \
                     "mov   x7, %[arg6]\n"                                      \
                     #insn  "#0\n"                                              \
                     "mov   %[res0], x0\n"                                      \
                     "mov   %[res1], x1\n"                                      \
                     "mov   %[res2], x2\n"                                      \
                     "mov   %[res3], x3\n"                                      \
                     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),              \
                       [res2] "=r"(res->a2), [res3] "=r"(res->a3)               \
                     : [function_id] "r"(function_id), [arg0] "r"(arg0),        \
                       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),    \
                       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)     \
                     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")

void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
               uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
               uint64_t arg6, struct arm_smccc_res *res)
{
        __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
                     arg6, res);
}

void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
               uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
               uint64_t arg6, struct arm_smccc_res *res)
{
        __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
                     arg6, res);
}

void kvm_selftest_arch_init(void)
{
        /*
         * arm64 doesn't have a true default mode, so start by computing the
         * available IPA space and page sizes early.
         */
        guest_modes_append_default();
}

void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
        /*
         * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
         * is [0, 2^(64 - TCR_EL1.T0SZ)).
         */
        sparsebit_set_num(vm->vpages_valid, 0,
                          (1ULL << vm->va_bits) >> vm->page_shift);
}