// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */
9 #include <linux/kvm_host.h>
12 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
13 struct kvm_cpu_trap *trap)
15 struct kvm_memory_slot *memslot;
16 unsigned long hva, fault_addr;
21 fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
22 gfn = fault_addr >> PAGE_SHIFT;
23 memslot = gfn_to_memslot(vcpu->kvm, gfn);
24 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
26 if (kvm_is_error_hva(hva) ||
27 (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
28 switch (trap->scause) {
29 case EXC_LOAD_GUEST_PAGE_FAULT:
30 return kvm_riscv_vcpu_mmio_load(vcpu, run,
33 case EXC_STORE_GUEST_PAGE_FAULT:
34 return kvm_riscv_vcpu_mmio_store(vcpu, run,
42 ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
43 (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
51 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
53 * @vcpu: The VCPU pointer
54 * @read_insn: Flag representing whether we are reading instruction
55 * @guest_addr: Guest address to read
56 * @trap: Output pointer to trap details
58 unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
60 unsigned long guest_addr,
61 struct kvm_cpu_trap *trap)
63 register unsigned long taddr asm("a0") = (unsigned long)trap;
64 register unsigned long ttmp asm("a1");
65 register unsigned long val asm("t0");
66 register unsigned long tmp asm("t1");
67 register unsigned long addr asm("t2") = guest_addr;
69 unsigned long old_stvec, old_hstatus;
71 local_irq_save(flags);
73 old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
74 old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);
79 * 0110010 00011 rs1 100 rd 1110011
84 "add %[ttmp], %[taddr], 0\n"
86 * HLVX.HU %[val], (%[addr])
88 * 0110010 00011 00111 100 00101 1110011
91 "andi %[tmp], %[val], 3\n"
92 "addi %[tmp], %[tmp], -3\n"
93 "bne %[tmp], zero, 2f\n"
94 "addi %[addr], %[addr], 2\n"
96 * HLVX.HU %[tmp], (%[addr])
98 * 0110010 00011 00111 100 00110 1110011
101 "sll %[tmp], %[tmp], 16\n"
102 "add %[val], %[val], %[tmp]\n"
105 : [val] "=&r" (val), [tmp] "=&r" (tmp),
106 [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
107 [addr] "+&r" (addr) : : "memory");
109 if (trap->scause == EXC_LOAD_PAGE_FAULT)
110 trap->scause = EXC_INST_PAGE_FAULT;
114 * 0110110 00000 rs1 100 rd 1110011
117 * 0110100 00000 rs1 100 rd 1110011
122 "add %[ttmp], %[taddr], 0\n"
125 * HLV.D %[val], (%[addr])
127 * 0110110 00000 00111 100 00101 1110011
132 * HLV.W %[val], (%[addr])
134 * 0110100 00000 00111 100 00101 1110011
140 [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
141 : [addr] "r" (addr) : "memory");
144 csr_write(CSR_STVEC, old_stvec);
145 csr_write(CSR_HSTATUS, old_hstatus);
147 local_irq_restore(flags);
153 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
155 * @vcpu: The VCPU pointer
156 * @trap: Trap details
158 void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
159 struct kvm_cpu_trap *trap)
161 unsigned long vsstatus = csr_read(CSR_VSSTATUS);
163 /* Change Guest SSTATUS.SPP bit */
165 if (vcpu->arch.guest_context.sstatus & SR_SPP)
168 /* Change Guest SSTATUS.SPIE bit */
169 vsstatus &= ~SR_SPIE;
170 if (vsstatus & SR_SIE)
173 /* Clear Guest SSTATUS.SIE bit */
176 /* Update Guest SSTATUS */
177 csr_write(CSR_VSSTATUS, vsstatus);
179 /* Update Guest SCAUSE, STVAL, and SEPC */
180 csr_write(CSR_VSCAUSE, trap->scause);
181 csr_write(CSR_VSTVAL, trap->stval);
182 csr_write(CSR_VSEPC, trap->sepc);
184 /* Set Guest PC to Guest exception vector */
185 vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
189 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
190 * proper exit to userspace.
192 int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
193 struct kvm_cpu_trap *trap)
197 /* If we got host interrupt then do nothing */
198 if (trap->scause & CAUSE_IRQ_FLAG)
201 /* Handle guest traps */
203 run->exit_reason = KVM_EXIT_UNKNOWN;
204 switch (trap->scause) {
205 case EXC_VIRTUAL_INST_FAULT:
206 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
207 ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
209 case EXC_INST_GUEST_PAGE_FAULT:
210 case EXC_LOAD_GUEST_PAGE_FAULT:
211 case EXC_STORE_GUEST_PAGE_FAULT:
212 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
213 ret = gstage_page_fault(vcpu, run, trap);
215 case EXC_SUPERVISOR_SYSCALL:
216 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
217 ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
223 /* Print details in-case of error */
225 kvm_err("VCPU exit error %d\n", ret);
226 kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
227 vcpu->arch.guest_context.sepc,
228 vcpu->arch.guest_context.sstatus,
229 vcpu->arch.guest_context.hstatus);
230 kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
231 trap->scause, trap->stval, trap->htval, trap->htinst);