[gitweb capture] Tree state at commit "ACPI: APEI: Fix integer overflow in ghes_estatus_pool_init()"
File: arch/riscv/kvm/vcpu_exit.c (sfrench/cifs-2.6.git mirror of the Linux kernel)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  *
5  * Authors:
6  *     Anup Patel <anup.patel@wdc.com>
7  */
8
9 #include <linux/kvm_host.h>
10 #include <asm/csr.h>
11
12 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
13                              struct kvm_cpu_trap *trap)
14 {
15         struct kvm_memory_slot *memslot;
16         unsigned long hva, fault_addr;
17         bool writable;
18         gfn_t gfn;
19         int ret;
20
21         fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
22         gfn = fault_addr >> PAGE_SHIFT;
23         memslot = gfn_to_memslot(vcpu->kvm, gfn);
24         hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
25
26         if (kvm_is_error_hva(hva) ||
27             (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
28                 switch (trap->scause) {
29                 case EXC_LOAD_GUEST_PAGE_FAULT:
30                         return kvm_riscv_vcpu_mmio_load(vcpu, run,
31                                                         fault_addr,
32                                                         trap->htinst);
33                 case EXC_STORE_GUEST_PAGE_FAULT:
34                         return kvm_riscv_vcpu_mmio_store(vcpu, run,
35                                                          fault_addr,
36                                                          trap->htinst);
37                 default:
38                         return -EOPNOTSUPP;
39                 };
40         }
41
42         ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
43                 (trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
44         if (ret < 0)
45                 return ret;
46
47         return 1;
48 }
49
/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 *
 * Performs a hypervisor virtual-machine load (HLV/HLVX) from guest memory
 * using hand-encoded .word opcodes, since toolchains may lack H-extension
 * support.  Any fault taken by the load vectors to __kvm_riscv_unpriv_trap,
 * which fills in @trap (expected to find its pointer in a0/a1 — hence the
 * explicitly register-pinned operands below).  Returns the value read;
 * on fault the returned value is unspecified and @trap must be checked.
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	/*
	 * Register pinning is part of the contract with the trap handler
	 * and the hand-encoded opcodes: a0/a1 carry the trap pointer,
	 * t0/t1/t2 are the exact registers baked into the .word encodings.
	 */
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	register unsigned long val asm("t0");
	register unsigned long tmp asm("t1");
	register unsigned long addr asm("t2") = guest_addr;
	unsigned long flags;
	unsigned long old_stvec, old_hstatus;

	/* No interrupts while STVEC/HSTATUS are temporarily repointed. */
	local_irq_save(flags);

	/* Borrow the guest's HSTATUS (sets SPVP for the unpriv access) and
	 * redirect traps to our private fixup handler. */
	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 *
		 * Instructions are read as one or two halfwords with
		 * execute-permission checks (HLVX): read the low 16 bits,
		 * and only if they indicate a 32-bit encoding (low two
		 * bits == 11) fetch the upper halfword as well.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			/*
			 * HLVX.HU %[val], (%[addr])
			 * HLVX.HU t0, (t2)
			 * 0110010 00011 00111 100 00101 1110011
			 */
			".word 0x6433c2f3\n"
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			/*
			 * HLVX.HU %[tmp], (%[addr])
			 * HLVX.HU t1, (t2)
			 * 0110010 00011 00111 100 00110 1110011
			 */
			".word 0x6433c373\n"
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (addr) : : "memory");

		/* The HLVX load faults as a load; callers expect an
		 * instruction-fetch fault for instruction reads. */
		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 *
		 * Data reads use a single XLEN-sized hypervisor load.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			/*
			 * HLV.D %[val], (%[addr])
			 * HLV.D t0, (t2)
			 * 0110110 00000 00111 100 00101 1110011
			 */
			".word 0x6c03c2f3\n"
#else
			/*
			 * HLV.W %[val], (%[addr])
			 * HLV.W t0, (t2)
			 * 0110100 00000 00111 100 00101 1110011
			 */
			".word 0x6803c2f3\n"
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (addr) : "memory");
	}

	/* Restore the host trap vector and HSTATUS before re-enabling IRQs. */
	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}
151
152 /**
153  * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
154  *
155  * @vcpu: The VCPU pointer
156  * @trap: Trap details
157  */
158 void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
159                                   struct kvm_cpu_trap *trap)
160 {
161         unsigned long vsstatus = csr_read(CSR_VSSTATUS);
162
163         /* Change Guest SSTATUS.SPP bit */
164         vsstatus &= ~SR_SPP;
165         if (vcpu->arch.guest_context.sstatus & SR_SPP)
166                 vsstatus |= SR_SPP;
167
168         /* Change Guest SSTATUS.SPIE bit */
169         vsstatus &= ~SR_SPIE;
170         if (vsstatus & SR_SIE)
171                 vsstatus |= SR_SPIE;
172
173         /* Clear Guest SSTATUS.SIE bit */
174         vsstatus &= ~SR_SIE;
175
176         /* Update Guest SSTATUS */
177         csr_write(CSR_VSSTATUS, vsstatus);
178
179         /* Update Guest SCAUSE, STVAL, and SEPC */
180         csr_write(CSR_VSCAUSE, trap->scause);
181         csr_write(CSR_VSTVAL, trap->stval);
182         csr_write(CSR_VSEPC, trap->sepc);
183
184         /* Set Guest PC to Guest exception vector */
185         vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
186 }
187
188 /*
189  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
190  * proper exit to userspace.
191  */
192 int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
193                         struct kvm_cpu_trap *trap)
194 {
195         int ret;
196
197         /* If we got host interrupt then do nothing */
198         if (trap->scause & CAUSE_IRQ_FLAG)
199                 return 1;
200
201         /* Handle guest traps */
202         ret = -EFAULT;
203         run->exit_reason = KVM_EXIT_UNKNOWN;
204         switch (trap->scause) {
205         case EXC_VIRTUAL_INST_FAULT:
206                 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
207                         ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
208                 break;
209         case EXC_INST_GUEST_PAGE_FAULT:
210         case EXC_LOAD_GUEST_PAGE_FAULT:
211         case EXC_STORE_GUEST_PAGE_FAULT:
212                 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
213                         ret = gstage_page_fault(vcpu, run, trap);
214                 break;
215         case EXC_SUPERVISOR_SYSCALL:
216                 if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
217                         ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
218                 break;
219         default:
220                 break;
221         }
222
223         /* Print details in-case of error */
224         if (ret < 0) {
225                 kvm_err("VCPU exit error %d\n", ret);
226                 kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
227                         vcpu->arch.guest_context.sepc,
228                         vcpu->arch.guest_context.sstatus,
229                         vcpu->arch.guest_context.hstatus);
230                 kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
231                         trap->scause, trap->stval, trap->htval, trap->htinst);
232         }
233
234         return ret;
235 }