arch/powerpc/kvm/emulate_loadstore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

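/*
 * Facility checks: if the guest has the relevant facility disabled in
 * its MSR, queue the corresponding unavailable interrupt and let the
 * guest enable the facility and retry, instead of emulating the access.
 */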
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * Reset all MMIO emulation state before decoding the instruction.
	 * Whether a VSX access copies data between memory and VSR[0..31]
	 * or VSR[32..63] is determined by the register number returned by
	 * analyse_instr().
	 */
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
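	/*
	 * analyse_instr() decodes the instruction against the guest register
	 * state; it consults the MSR (e.g. for the effective byte order), so
	 * mirror the guest MSR into the pt_regs copy first.
	 */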
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD:  {
			int instr_byte_swap = op.type & BYTEREV;

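			/*
			 * kvmppc_handle_loads() is the sign-extending
			 * variant; kvmppc_handle_load() zero-extends.
			 */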
			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

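			/*
			 * mmio_vmx_offset is the element index within the
			 * 16-byte quadword.  A full 16-byte lvx is emulated
			 * as two 8-byte MMIO loads; the element forms use a
			 * single access of the element size.
			 */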
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)  {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

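			/*
			 * Conversion forms (e.g. lxsspx) access fewer bytes in
			 * memory than the VSX element size, so a single access
			 * of the memory size is used; otherwise the access is
			 * split into one MMIO operation per element.
			 */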
			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If the value needs byte reversal, op.val has
			 * already been reversed by analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

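			/* Flush the guest's VMX registers into vcpu->arch so
			 * the store below reads the current values.
			 */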
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

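			/* As for loads: a 16-byte stvx becomes two 8-byte
			 * MMIO stores, the element forms use a single store.
			 */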
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

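			/* Flush the guest's VSX registers into vcpu->arch
			 * before reading the value to store.
			 */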
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

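			/* As in the load case: conversion forms do a single
			 * access of the memory size, other forms one MMIO
			 * operation per element.
			 */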
			if (size < op.element_size) {
				/* precision conversion case: stxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

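	/*
	 * On failure, deliver a program interrupt to the guest and leave the
	 * PC on the faulting instruction.
	 */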
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}