Merge tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[sfrench/cifs-2.6.git] / arch / arm / kvm / hyp / switch.c
1 /*
2  * Copyright (C) 2015 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/jump_label.h>
18
19 #include <asm/kvm_asm.h>
20 #include <asm/kvm_hyp.h>
21
/* Allow the assembler to accept Virtualization Extensions instructions. */
__asm__(".arch_extension     virt");
23
/*
 * Activate the traps, saving the host's fpexc register before
 * overwriting it. We'll restore it on VM exit.
 */
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
	u32 val;

	/*
	 * We are about to set HCPTR.TCP10/11 to trap all floating point
	 * register accesses to HYP, however, the ARM ARM clearly states that
	 * traps are only taken to HYP if the operation would not otherwise
	 * trap to SVC.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
	 */
	val = read_sysreg(VFP_FPEXC);
	*fpexc_host = val;	/* caller restores this on VM exit */
	if (!(val & FPEXC_EN)) {
		write_sysreg(val | FPEXC_EN, VFP_FPEXC);
		/* make FPEXC.EN visible before the TCP traps are enabled */
		isb();
	}

	/* Guest's HCR configuration, plus any pending virtual IRQ/FIQ lines */
	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(HSTR_T(15), HSTR);
	/* Trap trace (TTA) and FP/SIMD (cp10/cp11) accesses */
	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
	val = read_sysreg(HDCR);
	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
	write_sysreg(val, HDCR);
}
55
/*
 * Undo __activate_traps(): disable the HYP traps before returning to
 * the host, taking care not to lose a pending virtual abort.
 */
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u32 val;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
	 * but the crucial bit is the zeroing of HCR.VA in the
	 * pseudocode.
	 */
	if (vcpu->arch.hcr & HCR_VA)
		vcpu->arch.hcr = read_sysreg(HCR);	/* re-read: HW clears VA on delivery */

	write_sysreg(0, HCR);
	write_sysreg(0, HSTR);
	val = read_sysreg(HDCR);
	/* drop only the PMU traps here; the other HDCR bits are left as-is */
	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
	write_sysreg(0, HCPTR);
}
75
76 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
77 {
78         struct kvm *kvm = kern_hyp_va(vcpu->kvm);
79         write_sysreg(kvm->arch.vttbr, VTTBR);
80         write_sysreg(vcpu->arch.midr, VPIDR);
81 }
82
83 static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
84 {
85         write_sysreg(0, VTTBR);
86         write_sysreg(read_sysreg(MIDR), VPIDR);
87 }
88
89
90 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
91 {
92         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
93                 __vgic_v3_save_state(vcpu);
94         else
95                 __vgic_v2_save_state(vcpu);
96 }
97
98 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
99 {
100         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
101                 __vgic_v3_restore_state(vcpu);
102         else
103                 __vgic_v2_restore_state(vcpu);
104 }
105
/*
 * Capture fault information (HSR, faulting VA and, when applicable, the
 * faulting IPA) from the hardware into vcpu->arch.fault.
 *
 * Returns true when the fault state has been populated (or no fault
 * address applies to this exception class), false when our software
 * stage-1 walk failed and the guest should simply be re-entered.
 */
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u32 hsr = read_sysreg(HSR);
	u8 ec = hsr >> HSR_EC_SHIFT;	/* exception class */
	u32 hpfar, far;

	vcpu->arch.fault.hsr = hsr;

	/* Only instruction/data aborts carry a fault address */
	if (ec == HSR_EC_IABT)
		far = read_sysreg(HIFAR);
	else if (ec == HSR_EC_DABT)
		far = read_sysreg(HDFAR);
	else
		return true;

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */
	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
		u64 par, tmp;

		/* HPFAR is UNKNOWN here: resolve the IPA by hand with an AT op */
		par = read_sysreg(PAR);		/* preserve the guest's PAR */
		write_sysreg(far, ATS1CPR);	/* stage-1 PL1-read address translation */
		isb();				/* result lands in PAR only after the isb */

		tmp = read_sysreg(PAR);
		write_sysreg(par, PAR);		/* put the guest's PAR back */

		if (unlikely(tmp & 1))		/* PAR.F set: translation aborted */
			return false; /* Translation failed, back to guest */

		/* Repack PAR's PA field into HPFAR's FIPA[39:4] layout */
		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
	} else {
		hpfar = read_sysreg(HPFAR);
	}

	vcpu->arch.fault.hxfar = far;
	vcpu->arch.fault.hpfar = hpfar;
	return true;
}
155
/*
 * World switch: save the host context, install the guest's traps, VM
 * configuration and register state, run the guest, then restore the
 * host on exit.
 *
 * Returns the ARM_EXCEPTION_* code describing why the guest exited.
 */
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;
	u32 fpexc;

	/* We run at HYP: fix up the vcpu pointer and stash it for __hyp_panic() */
	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, HTPIDR);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state(host_ctxt);
	__banked_save_state(host_ctxt);

	__activate_traps(vcpu, &fpexc);		/* fpexc := host's FPEXC */
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * If decoding the fault failed (our software stage-1 walk lost a
	 * race), re-enter the guest instead of reporting the exit.
	 */
	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
		goto again;

	fp_enabled = __vfp_enabled();

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

	/* Only swap VFP state if the guest actually used the FP unit */
	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}

	/* Restore the host FPEXC saved by __activate_traps() */
	write_sysreg(fpexc, VFP_FPEXC);

	return exit_code;
}
212
/* Panic message format strings, indexed by the ARM_EXCEPTION_* cause code. */
static const char * const __hyp_panic_string[] = {
	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ   PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC   PC:%08x CPSR:%08x",
};
223
/*
 * Entered on an unexpected exception taken to HYP mode: print a
 * diagnostic (PC plus either CPSR or, for data aborts, the faulting
 * address) and panic. Never returns.
 */
void __hyp_text __noreturn __hyp_panic(int cause)
{
	u32 elr = read_special(ELR_hyp);	/* PC at the time of the exception */
	u32 val;

	/* Data aborts report the faulting address; everything else the CPSR */
	if (cause == ARM_EXCEPTION_DATA_ABORT)
		val = read_sysreg(HDFAR);
	else
		val = read_special(SPSR);

	/*
	 * A non-zero VTTBR means we blew up while a guest was running:
	 * recover the vcpu pointer stashed in HTPIDR by __kvm_vcpu_run()
	 * and restore enough host state to panic cleanly.
	 */
	if (read_sysreg(VTTBR)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_do_panic(__hyp_panic_string[cause], elr, val);

	unreachable();
}