/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)            ((v) & 0xf)
#define vtr_to_nr_pre_bits(v)           ((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)           (1 << (vtr_to_nr_pre_bits(v) - 5))

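/*
 * The ICH_LR<n>_EL2 registers are individual system registers and cannot be
 * indexed at run time, so LR reads and writes are dispatched through a
 * switch on the LR number.
 */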
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
        switch (lr & 0xf) {
        case 0:
                return read_gicreg(ICH_LR0_EL2);
        case 1:
                return read_gicreg(ICH_LR1_EL2);
        case 2:
                return read_gicreg(ICH_LR2_EL2);
        case 3:
                return read_gicreg(ICH_LR3_EL2);
        case 4:
                return read_gicreg(ICH_LR4_EL2);
        case 5:
                return read_gicreg(ICH_LR5_EL2);
        case 6:
                return read_gicreg(ICH_LR6_EL2);
        case 7:
                return read_gicreg(ICH_LR7_EL2);
        case 8:
                return read_gicreg(ICH_LR8_EL2);
        case 9:
                return read_gicreg(ICH_LR9_EL2);
        case 10:
                return read_gicreg(ICH_LR10_EL2);
        case 11:
                return read_gicreg(ICH_LR11_EL2);
        case 12:
                return read_gicreg(ICH_LR12_EL2);
        case 13:
                return read_gicreg(ICH_LR13_EL2);
        case 14:
                return read_gicreg(ICH_LR14_EL2);
        case 15:
                return read_gicreg(ICH_LR15_EL2);
        }

        unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
        switch (lr & 0xf) {
        case 0:
                write_gicreg(val, ICH_LR0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_LR1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_LR2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_LR3_EL2);
                break;
        case 4:
                write_gicreg(val, ICH_LR4_EL2);
                break;
        case 5:
                write_gicreg(val, ICH_LR5_EL2);
                break;
        case 6:
                write_gicreg(val, ICH_LR6_EL2);
                break;
        case 7:
                write_gicreg(val, ICH_LR7_EL2);
                break;
        case 8:
                write_gicreg(val, ICH_LR8_EL2);
                break;
        case 9:
                write_gicreg(val, ICH_LR9_EL2);
                break;
        case 10:
                write_gicreg(val, ICH_LR10_EL2);
                break;
        case 11:
                write_gicreg(val, ICH_LR11_EL2);
                break;
        case 12:
                write_gicreg(val, ICH_LR12_EL2);
                break;
        case 13:
                write_gicreg(val, ICH_LR13_EL2);
                break;
        case 14:
                write_gicreg(val, ICH_LR14_EL2);
                break;
        case 15:
                write_gicreg(val, ICH_LR15_EL2);
                break;
        }
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP0R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP0R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP0R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP0R3_EL2);
                break;
        }
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP1R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP1R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP1R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP1R3_EL2);
                break;
        }
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP0R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP0R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP0R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP0R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP1R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP1R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP1R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP1R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

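/*
 * Save the guest's view of the List Registers on guest exit: ICH_HCR_EL2
 * is written with the EN bit cleared, LRs flagged as empty in
 * ICH_ELRSR_EL2 only have their state bits cleared, the remaining LRs are
 * read back in full, and every used LR is then zeroed in hardware.
 */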
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

        /*
         * Make sure stores to the GIC via the memory-mapped interface
         * are now visible to the system register interface when reading the
         * LRs, and when reading back the VMCR on non-VHE systems.
         */
        if (used_lrs || !has_vhe()) {
                if (!cpu_if->vgic_sre) {
                        dsb(sy);
                        isb();
                }
        }

        if (used_lrs || cpu_if->its_vpe.its_vm) {
                int i;
                u32 elrsr;

                elrsr = read_gicreg(ICH_ELRSR_EL2);

                write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

                for (i = 0; i < used_lrs; i++) {
                        if (elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
                        else
                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

                        __gic_v3_set_lr(0, i);
                }
        }
}

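/*
 * Mirror image of __vgic_v3_save_state(): re-enable the vGIC through
 * ICH_HCR_EL2 and reload the in-use List Registers on guest entry.
 */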
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;

        if (used_lrs || cpu_if->its_vpe.its_vm) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

                for (i = 0; i < used_lrs; i++)
                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
        }

        /*
         * Ensure that writes to the LRs, and on non-VHE systems the write
         * to the VMCR in __vgic_v3_activate_traps(), will have reached
         * the (re)distributors. This ensures the guest will read the
         * correct values from the memory-mapped interface.
         */
        if (used_lrs || !has_vhe()) {
                if (!cpu_if->vgic_sre) {
                        isb();
                        dsb(sy);
                }
        }
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        /*
         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
         * Group0 interrupt (as generated in GICv2 mode) to be
         * delivered as a FIQ to the guest, with potentially fatal
         * consequences. So we must make sure that ICC_SRE_EL1 has
         * actually been programmed with the value we want before
         * starting to mess with the rest of the GIC, and VMCR_EL2 in
         * particular.  This logic must be called before
         * __vgic_v3_restore_state().
         */
        if (!cpu_if->vgic_sre) {
                write_gicreg(0, ICC_SRE_EL1);
                isb();
                write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

                if (has_vhe()) {
                        /*
                         * Ensure that the write to the VMCR will have reached
                         * the (re)distributors. This ensures the guest will
                         * read the correct values from the memory-mapped
                         * interface.
                         */
                        isb();
                        dsb(sy);
                }
        }

        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
                     ICC_SRE_EL2);

        /*
         * If we need to trap system registers, we must write
         * ICH_HCR_EL2 anyway, even if no interrupts are being
         * injected.
         */
        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
            cpu_if->its_vpe.its_vm)
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 val;

        if (!cpu_if->vgic_sre) {
                cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
        }

        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

        if (!cpu_if->vgic_sre) {
                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
                isb();
                write_gicreg(1, ICC_SRE_EL1);
        }

        /*
         * If we were trapping system registers, we enabled the VGIC even if
         * no interrupts were being injected, and we disable it again here.
         */
        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
            cpu_if->its_vpe.its_vm)
                write_gicreg(0, ICH_HCR_EL2);
}

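/*
 * Only the Active Priority Registers actually implemented for the number
 * of preemption bits reported by ICH_VTR_EL2.PREbits need saving and
 * restoring, so the switch statements below deliberately fall through
 * from the widest configuration down to the common case.
 */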
void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if;
        u64 val;
        u32 nr_pre_bits;

        vcpu = kern_hyp_va(vcpu);
        cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        switch (nr_pre_bits) {
        case 7:
                cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
                cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
        case 6:
                cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
        default:
                cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
        }

        switch (nr_pre_bits) {
        case 7:
                cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
                cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
        case 6:
                cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
        default:
                cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
        }
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if;
        u64 val;
        u32 nr_pre_bits;

        vcpu = kern_hyp_va(vcpu);
        cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        switch (nr_pre_bits) {
        case 7:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
        case 6:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
        default:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
        }

        switch (nr_pre_bits) {
        case 7:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
        case 6:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
        default:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
        }
}

void __hyp_text __vgic_v3_init_lrs(void)
{
        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
        int i;

        for (i = 0; i <= max_lr_idx; i++)
                __gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
        return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
        return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
        write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
        /* See Pseudocode for VPriorityGroup */
        return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

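/*
 * Tell which interrupt group a trapped access targets: the Group-0
 * accessors handled here (ICC_IAR0_EL1, ICC_BPR0_EL1, ICC_AP0Rn_EL1, ...)
 * are all encoded with CRm == 8, so anything else is treated as Group-1.
 */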
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

        return crm != 8;
}

#define GICv3_IDLE_PRIORITY     0xff

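/*
 * Scan the in-use LRs for the highest-priority pending interrupt whose
 * group is enabled in the VMCR. Returns the LR index and its value, or
 * -1 (with a spurious INTID in *lr_val) if nothing qualifies.
 */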
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
                                                    u32 vmcr,
                                                    u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        u8 priority = GICv3_IDLE_PRIORITY;
        int i, lr = -1;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);
                u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

                /* Not pending in the state? */
                if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
                        continue;

                /* Group-0 interrupt, but Group-0 disabled? */
                if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
                        continue;

                /* Group-1 interrupt, but Group-1 disabled? */
                if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
                        continue;

                /* Not the highest priority? */
                if (lr_prio >= priority)
                        continue;

                /* This is a candidate */
                priority = lr_prio;
                *lr_val = val;
                lr = i;
        }

        if (lr == -1)
                *lr_val = ICC_IAR1_EL1_SPURIOUS;

        return lr;
}

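/* Find the LR that holds the given INTID in the active state, if any. */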
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
                                               int intid, u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);

                if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
                    (val & ICH_LR_ACTIVE_BIT)) {
                        *lr_val = val;
                        return i;
                }
        }

        *lr_val = ICC_IAR1_EL1_SPURIOUS;
        return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 val;

                /*
                 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
                 * contain the active priority levels for this VCPU
                 * for the maximum number of supported priority
                 * levels, and we return the full priority level only
                 * if the BPR is programmed to its minimum, otherwise
                 * we return a combination of the priority level and
                 * subpriority, as determined by the setting of the
                 * BPR, but without the full subpriority.
                 */
                val  = __vgic_v3_read_ap0rn(i);
                val |= __vgic_v3_read_ap1rn(i);
                if (!val) {
                        hap += 32;
                        continue;
                }

                return (hap + __ffs(val)) << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
        return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
        unsigned int bpr;

        if (vmcr & ICH_VMCR_CBPR_MASK) {
                bpr = __vgic_v3_get_bpr0(vmcr);
                if (bpr < 7)
                        bpr++;
        } else {
                bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        }

        return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
        unsigned int bpr;

        if (!grp)
                bpr = __vgic_v3_get_bpr0(vmcr) + 1;
        else
                bpr = __vgic_v3_get_bpr1(vmcr);

        return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
        u8 pre, ap;
        u32 val;
        int apr;

        pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
        ap = pre >> __vgic_v3_bpr_min();
        apr = ap / 32;

        if (!grp) {
                val = __vgic_v3_read_ap0rn(apr);
                __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
        } else {
                val = __vgic_v3_read_ap1rn(apr);
                __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
        }
}

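/*
 * Perform the priority drop: clear the lowest set bit (the highest active
 * priority) across the active priority registers and return that
 * priority, rescaled to the full 8 bits.
 */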
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 ap0, ap1;
                int c0, c1;

                ap0 = __vgic_v3_read_ap0rn(i);
                ap1 = __vgic_v3_read_ap1rn(i);
                if (!ap0 && !ap1) {
                        hap += 32;
                        continue;
                }

                c0 = ap0 ? __ffs(ap0) : 32;
                c1 = ap1 ? __ffs(ap1) : 32;

                /* Always clear the LSB, which is the highest priority */
                if (c0 < c1) {
                        ap0 &= ~BIT(c0);
                        __vgic_v3_write_ap0rn(ap0, i);
                        hap += c0;
                } else {
                        ap1 &= ~BIT(c1);
                        __vgic_v3_write_ap1rn(ap1, i);
                        hap += c1;
                }

                /* Rescale to 8 bits of priority */
                return hap << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

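/*
 * Emulate a read of ICC_IAR0_EL1/ICC_IAR1_EL1: acknowledge the highest
 * priority pending interrupt of the accessed group, unless it is masked
 * by the PMR or by the current active priorities. The interrupt is moved
 * to the active state in its LR (except for LPIs, which have no active
 * state), its active priority is recorded, and its INTID is returned;
 * otherwise a spurious INTID is returned.
 */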
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        u8 lr_prio, pmr;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr < 0)
                goto spurious;

        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;

        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;

        if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
                goto spurious;

        lr_val &= ~ICH_LR_STATE;
        /* No active state for LPIs */
        if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
                lr_val |= ICH_LR_ACTIVE_BIT;
        __gic_v3_set_lr(lr_val, lr);
        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
        return;

spurious:
        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
        lr_val &= ~ICH_LR_ACTIVE_BIT;
        if (lr_val & ICH_LR_HW) {
                u32 pid;

                pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
                gic_write_dir(pid);
        }

        __gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
        u32 hcr;

        hcr = read_gicreg(ICH_HCR_EL2);
        hcr += 1 << ICH_HCR_EOIcount_SHIFT;
        write_gicreg(hcr, ICH_HCR_EL2);
}

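/*
 * Emulate a write to ICC_DIR_EL1, which is only meaningful when
 * EOImode == 1: deactivate the matching active LR, or bump
 * ICH_HCR_EL2.EOIcount when no LR matches. LPIs have no active state
 * and are ignored.
 */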
static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
                                           u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        int lr;

        /* EOImode == 0, nothing to be done here */
        if (!(vmcr & ICH_VMCR_EOIM_MASK))
                return;

        /* No deactivate to be performed on an LPI */
        if (vid >= VGIC_MIN_LPI)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        __vgic_v3_clear_active_lr(lr, lr_val);
}

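/*
 * Emulate a write to ICC_EOIR0_EL1/ICC_EOIR1_EL1: always perform the
 * priority drop, and additionally deactivate the matching LR when
 * EOImode == 0 and the group and priority of the written INTID are
 * consistent with what was acknowledged. LPIs only get the priority drop.
 */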
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        u8 lr_prio, act_prio;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        /* Drop priority in any case */
        act_prio = __vgic_v3_clear_highest_active_priority();

        /* If EOIing an LPI, no deactivate to be performed */
        if (vid >= VGIC_MIN_LPI)
                return;

        /* EOImode == 1, nothing to be done here */
        if (vmcr & ICH_VMCR_EOIM_MASK)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

        /* If priorities or group do not match, the guest has fscked-up. */
        if (grp != !!(lr_val & ICH_LR_GROUP) ||
            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
                return;

        /* Let's now perform the deactivation */
        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG0_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG0_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG1_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG1_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min() - 1;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR0_SHIFT;
        val &= ICH_VMCR_BPR0_MASK;
        vmcr &= ~ICH_VMCR_BPR0_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min();

        if (vmcr & ICH_VMCR_CBPR_MASK)
                return;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR1_SHIFT;
        val &= ICH_VMCR_BPR1_MASK;
        vmcr &= ~ICH_VMCR_BPR1_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val;

        if (!__vgic_v3_get_group(vcpu))
                val = __vgic_v3_read_ap0rn(n);
        else
                val = __vgic_v3_read_ap1rn(n);

        vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (!__vgic_v3_get_group(vcpu))
                __vgic_v3_write_ap0rn(val, n);
        else
                __vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        u64 lr_val;
        int lr, lr_grp, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr == -1)
                goto spurious;

        lr_grp = !!(lr_val & ICH_LR_GROUP);
        if (lr_grp != grp)
                lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
                                          u32 vmcr, int rt)
{
        vmcr &= ICH_VMCR_PMR_MASK;
        vmcr >>= ICH_VMCR_PMR_SHIFT;
        vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
                                           u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        val <<= ICH_VMCR_PMR_SHIFT;
        val &= ICH_VMCR_PMR_MASK;
        vmcr &= ~ICH_VMCR_PMR_MASK;
        vmcr |= val;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
                                          u32 vmcr, int rt)
{
        u32 val = __vgic_v3_get_highest_active_priority();
        vcpu_set_reg(vcpu, rt, val);
}

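/*
 * Synthesize ICC_CTLR_EL1 from the fixed fields of ICH_VTR_EL2 (PRIbits,
 * IDbits, SEIS, A3V) and the guest-controlled EOImode and CBPR bits held
 * in the VMCR.
 */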
static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
                                           u32 vmcr, int rt)
{
        u32 vtr, val;

        vtr = read_gicreg(ICH_VTR_EL2);
        /* PRIbits */
        val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
        /* IDbits */
        val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
        /* SEIS */
        val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
        /* A3V */
        val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
        /* EOImode */
        val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
        /* CBPR */
        val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

        vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (val & ICC_CTLR_EL1_CBPR_MASK)
                vmcr |= ICH_VMCR_CBPR_MASK;
        else
                vmcr &= ~ICH_VMCR_CBPR_MASK;

        if (val & ICC_CTLR_EL1_EOImode_MASK)
                vmcr |= ICH_VMCR_EOIM_MASK;
        else
                vmcr &= ~ICH_VMCR_EOIM_MASK;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}

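/*
 * Main entry point for trapped GICv3 CPU interface accesses: decode the
 * trapped system register from the ESR, dispatch to the matching
 * emulation helper with the current VMCR and the target GPR, and skip
 * the trapped instruction. Returns 1 if the access was handled here,
 * 0 otherwise.
 */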
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        int rt;
        u32 esr;
        u32 vmcr;
        void (*fn)(struct kvm_vcpu *, u32, int);
        bool is_read;
        u32 sysreg;

        esr = kvm_vcpu_get_hsr(vcpu);
        if (vcpu_mode_is_32bit(vcpu)) {
                if (!kvm_condition_valid(vcpu)) {
                        __kvm_skip_instr(vcpu);
                        return 1;
                }

                sysreg = esr_cp15_to_sysreg(esr);
        } else {
                sysreg = esr_sys64_to_sysreg(esr);
        }

        is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

        switch (sysreg) {
        case SYS_ICC_IAR0_EL1:
        case SYS_ICC_IAR1_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_iar;
                break;
        case SYS_ICC_EOIR0_EL1:
        case SYS_ICC_EOIR1_EL1:
                if (unlikely(is_read))
                        return 0;
                fn = __vgic_v3_write_eoir;
                break;
        case SYS_ICC_IGRPEN1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen1;
                else
                        fn = __vgic_v3_write_igrpen1;
                break;
        case SYS_ICC_BPR1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr1;
                else
                        fn = __vgic_v3_write_bpr1;
                break;
        case SYS_ICC_AP0Rn_EL1(0):
        case SYS_ICC_AP1Rn_EL1(0):
                if (is_read)
                        fn = __vgic_v3_read_apxr0;
                else
                        fn = __vgic_v3_write_apxr0;
                break;
        case SYS_ICC_AP0Rn_EL1(1):
        case SYS_ICC_AP1Rn_EL1(1):
                if (is_read)
                        fn = __vgic_v3_read_apxr1;
                else
                        fn = __vgic_v3_write_apxr1;
                break;
        case SYS_ICC_AP0Rn_EL1(2):
        case SYS_ICC_AP1Rn_EL1(2):
                if (is_read)
                        fn = __vgic_v3_read_apxr2;
                else
                        fn = __vgic_v3_write_apxr2;
                break;
        case SYS_ICC_AP0Rn_EL1(3):
        case SYS_ICC_AP1Rn_EL1(3):
                if (is_read)
                        fn = __vgic_v3_read_apxr3;
                else
                        fn = __vgic_v3_write_apxr3;
                break;
        case SYS_ICC_HPPIR0_EL1:
        case SYS_ICC_HPPIR1_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_hppir;
                break;
        case SYS_ICC_IGRPEN0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen0;
                else
                        fn = __vgic_v3_write_igrpen0;
                break;
        case SYS_ICC_BPR0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr0;
                else
                        fn = __vgic_v3_write_bpr0;
                break;
        case SYS_ICC_DIR_EL1:
                if (unlikely(is_read))
                        return 0;
                fn = __vgic_v3_write_dir;
                break;
        case SYS_ICC_RPR_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_rpr;
                break;
        case SYS_ICC_CTLR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_ctlr;
                else
                        fn = __vgic_v3_write_ctlr;
                break;
        case SYS_ICC_PMR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_pmr;
                else
                        fn = __vgic_v3_write_pmr;
                break;
        default:
                return 0;
        }

        vmcr = __vgic_v3_read_vmcr();
        rt = kvm_vcpu_sys_get_rt(vcpu);
        fn(vcpu, vmcr, rt);

        __kvm_skip_instr(vcpu);

        return 1;
}

#endif