/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

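/*
 * Note: these helpers decode ICH_VTR_EL2: the low bits give the index
 * of the last implemented List Register, and PREbits (bits [28:26])
 * encodes the number of preemption bits minus one. With 5, 6 or 7
 * preemption bits, a CPU implements 1, 2 or 4 Active Priorities
 * registers per group, which is what vtr_to_nr_apr_regs() computes.
 */
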
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:	 return read_gicreg(ICH_LR0_EL2);
	case 1:	 return read_gicreg(ICH_LR1_EL2);
	case 2:	 return read_gicreg(ICH_LR2_EL2);
	case 3:	 return read_gicreg(ICH_LR3_EL2);
	case 4:	 return read_gicreg(ICH_LR4_EL2);
	case 5:	 return read_gicreg(ICH_LR5_EL2);
	case 6:	 return read_gicreg(ICH_LR6_EL2);
	case 7:	 return read_gicreg(ICH_LR7_EL2);
	case 8:	 return read_gicreg(ICH_LR8_EL2);
	case 9:	 return read_gicreg(ICH_LR9_EL2);
	case 10: return read_gicreg(ICH_LR10_EL2);
	case 11: return read_gicreg(ICH_LR11_EL2);
	case 12: return read_gicreg(ICH_LR12_EL2);
	case 13: return read_gicreg(ICH_LR13_EL2);
	case 14: return read_gicreg(ICH_LR14_EL2);
	case 15: return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

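/*
 * The List Registers are individual system registers whose names are
 * encoded in the accessing instruction, so they cannot be indexed at
 * run time; hence the switches in the accessors above and below, which
 * let the compiler emit one fixed-register access per case.
 */
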
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:	 write_gicreg(val, ICH_LR0_EL2);  break;
	case 1:	 write_gicreg(val, ICH_LR1_EL2);  break;
	case 2:	 write_gicreg(val, ICH_LR2_EL2);  break;
	case 3:	 write_gicreg(val, ICH_LR3_EL2);  break;
	case 4:	 write_gicreg(val, ICH_LR4_EL2);  break;
	case 5:	 write_gicreg(val, ICH_LR5_EL2);  break;
	case 6:	 write_gicreg(val, ICH_LR6_EL2);  break;
	case 7:	 write_gicreg(val, ICH_LR7_EL2);  break;
	case 8:	 write_gicreg(val, ICH_LR8_EL2);  break;
	case 9:	 write_gicreg(val, ICH_LR9_EL2);  break;
	case 10: write_gicreg(val, ICH_LR10_EL2); break;
	case 11: write_gicreg(val, ICH_LR11_EL2); break;
	case 12: write_gicreg(val, ICH_LR12_EL2); break;
	case 13: write_gicreg(val, ICH_LR13_EL2); break;
	case 14: write_gicreg(val, ICH_LR14_EL2); break;
	case 15: write_gicreg(val, ICH_LR15_EL2); break;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0: write_gicreg(val, ICH_AP0R0_EL2); break;
	case 1: write_gicreg(val, ICH_AP0R1_EL2); break;
	case 2: write_gicreg(val, ICH_AP0R2_EL2); break;
	case 3: write_gicreg(val, ICH_AP0R3_EL2); break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0: write_gicreg(val, ICH_AP1R0_EL2); break;
	case 1: write_gicreg(val, ICH_AP1R1_EL2); break;
	case 2: write_gicreg(val, ICH_AP1R2_EL2); break;
	case 3: write_gicreg(val, ICH_AP1R3_EL2); break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:	 val = read_gicreg(ICH_AP0R0_EL2); break;
	case 1:	 val = read_gicreg(ICH_AP0R1_EL2); break;
	case 2:	 val = read_gicreg(ICH_AP0R2_EL2); break;
	case 3:	 val = read_gicreg(ICH_AP0R3_EL2); break;
	default: unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:	 val = read_gicreg(ICH_AP1R0_EL2); break;
	case 1:	 val = read_gicreg(ICH_AP1R1_EL2); break;
	case 2:	 val = read_gicreg(ICH_AP1R2_EL2); break;
	case 3:	 val = read_gicreg(ICH_AP1R3_EL2); break;
	default: unreachable();
	}

	return val;
}

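/*
 * ICH_AP0Rn_EL2 hold the active priority bits for Group-0 virtual
 * interrupts, ICH_AP1Rn_EL2 those for Group 1; the save/restore and
 * priority-search code below always handles the two banks in pairs.
 */
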
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read
	 * the correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

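/*
 * The two APR helpers below may be called from the kernel proper via
 * kvm_call_hyp(), in which case (on non-VHE systems) the vcpu pointer
 * is a kernel virtual address that must be translated with
 * kern_hyp_va() before it can be dereferenced at EL2.
 */
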
void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

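/*
 * Note the deliberate fall-through in the switches above and below:
 * with 7 bits of preemption all four APR registers per group exist,
 * with 6 bits two of them do, and with 5 bits only AP0R0/AP1R0, so
 * each case handles the registers the lower cases don't cover.
 */
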
void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

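/*
 * Example: with 5 bits of preemption the minimum (virtual) BPR is 3,
 * i.e. bits [7:3] of a priority form the preemption group and bits
 * [2:0] are sub-priority, the finest split the hardware supports.
 */
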
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	/* The Group-0 accessors (ICC_*0_EL1) are encoded with CRm == 8 */
	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;
		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;
		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;
		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

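/*
 * Put differently: bit n of APR pair i stands for preemption level
 * 32 * i + n, and that level is scaled back to an 8-bit priority by
 * shifting left by the minimum BPR. With 5 preemption bits
 * (bpr_min == 3), bit 4 of AP0R0/AP1R0 maps to priority 4 << 3 = 0x20.
 */
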
static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

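/*
 * Worked example: pri == 0x50 with 5 preemption bits (bpr_min == 3)
 * for a Group-1 interrupt with BPR1 at its minimum: pre == 0x50,
 * ap == 0x0a, apr == 0, so bit 10 of ICH_AP1R0_EL2 gets set.
 */
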
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

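/*
 * ICH_HCR_EL2.EOIcount counts EOIs that found no matching List
 * Register entry; bumping it here mirrors what the hardware would
 * have done had the EOI or DIR access not trapped.
 */
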
static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

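/*
 * Emulate a guest access to one of the ICC_*_EL1 registers. Returns 1
 * if the access was handled here (with the trapping instruction
 * skipped), and 0 if it must be handled by the normal sysreg-access
 * exit path instead.
 */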
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}

#endif	/* CONFIG_ARM64 */