KVM: arm64: Upgrade PMU support to ARMv8.4
author Marc Zyngier <maz@kernel.org>
Sun, 16 Feb 2020 18:17:22 +0000 (18:17 +0000)
committer Marc Zyngier <maz@kernel.org>
Wed, 3 Feb 2021 11:00:22 +0000 (11:00 +0000)
Upgrading the PMU code from ARMv8.1 to ARMv8.4 turns out to be
pretty easy. All that is required is support for PMMIR_EL1, which
is read-only, and for which returning 0 is a valid option as long
as we don't advertise STALL_SLOT as an implemented event.

Let's just do that and adjust what we return to the guest.

Signed-off-by: Marc Zyngier <maz@kernel.org>
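
Why RAZ is enough: PMMIR_EL1 only carries useful information (its SLOTS field) when the STALL_SLOT event is advertised, so once STALL_SLOT is masked out of PMCEID1_EL0, an all-zero PMMIR is architecturally consistent. A hypothetical guest-side probe, sketched as bare-metal EL1 C purely for illustration (the helpers and names below are invented; this is not KVM or guest kernel code):

#include <stdbool.h>
#include <stdint.h>

#define ARMV8_PMUV3_PERFCTR_STALL_SLOT  0x3F    /* common event number */

static inline uint64_t read_pmceid1_el0(void)
{
        uint64_t val;

        asm volatile("mrs %0, pmceid1_el0" : "=r" (val));
        return val;
}

static inline uint64_t read_pmmir_el1(void)
{
        uint64_t val;

        /* S3_0_C9_C14_6 is the generic encoding of PMMIR_EL1 */
        asm volatile("mrs %0, S3_0_C9_C14_6" : "=r" (val));
        return val;
}

static bool stall_slot_implemented(void)
{
        /* PMCEID1_EL0 enumerates common events 32..63 */
        return read_pmceid1_el0() &
               (1ULL << (ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32));
}

static uint64_t pmmir_slots(void)
{
        /* PMMIR_EL1.SLOTS (bits [7:0]) only means something with STALL_SLOT */
        return stall_slot_implemented() ? (read_pmmir_el1() & 0xff) : 0;
}

A guest following this pattern never depends on a non-zero PMMIR, which is exactly what the changes below rely on.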
arch/arm64/include/asm/sysreg.h
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 8b5e7e5c3cc818f5df654a67472ecac5357ebea0..2fb3f386588cbf5d0bead2b22db0ecc365303f7a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
 
 #define ID_DFR0_PERFMON_SHIFT          24
 
+#define ID_DFR0_PERFMON_8_0            0x3
 #define ID_DFR0_PERFMON_8_1            0x4
+#define ID_DFR0_PERFMON_8_4            0x5
+#define ID_DFR0_PERFMON_8_5            0x6
 
 #define ID_ISAR4_SWP_FRAC_SHIFT                28
 #define ID_ISAR4_PSR_M_SHIFT           24
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 398f6df1bbe40ad298831b2a7acc109f8acad0ab..72cd704a8368c64cab6770e45c15f29e17c62a9a 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -795,6 +795,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
+               /*
+                * Don't advertise STALL_SLOT, as PMMIR_EL1 is handled
+                * as RAZ
+                */
+               if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+                       val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }
 
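
The subtraction by 32 reflects how the common event space is split across the two enumeration registers: PMCEID0_EL0 covers events 0..31 and PMCEID1_EL0 covers events 32..63, so STALL_SLOT (event 0x3F) maps to bit 31 of PMCEID1_EL0. A self-contained sketch of that mapping, with an invented helper name for illustration:

#include <stdint.h>

#define ARMV8_PMUV3_PERFCTR_STALL_SLOT  0x3F    /* common event number */

/*
 * Illustrative helper (name invented): drop one common event from the
 * pair of enumeration values a hypervisor exposes. Events 0..31 live in
 * PMCEID0, events 32..63 in PMCEID1, hence the "- 32" for STALL_SLOT,
 * which ends up as bit 31 of PMCEID1.
 */
static void hide_common_event(uint64_t *pmceid0, uint64_t *pmceid1,
                              unsigned int event)
{
        if (event < 32)
                *pmceid0 &= ~(1ULL << event);
        else
                *pmceid1 &= ~(1ULL << (event - 32));
}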
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 8f79ec1fffa7eb8aaf06b3d0921a183dd6616f2a..5da536ab738da5b4cfa8b9da018a49f7b8e3fbb9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1051,16 +1051,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                /* Limit debug to ARMv8.0 */
                val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
                val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
-               /* Limit guests to PMUv3 for ARMv8.1 */
+               /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
                                                      ID_AA64DFR0_PMUVER_SHIFT,
-                                                     kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_1 : 0);
+                                                     kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
                break;
        case SYS_ID_DFR0_EL1:
-               /* Limit guests to PMUv3 for ARMv8.1 */
+               /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
                                                      ID_DFR0_PERFMON_SHIFT,
-                                                     kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_1 : 0);
+                                                     kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
                break;
        }
 
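
cpuid_feature_cap_perfmon_field() clamps the 4-bit PMU version field so the guest never sees a higher PMU version than KVM emulates. A simplified stand-in for that clamping, assuming the usual treatment of the IMPLEMENTATION DEFINED encoding 0xf as "no PMU" (this is an illustrative sketch, not the kernel helper itself):

#include <stdint.h>

/*
 * Extract the 4-bit version field, treat the IMPLEMENTATION DEFINED
 * encoding 0xf as "no PMU", clamp to the maximum KVM emulates, and fold
 * the result back into the ID register value the guest will see.
 */
static uint64_t cap_perfmon_field(uint64_t reg, unsigned int shift,
                                  uint64_t cap)
{
        uint64_t field = (reg >> shift) & 0xf;

        if (field == 0xf)       /* IMP DEF PMU: hide it entirely */
                field = 0;
        if (field > cap)        /* e.g. cap = ID_AA64DFR0_PMUVER_8_4 */
                field = cap;

        reg &= ~(0xfULL << shift);
        return reg | (field << shift);
}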
@@ -1496,6 +1496,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
        { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
        { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+       { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
 
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1918,6 +1919,8 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
        { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
        { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
+       /* PMMIR */
+       { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
 
        /* PRRR/MAIR0 */
        { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
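
Both new PMMIR entries (the AArch64 sys_reg_descs one and the AArch32 cp15 one) are wired to trap_raz_wi: the guest reads zero and writes are discarded. A cut-down, self-contained sketch of that RAZ/WI behaviour, with the KVM trap types reduced to invented stand-ins for illustration (not the kernel's actual handler):

#include <stdbool.h>
#include <stdint.h>

/* Cut-down stand-in for the KVM trap plumbing, for illustration only */
struct trap_params {
        uint64_t regval;        /* value being read or written */
        bool     is_write;      /* true for a register write */
};

/*
 * RAZ/WI emulation: reads return zero, writes are silently ignored.
 * Returning true signals that the access was handled and the guest can
 * be resumed past the trapping instruction.
 */
static bool emulate_raz_wi(struct trap_params *p)
{
        if (!p->is_write)
                p->regval = 0;
        return true;
}

Returning zero here is consistent with the PMCEID1 change above: a guest that follows the architecture will not go looking for stall information it was never told exists.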