riscv_pmu_sbi: add support for PMU variant on T-Head C9xx cores
author Palmer Dabbelt <palmer@rivosinc.com>
Thu, 27 Oct 2022 03:19:46 +0000 (20:19 -0700)
committer Palmer Dabbelt <palmer@rivosinc.com>
Thu, 27 Oct 2022 21:35:21 +0000 (14:35 -0700)
The PMU on T-Head C9xx cores is quite similar to the SSCOFPMF extension
but not completely identical, so this series adds a T-Head PMU errata
that handles the differences.
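
The difference boils down to which CSR holds the counter-overflow bits and which local interrupt signals overflow; everything else goes through the regular SBI PMU driver, patched at boot by the alternatives framework. A hedged sketch of the resulting read path, using the ALT_SBI_PMU_OVERFLOW() helper added below (the wrapper function is illustrative, not part of the patch):

    /*
     * Sketch of the idea, not the exact kernel code: the same driver
     * read is rewritten at boot by the alternatives framework, so the
     * standard Sscofpmf CSR access becomes a T-Head vendor CSR access
     * on C9xx cores.
     */
    static inline unsigned long pmu_sbi_read_overflow(void)
    {
            unsigned long ovf;

            /* csrr CSR_SSCOUNTOVF, patched to THEAD_C9XX_CSR_SCOUNTEROF */
            ALT_SBI_PMU_OVERFLOW(ovf);
            return ovf;
    }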

* 'riscv-pmu' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/palmer/linux:
  drivers/perf: riscv_pmu_sbi: add support for PMU variant on T-Head C9xx cores
  RISC-V: Cache SBI vendor values

arch/riscv/Kconfig.erratas
arch/riscv/errata/thead/errata.c
arch/riscv/include/asm/errata_list.h
arch/riscv/include/asm/sbi.h
arch/riscv/kernel/cpu.c
drivers/perf/riscv_pmu_sbi.c

diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index f3623df23b5fcd902339552e303c257a4dd6efa4..69621ae6d647aa369cee22c5751d3a55ec93e0b6 100644
@@ -66,4 +66,17 @@ config ERRATA_THEAD_CMO
 
          If you don't know what to do here, say "Y".
 
+config ERRATA_THEAD_PMU
+       bool "Apply T-Head PMU errata"
+       depends on ERRATA_THEAD && RISCV_PMU_SBI
+       default y
+       help
+         The T-Head C9xx cores implement a PMU overflow extension very
+         similar to the core SSCOFPMF extension.
+
+         This will apply the overflow errata to handle the non-standard
+         behaviour via the regular SBI PMU driver and interface.
+
+         If you don't know what to do here, say "Y".
+
 endmenu # "CPU errata selection"
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 21546937db39bf5fb968af60a7ee5b096242925b..fac5742d1c1e6f8e34771291b783ed58c7710251 100644
@@ -47,6 +47,22 @@ static bool errata_probe_cmo(unsigned int stage,
        return true;
 }
 
+static bool errata_probe_pmu(unsigned int stage,
+                            unsigned long arch_id, unsigned long impid)
+{
+       if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
+               return false;
+
+       /* target-c9xx cores report arch_id and impid as 0 */
+       if (arch_id != 0 || impid != 0)
+               return false;
+
+       if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+               return false;
+
+       return true;
+}
+
 static u32 thead_errata_probe(unsigned int stage,
                              unsigned long archid, unsigned long impid)
 {
@@ -58,6 +74,9 @@ static u32 thead_errata_probe(unsigned int stage,
        if (errata_probe_cmo(stage, archid, impid))
                cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
 
+       if (errata_probe_pmu(stage, archid, impid))
+               cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
+
        return cpu_req_errata;
 }
 
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 19a771085781a69da552b1daaa31b6b325a17a9a..4180312d2a70107cd532c1eb2a3bf7ed96f56b37 100644
@@ -6,6 +6,7 @@
 #define ASM_ERRATA_LIST_H
 
 #include <asm/alternative.h>
+#include <asm/csr.h>
 #include <asm/vendorid_list.h>
 
 #ifdef CONFIG_ERRATA_SIFIVE
@@ -17,7 +18,8 @@
 #ifdef CONFIG_ERRATA_THEAD
 #define        ERRATA_THEAD_PBMT 0
 #define        ERRATA_THEAD_CMO 1
-#define        ERRATA_THEAD_NUMBER 2
+#define        ERRATA_THEAD_PMU 2
+#define        ERRATA_THEAD_NUMBER 3
 #endif
 
 #define        CPUFEATURE_SVPBMT 0
@@ -142,6 +144,18 @@ asm volatile(ALTERNATIVE_2(                                                \
            "r"((unsigned long)(_start) + (_size))                      \
        : "a0")
 
+#define THEAD_C9XX_RV_IRQ_PMU                  17
+#define THEAD_C9XX_CSR_SCOUNTEROF              0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl)                                    \
+asm volatile(ALTERNATIVE(                                              \
+       "csrr %0, " __stringify(CSR_SSCOUNTOVF),                        \
+       "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),             \
+               THEAD_VENDOR_ID, ERRATA_THEAD_PMU,                      \
+               CONFIG_ERRATA_THEAD_PMU)                                \
+       : "=r" (__ovl) :                                                \
+       : "memory")
+
 #endif /* __ASSEMBLY__ */
 
 #endif
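
The alternative picks between two csrr instructions when alternatives are applied; conceptually (a simplified sketch, not the literal macro expansion) the two paths are:

    unsigned long overflow;

    /* default path: the standard Sscofpmf overflow CSR */
    overflow = csr_read(CSR_SSCOUNTOVF);

    /* path with ERRATA_THEAD_PMU patched in: vendor CSR 0x5c5 */
    overflow = csr_read(THEAD_C9XX_CSR_SCOUNTEROF);
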
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 2a0ef738695ed0847aa02c080489aa2f8f8981a6..4ca7fbacff42494bf3e222ce5a378b507d152383 100644
@@ -327,4 +327,9 @@ int sbi_err_map_linux_errno(int err);
 static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
 static inline void sbi_init(void) {}
 #endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
 #endif /* _ASM_RISCV_SBI_H */
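
These accessors return the machine-mode ID values cached at boot, so a driver can identify the core without issuing an SBI ecall on every query. A sketch of the intended use, mirroring the check added to pmu_sbi_setup_irqs() below (the helper name is hypothetical):

    /* Does hart 0 report the T-Head C9xx IDs (T-Head mvendorid with
     * marchid and mimpid both zero)? */
    static bool thead_c9xx_pmu_present(void)
    {
            return IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
                   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
                   riscv_cached_marchid(0) == 0 &&
                   riscv_cached_mimpid(0) == 0;
    }
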
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index fa427bdcf773d20cba656fdb0477ff6819ef20ab..bf9dd6764bad2934d830291fce7e053148d458a8 100644
@@ -70,8 +70,6 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
        return -1;
 }
 
-#ifdef CONFIG_PROC_FS
-
 struct riscv_cpuinfo {
        unsigned long mvendorid;
        unsigned long marchid;
@@ -79,6 +77,30 @@ struct riscv_cpuinfo {
 };
 static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
+{
+       struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+       return ci->mvendorid;
+}
+EXPORT_SYMBOL(riscv_cached_mvendorid);
+
+unsigned long riscv_cached_marchid(unsigned int cpu_id)
+{
+       struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+       return ci->marchid;
+}
+EXPORT_SYMBOL(riscv_cached_marchid);
+
+unsigned long riscv_cached_mimpid(unsigned int cpu_id)
+{
+       struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+       return ci->mimpid;
+}
+EXPORT_SYMBOL(riscv_cached_mimpid);
+
 static int riscv_cpuinfo_starting(unsigned int cpu)
 {
        struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
@@ -113,7 +135,9 @@ static int __init riscv_cpuinfo_init(void)
 
        return 0;
 }
-device_initcall(riscv_cpuinfo_init);
+arch_initcall(riscv_cpuinfo_init);
+
+#ifdef CONFIG_PROC_FS
 
 #define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \
        {                                                       \
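
Two related changes in this file: the cpuinfo cache and its accessors move out of the CONFIG_PROC_FS guard, and the initcall is promoted from device_initcall to arch_initcall so the cache is filled before later consumers probe. A hedged illustration of the ordering this relies on (the comment is the reasoning, not kernel text):

    arch_initcall(riscv_cpuinfo_init);      /* arch initcalls run before   */
                                            /* device-level initcalls, so  */
                                            /* riscv_cached_*() already    */
                                            /* return valid IDs by the     */
                                            /* time the SBI PMU driver     */
                                            /* probes                      */
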
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 3852c18362f53ec2630a7f10c5a6d6e6baf59a9c..f6507efe2a5846d8180b1baf1047424fd65303bb 100644
@@ -20,6 +20,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/sched/clock.h>
 
+#include <asm/errata_list.h>
 #include <asm/sbi.h>
 #include <asm/hwcap.h>
 
@@ -47,6 +48,8 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
  * per_cpu in case of harts with different pmu counters
  */
 static union sbi_pmu_ctr_info *pmu_ctr_list;
+static bool riscv_pmu_use_irq;
+static unsigned int riscv_pmu_irq_num;
 static unsigned int riscv_pmu_irq;
 
 struct sbi_pmu_event_data {
@@ -580,7 +583,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
        fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
        event = cpu_hw_evt->events[fidx];
        if (!event) {
-               csr_clear(CSR_SIP, SIP_LCOFIP);
+               csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
                return IRQ_NONE;
        }
 
@@ -588,13 +591,13 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
        pmu_sbi_stop_hw_ctrs(pmu);
 
        /* Overflow status register should only be read after counter are stopped */
-       overflow = csr_read(CSR_SSCOUNTOVF);
+       ALT_SBI_PMU_OVERFLOW(overflow);
 
        /*
         * Overflow interrupt pending bit should only be cleared after stopping
         * all the counters to avoid any race condition.
         */
-       csr_clear(CSR_SIP, SIP_LCOFIP);
+       csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
 
        /* No overflow bit is set */
        if (!overflow)
@@ -661,10 +664,10 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
        /* Stop all the counters so that they can be enabled from perf */
        pmu_sbi_stop_all(pmu);
 
-       if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+       if (riscv_pmu_use_irq) {
                cpu_hw_evt->irq = riscv_pmu_irq;
-               csr_clear(CSR_IP, BIT(RV_IRQ_PMU));
-               csr_set(CSR_IE, BIT(RV_IRQ_PMU));
+               csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
+               csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
                enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
        }
 
@@ -673,9 +676,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 
 static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
 {
-       if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+       if (riscv_pmu_use_irq) {
                disable_percpu_irq(riscv_pmu_irq);
-               csr_clear(CSR_IE, BIT(RV_IRQ_PMU));
+               csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
        }
 
        /* Disable all counters access for user mode now */
@@ -691,7 +694,18 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
        struct device_node *cpu, *child;
        struct irq_domain *domain = NULL;
 
-       if (!riscv_isa_extension_available(NULL, SSCOFPMF))
+       if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+               riscv_pmu_irq_num = RV_IRQ_PMU;
+               riscv_pmu_use_irq = true;
+       } else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
+                  riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
+                  riscv_cached_marchid(0) == 0 &&
+                  riscv_cached_mimpid(0) == 0) {
+               riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
+               riscv_pmu_use_irq = true;
+       }
+
+       if (!riscv_pmu_use_irq)
                return -EOPNOTSUPP;
 
        for_each_of_cpu_node(cpu) {
@@ -713,7 +727,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
                return -ENODEV;
        }
 
-       riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU);
+       riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
        if (!riscv_pmu_irq) {
                pr_err("Failed to map PMU interrupt for node\n");
                return -ENODEV;
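
With the interrupt number held in riscv_pmu_irq_num, the same enable/ack sequence serves both the standard Sscofpmf local counter-overflow interrupt (RV_IRQ_PMU) and the T-Head vendor interrupt (THEAD_C9XX_RV_IRQ_PMU, 17). A hedged sketch of the ordering the overflow handler above depends on (simplified from pmu_sbi_ovf_handler(); names match the driver):

    pmu_sbi_stop_hw_ctrs(pmu);                   /* 1. stop all counters   */
    ALT_SBI_PMU_OVERFLOW(overflow);              /* 2. read overflow bits  */
                                                 /*    only after counters */
                                                 /*    stop, so no new     */
                                                 /*    bits appear mid-read*/
    csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));  /* 3. ack the pending bit */
                                                 /*    last, after all     */
                                                 /*    counters are        */
                                                 /*    stopped, to avoid a */
                                                 /*    race with new       */
                                                 /*    overflows           */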