Merge patch series "RISC-V: Probe for misaligned access speed"
author		Palmer Dabbelt <palmer@rivosinc.com>
		Fri, 8 Sep 2023 18:24:12 +0000 (11:24 -0700)
committer	Palmer Dabbelt <palmer@rivosinc.com>
		Fri, 8 Sep 2023 18:24:12 +0000 (11:24 -0700)
Evan Green <evan@rivosinc.com> says:

The current setting for the hwprobe bit indicating misaligned access
speed is controlled by a vendor-specific feature probe function. This is
essentially a per-SoC table we have to maintain on behalf of each vendor
going forward. Let's convert that instead to something we detect at
runtime.
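
For context, the resulting value is reported to user space through the
hwprobe syscall (the CPUPERF_0 key described in the hwprobe.rst hunk
below). A minimal sketch of a consumer, assuming kernel headers new
enough to provide <asm/hwprobe.h> and __NR_riscv_hwprobe, and assuming
that a NULL/0 cpuset means "all online CPUs":

  #include <asm/hwprobe.h>   /* struct riscv_hwprobe, RISCV_HWPROBE_* */
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdio.h>

  int main(void)
  {
      struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

      /* One key/value pair, NULL/0 cpuset, no flags. */
      if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
          return 1;

      if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
          RISCV_HWPROBE_MISALIGNED_FAST)
          printf("misaligned accesses are fast\n");

      return 0;
  }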

We have two assembly routines at the heart of our probe: one that
does a bunch of word-sized accesses (without aligning its input buffer),
and another that does byte accesses. If we can move more bytes in the
same amount of time using misaligned word accesses than we can using
byte accesses, then we declare misaligned accesses as "fast".

The tradeoff of reducing this maintenance burden is boot time. We spend
4-6 jiffies per core doing this measurement (0-2 on jiffy edge
alignment, and 4 on measurement). The timing loop was based on
raid6_choose_gen(), which uses (16+1)*N jiffies (where N is the number
of algorithms). Because only the fastest iteration out of all attempts
is used in the comparison, variance between runs is very low.
On my THead C906, it looks like this:

[    0.047563] cpu0: Ratio of byte access time to unaligned word access is 4.34, unaligned accesses are fast
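
(For reference: the printed ratio is computed as byte_cycles * 100 /
word_cycles in check_unaligned_access() below, so 4.34 means the best
byte-copy pass took roughly 4.3x as many cycles as the best misaligned
word-copy pass over the same data, comfortably past the
word_cycles < byte_cycles threshold for reporting "fast".)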

Several others have chimed in with results from slow machines running
the older algorithm, which took all runs into account, including noise
like interrupts. Even with this variation, the results indicate that in
all cases (fast, slow, and emulated) the measured numbers are nowhere
near each other (always separated by multiple factors).

* b4-shazam-merge:
  RISC-V: alternative: Remove feature_probe_func
  RISC-V: Probe for unaligned access speed

Link: https://lore.kernel.org/r/20230818194136.4084400-1-evan@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Documentation/riscv/hwprobe.rst
arch/riscv/errata/thead/errata.c
arch/riscv/include/asm/alternative.h
arch/riscv/include/asm/cpufeature.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/alternative.c
arch/riscv/kernel/copy-unaligned.S [new file with mode: 0644]
arch/riscv/kernel/copy-unaligned.h [new file with mode: 0644]
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/smpboot.c

index 20eff9650da959dc1e38d756c6e49ebd0d7ae191..a52996b22f75d3e8fea1064313cf36abcd878cf3 100644 (file)
@@ -87,13 +87,12 @@ The following keys are defined:
     emulated via software, either in or below the kernel.  These accesses are
     always extremely slow.
 
-  * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are supported
-    in hardware, but are slower than the corresponding aligned accesses
-    sequences.
+  * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower
+    than equivalent byte accesses.  Misaligned accesses may be supported
+    directly in hardware, or trapped and emulated by software.
 
-  * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are supported
-    in hardware and are faster than the corresponding aligned accesses
-    sequences.
+  * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster
+    than equivalent byte accesses.
 
   * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
     not supported at all and will generate a misaligned address fault.
index be84b14f01180a1db6b80846d5ce8be6a16a6b8c..0554ed4bf087cf6cd06dc2967c0c2c38e9784887 100644 (file)
@@ -120,11 +120,3 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
        if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
                local_flush_icache_all();
 }
-
-void thead_feature_probe_func(unsigned int cpu,
-                             unsigned long archid,
-                             unsigned long impid)
-{
-       if ((archid == 0) && (impid == 0))
-               per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
-}
index 6a41537826a7dc9d6451a6185887cabbeefcf72f..58ccd2f8cab7aa477489636638ceb534510fc01c 100644 (file)
@@ -30,7 +30,6 @@
 #define ALT_OLD_PTR(a)                 __ALT_PTR(a, old_offset)
 #define ALT_ALT_PTR(a)                 __ALT_PTR(a, alt_offset)
 
-void probe_vendor_features(unsigned int cpu);
 void __init apply_boot_alternatives(void);
 void __init apply_early_boot_alternatives(void);
 void apply_module_alternatives(void *start, size_t length);
@@ -53,15 +52,11 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
                             unsigned long archid, unsigned long impid,
                             unsigned int stage);
 
-void thead_feature_probe_func(unsigned int cpu, unsigned long archid,
-                             unsigned long impid);
-
 void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
                                 unsigned int stage);
 
 #else /* CONFIG_RISCV_ALTERNATIVE */
 
-static inline void probe_vendor_features(unsigned int cpu) { }
 static inline void apply_boot_alternatives(void) { }
 static inline void apply_early_boot_alternatives(void) { }
 static inline void apply_module_alternatives(void *start, size_t length) { }
index 23fed53b88157842308fd8e67640b981dacde40b..d0345bd659c94f11f349e6d92b9acd6b178218f6 100644 (file)
@@ -30,4 +30,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
+void check_unaligned_access(int cpu);
+
 #endif
index 6ac56af42f4a0c847bcc258e7cb9e6df0ffbb7bb..95cf25d484052e88b39ca3a49b74cc2bb1453992 100644 (file)
@@ -38,6 +38,7 @@ extra-y += vmlinux.lds
 obj-y  += head.o
 obj-y  += soc.o
 obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
+obj-y  += copy-unaligned.o
 obj-y  += cpu.o
 obj-y  += cpufeature.o
 obj-y  += entry.o
index 6b75788c18e66c70a6ec6e21c8f91ef0d36c490d..85056153fa23cfb3bbe481cc5c5dd62d4e9ee792 100644 (file)
@@ -27,8 +27,6 @@ struct cpu_manufacturer_info_t {
        void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
                                  unsigned long archid, unsigned long impid,
                                  unsigned int stage);
-       void (*feature_probe_func)(unsigned int cpu, unsigned long archid,
-                                  unsigned long impid);
 };
 
 static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
@@ -43,7 +41,6 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
        cpu_mfr_info->imp_id = sbi_get_mimpid();
 #endif
 
-       cpu_mfr_info->feature_probe_func = NULL;
        switch (cpu_mfr_info->vendor_id) {
 #ifdef CONFIG_ERRATA_SIFIVE
        case SIFIVE_VENDOR_ID:
@@ -53,7 +50,6 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
 #ifdef CONFIG_ERRATA_THEAD
        case THEAD_VENDOR_ID:
                cpu_mfr_info->patch_func = thead_errata_patch_func;
-               cpu_mfr_info->feature_probe_func = thead_feature_probe_func;
                break;
 #endif
        default:
@@ -143,20 +139,6 @@ void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
        }
 }
 
-/* Called on each CPU as it starts */
-void probe_vendor_features(unsigned int cpu)
-{
-       struct cpu_manufacturer_info_t cpu_mfr_info;
-
-       riscv_fill_cpu_mfr_info(&cpu_mfr_info);
-       if (!cpu_mfr_info.feature_probe_func)
-               return;
-
-       cpu_mfr_info.feature_probe_func(cpu,
-                                       cpu_mfr_info.arch_id,
-                                       cpu_mfr_info.imp_id);
-}
-
 /*
  * This is called very early in the boot process (directly after we run
  * a feature detect on the boot CPU). No need to worry about other CPUs
@@ -211,7 +193,6 @@ void __init apply_boot_alternatives(void)
        /* If called on non-boot cpu things could go wrong */
        WARN_ON(smp_processor_id() != 0);
 
-       probe_vendor_features(0);
        _apply_alternatives((struct alt_entry *)__alt_start,
                            (struct alt_entry *)__alt_end,
                            RISCV_ALTERNATIVES_BOOT);
diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
new file mode 100644 (file)
index 0000000..cfdecfb
--- /dev/null
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023 Rivos Inc. */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+       .text
+
+/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of 8 * SZREG */
+ENTRY(__riscv_copy_words_unaligned)
+       andi  a4, a2, ~((8*SZREG)-1)
+       beqz  a4, 2f
+       add   a3, a1, a4
+1:
+       REG_L a4,       0(a1)
+       REG_L a5,   SZREG(a1)
+       REG_L a6, 2*SZREG(a1)
+       REG_L a7, 3*SZREG(a1)
+       REG_L t0, 4*SZREG(a1)
+       REG_L t1, 5*SZREG(a1)
+       REG_L t2, 6*SZREG(a1)
+       REG_L t3, 7*SZREG(a1)
+       REG_S a4,       0(a0)
+       REG_S a5,   SZREG(a0)
+       REG_S a6, 2*SZREG(a0)
+       REG_S a7, 3*SZREG(a0)
+       REG_S t0, 4*SZREG(a0)
+       REG_S t1, 5*SZREG(a0)
+       REG_S t2, 6*SZREG(a0)
+       REG_S t3, 7*SZREG(a0)
+       addi  a0, a0, 8*SZREG
+       addi  a1, a1, 8*SZREG
+       bltu  a1, a3, 1b
+
+2:
+       ret
+END(__riscv_copy_words_unaligned)
+
+/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+ENTRY(__riscv_copy_bytes_unaligned)
+       andi a4, a2, ~(8-1)
+       beqz a4, 2f
+       add  a3, a1, a4
+1:
+       lb   a4, 0(a1)
+       lb   a5, 1(a1)
+       lb   a6, 2(a1)
+       lb   a7, 3(a1)
+       lb   t0, 4(a1)
+       lb   t1, 5(a1)
+       lb   t2, 6(a1)
+       lb   t3, 7(a1)
+       sb   a4, 0(a0)
+       sb   a5, 1(a0)
+       sb   a6, 2(a0)
+       sb   a7, 3(a0)
+       sb   t0, 4(a0)
+       sb   t1, 5(a0)
+       sb   t2, 6(a0)
+       sb   t3, 7(a0)
+       addi a0, a0, 8
+       addi a1, a1, 8
+       bltu a1, a3, 1b
+
+2:
+       ret
+END(__riscv_copy_bytes_unaligned)
diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h
new file mode 100644 (file)
index 0000000..e3d70d3
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos, Inc.
+ */
+#ifndef __RISCV_KERNEL_COPY_UNALIGNED_H
+#define __RISCV_KERNEL_COPY_UNALIGNED_H
+
+#include <linux/types.h>
+
+void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
+
+#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */
index ef7b4fd9e87688f42a62345ba66b8ef0a2b61500..1cfbba65d11ae311d54729966e57fb3c9386d61c 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwcap.h>
+#include <asm/hwprobe.h>
 #include <asm/patch.h>
 #include <asm/processor.h>
 #include <asm/vector.h>
 
+#include "copy-unaligned.h"
+
 #define NUM_ALPHA_EXTS ('z' - 'a' + 1)
 
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
 unsigned long elf_hwcap __read_mostly;
 
 /* Host ISA bitmap */
@@ -549,6 +556,103 @@ unsigned long riscv_get_elf_hwcap(void)
        return hwcap;
 }
 
+void check_unaligned_access(int cpu)
+{
+       u64 start_cycles, end_cycles;
+       u64 word_cycles;
+       u64 byte_cycles;
+       int ratio;
+       unsigned long start_jiffies, now;
+       struct page *page;
+       void *dst;
+       void *src;
+       long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+       page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+       if (!page) {
+               pr_warn("Can't alloc pages to measure memcpy performance");
+               return;
+       }
+
+       /* Make an unaligned destination buffer. */
+       dst = (void *)((unsigned long)page_address(page) | 0x1);
+       /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+       src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+       src += 2;
+       word_cycles = -1ULL;
+       /* Do a warmup. */
+       __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+       preempt_disable();
+       start_jiffies = jiffies;
+       while ((now = jiffies) == start_jiffies)
+               cpu_relax();
+
+       /*
+        * For a fixed amount of time, repeatedly try the function, and take
+        * the best time in cycles as the measurement.
+        */
+       while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+               start_cycles = get_cycles64();
+               /* Ensure the CSR read can't reorder WRT to the copy. */
+               mb();
+               __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+               /* Ensure the copy ends before the end time is snapped. */
+               mb();
+               end_cycles = get_cycles64();
+               if ((end_cycles - start_cycles) < word_cycles)
+                       word_cycles = end_cycles - start_cycles;
+       }
+
+       byte_cycles = -1ULL;
+       __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+       start_jiffies = jiffies;
+       while ((now = jiffies) == start_jiffies)
+               cpu_relax();
+
+       while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+               start_cycles = get_cycles64();
+               mb();
+               __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+               mb();
+               end_cycles = get_cycles64();
+               if ((end_cycles - start_cycles) < byte_cycles)
+                       byte_cycles = end_cycles - start_cycles;
+       }
+
+       preempt_enable();
+
+       /* Don't divide by zero. */
+       if (!word_cycles || !byte_cycles) {
+               pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+                       cpu);
+
+               goto out;
+       }
+
+       if (word_cycles < byte_cycles)
+               speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
+       ratio = div_u64((byte_cycles * 100), word_cycles);
+       pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
+               cpu,
+               ratio / 100,
+               ratio % 100,
+               (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+       per_cpu(misaligned_access_speed, cpu) = speed;
+
+out:
+       __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+}
+
+static int check_unaligned_access_boot_cpu(void)
+{
+       check_unaligned_access(0);
+       return 0;
+}
+
+arch_initcall(check_unaligned_access_boot_cpu);
+
 #ifdef CONFIG_RISCV_ALTERNATIVE
 /*
  * Alternative patch sites consider 48 bits when determining when to patch
index f4d6acb38dd0dfd81653425f9717f976703ca232..1b8da4e40a4d6e979293e4734fb7d4b2e7407723 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
 #include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
@@ -245,7 +246,7 @@ asmlinkage __visible void smp_callin(void)
 
        numa_add_cpu(curr_cpuid);
        set_cpu_online(curr_cpuid, 1);
-       probe_vendor_features(curr_cpuid);
+       check_unaligned_access(curr_cpuid);
 
        if (has_vector()) {
                if (riscv_v_setup_vsize())