xtensa: enable KCSAN
author    Max Filippov <jcmvbkbc@gmail.com>
          Sat, 5 Oct 2019 06:33:31 +0000 (23:33 -0700)
committer Max Filippov <jcmvbkbc@gmail.com>
          Mon, 2 May 2022 02:51:22 +0000 (19:51 -0700)
Prefix arch-specific barrier macros with '__' to make use of instrumented
generic macros.
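(Illustration, not part of this patch: once the arch macros are spelled
__mb()/__rmb()/__wmb(), the generic <asm-generic/barrier.h> supplies the
un-prefixed names and adds the KCSAN hooks, roughly along these lines:

  #ifdef __mb
  #define mb()  do { kcsan_mb(); __mb(); } while (0)
  #endif

  #ifdef __rmb
  #define rmb() do { kcsan_rmb(); __rmb(); } while (0)
  #endif

so existing callers of mb()/rmb()/wmb() get instrumented barriers without
further arch changes.)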
Prefix arch-specific bitops with 'arch_' to make use of instrumented
generic functions.
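(Likewise for illustration: <asm-generic/bitops/instrumented-atomic.h>,
included at the end of the arch header below, defines the un-prefixed bitops
on top of the arch_ variants, approximately:

  static __always_inline void set_bit(long nr, volatile unsigned long *addr)
  {
          instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
          arch_set_bit(nr, addr);
  }

  static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
  {
          instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
          return arch_test_and_set_bit(nr, addr);
  }

so the race checks live in one place instead of being duplicated per
architecture.)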
Provide stubs for 64-bit atomics when building with KCSAN.
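(Background on the stubs, illustration only: the generic KCSAN runtime
implements the compiler's __tsan_atomic*() hooks in terms of the __atomic
builtins, and for 64-bit operands on 32-bit xtensa the compiler lowers those
builtins to out-of-line libcalls such as __atomic_load_8(). The symbols
therefore have to exist at link time even though nothing is expected to reach
them, hence the BUG() bodies below. A hypothetical example of code that could
produce such a libcall on a 32-bit target:

  #include <stdint.h>

  uint64_t load64(const volatile uint64_t *p)
  {
          /* on a 32-bit target this may become a call to __atomic_load_8() */
          return __atomic_load_n(p, __ATOMIC_RELAXED);
  }
)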
Disable KCSAN instrumentation in arch/xtensa/boot.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: Marco Elver <elver@google.com>
arch/xtensa/Kconfig
arch/xtensa/boot/lib/Makefile
arch/xtensa/include/asm/barrier.h
arch/xtensa/include/asm/bitops.h
arch/xtensa/lib/Makefile
arch/xtensa/lib/kcsan-stubs.c [new file with mode: 0644]

diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 78619c847b87cae1f18a7d72f07b622ec64f2559..036854e7335183bace83564da6a8c92be4d37317 100644
@@ -29,6 +29,7 @@ config XTENSA
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+       select HAVE_ARCH_KCSAN
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_CONTEXT_TRACKING
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index e3d717c7bfa1f6f5a0d4c0725aef0b8749a55dcd..162d10af36f33cf6a963b5ebb3a915a2ea5480f1 100644
@@ -16,6 +16,7 @@ CFLAGS_REMOVE_inffast.o = -pg
 endif
 
 KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
 
 CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index d6f8d4ddc2bca04e850ba75b10677cf8611d6619..898ea397e9bc989673ebd0e99761cc30a8b103bb 100644
 
 #include <asm/core.h>
 
-#define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
-#define rmb() barrier()
-#define wmb() mb()
+#define __mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define __rmb() barrier()
+#define __wmb() __mb()
+
+#ifdef CONFIG_SMP
+#define __smp_mb() __mb()
+#define __smp_rmb() __rmb()
+#define __smp_wmb() __wmb()
+#endif
 
 #if XCHAL_HAVE_S32C1I
 #define __smp_mb__before_atomic()              barrier()
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index cd225896c40f4bfa5e675ee439d27a95b0230a22..e02ec5833389479c1c3054f51a3fe90a2f62437b 100644
@@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word)
 #if XCHAL_HAVE_EXCLUSIVE
 
 #define BIT_OP(op, insn, inv)                                          \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
 {                                                                      \
        unsigned long tmp;                                              \
        unsigned long mask = 1UL << (bit & 31);                         \
@@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
 
 #define TEST_AND_BIT_OP(op, insn, inv)                                 \
 static inline int                                                      \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)       \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)  \
 {                                                                      \
        unsigned long tmp, value;                                       \
        unsigned long mask = 1UL << (bit & 31);                         \
@@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)    \
 #elif XCHAL_HAVE_S32C1I
 
 #define BIT_OP(op, insn, inv)                                          \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
 {                                                                      \
        unsigned long tmp, value;                                       \
        unsigned long mask = 1UL << (bit & 31);                         \
@@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
 
 #define TEST_AND_BIT_OP(op, insn, inv)                                 \
 static inline int                                                      \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)       \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)  \
 {                                                                      \
        unsigned long tmp, value;                                       \
        unsigned long mask = 1UL << (bit & 31);                         \
@@ -205,6 +205,8 @@ BIT_OPS(change, "xor", )
 #undef BIT_OP
 #undef TEST_AND_BIT_OP
 
+#include <asm-generic/bitops/instrumented-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
index 5848c133f7ea871967c308060316a0482334734e..d4e9c397e3fdefb35fcd4a57953b04b2d7149007 100644
@@ -8,3 +8,5 @@ lib-y   += memcopy.o memset.o checksum.o \
           divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
           usercopy.o strncpy_user.o strnlen_user.o
 lib-$(CONFIG_PCI) += pci-auto.o
+lib-$(CONFIG_KCSAN) += kcsan-stubs.o
+KCSAN_SANITIZE_kcsan-stubs.o := n
diff --git a/arch/xtensa/lib/kcsan-stubs.c b/arch/xtensa/lib/kcsan-stubs.c
new file mode 100644
index 0000000..2b08faa
--- /dev/null
+++ b/arch/xtensa/lib/kcsan-stubs.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+void __atomic_store_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+u64 __atomic_load_8(const volatile void *p, int i)
+{
+       BUG();
+}
+
+u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
+{
+       BUG();
+}
+
+u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}
+
+u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
+{
+       BUG();
+}