Revert "x86/jump-labels: Macrofy inline assembly code to work around GCC inlining...
author		Ingo Molnar <mingo@kernel.org>
		Wed, 19 Dec 2018 10:20:23 +0000 (11:20 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Wed, 19 Dec 2018 10:58:10 +0000 (11:58 +0100)
This reverts commit 5bdcd510c2ac9efaf55c4cbd8d46421d8e2320cd.

The macro-based workarounds for GCC's inlining bugs caused regressions: distcc
and other distro build setups broke, and the fixes are not easy, nor will they
solve regressions on already existing installations.

So we are reverting this patch and the 8 follow-up patches.

What makes this revert easier is that GCC9 will likely include the new 'asm inline'
syntax that makes inlining of assembly blocks a lot more robust.

This is superior to any macro-based hackery, and it might even be backported
to GCC8, which would give all modern distros the inlining fixes as well.
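
To illustrate, here is a minimal sketch of what the 'asm inline' qualifier
looks like at the C level (hypothetical helper functions, assuming a compiler
that already accepts the keyword):

/*
 * Sketch only, not kernel code: with plain asm(), GCC estimates the cost of
 * the statement from the length of the assembly template, so a long template
 * can make the surrounding function look too big to inline.
 */
static inline void emit_nops_plain(void)
{
	asm volatile(".rept 16\n\t"
		     "nop\n\t"
		     ".endr");		/* counted as many instructions */
}

/*
 * With 'asm inline' the template is assumed to have the minimum possible
 * size for inlining decisions, however long the string actually is, so
 * wrappers around such blocks remain inlinable.
 */
static inline void emit_nops_inline(void)
{
	asm inline volatile(".rept 16\n\t"
			    "nop\n\t"
			    ".endr");	/* treated as negligible in size */
}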

Many thanks to Masahiro Yamada and others for helping sort out these problems.

Reported-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Reviewed-by: Borislav Petkov <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Juergen Gross <jgross@suse.com>
Cc: Richard Biener <rguenther@suse.de>
Cc: Kees Cook <keescook@chromium.org>
Cc: Segher Boessenkool <segher@kernel.crashing.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/entry/calling.h
arch/x86/include/asm/jump_label.h
arch/x86/kernel/macros.S

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 25e5a6bda8c3a971609dff93919ccab27d6a3aa9..20d0885b00fbec4c77dfee23c701ba0c3612890b 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
 #ifdef HAVE_JUMP_LABEL
-       STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
+       STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
 #endif
        call enter_from_user_mode
 .Lafter_call_\@:
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a5fb34fe56a4bb31f78023ff3d258132ff93ee16..21efc9d07ed909adfc37b06188b331ea0e6f747d 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -2,6 +2,19 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction.  Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
+
 #define JUMP_LABEL_NOP_SIZE 5
 
 #ifdef CONFIG_X86_64
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-                         "branch=\"%c1\""
-                       : :  "i" (key), "i" (branch) : : l_yes);
+       asm_volatile_goto("1:"
+               ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+               ".pushsection __jump_table,  \"aw\" \n\t"
+               _ASM_ALIGN "\n\t"
+               ".long 1b - ., %l[l_yes] - . \n\t"
+               _ASM_PTR "%c0 + %c1 - .\n\t"
+               ".popsection \n\t"
+               : :  "i" (key), "i" (branch) : : l_yes);
+
        return false;
 l_yes:
        return true;
@@ -30,8 +49,14 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
-                         "branch=\"%c1\""
+       asm_volatile_goto("1:"
+               ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+               "2:\n\t"
+               ".pushsection __jump_table,  \"aw\" \n\t"
+               _ASM_ALIGN "\n\t"
+               ".long 1b - ., %l[l_yes] - . \n\t"
+               _ASM_PTR "%c0 + %c1 - .\n\t"
+               ".popsection \n\t"
                : :  "i" (key), "i" (branch) : : l_yes);
 
        return false;
@@ -41,26 +66,37 @@ l_yes:
 
 #else  /* __ASSEMBLY__ */
 
-.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
-.Lstatic_branch_nop_\@:
-       .byte STATIC_KEY_INIT_NOP
-.Lstatic_branch_no_after_\@:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+       .if \def
+       /* Equivalent to "jmp.d32 \target" */
+       .byte           0xe9
+       .long           \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+       .else
+       .byte           STATIC_KEY_INIT_NOP
+       .endif
        .pushsection __jump_table, "aw"
        _ASM_ALIGN
-       .long           .Lstatic_branch_nop_\@ - ., \l_yes - .
-       _ASM_PTR        \key + \branch - .
+       .long           .Lstatic_jump_\@ - ., \target - .
+       _ASM_PTR        \key - .
        .popsection
 .endm
 
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
-       .byte 0xe9
-       .long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+       .if \def
+       .byte           STATIC_KEY_INIT_NOP
+       .else
+       /* Equivalent to "jmp.d32 \target" */
+       .byte           0xe9
+       .long           \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+       .endif
        .pushsection __jump_table, "aw"
        _ASM_ALIGN
-       .long           .Lstatic_branch_jmp_\@ - ., \l_yes - .
-       _ASM_PTR        \key + \branch - .
+       .long           .Lstatic_jump_\@ - ., \target - .
+       _ASM_PTR        \key + 1 - .
        .popsection
 .endm
 
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
index 161c95059044ffcccbe82adebdf4c2b0a89ea1c0..bf8b9c93e2552863c67e1c741d78dbe7a0f35e14 100644
--- a/arch/x86/kernel/macros.S
+++ b/arch/x86/kernel/macros.S
@@ -13,4 +13,3 @@
 #include <asm/paravirt.h>
 #include <asm/asm.h>
 #include <asm/cpufeature.h>
-#include <asm/jump_label.h>