/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			int ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

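/*
 * Example (illustrative sketch, not part of this header): typical use of
 * likely()/unlikely() to annotate branches whose outcome is heavily
 * biased one way. The function and error values below are hypothetical.
 *
 *	static int parse_header(const unsigned char *buf, size_t len)
 *	{
 *		if (unlikely(len < 4))		// rare error path
 *			return -EINVAL;
 *		if (likely(buf[0] == 0x7f))	// expected fast path
 *			return 0;
 *		return -EPROTO;
 *	}
 */
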
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

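/*
 * Example (hedged sketch): barrier_data() forces the compiler to assume
 * that the pointed-to object may still be read, so a preceding memset()
 * of sensitive data cannot be eliminated as a dead store; compare
 * memzero_explicit() in lib/string.c. The helper below is illustrative.
 *
 *	static void wipe_key(void *key, size_t len)
 *	{
 *		memset(key, 0, len);
 *		barrier_data(key);	// keep the memset() above alive
 *	}
 */
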
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif

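/*
 * Example (illustrative): unreachable() marks a point that control flow
 * can never reach, e.g. after an exhaustive switch, so the compiler and
 * objtool do not warn about the missing return. The enum and function
 * here are hypothetical.
 *
 *	enum mode { MODE_A, MODE_B };
 *
 *	static int mode_cost(enum mode m)
 *	{
 *		switch (m) {
 *		case MODE_A: return 1;
 *		case MODE_B: return 2;
 *		}
 *		unreachable();	// every enum value is handled above
 *	}
 */
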
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotating each function and data
 * item.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif

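/*
 * Example (hypothetical): keeping a symbol that is reached only through a
 * vector or jump table, i.e. without any symbolic reference the linker
 * could see. The handler name is made up for illustration.
 *
 *	void my_vector_entry(void);	// referenced only from a jump table
 *	KENTRY(my_vector_entry)
 */
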
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

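/*
 * Example (hedged sketch): RELOC_HIDE() computes ptr + off while hiding
 * the arithmetic from the compiler, so it cannot assume the result still
 * points into the original object (the per-cpu accessors historically
 * relied on this). The variables below are hypothetical.
 *
 *	struct foo *base = get_base();			// some object
 *	unsigned long off = get_slot_offset();		// arbitrary offset
 *	struct foo *slot = RELOC_HIDE(base, off);	// base + off, opaquely
 */
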
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build
 * failure:
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and another for the macro's copy of the variable
 * '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide a memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

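/*
 * Example (illustrative, names hypothetical): use case (1) above, a flag
 * shared between process context and an interrupt handler on the same
 * CPU. READ_ONCE()/WRITE_ONCE() keep the compiler from caching, tearing,
 * or refetching the accesses.
 *
 *	static int dev_ready;
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		WRITE_ONCE(dev_ready, 1);	// publish to process context
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void wait_for_device(void)
 *	{
 *		while (!READ_ONCE(dev_ready))	// refetched on every pass
 *			cpu_relax();
 *	}
 */
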
#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable-sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of the POSIX assert macro, this macro will break the
 * build if the supplied condition is *false*, emitting the supplied error
 * message if the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

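/*
 * Example (illustrative): compiletime_assert() must be used in function
 * scope and its condition must be a compile-time constant; when the
 * condition is false, the build breaks with the given message. The
 * struct and function here are hypothetical.
 *
 *	struct wire_hdr { __u8 type; __u8 len; __u16 csum; };
 *
 *	static void check_layout(void)
 *	{
 *		compiletime_assert(sizeof(struct wire_hdr) == 4,
 *				   "wire_hdr must be exactly 4 bytes");
 *	}
 */
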
#endif /* __LINUX_COMPILER_H */