/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);

#define likely_notrace(x)       __builtin_expect(!!(x), 1)
#define unlikely_notrace(x)     __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({                     \
                        long ______r;                                   \
                        static struct ftrace_likely_data                \
                                __aligned(4)                            \
                                __section("_ftrace_annotated_branch")   \
                                ______f = {                             \
                                .data.func = __func__,                  \
                                .data.file = __FILE__,                  \
                                .data.line = __LINE__,                  \
                        };                                              \
                        ______r = __builtin_expect(!!(x), expect);      \
                        ftrace_likely_update(&______f, ______r,         \
                                             expect, is_constant);      \
                        ______r;                                        \
                })

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)     (__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)   (__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({                       \
        static struct ftrace_branch_data                \
                __aligned(4)                            \
                __section("_ftrace_branch")             \
                __if_trace = {                          \
                        .func = __func__,               \
                        .file = __FILE__,               \
                        .line = __LINE__,               \
                };                                      \
        (cond) ?                                        \
                (__if_trace.miss_hit[1]++,1) :          \
                (__if_trace.miss_hit[0]++,0);           \
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)      __builtin_expect(!!(x), 1)
# define unlikely(x)    __builtin_expect(!!(x), 0)
#endif
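
/*
 * Example (illustrative sketch, not part of the original header): annotate a
 * branch that is almost never taken so the compiler lays out the common path
 * first. The structure and function names below are hypothetical.
 *
 *	static int example_fifo_push(struct example_fifo *f, u32 val)
 *	{
 *		if (unlikely(f->count == f->size))
 *			return -ENOSPC;
 *		f->data[f->count++] = val;
 *		return 0;
 *	}
 *
 * With CONFIG_TRACE_BRANCH_PROFILING enabled, the same annotation also
 * records how often the prediction was correct via ftrace_likely_update().
 */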

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
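
/*
 * Example (illustrative, not part of the original header): barrier() makes
 * the compiler assume memory may have changed, so a flag polled in a loop is
 * re-read on every iteration instead of being hoisted out of the loop.
 * 'example_done' below is a hypothetical variable.
 *
 *	while (!example_done)
 *		barrier();
 *
 * barrier_data(ptr) additionally tells the compiler that the object behind
 * 'ptr' is still in use, e.g. so a memset() that clears sensitive data is
 * not optimized away as a dead store.
 */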

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({                                         \
        asm volatile("%c0:\n\t"                                         \
                     ".pushsection .discard.reachable\n\t"              \
                     ".long %c0b - .\n\t"                               \
                     ".popsection\n\t" : : "i" (__COUNTER__));          \
})
#define annotate_unreachable() ({                                       \
        asm volatile("%c0:\n\t"                                         \
                     ".pushsection .discard.unreachable\n\t"            \
                     ".long %c0b - .\n\t"                               \
                     ".popsection\n\t" : : "i" (__COUNTER__));          \
})
#define ASM_UNREACHABLE                                                 \
        "999:\n\t"                                                      \
        ".pushsection .discard.unreachable\n\t"                         \
        ".long 999b - .\n\t"                                            \
        ".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {             \
        annotate_unreachable();         \
        __builtin_unreachable();        \
} while (0)
#endif
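
/*
 * Example (illustrative, not part of the original header): tell the compiler
 * and objtool that control cannot flow past a point, e.g. after a switch
 * that handles every possible value. The enum values and helpers below are
 * hypothetical.
 *
 *	switch (state) {
 *	case EXAMPLE_STATE_A:
 *		return handle_a();
 *	case EXAMPLE_STATE_B:
 *		return handle_b();
 *	}
 *	unreachable();
 */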

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data
 * object.
 */
#ifndef KENTRY
# define KENTRY(sym)                                            \
        extern typeof(sym) sym;                                 \
        static const unsigned long __kentry_##sym               \
        __used                                                  \
        __section("___kentry" "+" #sym )                        \
        = (unsigned long)&sym;
#endif
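
/*
 * Example (illustrative, not part of the original header): keep a handler
 * that is referenced only from a hand-written assembly vector table, so the
 * linker does not discard it as unused. The symbol name is hypothetical.
 *
 *	void example_vector_handler(void);
 *	KENTRY(example_vector_handler)
 */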

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)                                   \
  ({ unsigned long __ptr;                                       \
     __ptr = (unsigned long) (ptr);                             \
    (typeof(ptr)) (__ptr + (off)); })
#endif
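
/*
 * Example (illustrative, not part of the original header): form a pointer at
 * a run-time offset from a base address without letting the compiler assume
 * that the result still points into the original object. The names below
 * are hypothetical.
 *
 *	struct example_stats *base = &example_template;
 *	struct example_stats *mine = RELOC_HIDE(base, example_cpu_offset);
 */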

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)                                         \
        __asm__ ("" : "=r" (var) : "0" (var))
#endif
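
/*
 * Example (illustrative, not part of the original header): keep the compiler
 * from "seeing through" an intermediate value, e.g. so a comparison runs in
 * constant time instead of being short-circuited. 'diff', 'a' and 'b' are
 * hypothetical.
 *
 *	unsigned long diff = a ^ b;
 *	OPTIMIZER_HIDE_VAR(diff);
 *	return diff != 0;
 */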

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
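
/*
 * Example (illustrative, not part of the original header): with the fallback
 * definition above, two expansions of __UNIQUE_ID(foo) on different source
 * lines produce distinct identifiers such as __UNIQUE_ID_foo120 and
 * __UNIQUE_ID_foo121; two expansions on the same line would collide, which
 * is why the ID is only "not-quite-unique".
 */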

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE                                                \
({                                                                      \
        switch (size) {                                                 \
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
        default:                                                        \
                barrier();                                              \
                __builtin_memcpy((void *)res, (const void *)p, size);   \
                barrier();                                              \
        }                                                               \
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy of the variable '__u'
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)                                           \
({                                                                      \
        union { typeof(x) __val; char __c[1]; } __u;                    \
        if (check)                                                      \
                __read_once_size(&(x), __u.__c, sizeof(x));             \
        else                                                            \
                __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
        smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
        __u.__val;                                                      \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
        kasan_check_read(addr, 1);
        return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({                                                      \
        union { typeof(x) __val; char __c[1]; } __u =   \
                { .__val = (__force typeof(x)) (val) }; \
        __write_once_size(&(x), __u.__c, sizeof(x));    \
        __u.__val;                                      \
})
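
/*
 * Example (illustrative sketch, not part of the original header): a one-word
 * flag shared between process context and an interrupt handler on the same
 * CPU. No lock is needed, but the accesses must not be torn, fused or
 * refetched by the compiler. All names below are hypothetical.
 *
 *	static int example_pending;
 *
 *	static irqreturn_t example_irq(int irq, void *dev)
 *	{
 *		WRITE_ONCE(example_pending, 1);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void example_wait(void)
 *	{
 *		while (!READ_ONCE(example_pending))
 *			cpu_relax();
 *	}
 */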

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
        static void * __section(".discard.addressable") __used \
                __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
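
/*
 * Example (illustrative, not part of the original header): a function that is
 * called only from inline assembly has no reference the compiler can see;
 * __ADDRESSABLE() emits a discarded pointer to it so the symbol is still
 * generated. The function name is hypothetical.
 *
 *	static void example_asm_callee(void);
 *	__ADDRESSABLE(example_asm_callee)
 */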

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:        the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
        return (void *)((unsigned long)off + *off);
}
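
/*
 * Example (illustrative, not part of the original header): if a table stores
 * the 32-bit value (target - &entry) at 'entry', the original pointer is
 * recovered by adding the stored offset back to the address of the entry:
 *
 *	void *target = offset_to_ptr(&entry);
 *
 * Relative offsets keep such tables position-independent and half the size
 * of absolute 64-bit pointers.
 */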

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)           \
        do {                                                            \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (!(condition))                                       \
                        prefix ## suffix();                             \
        } while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)                               \
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
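
/*
 * Example (illustrative, not part of the original header): fail the build,
 * rather than at run time, if a structure ever outgrows a fixed slot size.
 * The structure name and limit below are hypothetical.
 *
 *	compiletime_assert(sizeof(struct example_slot) <= 64,
 *			   "struct example_slot no longer fits in a 64-byte slot");
 */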

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)      BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
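
/*
 * Example (illustrative, not part of the original header): ARRAY_SIZE() in
 * <linux/kernel.h> uses __must_be_array() so that passing a pointer instead
 * of an array is a build error rather than a silently wrong element count:
 *
 *	int tbl[8];
 *	size_t n = ARRAY_SIZE(tbl);	(compiles, n == 8)
 *	int *p = tbl;
 *	ARRAY_SIZE(p);			(build error via __must_be_array())
 */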

#endif /* __LINUX_COMPILER_H */