/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#define __nops(n)	".rept	" #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

/*
 * Raw "hint" encodings so these assemble even where the PSB CSYNC
 * (hint #17) and CSDB (hint #20) mnemonics are not yet known.
 */
#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",	\
						 SB_BARRIER_INSN"nop\n",	\
						 ARM64_HAS_SB))

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)
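
/*
 * __smp_store_release() and __smp_load_acquire() map directly onto the
 * ARMv8 stlr/ldar instructions: a store-release is ordered after all
 * program-order-earlier accesses, and a load-acquire is ordered before
 * all program-order-later accesses, so no separate dmb is required.
 */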

#define __smp_store_release(p, v)					\
do {									\
	union { typeof(*p) __val; char __c[1]; } __u =			\
		{ .__val = (__force typeof(*p)) (v) };			\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	union { typeof(*p) __val; char __c[1]; } __u;			\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*p) : "memory");				\
		break;							\
	}								\
	__u.__val;							\
})

/*
 * smp_cond_load_relaxed() spins until cond_expr is true for the value
 * loaded from ptr. Rather than busy-polling, __cmpwait_relaxed() parks
 * the CPU with a load-exclusive/wfe sequence until *__PTR is observed
 * to change.
 */
#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	typeof(*ptr) VAL;						\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	VAL;								\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	typeof(*ptr) VAL;						\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	VAL;								\
})
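
/*
 * Usage sketch (hypothetical lock word; VAL names the value loaded on
 * each iteration of the wait loop):
 *
 *	wait, with acquire ordering, until the lock word reads as zero:
 *	smp_cond_load_acquire(&lock->val, VAL == 0);
 */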

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */