/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
   This is a hint to the hardware to expect additional updates adjacent
   to the lock word or not.  If we are acquiring a Mutex, the hint
   should be true.  Otherwise we are releasing a Mutex or doing a simple
   atomic operation.  In that case we don't expect additional updates
   adjacent to the lock word after the Store Conditional and the hint
   should be false.  */

#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
# define MUTEX_HINT_ACQ ",1"
# define MUTEX_HINT_REL ",0"
#else
# define MUTEX_HINT_ACQ
# define MUTEX_HINT_REL
#endif
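
/* Illustrative sketch (not part of the original header): with the hint
   enabled, the acquire-side load-and-reserve below gains a trailing ",1"
   operand, i.e. the EH (exclusive access) hint bit is set:

     lwarx   r0,0,r3,1      on _ARCH_PWR6/_ARCH_PWR6X builds
     lwarx   r0,0,r3        on older processors (hint omitted)

   The register names here are only examples; the real operands are chosen
   by the compiler from the asm constraints.  */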

/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   On powerpc64 register values are 64 bits by default, including oldval.
   The value in oldval may have unknown sign extension, while lwarx loads
   the 32-bit value as unsigned.  So we explicitly clear the high 32 bits
   in oldval.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm __volatile ("   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm __volatile (__ARCH_REL_INSTR "\n"                                     \
                    "   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})
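
/* Usage sketch (illustrative only; these internal macros are normally
   reached through the generic atomic_* wrappers): the boolean forms
   return zero on success and nonzero when the old value did not match,
   so a simple spin-lock acquire could look like this.  The names
   lock_acquire and lock are hypothetical.

     static inline void
     lock_acquire (volatile unsigned int *lock)
     {
       while (__arch_compare_and_exchange_bool_32_acq (lock, 1, 0))
         continue;
     }
*/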

/*
 * Only powerpc64 processors support Load doubleword and reserve index (ldarx)
 * and Store doubleword conditional indexed (stdcx) instructions.  So here
 * we define the 64-bit forms.
 */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm __volatile (                                                          \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm __volatile (__ARCH_REL_INSTR "\n"                                     \
                    "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm __volatile (                                                      \
                        "1:     ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"          \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     " __ARCH_ACQ_INSTR                            \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1:     ldarx   %0,0,%1" MUTEX_HINT_REL "\n"          \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     "                                             \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })
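
/* Usage sketch (illustrative only): the _val_ forms return the value that
   was in memory before the operation, so success is detected by comparing
   against the expected old value.  A lock-free read-modify-write loop on a
   hypothetical 64-bit counter could look like:

     long old, desired;
     do
       {
         old = counter;
         desired = old + step;
       }
     while (__arch_compare_and_exchange_val_64_acq (&counter, desired, old)
            != old);

   counter and step are hypothetical variables, not part of this header.  */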

#define __arch_atomic_exchange_64_acq(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1:     ldarx   %0,0,%2" MUTEX_HINT_ACQ "\n"          \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b\n"                                 \
                  " " __ARCH_ACQ_INSTR                                        \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_exchange_64_rel(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm __volatile (__ARCH_REL_INSTR "\n"                                 \
                        "1:     ldarx   %0,0,%2" MUTEX_HINT_REL "\n"          \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })
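
/* Usage sketch (illustrative only): the exchange forms unconditionally
   store the new value and return the previous contents.  For example, a
   hypothetical lock word could be released while observing who held it:

     long prev = __arch_atomic_exchange_64_rel (&lock, 0);

   The leading __ARCH_REL_INSTR in the _rel variant orders earlier stores
   before the swap; lock and prev are hypothetical names.  */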

#define __arch_atomic_exchange_and_add_64(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm __volatile ("1:     ldarx   %0,0,%3\n"                            \
                        "       add     %1,%0,%4\n"                           \
                        "       stdcx.  %1,0,%3\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })
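
/* Usage sketch (illustrative only): this is a fetch-and-add; the value
   returned is the one seen before the addition, which makes it suitable
   for handing out unique tickets or indices:

     long my_ticket = __arch_atomic_exchange_and_add_64 (&next_ticket, 1);

   next_ticket and my_ticket are hypothetical names.  */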

#define __arch_atomic_increment_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm __volatile ("1:     ldarx   %0,0,%2\n"                            \
                        "       addi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

#define __arch_atomic_decrement_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm __volatile ("1:     ldarx   %0,0,%2\n"                            \
                        "       subi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })
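
/* Usage sketch (illustrative only): unlike the exchange_and_add form above,
   the _val increment/decrement macros return the value after the update,
   so a reference-count drop can test the result directly:

     if (__arch_atomic_decrement_val_64 (&obj->refcount) == 0)
       destroy (obj);

   obj, refcount and destroy are hypothetical names.  */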

#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ int __val, __tmp;                                                        \
     __asm __volatile ("1:      ldarx   %0,0,%3\n"                            \
                       "        cmpdi   0,%0,0\n"                             \
                       "        addi    %1,%0,-1\n"                           \
                       "        ble     2f\n"                                 \
                       "        stdcx.  %1,0,%3\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __ARCH_ACQ_INSTR                            \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })
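
/* Usage sketch (illustrative only): the decrement happens only when the
   old value was positive, and that old value is returned, which matches a
   semaphore-style "try to take one unit" operation:

     if (__arch_atomic_decrement_if_positive_64 (&available) > 0)
       have_token = 1;

   available and have_token are hypothetical names.  */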

/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
#define atomic_read_barrier()   __asm ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
#ifndef UP
# define __ARCH_REL_INSTR       "lwsync"
#endif
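
/* Illustrative note (not from the original file): __ARCH_REL_INSTR is what
   the _rel macros above emit before their load-and-reserve, so on SMP
   builds a release operation begins with an lwsync; when UP is defined,
   this definition is skipped and the common <bits/atomic.h> included below
   supplies the fallback.  A reader pairing with such a release might use
   the read barrier defined here:

     while (*(volatile long *) &ready == 0)
       continue;
     atomic_read_barrier ();

   ready is a hypothetical flag variable.  */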

/*
 * Include the rest of the atomic ops macros which are common to both
 * powerpc32 and powerpc64.
 */
#include_next <bits/atomic.h>