arch/m68k/include/asm/atomic.h
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          ACCESS_ONCE((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = i)

/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI  "d"
#else
#define ASM_DI  "di"
#endif
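
/*
 * Illustrative note (an assumption about generated code, not something
 * the original header spells out): with the "di" constraint the compiler
 * may encode the operand as an immediate add/sub straight to memory,
 * while a ColdFire build must first load the value into a data register
 * and use that register as the source operand.
 */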

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}                                                                       \

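/*
 * For illustration only: ATOMIC_OPS(add, +=, add) further down expands
 * this template to roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * The c_op argument is unused here; it is only consumed by the IRQ-based
 * ATOMIC_OP_RETURN variant below.
 */
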
#ifdef CONFIG_RMW_INSNS

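/*
 * Descriptive note: the loop below is a compare-and-swap retry cycle.
 * "tmp" holds the value we expect *v to contain and "t" the freshly
 * computed result; casl stores "t" into *v only if *v still equals
 * "tmp", otherwise it reloads "tmp" from *v and jne retries.
 */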
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        int t, tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
                        "1:     movel %2,%1\n"                          \
                        "       " #asm_op "l %3,%1\n"                   \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
                        : "g" (i), "2" (atomic_read(v)));               \
        return t;                                                       \
}

#else

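/*
 * Without the CAS instructions (cores built without CONFIG_RMW_INSNS,
 * e.g. 68000-class and ColdFire parts) we fall back to masking
 * interrupts around a plain read-modify-write.  Since there are no SMP
 * m68k systems, keeping interrupts out is all that atomicity requires.
 */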
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        int t;                                                          \
                                                                        \
        local_irq_save(flags);                                          \
        t = (v->counter c_op i);                                        \
        local_irq_restore(flags);                                       \
                                                                        \
        return t;                                                       \
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

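/*
 * These two instantiations generate atomic_add()/atomic_add_return()
 * and atomic_sub()/atomic_sub_return() from the templates above.
 */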
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

/*
 * Decrement *v and return true if the new value is negative
 * ("slt" sets the result byte on "less than").
 */
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
        char c;
        __asm__ __volatile__(
                "subql #1,%1; slt %0"
                : "=d" (c), "=m" (*v)
                : "m" (*v));
        return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

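/*
 * IRQ-masking fallbacks for cores without casl.  As noted at the top of
 * this file there are no SMP m68k systems, so disabling interrupts is
 * enough to keep the read and the write of *v atomic with respect to
 * everything else on the CPU.
 */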
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        if (prev == old)
                atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)    atomic_sub_return(1, (v))
#define atomic_inc_return(v)    atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}

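/*
 * Usage sketch (illustrative only, not part of the original interface):
 * the mask helpers below operate on a plain word, so clearing the low
 * two bits of a hypothetical flags word reads
 *
 *	atomic_clear_mask(0x3, &flags);
 *
 * atomic_clear_mask() complements the mask itself before anding, so the
 * caller passes the bits to clear, not the bits to keep.
 */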
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
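
/*
 * Example (illustrative use, not part of the original header):
 * __atomic_add_unless() returns the old value, so the classic
 * atomic_inc_not_zero() pattern reads
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		...the hypothetical obj->refcnt was non-zero and now
 *		   holds one more reference...
 */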

#endif /* __ARCH_M68K_ATOMIC__ */