Merge branch 'next-general' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris...
[sfrench/cifs-2.6.git] / arch / metag / include / asm / atomic_lock1.h
1 #ifndef __ASM_METAG_ATOMIC_LOCK1_H
2 #define __ASM_METAG_ATOMIC_LOCK1_H
3
4 #define ATOMIC_INIT(i)  { (i) }
5
6 #include <linux/compiler.h>
7
8 #include <asm/barrier.h>
9 #include <asm/global_lock.h>
10
/*
 * atomic_read - read the counter with a single, non-torn load.
 * READ_ONCE() stops the compiler from refetching or tearing the access;
 * no lock is taken for a plain read.
 */
static inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}
15
/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as atomic_add_unless reads the counter and then writes it
 * back in two separate steps:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock ensures that ordering and logical
 * consistency are preserved.
 */
/*
 * Store @i into the counter under global lock 1 and return @i.
 * fence() is issued before the store while the lock is held, as on every
 * other write path in this file (NOTE(review): presumably a hardware
 * write-buffer flush -- see asm/barrier.h; confirm).
 */
static inline int atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter = i;
	__global_unlock1(flags);
	return i;
}
39
/* Release-ordered set: the locked atomic_set() already provides the ordering. */
#define atomic_set_release(v, i) atomic_set((v), (i))
41
/*
 * ATOMIC_OP - template for the void ops (atomic_add, atomic_sub, ...).
 * Applies "v->counter c_op i" (e.g. += i) under global lock 1; fence()
 * is called before the store while the lock is held, matching the other
 * write paths in this file.
 */
#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}									\
52
/*
 * ATOMIC_OP_RETURN - template for atomic_<op>_return(): apply the op and
 * return the NEW counter value (truncated to int).  The new value is
 * computed into a local first, then fence() is issued before it is
 * stored back, all under global lock 1.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}
68
/*
 * ATOMIC_FETCH_OP - template for atomic_fetch_<op>(): apply the op and
 * return the OLD counter value.  The old value is latched before the
 * update; fence() precedes the store, all under global lock 1.
 */
#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}
83
/* Arithmetic ops get all three variants: void, _return and fetch_. */
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

/* Bitwise ops have no _return variant: only void and fetch_ forms. */
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

/* The generator templates are private to this header; drop them. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
105
106 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
107 {
108         int ret;
109         unsigned long flags;
110
111         __global_lock1(flags);
112         ret = v->counter;
113         if (ret == old) {
114                 fence();
115                 v->counter = new;
116         }
117         __global_unlock1(flags);
118
119         return ret;
120 }
121
/* Exchange via the arch xchg() helper; no global lock is taken here
 * (NOTE(review): presumably xchg() is atomic on its own -- see asm/cmpxchg.h). */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
123
124 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
125 {
126         int ret;
127         unsigned long flags;
128
129         __global_lock1(flags);
130         ret = v->counter;
131         if (ret != u) {
132                 fence();
133                 v->counter += a;
134         }
135         __global_unlock1(flags);
136
137         return ret;
138 }
139
140 static inline int atomic_sub_if_positive(int i, atomic_t *v)
141 {
142         int ret;
143         unsigned long flags;
144
145         __global_lock1(flags);
146         ret = v->counter - 1;
147         if (ret >= 0) {
148                 fence();
149                 v->counter = ret;
150         }
151         __global_unlock1(flags);
152
153         return ret;
154 }
155
156 #endif /* __ASM_METAG_ATOMIC_LOCK1_H */