/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/spr-regs.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)          { (i) }
#define atomic_read(v)          (*(volatile int *)&(v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))

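/*
 * Illustrative only (not part of the interface above): a counter is
 * declared with ATOMIC_INIT and accessed through atomic_read() and
 * atomic_set(), e.g.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		...;
 *
 * nr_users is a hypothetical example variable.
 */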
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
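/*
 * With the inline variants below, atomic_add_return() and
 * atomic_sub_return() follow the sequence described in
 * Documentation/frv/atomic-ops.txt: ICC3.Z is set up front, the
 * counter is loaded with LD.P and CC3 is armed by ORCR (the LD.P/ORCR
 * pair must issue atomically), the new value is computed, and the
 * CST.P/CORCC pair stores it back and clears ICC3.Z only if CC3 is
 * still true; the final BEQ retries from the top while ICC3.Z remains
 * set, i.e. while the store has not happened.
 */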
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0:                                         \n"
            "   orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
            "   ckeq            icc3,cc7                \n"
            "   ld.p            %M0,%1                  \n"     /* LD.P/ORCR must be atomic */
            "   orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
            "   add%I2          %1,%2,%1                \n"
            "   cst.p           %1,%M0          ,cc3,#1 \n"
            "   corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* clear ICC3.Z if store happens */
            "   beq             icc3,#0,0b              \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0:                                         \n"
            "   orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
            "   ckeq            icc3,cc7                \n"
            "   ld.p            %M0,%1                  \n"     /* LD.P/ORCR must be atomic */
            "   orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
            "   sub%I2          %1,%2,%1                \n"
            "   cst.p           %1,%M0          ,cc3,#1 \n"
            "   corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* clear ICC3.Z if store happens */
            "   beq             icc3,#0,0b              \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif

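/*
 * The remaining 32-bit operations are thin wrappers around the
 * *_return() primitives above, whether those are built inline or
 * provided out of line.
 */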
static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
        atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)

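/*
 * A typical (purely illustrative) use of the test variants is
 * reference counting, where the final decrement triggers teardown:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		release(obj);
 *
 * obj and release() are hypothetical names, not provided here.
 */
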
/*
 * 64-bit atomic ops
 */
typedef struct {
        volatile long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)        { (i) }

static inline long long atomic64_read(atomic64_t *v)
{
        long long counter;

        asm("ldd%I1 %M1,%0"
            : "=e"(counter)
            : "m"(v->counter));
        return counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        asm volatile("std%I0 %1,%M0"
                     : "=m"(v->counter)
                     : "e"(i));
}

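/*
 * atomic64_read() and atomic64_set() above use the LDD/STD doubleword
 * instructions to access the 64-bit counter in a single instruction;
 * the arithmetic 64-bit operations are supplied out of line, as
 * declared below.
 */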
extern long long atomic64_inc_return(atomic64_t *v);
extern long long atomic64_dec_return(atomic64_t *v);
extern long long atomic64_add_return(long long i, atomic64_t *v);
extern long long atomic64_sub_return(long long i, atomic64_t *v);

static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{
        return atomic64_add_return(i, v) < 0;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
        atomic64_add_return(i, v);
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
        atomic64_sub_return(i, v);
}

static inline void atomic64_inc(atomic64_t *v)
{
        atomic64_inc_return(v);
}

static inline void atomic64_dec(atomic64_t *v)
{
        atomic64_dec_return(v);
}

#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v)        (atomic64_inc_return((v)) == 0)

#define atomic_cmpxchg(v, old, new)     (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new)             (xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new)   (__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new)           (__xchg_64(new, &(v)->counter))

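/*
 * __atomic_add_unless(): add a to v unless v's current value is u;
 * returns the value of v seen before any addition.  Implemented as a
 * cmpxchg loop that retries until the counter is updated without
 * interference or is found to equal u.
 */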
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

#endif /* _ASM_ATOMIC_H */