#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *      ...
 *      } while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on the x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
                 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define seqlock_init(x)                                 \
        do {                                            \
                (x)->sequence = 0;                      \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
                seqlock_t x = __SEQLOCK_UNLOCKED(x)

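/*
 * Illustrative sketch (not part of this header; foo_lock and struct bar are
 * hypothetical names): a seqlock can be defined statically with
 * DEFINE_SEQLOCK(), or embedded in a structure and set up at run time with
 * seqlock_init().
 *
 *      static DEFINE_SEQLOCK(foo_lock);
 *
 *      struct bar {
 *              seqlock_t lock;
 *      };
 *
 *      static void bar_setup(struct bar *b)
 *      {
 *              seqlock_init(&b->lock);
 *      }
 */
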
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}

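/*
 * Illustrative writer sketch (hypothetical state_lock/state, not part of
 * this header): updates are made under write_seqlock()/write_sequnlock(),
 * which take the internal spinlock and bump the sequence around the
 * modification so concurrent readers can detect it.
 *
 *      static DEFINE_SEQLOCK(state_lock);
 *      static struct { int x, y; } state;
 *
 *      static void update_state(int x, int y)
 *      {
 *              write_seqlock(&state_lock);
 *              state.x = x;
 *              state.y = y;
 *              write_sequnlock(&state_lock);
 *      }
 */
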
static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}

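/*
 * Illustrative sketch (hypothetical state_lock/state as above):
 * write_tryseqlock() begins a write section only if the spinlock is
 * immediately available, so a caller can skip an optional update instead
 * of spinning.
 *
 *      if (write_tryseqlock(&state_lock)) {
 *              state.x++;
 *              write_sequnlock(&state_lock);
 *      }
 */
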
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(sl->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();

        return ret;
}

/*
 * Test whether the reader processed invalid data.
 *
 * If the sequence value changed, a writer modified the data while the
 * reader was inside the critical section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
        smp_rmb();

        return unlikely(sl->sequence != start);
}


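/*
 * Illustrative reader sketch (hypothetical state_lock/state from the writer
 * sketch above): take a snapshot of the data and retry until no writer
 * interfered, as outlined at the top of this file.
 *
 *      static void read_state(int *x, int *y)
 *      {
 *              unsigned seq;
 *
 *              do {
 *                      seq = read_seqbegin(&state_lock);
 *                      *x = state.x;
 *                      *y = state.y;
 *              } while (read_seqretry(&state_lock, seq));
 *      }
 */
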
/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

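/*
 * Illustrative sketch (hypothetical foo_seq/foo_data): with the barrier-less
 * __read_seqcount_begin()/__read_seqcount_retry(), the caller supplies the
 * read ordering itself; here explicit smp_rmb() calls stand in for the
 * barriers that read_seqcount_begin()/read_seqcount_retry() would provide.
 *
 *      do {
 *              seq = __read_seqcount_begin(&foo_seq);
 *              smp_rmb();
 *              x = foo_data.x;
 *              smp_rmb();
 *      } while (__read_seqcount_retry(&foo_seq, seq));
 */
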
/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = ACCESS_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
}

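/*
 * Illustrative sketch (hypothetical foo_seq/foo_lock/foo_data): one lockless
 * attempt with raw_seqcount_begin(); if a writer was or became active, the
 * retry check fails immediately and the caller falls back to its own lock
 * rather than spinning in the read section.
 *
 *      seq = raw_seqcount_begin(&foo_seq);
 *      x = foo_data.x;
 *      if (read_seqcount_retry(&foo_seq, seq)) {
 *              spin_lock(&foo_lock);
 *              x = foo_data.x;
 *              spin_unlock(&foo_lock);
 *      }
 */
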
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();

        return __read_seqcount_retry(s, start);
}


/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}

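/*
 * Illustrative sketch (hypothetical foo_mutex/foo_seq/foo; DEFINE_MUTEX()
 * comes from <linux/mutex.h>): the seqcount_t relies on the caller's own
 * lock to serialize writers; the sequence counter only lets readers detect
 * a concurrent update.
 *
 *      static DEFINE_MUTEX(foo_mutex);
 *      static seqcount_t foo_seq = SEQCNT_ZERO;
 *      static struct { u64 a, b; } foo;
 *
 *      static void foo_update(u64 a, u64 b)
 *      {
 *              mutex_lock(&foo_mutex);
 *              write_seqcount_begin(&foo_seq);
 *              foo.a = a;
 *              foo.b = b;
 *              write_seqcount_end(&foo_seq);
 *              mutex_unlock(&foo_mutex);
 *      }
 *
 *      static void foo_read(u64 *a, u64 *b)
 *      {
 *              unsigned seq;
 *
 *              do {
 *                      seq = read_seqcount_begin(&foo_seq);
 *                      *a = foo.a;
 *                      *b = foo.b;
 *              } while (read_seqcount_retry(&foo_seq, seq));
 *      }
 */
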
/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)                                         \
        do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)                                          \
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)                         \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)                                       \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)                                        \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

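/*
 * Illustrative writer sketch (hypothetical state_lock/state): when the write
 * side can also run from hard or soft interrupt context, the _irqsave/_irq/_bh
 * variants disable that context around the write section so a writer cannot
 * be interrupted by another writer on the same CPU.
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&state_lock, flags);
 *      state.x++;
 *      write_sequnlock_irqrestore(&state_lock, flags);
 */
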
#define read_seqbegin_irqsave(lock, flags)                              \
        ({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)                       \
        ({                                                              \
                int ret = read_seqretry(lock, iv);                      \
                local_irq_restore(flags);                               \
                ret;                                                    \
        })

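/*
 * Illustrative reader sketch (hypothetical state_lock/state): the irqsave
 * reader variants disable local interrupts for the duration of each read
 * attempt and restore them at the retry check.
 *
 *      unsigned long flags;
 *      unsigned seq;
 *      int x;
 *
 *      do {
 *              seq = read_seqbegin_irqsave(&state_lock, flags);
 *              x = state.x;
 *      } while (read_seqretry_irqrestore(&state_lock, seq, flags));
 */
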
#endif /* __LINUX_SEQLOCK_H */