btrfs: add assertion helpers for spinning readers
fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
        atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
#endif
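
/*
 * A sketch of how a get/put pair brackets a spinning-lock section at a
 * call site (hypothetical caller, not code from this file; note that the
 * reader helpers above are not yet called anywhere in this file):
 *
 *	write_lock(&eb->lock);
 *	btrfs_assert_spinning_writers_get(eb);
 *	... short, non-sleeping work ...
 *	btrfs_assert_spinning_writers_put(eb);
 *	write_unlock(&eb->lock);
 */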

void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        read_unlock(&eb->lock);
}
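
/*
 * A minimal caller sketch (hypothetical, not a call site in this file): a
 * reader takes the spinning lock, switches it to blocking mode before work
 * that may sleep, and releases the blocking lock when done:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	... work that may sleep ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */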

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (atomic_read(&eb->blocking_writers) == 0) {
                btrfs_assert_spinning_writers_put(eb);
                btrfs_assert_tree_locked(eb);
                atomic_inc(&eb->blocking_writers);
                write_unlock(&eb->lock);
        }
}
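
/*
 * The write-side counterpart of the sketch above (again hypothetical):
 * btrfs_tree_unlock() below handles both the spinning and the blocking
 * case, so no separate blocking unlock is needed:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_tree_unlock(eb);
 */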

void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_readers) == 0);
        read_lock(&eb->lock);
        atomic_inc(&eb->spinning_readers);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
}

void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_writers) != 1);
        write_lock(&eb->lock);
        btrfs_assert_spinning_writers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_writers))
                cond_wake_up_nomb(&eb->write_lock_wq);
}
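
/*
 * Sketch of the reverse transition (hypothetical caller): a task whose
 * write lock went blocking re-acquires the spinning lock for short,
 * non-sleeping work before unlocking:
 *
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_clear_lock_blocking_write(eb);
 *	... short, non-sleeping work ...
 *	btrfs_tree_unlock(eb);
 */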

/*
 * Take a spinning read lock.  This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}
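
/*
 * Nested-lock sketch (hypothetical, per the comment above): a task that
 * already holds a blocking write lock on eb may take a read lock on the
 * same eb.  Only lock_nested is set, the rwlock itself is not re-taken,
 * and the matching btrfs_tree_read_unlock() just clears lock_nested:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	btrfs_tree_read_lock(eb);	...sets eb->lock_nested...
 *	btrfs_tree_read_unlock(eb);	...clears eb->lock_nested...
 *	btrfs_tree_unlock(eb);
 */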

/*
 * Take a spinning read lock.  Returns 1 if we get the read lock and 0 if we
 * don't.  This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}
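
/*
 * Note the contrast with btrfs_try_tree_read_lock() below: this variant
 * still takes the rwlock with read_lock(), so it may spin on the lock word
 * itself; it only refuses to wait for blocking writers.  The trylock
 * variant additionally backs off if the rwlock cannot be taken immediately.
 */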

/*
 * Returns 1 if we get the read lock and 0 if we don't.  This won't wait for
 * blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}
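
/*
 * Trylock usage sketch (hypothetical caller): fall back to the waiting
 * variant when the fast path fails:
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		... short, non-sleeping work ...
 *		btrfs_tree_read_unlock(eb);
 *	} else {
 *		btrfs_tree_read_lock(eb);
 *		...
 *	}
 */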

/*
 * Returns 1 if we get the write lock and 0 if we don't.  This won't wait for
 * blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
        return 1;
}
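
/*
 * Write trylock sketch (hypothetical caller); a successful trylock is
 * released with the ordinary btrfs_tree_unlock():
 *
 *	if (btrfs_try_tree_write_lock(eb)) {
 *		... short, non-sleeping work ...
 *		btrfs_tree_unlock(eb);
 *	}
 */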

/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * If we're nested, we have the write lock.  No new locking is needed
         * as long as we are the lock owner.  The write unlock will do a
         * barrier for us, and the lock_nested field only matters to the lock
         * owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * If we're nested, we have the write lock.  No new locking is needed
         * as long as we are the lock owner.  The write unlock will do a
         * barrier for us, and the lock_nested field only matters to the lock
         * owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

/*
 * Take a spinning write lock.  This will wait for both blocking readers and
 * writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers) ||
            atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                goto again;
        }
        btrfs_assert_spinning_writers_get(eb);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                btrfs_assert_no_spinning_writers(eb);
                atomic_dec(&eb->blocking_writers);
                /* Use the lighter barrier after atomic */
                smp_mb__after_atomic();
                cond_wake_up_nomb(&eb->write_lock_wq);
        } else {
                btrfs_assert_spinning_writers_put(eb);
                write_unlock(&eb->lock);
        }
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}