btrfs: use assertion helpers for spinning readers
fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
        atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
        atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
#endif

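/*
 * Illustrative sketch (comment only, not code from this file): the debug
 * counters above are expected to pair up around each spinning lock section.
 * A write-lock cycle that never goes blocking would, under
 * CONFIG_BTRFS_DEBUG, bump and drop spinning_writers exactly once:
 *
 *        btrfs_tree_lock(eb);            spinning_writers: 0 -> 1
 *        ...short, non-sleeping work...
 *        btrfs_tree_unlock(eb);          spinning_writers: 1 -> 0
 *
 * The WARN_ON()s in the helpers fire if a get/put is missed or doubled.
 */
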
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
        btrfs_assert_spinning_readers_put(eb);
        read_unlock(&eb->lock);
}

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (atomic_read(&eb->blocking_writers) == 0) {
                btrfs_assert_spinning_writers_put(eb);
                btrfs_assert_tree_locked(eb);
                atomic_inc(&eb->blocking_writers);
                write_unlock(&eb->lock);
        }
}

void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_readers) == 0);
        read_lock(&eb->lock);
        btrfs_assert_spinning_readers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
}

void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_writers) != 1);
        write_lock(&eb->lock);
        btrfs_assert_spinning_writers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_writers))
                cond_wake_up_nomb(&eb->write_lock_wq);
}

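/*
 * Illustrative sketch (comment only, assumed caller pattern rather than code
 * from this file): the set/clear helpers let the lock holder convert between
 * the spinning and blocking flavours around work that may sleep, e.g.:
 *
 *        btrfs_tree_lock(eb);
 *        btrfs_set_lock_blocking_write(eb);
 *        ...sleepable work, e.g. reading a child block from disk...
 *        btrfs_clear_lock_blocking_write(eb);
 *        ...short, non-sleeping modifications under the spinning lock...
 *        btrfs_tree_unlock(eb);
 *
 * The read-lock variants pair the same way with
 * btrfs_set_lock_blocking_read() and btrfs_clear_lock_blocking_read().
 */
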
/*
 * Take a spinning read lock.  This will wait for any blocking
 * writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        btrfs_assert_spinning_readers_get(eb);
}

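/*
 * Illustrative sketch (comment only, hypothetical caller): a plain reader
 * takes and drops the spinning read lock around short, non-sleeping accesses:
 *
 *        btrfs_tree_read_lock(eb);
 *        ...read items out of the extent buffer...
 *        btrfs_tree_read_unlock(eb);
 *
 * If the calling thread already holds a blocking write lock on the buffer,
 * the nested branch above only records eb->lock_nested and returns without
 * pinning the rwlock a second time.
 */
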
/*
 * Take a spinning read lock.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        btrfs_assert_spinning_readers_get(eb);
        return 1;
}

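/*
 * Illustrative sketch (comment only, assumed caller pattern): callers that
 * can tolerate a failed attempt typically try this atomic variant first and
 * fall back to the sleeping version:
 *
 *        if (!btrfs_tree_read_lock_atomic(eb))
 *                btrfs_tree_read_lock(eb);
 */
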
/*
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        btrfs_assert_spinning_readers_get(eb);
        return 1;
}

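/*
 * Illustrative note (comment only): unlike btrfs_tree_read_lock_atomic(),
 * this variant uses read_trylock() and so also refuses to spin on a
 * contended rwlock.  A hypothetical opportunistic caller might do:
 *
 *        if (btrfs_try_tree_read_lock(eb)) {
 *                ...fast path under the spinning read lock...
 *                btrfs_tree_read_unlock(eb);
 *        } else {
 *                ...defer or take the blocking path...
 *        }
 */
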
/*
 * Returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
        return 1;
}

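/*
 * Illustrative sketch (comment only, assumed caller pattern): a caller that
 * would rather not sleep can attempt the write lock and fall back to the
 * full, waiting version on failure:
 *
 *        if (!btrfs_try_tree_write_lock(eb)) {
 *                ...drop or convert other locks as needed...
 *                btrfs_tree_lock(eb);
 *        }
 */
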
/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * If we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        btrfs_assert_spinning_readers_put(eb);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * If we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

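/*
 * Illustrative sketch (comment only, assumed caller pattern): a reader that
 * went blocking must use the matching unlock, since the spinning rwlock was
 * already dropped by btrfs_set_lock_blocking_read():
 *
 *        btrfs_tree_read_lock(eb);
 *        btrfs_set_lock_blocking_read(eb);
 *        ...sleepable work while other lockers may proceed...
 *        btrfs_tree_read_unlock_blocking(eb);
 */
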
/*
 * Take a spinning write lock.  This will wait for both
 * blocking readers and writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers) ||
            atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                goto again;
        }
        btrfs_assert_spinning_writers_get(eb);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

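/*
 * Illustrative note (comment only): the wait_event()/recheck loop above is
 * the usual optimistic pattern: wait until no blockers are visible, take the
 * rwlock, and retry if a blocker raced in between.  A minimal writer
 * critical section then looks like:
 *
 *        btrfs_tree_lock(eb);
 *        ...modify the extent buffer...
 *        btrfs_tree_unlock(eb);
 */
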
/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                btrfs_assert_no_spinning_writers(eb);
                atomic_dec(&eb->blocking_writers);
                /* Use the lighter barrier after atomic */
                smp_mb__after_atomic();
                cond_wake_up_nomb(&eb->write_lock_wq);
        } else {
                btrfs_assert_spinning_writers_put(eb);
                write_unlock(&eb->lock);
        }
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}