btrfs: add assertion helpers for spinning writers
fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

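/*
 * Assertion helpers for the eb->spinning_writers counter: the "get" and
 * "put" variants check that a spinning writer is accounted exactly once,
 * the "no" variant checks that none are left.  With CONFIG_BTRFS_DEBUG
 * disabled they compile to empty functions.
 */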
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
        atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
        WARN_ON(atomic_read(&eb->spinning_writers));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
#endif

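/*
 * Convert a held spinning read lock into a blocking read lock and drop
 * the underlying rwlock so other readers and writers can make progress.
 */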
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        read_unlock(&eb->lock);
}

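/*
 * Convert a held spinning write lock into a blocking write lock and drop
 * the underlying rwlock.  A no-op if the lock is already blocking.
 */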
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (atomic_read(&eb->blocking_writers) == 0) {
                btrfs_assert_spinning_writers_put(eb);
                btrfs_assert_tree_locked(eb);
                atomic_inc(&eb->blocking_writers);
                write_unlock(&eb->lock);
        }
}

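/*
 * Convert a held blocking read lock back to the spinning variant.  This
 * retakes the rwlock and wakes up waiters once the last blocking reader
 * is gone.
 */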
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_readers) == 0);
        read_lock(&eb->lock);
        atomic_inc(&eb->spinning_readers);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
}

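/*
 * Convert a held blocking write lock back to the spinning variant.  This
 * retakes the rwlock and wakes up waiters once the blocking writer is
 * gone.
 */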
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
        /*
         * No lock is required.  The lock owner may change if we have a read
         * lock, but it won't change to or away from us.  If we have the write
         * lock, we are the owner and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        BUG_ON(atomic_read(&eb->blocking_writers) != 1);
        write_lock(&eb->lock);
        btrfs_assert_spinning_writers_get(eb);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_writers))
                cond_wake_up_nomb(&eb->write_lock_wq);
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}

/*
 * Take a spinning read lock.  Returns 1 if we get the read lock and 0 if
 * we don't.  This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

/*
 * Returns 1 if we get the read lock and 0 if we don't.  This won't wait
 * for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

/*
 * Returns 1 if we get the write lock and 0 if we don't.  This won't wait
 * for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        btrfs_assert_spinning_writers_get(eb);
        eb->lock_owner = current->pid;
        return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers) ||
            atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                goto again;
        }
        btrfs_assert_spinning_writers_get(eb);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                btrfs_assert_no_spinning_writers(eb);
                atomic_dec(&eb->blocking_writers);
                /* Use the lighter barrier after atomic */
                smp_mb__after_atomic();
                cond_wake_up_nomb(&eb->write_lock_wq);
        } else {
                btrfs_assert_spinning_writers_put(eb);
                write_unlock(&eb->lock);
        }
}

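/* Assert that the extent buffer is write locked (write_locks is non-zero) */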
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

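/* Assert that the extent buffer is read locked (read_locks is non-zero) */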
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}