// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

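/*
 * Editorial overview (summarizing the code in this file): extent buffer
 * locks have two modes.  A "spinning" lock is the underlying rwlock
 * eb->lock, held only across short, non-sleeping sections.  A "blocking"
 * lock is tracked purely by the blocking_readers/blocking_writers counters
 * after the rwlock has been dropped, so the holder may sleep while waiters
 * sit on read_lock_wq/write_lock_wq.  The spinning_readers/spinning_writers,
 * read_locks and write_locks counters back the WARN_ON/BUG_ON assertions
 * used below.
 */
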
/*
 * If we currently have a spinning reader or writer lock (indicated by the
 * rw flag), this bumps the count of blocking holders and drops the
 * spinning lock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

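/*
 * Illustrative caller pattern (a sketch for this note, not lifted from an
 * actual call site): a task holding the spinning lock converts it to the
 * blocking form before doing anything that might sleep, and converts back
 * with btrfs_clear_lock_blocking_rw() afterwards:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... possibly sleeping work on the buffer ...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */
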
/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_writers))
			cond_wake_up_nomb(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_readers))
			cond_wake_up_nomb(&eb->read_lock_wq);
	}
}

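/*
 * Editorial note on the wakeups above: cond_wake_up_nomb() is the variant
 * that does not supply its own memory barrier before checking for waiters,
 * so these callers rely on the full barrier implied by atomic_dec_and_test()
 * to order the counter update against the waitqueue check, as the in-line
 * comments state.
 */
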
/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

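/*
 * Editorial note on the nested case above: a task that already holds the
 * write lock may take an additional read lock only after the write lock
 * has been made blocking.  If it still held the lock in spinning mode,
 * the read_lock(&eb->lock) above would deadlock on the rwlock it already
 * owns, which is what the first BUG_ON() guards against.  A sketch of the
 * allowed sequence (illustrative only):
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	btrfs_tree_read_lock(eb);	-> takes the nested path, sets lock_nested
 *	btrfs_tree_read_unlock(eb);	-> clears lock_nested and returns early
 */
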
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers or for the spinning lock.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

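/*
 * Editorial note on the trylock variants above: all three return 1 on
 * success and 0 on failure without ever waiting.  A plausible caller
 * pattern (a sketch, not taken from a specific call site) is to attempt
 * the cheap non-waiting lock first and fall back to the waiting variant:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */
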
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

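/*
 * Editorial note: btrfs_tree_lock() always returns with the lock held in
 * spinning mode and lock_owner set to the current pid.  Whether or not the
 * holder later converts the lock to blocking, the matching release is a
 * single btrfs_tree_unlock(), which handles both the spinning and the
 * blocking case (see below).
 */
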
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}