// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Debug-only sanity helpers: each _get()/_put() pair counts the held
 * spinning/blocking read and write locks so imbalances trigger warnings.
 * Without CONFIG_BTRFS_DEBUG they compile to empty stubs below.
 */
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->write_locks);
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->write_locks);
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	btrfs_assert_spinning_readers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}
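
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a writer that must sleep while logically holding the tree lock converts
 * its spinning lock to the blocking form around the sleeping section and
 * then converts back.  The function name and the sleeping step are
 * hypothetical; the locking calls are the ones defined in this file.
 */
static void __maybe_unused example_write_then_sleep(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);			/* spinning write lock */
	btrfs_set_lock_blocking_write(eb);	/* drops eb->lock, keeps ownership */
	/* ... code that may schedule(), e.g. waiting for IO, goes here ... */
	btrfs_clear_lock_blocking_write(eb);	/* re-acquires eb->lock */
	btrfs_tree_unlock(eb);
}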
/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
}
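
/*
 * Illustrative sketch (editor's addition): the nesting path above lets the
 * thread that already holds the blocking write lock take a read lock on the
 * same eb, as btrfs_find_all_roots() may do.  The function name is
 * hypothetical.
 */
static void __maybe_unused example_nested_read_lock(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_write(eb);	/* nesting requires a blocking writer */
	btrfs_tree_read_lock(eb);		/* sets eb->lock_nested, takes no rwlock */
	/* ... read-only access to the eb ... */
	btrfs_tree_read_unlock(eb);		/* just clears eb->lock_nested */
	btrfs_clear_lock_blocking_write(eb);
	btrfs_tree_unlock(eb);
}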
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}
/*
 * try to take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}
/*
 * try to take a spinning write lock.
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}
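
/*
 * Illustrative sketch (editor's addition): callers that prefer not to
 * block, such as the tree search code, typically use the trylock variant
 * and fall back to the waiting version.  The function name is hypothetical.
 */
static void __maybe_unused example_try_write_lock(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_write_lock(eb))
		btrfs_tree_lock(eb);	/* slow path: wait for the lock */
	/* ... modify the eb ... */
	btrfs_tree_unlock(eb);
}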
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}
/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}
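
/*
 * Illustrative sketch (editor's addition): a reader that may sleep converts
 * its spinning read lock to blocking and later releases it directly with
 * btrfs_tree_read_unlock_blocking(), without converting back first.  The
 * function name and the sleeping step are hypothetical.
 */
static void __maybe_unused example_read_then_sleep(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);		/* spinning read lock */
	btrfs_set_lock_blocking_read(eb);	/* drops eb->lock, keeps the count */
	/* ... code that may schedule() goes here ... */
	btrfs_tree_read_unlock_blocking(eb);	/* drops the blocking read lock */
}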
/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
}
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}
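
/*
 * Illustrative sketch (editor's addition): because btrfs_tree_unlock()
 * handles both modes, a write lock that was made blocking can be dropped
 * directly, without btrfs_clear_lock_blocking_write() first.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_unlock_blocking_write(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_write(eb);
	/* ... */
	btrfs_tree_unlock(eb);	/* blockers == 1: wakes waiters, no write_unlock */
}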