// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}
#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
#endif
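
/*
 * Sketch of how the debug counters above pair up over one write lock's
 * lifetime (illustration only, nothing below is called from real code):
 *
 *	btrfs_tree_lock(eb);			spinning_writers 0 -> 1
 *	btrfs_set_lock_blocking_write(eb);	spinning_writers 1 -> 0,
 *						blocking_writers 0 -> 1
 *	btrfs_clear_lock_blocking_write(eb);	spinning_writers 0 -> 1,
 *						blocking_writers 1 -> 0
 *	btrfs_tree_unlock(eb);			spinning_writers 1 -> 0
 */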

void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}
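
/*
 * Example caller pattern (hypothetical, for illustration only): convert
 * a spinning read lock to a blocking one before doing work that may
 * sleep, so other lockers only spin for the short spinning section:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	... work that may schedule() ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */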

void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	btrfs_assert_spinning_readers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}
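
/*
 * Example round trip (hypothetical caller, for illustration only): a
 * writer goes blocking around a sleeping operation and re-takes the
 * spinning lock before unlocking:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_clear_lock_blocking_write(eb);	(spins on eb->lock again)
 *	btrfs_tree_unlock(eb);
 */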

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the
		 * same thread. btrfs_find_all_roots() depends on this as it
		 * may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
}
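
/*
 * Nested locking sketch (hypothetical sequence, for illustration only):
 * the task that already holds the blocking write lock may take one
 * extra read lock; lock_nested marks it so the matching read unlock
 * only clears the flag and never touches eb->lock:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	btrfs_tree_read_lock(eb);	(sets eb->lock_nested)
 *	btrfs_tree_read_unlock(eb);	(only clears eb->lock_nested)
 *	btrfs_clear_lock_blocking_write(eb);
 *	btrfs_tree_unlock(eb);
 */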

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}
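
/*
 * Typical trylock pattern (hypothetical caller, for illustration only):
 * opportunistically take the spinning read lock and fall back to the
 * waiting variant when it fails. Unlike btrfs_tree_read_lock_atomic()
 * above, read_trylock() never spins on the rwlock itself:
 *
 *	if (!btrfs_try_tree_read_lock(eb))
 *		btrfs_tree_read_lock(eb);
 */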

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}
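
/*
 * Example use (hypothetical, for illustration only): try for the write
 * lock without ever waiting, and take the slow path when contended:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);	(may wait for blockers)
 */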

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	/* Taking the write lock twice from the same task would deadlock */
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
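
/*
 * Basic write-side usage (illustration only): the lock is held spinning,
 * so the critical section must not sleep unless it is first converted
 * with btrfs_set_lock_blocking_write():
 *
 *	btrfs_tree_lock(eb);
 *	... modify the extent buffer ...
 *	btrfs_tree_unlock(eb);
 */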

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}