btrfs: split btrfs_clear_lock_blocking_rw to read and write helpers
author      David Sterba <dsterba@suse.com>
Tue, 3 Apr 2018 23:52:31 +0000 (01:52 +0200)
committer   David Sterba <dsterba@suse.com>
Mon, 25 Feb 2019 13:13:27 +0000 (14:13 +0100)
There are many callers that hardcode the desired lock type so we can
avoid the switch and call them directly. Split the current function to
two. There are no remaining users of btrfs_clear_lock_blocking_rw so
it's removed.  The call sites will be converted in followup patches.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: David Sterba <dsterba@suse.com>
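
As an illustration of the kind of call-site conversion the followup patches
would do (a hypothetical example, not taken from this series), a caller with a
hardcoded lock type switches from the rw helper to the dedicated one:

    /* before: lock type passed as an argument and dispatched inside the helper */
    btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);

    /* after: call the read-specific helper directly */
    btrfs_clear_lock_blocking_read(eb);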
fs/btrfs/locking.c
fs/btrfs/locking.h

diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 7201d000f61d08e3e1e9c0d2d74529ec608777fc..7f89ca6f1fbc57462a435efb9da60f27af78c47c 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -48,11 +48,24 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
        }
 }
 
-/*
- * if we currently have a blocking lock, take the spinlock
- * and drop our blocking count
- */
-void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
+void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
+{
+       /*
+        * No lock is required.  The lock owner may change if we have a read
+        * lock, but it won't change to or away from us.  If we have the write
+        * lock, we are the owner and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
+       BUG_ON(atomic_read(&eb->blocking_readers) == 0);
+       read_lock(&eb->lock);
+       atomic_inc(&eb->spinning_readers);
+       /* atomic_dec_and_test implies a barrier */
+       if (atomic_dec_and_test(&eb->blocking_readers))
+               cond_wake_up_nomb(&eb->read_lock_wq);
+}
+
+void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 {
        /*
         * no lock is required.  The lock owner may change if
@@ -62,23 +75,13 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
-
-       if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
-               BUG_ON(atomic_read(&eb->blocking_writers) != 1);
-               write_lock(&eb->lock);
-               WARN_ON(atomic_read(&eb->spinning_writers));
-               atomic_inc(&eb->spinning_writers);
-               /* atomic_dec_and_test implies a barrier */
-               if (atomic_dec_and_test(&eb->blocking_writers))
-                       cond_wake_up_nomb(&eb->write_lock_wq);
-       } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
-               BUG_ON(atomic_read(&eb->blocking_readers) == 0);
-               read_lock(&eb->lock);
-               atomic_inc(&eb->spinning_readers);
-               /* atomic_dec_and_test implies a barrier */
-               if (atomic_dec_and_test(&eb->blocking_readers))
-                       cond_wake_up_nomb(&eb->read_lock_wq);
-       }
+       BUG_ON(atomic_read(&eb->blocking_writers) != 1);
+       write_lock(&eb->lock);
+       WARN_ON(atomic_read(&eb->spinning_writers));
+       atomic_inc(&eb->spinning_writers);
+       /* atomic_dec_and_test implies a barrier */
+       if (atomic_dec_and_test(&eb->blocking_writers))
+               cond_wake_up_nomb(&eb->write_lock_wq);
 }
 
 /*
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 0453a47976933202b48ee54a8682448202c3aac7..3f81d6900c71ec81d03b5ca6dc8bf52cafad607d 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -19,7 +19,8 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
 void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
 void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
-void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
+void btrfs_clear_lock_blocking_read(struct extent_buffer *eb);
+void btrfs_clear_lock_blocking_write(struct extent_buffer *eb);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
@@ -55,8 +56,4 @@ static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
        btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 }
 
-static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb)
-{
-       btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
-}
 #endif
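
For context, a minimal usage sketch of the new read helper in the extent
buffer locking lifecycle (illustrative only, assuming the existing
btrfs_tree_read_lock/unlock API declared in locking.h; not part of this
commit):

    btrfs_tree_read_lock(eb);            /* take the spinning read lock */
    btrfs_set_lock_blocking_read(eb);    /* allow sleeping while the lock is held */
    /* ... work that may block ... */
    btrfs_clear_lock_blocking_read(eb);  /* return to the spinning read lock */
    btrfs_tree_read_unlock(eb);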