mm/filemap: inline __wait_on_page_locked_async into caller
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Wed, 24 Feb 2021 20:02:09 +0000 (12:02 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 24 Feb 2021 21:38:28 +0000 (13:38 -0800)
The previous patch removed wait_on_page_locked_async(), so inline
__wait_on_page_locked_async() into its only remaining caller,
__lock_page_async().  With 'set' always true there, the 'set' argument
and the PageLocked() branch go away, and trylock_page() is called
unconditionally.
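
For reference, the wait entry that the inlined code initialises looks
roughly like this (a sketch of struct wait_page_queue as found in
include/linux/pagemap.h around this time; the field comments are mine):

	struct wait_page_queue {
		struct page *page;		/* page whose flag bit we wait on */
		int bit_nr;			/* which bit; PG_locked in this path */
		wait_queue_entry_t wait;	/* queue entry; its .func callback
						 * fires when the bit clears */
	};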

Link: https://lkml.kernel.org/r/20210122160140.223228-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/filemap.c

index f62b589cbfe4355b06bd0ea8e69242789ef490cf..43448d4d66f3588245c284deff58bcd1392f7218 100644
@@ -1343,36 +1343,6 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
-static int __wait_on_page_locked_async(struct page *page,
-                                      struct wait_page_queue *wait, bool set)
-{
-       struct wait_queue_head *q = page_waitqueue(page);
-       int ret = 0;
-
-       wait->page = page;
-       wait->bit_nr = PG_locked;
-
-       spin_lock_irq(&q->lock);
-       __add_wait_queue_entry_tail(q, &wait->wait);
-       SetPageWaiters(page);
-       if (set)
-               ret = !trylock_page(page);
-       else
-               ret = PageLocked(page);
-       /*
-        * If we were successful now, we know we're still on the
-        * waitqueue as we're still under the lock. This means it's
-        * safe to remove and return success, we know the callback
-        * isn't going to trigger.
-        */
-       if (!ret)
-               __remove_wait_queue(q, &wait->wait);
-       else
-               ret = -EIOCBQUEUED;
-       spin_unlock_irq(&q->lock);
-       return ret;
-}
-
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -1548,7 +1518,28 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
-       return __wait_on_page_locked_async(page, wait, true);
+       struct wait_queue_head *q = page_waitqueue(page);
+       int ret = 0;
+
+       wait->page = page;
+       wait->bit_nr = PG_locked;
+
+       spin_lock_irq(&q->lock);
+       __add_wait_queue_entry_tail(q, &wait->wait);
+       SetPageWaiters(page);
+       ret = !trylock_page(page);
+       /*
+        * If we were successful now, we know we're still on the
+        * waitqueue as we're still under the lock. This means it's
+        * safe to remove and return success, we know the callback
+        * isn't going to trigger.
+        */
+       if (!ret)
+               __remove_wait_queue(q, &wait->wait);
+       else
+               ret = -EIOCBQUEUED;
+       spin_unlock_irq(&q->lock);
+       return ret;
 }
 
 /*
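
For context, the caller-side pattern looks roughly like this (a sketch
based on lock_page_async() in include/linux/pagemap.h of this era, not
part of this patch):

	static inline int lock_page_async(struct page *page,
					  struct wait_page_queue *wait)
	{
		/* Fast path: take the lock directly if it is free. */
		if (!trylock_page(page))
			return __lock_page_async(page, wait);
		return 0;
	}

When __lock_page_async() returns -EIOCBQUEUED, the entry stays on the
page's waitqueue, and the wake function the caller attached to
wait->wait is invoked once the page is unlocked, letting an async
caller such as io_uring retry instead of sleeping.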