mm, fault_around: do not take a reference to a locked page
diff --git a/mm/filemap.c b/mm/filemap.c
index 81adec8ee02cc3bdb765625e28c3d765f203e512..29655fb47a2c4b2cf61e1c61e5b06d6f51e537a8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -981,7 +981,14 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
        if (wait_page->bit_nr != key->bit_nr)
                return 0;
 
-       /* Stop walking if it's locked */
+       /*
+        * Stop walking if it's locked.
+        * Is this safe if put_and_wait_on_page_locked() is in use?
+        * Yes: the waker must hold a reference to this page, and if PG_locked
+        * has now already been set by another task, that task must also hold
+        * a reference to the *same usage* of this page; so there is no need
+        * to walk on to wake even the put_and_wait_on_page_locked() callers.
+        */
        if (test_bit(key->bit_nr, &key->page->flags))
                return -1;
 
@@ -1049,25 +1056,44 @@ static void wake_up_page(struct page *page, int bit)
        wake_up_page_bit(page, bit);
 }
 
+/*
+ * A choice of three behaviors for wait_on_page_bit_common():
+ */
+enum behavior {
+       EXCLUSIVE,      /* Hold ref to page and take the bit when woken, like
+                        * __lock_page() waiting on then setting PG_locked.
+                        */
+       SHARED,         /* Hold ref to page and check the bit when woken, like
+                        * wait_on_page_writeback() waiting on PG_writeback.
+                        */
+       DROP,           /* Drop ref to page before wait, no check when woken,
+                        * like put_and_wait_on_page_locked() on PG_locked.
+                        */
+};
+
 static inline int wait_on_page_bit_common(wait_queue_head_t *q,
-               struct page *page, int bit_nr, int state, bool lock)
+       struct page *page, int bit_nr, int state, enum behavior behavior)
 {
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
+       bool bit_is_set;
        bool thrashing = false;
+       bool delayacct = false;
        unsigned long pflags;
        int ret = 0;
 
        if (bit_nr == PG_locked &&
            !PageUptodate(page) && PageWorkingset(page)) {
-               if (!PageSwapBacked(page))
+               if (!PageSwapBacked(page)) {
                        delayacct_thrashing_start();
+                       delayacct = true;
+               }
                psi_memstall_enter(&pflags);
                thrashing = true;
        }
 
        init_wait(wait);
-       wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
+       wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
        wait->func = wake_page_function;
        wait_page.page = page;
        wait_page.bit_nr = bit_nr;
@@ -1084,14 +1110,17 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 
                spin_unlock_irq(&q->lock);
 
-               if (likely(test_bit(bit_nr, &page->flags))) {
+               bit_is_set = test_bit(bit_nr, &page->flags);
+               if (behavior == DROP)
+                       put_page(page);
+
+               if (likely(bit_is_set))
                        io_schedule();
-               }
 
-               if (lock) {
+               if (behavior == EXCLUSIVE) {
                        if (!test_and_set_bit_lock(bit_nr, &page->flags))
                                break;
-               } else {
+               } else if (behavior == SHARED) {
                        if (!test_bit(bit_nr, &page->flags))
                                break;
                }
@@ -1100,12 +1129,23 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
                        ret = -EINTR;
                        break;
                }
+
+               if (behavior == DROP) {
+                       /*
+                        * We can no longer safely access page->flags:
+                        * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
+                        * there is a risk of waiting forever on a page reused
+                        * for something that keeps it locked indefinitely.
+                        * But best check for -EINTR above before breaking.
+                        */
+                       break;
+               }
        }
 
        finish_wait(q, wait);
 
        if (thrashing) {
-               if (!PageSwapBacked(page))
+               if (delayacct)
                        delayacct_thrashing_end();
                psi_memstall_leave(&pflags);
        }
@@ -1124,17 +1164,36 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
        wait_queue_head_t *q = page_waitqueue(page);
-       wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
+       wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
 int wait_on_page_bit_killable(struct page *page, int bit_nr)
 {
        wait_queue_head_t *q = page_waitqueue(page);
-       return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
+       return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
+/**
+ * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
+ * @page: The page to wait for.
+ *
+ * The caller should hold a reference on @page.  They expect the page to
+ * become unlocked relatively soon, but do not wish to hold up migration
+ * (for example) by holding the reference while waiting for the page to
+ * come unlocked.  After this function returns, the caller should not
+ * dereference @page.
+ */
+void put_and_wait_on_page_locked(struct page *page)
+{
+       wait_queue_head_t *q;
+
+       page = compound_head(page);
+       q = page_waitqueue(page);
+       wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
+}
+
 /**
  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
  * @page: Page defining the wait queue of interest
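The DROP behavior and put_and_wait_on_page_locked() above exist so that a caller need not pin the page across the sleep: the reference is given up before io_schedule(), which lets page migration freeze the refcount and make progress while the waiter sleeps. A minimal caller sketch, assuming a path that has already looked the page up and taken a reference (example_wait_for_migrating_page() is a hypothetical name, not part of this diff; elsewhere in this series the conversion is done in the migration-entry wait paths):

	static void example_wait_for_migrating_page(struct page *page)
	{
		/* Caller holds a reference, e.g. from a prior get_page(). */

		/*
		 * Old pattern, which pinned the page for the whole sleep and
		 * could keep migration from ever unlocking it:
		 *
		 *	wait_on_page_locked(page);
		 *	put_page(page);
		 */

		/* New pattern: the reference is dropped before sleeping. */
		put_and_wait_on_page_locked(page);

		/* @page must not be dereferenced after this returns. */
	}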
@@ -1264,7 +1323,8 @@ void __lock_page(struct page *__page)
 {
        struct page *page = compound_head(__page);
        wait_queue_head_t *q = page_waitqueue(page);
-       wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
+       wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
+                               EXCLUSIVE);
 }
 EXPORT_SYMBOL(__lock_page);
 
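For reference, __lock_page() with the EXCLUSIVE behavior is only the contended slow path; the fast path lives in the (unchanged) lock_page() wrapper, which in include/linux/pagemap.h looks roughly like this sketch:

	static inline void lock_page(struct page *page)
	{
		might_sleep();
		if (!trylock_page(page))
			__lock_page(page);	/* EXCLUSIVE wait shown above */
	}

EXCLUSIVE matters here because the woken waiter takes PG_locked itself via test_and_set_bit_lock(), so unlock_page() need only wake one such waiter at a time.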
@@ -1272,7 +1332,8 @@ int __lock_page_killable(struct page *__page)
 {
        struct page *page = compound_head(__page);
        wait_queue_head_t *q = page_waitqueue(page);
-       return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
+       return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+                                       EXCLUSIVE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
@@ -1540,7 +1601,7 @@ repeat:
                VM_BUG_ON_PAGE(page->index != offset, page);
        }
 
-       if (page && (fgp_flags & FGP_ACCESSED))
+       if (fgp_flags & FGP_ACCESSED)
                mark_page_accessed(page);
 
 no_page:
@@ -2553,6 +2614,13 @@ void filemap_map_pages(struct vm_fault *vmf,
                        goto next;
 
                head = compound_head(page);
+
+               /*
+                * Check for a locked page first, as a speculative
+                * reference may adversely influence page migration.
+                */
+               if (PageLocked(head))
+                       goto next;
                if (!page_cache_get_speculative(head))
                        goto next;
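The PageLocked() check added above matters because migration holds the page locked while it tries to freeze the reference count against an expected value; a transient speculative reference taken by fault-around at that moment is enough to make the freeze fail and force migration to retry. A simplified sketch of that migration-side check, assuming only that page_ref_freeze()/page_ref_unfreeze() behave as in include/linux/page_ref.h (this is not the real mm/migrate.c code, and example_try_freeze_for_migration() is a hypothetical name):

	static int example_try_freeze_for_migration(struct page *page, int expected_count)
	{
		/*
		 * page_ref_freeze() atomically replaces the refcount with 0
		 * only if it currently equals expected_count; any extra
		 * speculative reference makes it fail.
		 */
		if (!page_ref_freeze(page, expected_count))
			return -EAGAIN;	/* caller retries the migration later */

		/* ... move the page, then page_ref_unfreeze(page, expected_count) ... */
		return 0;
	}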