index ae300397dfdac994f0544fdbe127d184796ac00a..38c3fa4308e210e4b70bf9b47ab2699d41111e98 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,6 +47,7 @@ int page_cluster;
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
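
Each of these per-CPU pagevecs batches LRU movement so that pgdat->lru_lock
is taken once per batch instead of once per page.  For context, a pagevec is
a small fixed-size array of page pointers; roughly, a sketch of struct
pagevec from include/linux/pagevec.h (exact field types vary across kernel
versions):

struct pagevec {
	unsigned long nr;			/* number of pages held */
	bool percpu_pvec_drained;		/* drain bookkeeping */
	struct page *pages[PAGEVEC_SIZE];	/* PAGEVEC_SIZE is 15 here */
};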
@@ -71,12 +72,12 @@ static void __page_cache_release(struct page *page)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        }
        __ClearPageWaiters(page);
-       mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
+       mem_cgroup_uncharge(page);
        free_unref_page(page);
 }
 
@@ -515,7 +516,6 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
-       add_page_to_lru_list(page, lruvec, lru);
 
        if (PageWriteback(page) || PageDirty(page)) {
                /*
@@ -523,13 +523,14 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                 * It can make readahead confusing.  But the race
                 * window is _really_ small and it's a non-critical problem.
                 */
+               add_page_to_lru_list(page, lruvec, lru);
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it was still on the
                 * pagevec; move the page to the tail of the inactive list.
                 */
-               list_move_tail(&page->lru, &lruvec->lists[lru]);
+               add_page_to_lru_list_tail(page, lruvec, lru);
                __count_vm_event(PGROTATED);
        }
 
@@ -538,6 +539,22 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
        update_page_reclaim_stat(lruvec, file, 0);
 }
 
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+                           void *arg)
+{
+       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+               int file = page_is_file_cache(page);
+               int lru = page_lru_base_type(page);
+
+               del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+               ClearPageActive(page);
+               ClearPageReferenced(page);
+               add_page_to_lru_list(page, lruvec, lru);
+
+               __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
+               update_page_reclaim_stat(lruvec, file, 0);
+       }
+}
 
 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
@@ -590,6 +607,10 @@ void lru_add_drain_cpu(int cpu)
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 
+       pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+       if (pagevec_count(pvec))
+               pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+
        pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
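
For reference, the local-CPU path into lru_add_drain_cpu() is unchanged by
this patch.  In mm/swap.c of this era it is simply:

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

get_cpu()/put_cpu() disable and re-enable preemption around the drain, so the
per-CPU pagevecs cannot change out from under it.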
@@ -623,6 +644,26 @@ void deactivate_file_page(struct page *page)
        }
 }
 
+/**
+ * deactivate_page - deactivate a page
+ * @page: page to deactivate
+ *
+ * deactivate_page() moves @page to the inactive list if @page was on the active
+ * list and was not an unevictable page.  This is done to accelerate the reclaim
+ * of @page.
+ */
+void deactivate_page(struct page *page)
+{
+       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+
+               get_page(page);
+               if (!pagevec_add(pvec, page) || PageCompound(page))
+                       pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+               put_cpu_var(lru_deactivate_pvecs);
+       }
+}
+
 /**
  * mark_page_lazyfree - make an anon page lazyfree
  * @page: anon page to mark lazyfree
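
The new deactivate_page() is a hint-style interface: it acts only on pages
that are on the LRU, active, and evictable, and it batches through the new
per-CPU lru_deactivate_pvecs above.  A minimal caller sketch, assuming the
declaration lands in include/linux/swap.h alongside deactivate_file_page();
hint_pages_cold() is a hypothetical helper, not part of this patch:

#include <linux/mm.h>
#include <linux/swap.h>

/* Hypothetical helper: hint that a batch of pages has gone cold. */
static void hint_pages_cold(struct page **pages, int nr)
{
	int i;

	/* A no-op for pages that are not LRU + active + evictable. */
	for (i = 0; i < nr; i++)
		deactivate_page(pages[i]);

	/* Flush this CPU's pagevecs so the LRU moves take effect now. */
	lru_add_drain();
}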
@@ -687,6 +728,7 @@ void lru_add_drain_all(void)
                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
+                   pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
@@ -844,17 +886,15 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
                get_page(page_tail);
                list_add_tail(&page_tail->lru, list);
        } else {
-               struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
-                * Use the standard add function to put page_tail on the list,
-                * but then correct its position so they all end up in order.
+                * Put page_tail on the list at the correct position
+                * so they all end up in order.
                 */
-               add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
-               list_head = page_tail->lru.prev;
-               list_move_tail(&page_tail->lru, list_head);
+               add_page_to_lru_list_tail(page_tail, lruvec,
+                                         page_lru(page_tail));
        }
 
        if (!PageUnevictable(page))
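
Both the lru_deactivate_file_fn() and lru_add_page_tail() hunks above lean on
add_page_to_lru_list_tail(), the tail-insertion counterpart of
add_page_to_lru_list().  Its advantage over an open-coded list_move_tail() is
that the insertion stays paired with the LRU size accounting.  A sketch of
the helper, assuming the definition in include/linux/mm_inline.h of this era:

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec, enum lru_list lru)
{
	update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
	list_add_tail(&page->lru, &lruvec->lists[lru]);
}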