mm: speed up cancel_dirty_page() for clean pages
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c518c845f202591cfa1fb8da83a38d6fd256be7b..436714917e038c82d8d08a42d0b2f6c4f14644c2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -433,8 +433,11 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        else
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-       if (bg_thresh >= thresh)
+       if (unlikely(bg_thresh >= thresh)) {
+               pr_warn("vm direct limit must be set greater than background limit.\n");
                bg_thresh = thresh / 2;
+       }
+
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
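
For example (hypothetical sysctl values): with vm.dirty_ratio = 10 and vm.dirty_background_ratio = 20, the computed bg_thresh exceeds thresh. The old code silently fell back to bg_thresh = thresh / 2; this change keeps that fallback but also logs a warning, so the inconsistent limits are visible to the administrator.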
@@ -625,9 +628,9 @@ EXPORT_SYMBOL_GPL(wb_writeout_inc);
  * On idle system, we can be called long after we scheduled because we use
  * deferred timers so count with missed periods.
  */
-static void writeout_period(unsigned long t)
+static void writeout_period(struct timer_list *t)
 {
-       struct wb_domain *dom = (void *)t;
+       struct wb_domain *dom = from_timer(dom, t, period_timer);
        int miss_periods = (jiffies - dom->period_time) /
                                                 VM_COMPLETIONS_PERIOD_LEN;
 
@@ -650,8 +653,7 @@ int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 
        spin_lock_init(&dom->lock);
 
-       setup_deferrable_timer(&dom->period_timer, writeout_period,
-                              (unsigned long)dom);
+       timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
 
        dom->dirty_limit_tstamp = jiffies;
 
@@ -1559,8 +1561,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
  * If we're over `background_thresh' then the writeback threads are woken to
  * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping,
-                               struct bdi_writeback *wb,
+static void balance_dirty_pages(struct bdi_writeback *wb,
                                unsigned long pages_dirtied)
 {
        struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
@@ -1910,7 +1911,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
        preempt_enable();
 
        if (unlikely(current->nr_dirtied >= ratelimit))
-               balance_dirty_pages(mapping, wb, current->nr_dirtied);
+               balance_dirty_pages(wb, current->nr_dirtied);
 
        wb_put(wb);
 }
@@ -2194,30 +2195,14 @@ retry:
        while (!done && (index <= end)) {
                int i;
 
-               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-                             min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+                               tag);
                if (nr_pages == 0)
                        break;
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
-                       /*
-                        * At this point, the page may be truncated or
-                        * invalidated (changing page->mapping to NULL), or
-                        * even swizzled back from swapper_space to tmpfs file
-                        * mapping. However, page->index will not change
-                        * because we have a reference on the page.
-                        */
-                       if (page->index > end) {
-                               /*
-                                * can't be range_cyclic (1st pass) because
-                                * end == -1 in that case.
-                                */
-                               done = 1;
-                               break;
-                       }
-
                        done_index = page->index;
 
                        lock_page(page);
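
The hunk above swaps pagevec_lookup_tag() for pagevec_lookup_range_tag(), which takes the end offset itself, so the lookup can never return pages past end and the open-coded page->index > end check (with its long truncation comment) becomes dead code. Assuming the helper's declaration at the time of this conversion, the interface is roughly:

	/* Fills pvec with up to PAGEVEC_SIZE pages in [*index, end] that carry
	 * the given radix-tree tag, advances *index past the last page found,
	 * and returns the number of pages found (assumed declaration from
	 * include/linux/pagevec.h). */
	unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
			struct address_space *mapping, pgoff_t *index,
			pgoff_t end, int tag);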
@@ -2623,7 +2608,7 @@ EXPORT_SYMBOL(set_page_dirty_lock);
  * page without actually doing it through the VM. Can you say "ext3 is
  * horribly ugly"? Thought you could.
  */
-void cancel_dirty_page(struct page *page)
+void __cancel_dirty_page(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
@@ -2644,7 +2629,7 @@ void cancel_dirty_page(struct page *page)
                ClearPageDirty(page);
        }
 }
-EXPORT_SYMBOL(cancel_dirty_page);
+EXPORT_SYMBOL(__cancel_dirty_page);
 
 /*
  * Clear a page's dirty flag, while caring for dirty memory accounting.
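
The rename to __cancel_dirty_page() is what the commit subject is about: the exported function becomes the slow path, and cancel_dirty_page() is expected to live on as a cheap inline wrapper in include/linux/mm.h (outside this file's diff) that bails out early when the page is already clean, so clean pages skip the locking and accounting entirely. A sketch of that wrapper, assuming the header side of the change follows the usual pattern:

	/* Assumed inline wrapper in include/linux/mm.h: only take the slow
	 * path when the page is actually dirty; clean pages avoid the
	 * locking and accounting work in __cancel_dirty_page(). */
	static inline void cancel_dirty_page(struct page *page)
	{
		/* paired with the exported __cancel_dirty_page() above */
		if (PageDirty(page))
			__cancel_dirty_page(page);
	}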