mm: use pagevec_lookup_range_tag() in write_cache_pages()
[sfrench/cifs-2.6.git] / mm / page-writeback.c
index 0b9c5cbe8eba086b385e489eefac7d601aed2535..460fc022cbc8a11e0c3ec62f3647aa2dc4640a6e 100644 (file)
@@ -433,8 +433,11 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        else
                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-       if (bg_thresh >= thresh)
+       if (unlikely(bg_thresh >= thresh)) {
+               pr_warn("vm direct limit must be set greater than background limit.\n");
                bg_thresh = thresh / 2;
+       }
+
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
@@ -1559,8 +1562,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
  * If we're over `background_thresh' then the writeback threads are woken to
  * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping,
-                               struct bdi_writeback *wb,
+static void balance_dirty_pages(struct bdi_writeback *wb,
                                unsigned long pages_dirtied)
 {
        struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
@@ -1910,7 +1912,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
        preempt_enable();
 
        if (unlikely(current->nr_dirtied >= ratelimit))
-               balance_dirty_pages(mapping, wb, current->nr_dirtied);
+               balance_dirty_pages(wb, current->nr_dirtied);
 
        wb_put(wb);
 }
@@ -1972,31 +1974,31 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, buffer, length, ppos);
-       return 0;
+       unsigned int old_interval = dirty_writeback_interval;
+       int ret;
+
+       ret = proc_dointvec(table, write, buffer, length, ppos);
+
+       /*
+        * Writing 0 to dirty_writeback_interval will disable periodic writeback
+        * and a different non-zero value will wake up the writeback threads.
+        * wb_wakeup_delayed() would be more appropriate, but it's a pain to
+        * iterate over all bdis and wbs.
+        * The reason we do this is to make the change take effect immediately.
+        */
+       if (!ret && write && dirty_writeback_interval &&
+               dirty_writeback_interval != old_interval)
+               wakeup_flusher_threads(WB_REASON_PERIODIC);
+
+       return ret;
 }
 
 #ifdef CONFIG_BLOCK
 void laptop_mode_timer_fn(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *)data;
-       int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
-               global_node_page_state(NR_UNSTABLE_NFS);
-       struct bdi_writeback *wb;
-
-       /*
-        * We want to write everything out, not just down to the dirty
-        * threshold
-        */
-       if (!bdi_has_dirty_io(q->backing_dev_info))
-               return;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
-               if (wb_has_dirty_io(wb))
-                       wb_start_writeback(wb, nr_pages, true,
-                                          WB_REASON_LAPTOP_TIMER);
-       rcu_read_unlock();
+       wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
 }
 
 /*
@@ -2194,30 +2196,14 @@ retry:
        while (!done && (index <= end)) {
                int i;
 
-               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
-                             min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+                               tag, PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
-                       /*
-                        * At this point, the page may be truncated or
-                        * invalidated (changing page->mapping to NULL), or
-                        * even swizzled back from swapper_space to tmpfs file
-                        * mapping. However, page->index will not change
-                        * because we have a reference on the page.
-                        */
-                       if (page->index > end) {
-                               /*
-                                * can't be range_cyclic (1st pass) because
-                                * end == -1 in that case.
-                                */
-                               done = 1;
-                               break;
-                       }
-
                        done_index = page->index;
 
                        lock_page(page);