diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 838a5e31394cbbbe0e9a34b66e1570cf03b5bb86..81a91e6f1f99983ade59cc723bef2a07ad67f0fa 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -355,8 +355,8 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-       long bdi_nr_reclaimable;
-       long bdi_nr_writeback;
+       long nr_reclaimable, bdi_nr_reclaimable;
+       long nr_writeback, bdi_nr_writeback;
        long background_thresh;
        long dirty_thresh;
        long bdi_thresh;
@@ -376,11 +376,26 @@ static void balance_dirty_pages(struct address_space *mapping)
 
                get_dirty_limits(&background_thresh, &dirty_thresh,
                                &bdi_thresh, bdi);
+
+               nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                       global_page_state(NR_UNSTABLE_NFS);
+               nr_writeback = global_page_state(NR_WRITEBACK);
+
                bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+
                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                        break;
 
+               /*
+                * Throttle it only when the background writeback cannot
+                * catch-up. This avoids (excessively) small writeouts
+                * when the bdi limits are ramping up.
+                */
+               if (nr_reclaimable + nr_writeback <
+                               (background_thresh + dirty_thresh) / 2)
+                       break;
+
                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;
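
The throttling decision after this patch reduces to two checks, and is easiest to see in isolation. What follows is a minimal user-space sketch of that decision; the struct, the should_throttle() helper and the sample numbers are illustrative assumptions, whereas the real code reads these counters directly via global_page_state() and bdi_stat() inside the balance_dirty_pages() loop.

#include <stdbool.h>
#include <stdio.h>

/* Snapshot of the counters balance_dirty_pages() looks at (hypothetical type). */
struct dirty_counts {
	long nr_reclaimable;		/* global dirty + unstable NFS pages */
	long nr_writeback;		/* global pages under writeback */
	long bdi_nr_reclaimable;	/* this bdi's dirty + unstable pages */
	long bdi_nr_writeback;		/* this bdi's pages under writeback */
};

/*
 * Return true when the caller should keep throttling, i.e. when neither
 * of the two "break" conditions in the patch above applies.
 */
static bool should_throttle(const struct dirty_counts *c,
			    long background_thresh, long dirty_thresh,
			    long bdi_thresh)
{
	/* The device is within its own share of the dirty limit: stop. */
	if (c->bdi_nr_reclaimable + c->bdi_nr_writeback <= bdi_thresh)
		return false;

	/*
	 * Global dirty state is still below the midpoint of the background
	 * and hard thresholds, so background writeback can catch up on its
	 * own: stop, avoiding small writeouts while the bdi limits ramp up.
	 */
	if (c->nr_reclaimable + c->nr_writeback <
			(background_thresh + dirty_thresh) / 2)
		return false;

	return true;
}

int main(void)
{
	/* Made-up page counts, purely to exercise both checks. */
	struct dirty_counts c = {
		.nr_reclaimable		= 4000,
		.nr_writeback		= 1000,
		.bdi_nr_reclaimable	= 900,
		.bdi_nr_writeback	= 300,
	};

	printf("throttle: %s\n",
	       should_throttle(&c, 10000, 30000, 1000) ? "yes" : "no");
	return 0;
}

With these numbers the bdi has exceeded its own threshold (1200 > 1000), but the system as a whole sits well below the (background_thresh + dirty_thresh) / 2 midpoint, so the sketch prints "throttle: no". That is exactly the ramp-up case the new comment describes: background writeback is trusted to catch up instead of forcing small foreground writeouts.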