diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 63807583d8e89f1c96f8b05bcf5fe422ed200c26..2d30e2cfe8047606064f117fbaca4675540e00dd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -191,6 +191,26 @@ static unsigned long writeout_period_time = 0;
  * global dirtyable memory first.
  */
 
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache.  This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+       unsigned long nr_pages;
+
+       nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+       nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+       nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+       return nr_pages;
+}
+
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
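
Note: the new helper starts from NR_FREE_PAGES, clamps the dirty_balance_reserve
subtraction so it cannot underflow, and then adds only the file LRU counters.
A small userspace sketch of that arithmetic (hypothetical struct and numbers,
not kernel code) shows why the clamp matters when the reserve exceeds the
zone's free pages:

#include <stdio.h>

/* Stand-ins for the zone counters used above; names are illustrative. */
struct zone_sample {
	unsigned long nr_free_pages;		/* NR_FREE_PAGES */
	unsigned long nr_inactive_file;		/* NR_INACTIVE_FILE */
	unsigned long nr_active_file;		/* NR_ACTIVE_FILE */
	unsigned long dirty_balance_reserve;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long sample_dirtyable_memory(const struct zone_sample *z)
{
	unsigned long nr_pages = z->nr_free_pages;

	/* Clamp so a reserve larger than the free pages cannot wrap around. */
	nr_pages -= min_ul(nr_pages, z->dirty_balance_reserve);

	/* Only file-backed LRU pages are treated as potentially dirtyable. */
	nr_pages += z->nr_inactive_file;
	nr_pages += z->nr_active_file;

	return nr_pages;
}

int main(void)
{
	struct zone_sample z = {
		.nr_free_pages = 1000,
		.nr_inactive_file = 4000,
		.nr_active_file = 2000,
		.dirty_balance_reserve = 1500,
	};

	/* Free pages contribute 0 (the reserve exceeds them); file pages add 6000. */
	printf("dirtyable: %lu pages\n", sample_dirtyable_memory(&z));
	return 0;
}
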
@@ -198,11 +218,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
        unsigned long x = 0;
 
        for_each_node_state(node, N_HIGH_MEMORY) {
-               struct zone *z =
-                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+               struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-               x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+               x += zone_dirtyable_memory(z);
        }
        /*
         * Unreclaimable memory (kernel memory or anonymous memory
@@ -238,9 +256,12 @@ static unsigned long global_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+       x = global_page_state(NR_FREE_PAGES);
        x -= min(x, dirty_balance_reserve);
 
+       x += global_page_state(NR_INACTIVE_FILE);
+       x += global_page_state(NR_ACTIVE_FILE);
+
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);
 
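
Note: two things change in the global figure above. The reserve is now taken
out of the free pages alone rather than out of free plus reclaimable, and the
reclaimable total is replaced by the file LRU counters. A before/after sketch
with illustrative numbers; the removed global_reclaimable_pages() path is
assumed here to have also counted anonymous LRU pages when swap is available:

#include <stdio.h>

/* Illustrative page counts; not taken from a real system. */
static const unsigned long nr_free = 5000;
static const unsigned long nr_file = 20000;	/* NR_INACTIVE_FILE + NR_ACTIVE_FILE */
static const unsigned long nr_anon = 30000;	/* assumed part of the old reclaimable total */
static const unsigned long reserve = 8000;	/* dirty_balance_reserve */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Old scheme: reserve comes out of free + reclaimable (file + anon). */
	unsigned long old_x = nr_free + nr_file + nr_anon;
	old_x -= min_ul(old_x, reserve);

	/* New scheme: reserve comes out of the free pages only, then add file. */
	unsigned long new_x = nr_free;
	new_x -= min_ul(new_x, reserve);
	new_x += nr_file;

	printf("old dirtyable: %lu, new dirtyable: %lu\n", old_x, new_x);
	return 0;
}
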
@@ -288,32 +309,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
        trace_global_dirty_state(background, dirty);
 }
 
-/**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache.  This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
-       /*
-        * The effective global number of dirtyable pages may exclude
-        * highmem as a big-picture measure to keep the ratio between
-        * dirty memory and lowmem reasonable.
-        *
-        * But this function is purely about the individual zone and a
-        * highmem zone can hold its share of dirty pages, so we don't
-        * care about vm_highmem_is_dirtyable here.
-        */
-       unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
-               zone_reclaimable_pages(zone);
-
-       /* don't allow this to underflow */
-       nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
-       return nr_pages;
-}
-
 /**
  * zone_dirty_limit - maximum number of dirty pages allowed in a zone
  * @zone: the zone
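
Note: the zone_dirty_limit() context that follows is the consumer of this base
value; broadly, the dirty thresholds scale the dirtyable total by the dirty
ratio sysctls. A simplified, illustrative model of that scaling, not the
kernel's exact zone_dirty_limit()/global_dirty_limits() code; it ignores
vm_dirty_bytes and the per-task adjustments the kernel applies:

#include <stdio.h>

/* Illustrative inputs; real values come from vm.dirty_ratio,
 * vm.dirty_background_ratio and global/zone_dirtyable_memory(). */
static const unsigned long dirtyable_pages = 200000;
static const unsigned long dirty_ratio = 20;		/* percent */
static const unsigned long background_ratio = 10;	/* percent */

int main(void)
{
	unsigned long dirty = dirtyable_pages * dirty_ratio / 100;
	unsigned long background = dirtyable_pages * background_ratio / 100;

	/* Background writeback starts around 'background'; writers get
	 * throttled as the number of dirty pages approaches 'dirty'. */
	printf("background: %lu pages, dirty thresh: %lu pages\n",
	       background, dirty);
	return 0;
}
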