memcg: use correct scan number at reclaim
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327f8f46ddbde07776df8644db82de5..e89517141657bb6ecfa4c17cf1aceb8b76be78f6 100644
@@ -1469,7 +1469,7 @@ static void shrink_zone(int priority, struct zone *zone,
                int file = is_file_lru(l);
                int scan;
 
-               scan = zone_page_state(zone, NR_LRU_BASE + l);
+               scan = zone_nr_pages(zone, sc, l);
                if (priority) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
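
The hunk above makes shrink_zone() size its scan from a memcg-aware helper instead of the
global per-zone LRU counter, so reclaim triggered by a memory cgroup bases "scan" on that
cgroup's LRU lengths rather than the whole zone's. A minimal sketch of what such a helper
looks like on kernels of this era follows; the NULL sc->mem_cgroup test and the
mem_cgroup_zone_nr_pages() name are assumptions drawn from surrounding code of the period,
not taken from this diff:

static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
				   enum lru_list lru)
{
	/* Global reclaim: sc->mem_cgroup is NULL, use the zone-wide vmstat counter. */
	if (!sc->mem_cgroup)
		return zone_page_state(zone, NR_LRU_BASE + lru);

	/* Memcg reclaim: ask the cgroup for the length of this LRU in this zone. */
	return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);	/* assumed memcg helper */
}
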
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                                      int pass, struct scan_control *sc)
 {
        struct zone *zone;
-       unsigned long nr_to_scan, ret = 0;
-       enum lru_list l;
+       unsigned long ret = 0;
 
        for_each_zone(zone) {
+               enum lru_list l;
 
                if (!populated_zone(zone))
                        continue;
-
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                for_each_evictable_lru(l) {
+                       enum zone_stat_item ls = NR_LRU_BASE + l;
+                       unsigned long lru_pages = zone_page_state(zone, ls);
+
                        /* For pass = 0, we don't shrink the active list */
-                       if (pass == 0 &&
-                               (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+                       if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+                                               l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan +=
-                               (zone_page_state(zone, NR_LRU_BASE + l)
-                                                               >> prio) + 1;
+                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
                        if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                               unsigned long nr_to_scan;
+
                                zone->lru[l].nr_scan = 0;
-                               nr_to_scan = min(nr_pages,
-                                       zone_page_state(zone,
-                                                       NR_LRU_BASE + l));
+                               nr_to_scan = min(nr_pages, lru_pages);
                                ret += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
                                if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                        }
                }
        }
-
        return ret;
 }
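
The reshuffled shrink_all_zones() above keeps the original batching scheme: each priority
step adds (lru_pages >> prio) + 1 to the per-LRU nr_scan accumulator, and a real shrink_list()
call is issued only once the accumulator reaches nr_pages (or on the late passes). A
self-contained user-space model of that accumulation, with made-up numbers purely to
illustrate the batching (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long lru_pages = 100000;	/* hypothetical LRU length */
	unsigned long nr_pages  = 1024;		/* hypothetical reclaim target */
	unsigned long nr_scan   = 0;
	int prio, calls = 0;

	/* Same walk as the prio loop driving shrink_all_zones(): DEF_PRIORITY (12) down to 0. */
	for (prio = 12; prio >= 0; prio--) {
		nr_scan += (lru_pages >> prio) + 1;
		if (nr_scan >= nr_pages) {
			nr_scan = 0;
			calls++;	/* here the kernel would call shrink_list() */
		}
	}
	printf("shrink_list() would be invoked %d times\n", calls);
	return 0;
}
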
 
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                .may_swap = 0,
                .swap_cluster_max = nr_pages,
                .may_writepage = 1,
-               .swappiness = vm_swappiness,
                .isolate_pages = isolate_pages_global,
        };
 
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                int prio;
 
                /* Force reclaiming mapped pages in the passes #3 and #4 */
-               if (pass > 2) {
+               if (pass > 2)
                        sc.may_swap = 1;
-                       sc.swappiness = 100;
-               }
 
                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                        unsigned long nr_to_scan = nr_pages - ret;