mm/vmscan.c
index bbd194630c5b0c16dd550d21b81fb2b0865c3538..e5a9597e3bbc7aad2ed7af943528aa164eb4aa36 100644 (file)
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(unregister_shrinker);
  * percentages of the lru and ageable caches.  This should balance the seeks
  * generated by these structures.
  *
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increases the pressure on
  * slab to avoid swapping.
  *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
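[Editor's note: for context, the overflow avoidance that comment refers to is the slab-pressure delta computed in shrink_slab() elsewhere in this file. A rough sketch of that arithmetic, reconstructed from memory rather than quoted from this hunk (variable names are illustrative): the product is built up in a 64-bit variable and do_div() performs the final division, so no single 32-bit multiplication of scanned * seeks * entries can overflow.]

	unsigned long long delta;	/* 64-bit intermediate */

	delta = (4 * scanned) / shrinker->seeks;
	delta *= max_pass;		/* entries reported by the shrinker */
	do_div(delta, lru_pages + 1);	/* scale by LRU size, avoid div-by-0 */
	shrinker->nr += delta;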
@@ -1108,8 +1108,6 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;
 
-       atomic_inc(&zone->reclaim_in_progress);
-
        /*
         * Add one to `nr_to_scan' just to make sure that the kernel will
         * slowly sift through the active list.
@@ -1148,8 +1146,6 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
        }
 
        throttle_vm_writeout(sc->gfp_mask);
-
-       atomic_dec(&zone->reclaim_in_progress);
        return nr_reclaimed;
 }
 
@@ -1187,7 +1183,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
 
                note_zone_scanning_priority(zone, priority);
 
-               if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
                sc->all_unreclaimable = 0;
@@ -1286,7 +1282,7 @@ out:
         */
        if (priority < 0)
                priority = 0;
-       for (i = 0; zones[i] != 0; i++) {
+       for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1368,7 +1364,8 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                           priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1403,7 +1400,8 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                                       priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1424,12 +1422,13 @@ loop_again:
                                                lru_pages);
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
-                       if (zone->all_unreclaimable)
+                       if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
                                (zone_page_state(zone, NR_ACTIVE)
                                + zone_page_state(zone, NR_INACTIVE)) * 6)
-                                       zone->all_unreclaimable = 1;
+                                       zone_set_flag(zone,
+                                                     ZONE_ALL_UNRECLAIMABLE);
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -1595,7 +1594,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                if (!populated_zone(zone))
                        continue;
 
-               if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                /* For pass = 0 we don't shrink the active list */
@@ -1897,6 +1896,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
        int node_id;
+       int ret;
 
        /*
         * Zone reclaim reclaims unmapped file backed pages and
@@ -1914,15 +1914,13 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                        <= zone->min_slab_pages)
                return 0;
 
+       if (zone_is_all_unreclaimable(zone))
+               return 0;
+
        /*
-        * Avoid concurrent zone reclaims, do not reclaim in a zone that does
-        * not have reclaimable pages and if we should not delay the allocation
-        * then do not scan.
+        * Do not scan if the allocation should not be delayed.
         */
-       if (!(gfp_mask & __GFP_WAIT) ||
-               zone->all_unreclaimable ||
-               atomic_read(&zone->reclaim_in_progress) > 0 ||
-               (current->flags & PF_MEMALLOC))
+       if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
                        return 0;
 
        /*
@@ -1934,6 +1932,12 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
                return 0;
-       return __zone_reclaim(zone, gfp_mask, order);
+
+       if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+               return 0;
+       ret = __zone_reclaim(zone, gfp_mask, order);
+       zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
+       return ret;
 }
 #endif
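[Editor's note: the zone_is_all_unreclaimable(), zone_set_flag(), zone_test_and_set_flag() and zone_clear_flag() helpers used above are not defined in this file. A minimal sketch of how they are plausibly declared in include/linux/mmzone.h by the same series, inferred from their use here, follows; the exact names of the flag enum and the zone->flags member are assumptions.]

	/*
	 * Sketch (assumption): the helpers wrap the generic bitops on a new
	 * unsigned long flags word in struct zone. ZONE_RECLAIM_LOCKED takes
	 * over from the old zone->reclaim_in_progress atomic counter as the
	 * "only one reclaimer per zone" guard.
	 */
	typedef enum {
		ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
		ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	} zone_flags_t;

	static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
	{
		set_bit(flag, &zone->flags);
	}

	static inline int zone_test_and_set_flag(struct zone *zone,
						 zone_flags_t flag)
	{
		return test_and_set_bit(flag, &zone->flags);
	}

	static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
	{
		clear_bit(flag, &zone->flags);
	}

	static inline int zone_is_all_unreclaimable(const struct zone *zone)
	{
		return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
	}

[Because test_and_set_bit() is atomic, the zone_reclaim() change above can drop the reclaim_in_progress counter and still guarantee that only one task runs __zone_reclaim() against a given zone at a time; losers of the race simply return 0 and fall back to allocating from another zone.]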