[PATCH] Drop nr_free_pages_pgdat()
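
The free page count now lives in the per-zone vm_stat[] counters as
NR_FREE_PAGES, so the ad-hoc summing helpers are redundant:
nr_free_pages() and nr_free_pages_pgdat() are dropped, and callers read
global_page_state(NR_FREE_PAGES), node_page_state(nid, NR_FREE_PAGES)
or zone_page_state(zone, NR_FREE_PAGES) directly.  The manual
bookkeeping on zone->free_pages in __free_one_page() and __rmqueue()
becomes __mod_zone_page_state() calls, and zone->free_pages,
zone->nr_active and zone->nr_inactive leave struct zone.

Related changes visible below: __drain_pages() skips unpopulated zones;
memmap_init_zone() and init_currently_empty_zone() gain an
enum memmap_context argument so boot-time pfn validity checks are not
applied to hotplugged memory; early_node_map[] is sorted once in
free_area_init_nodes() instead of on every find_min_pfn_for_node()
call; show_free_areas() also reports NR_BOUNCE; and
alloc_large_system_hash() now guarantees at least a 0-order table.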
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e6b17b2989e099636dcb1ec64587f130b8ad27c0..cf1a34074f086a0cfbede5acbac2a94ec228ac9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -395,7 +395,7 @@ static inline void __free_one_page(struct page *page,
        VM_BUG_ON(page_idx & (order_size - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       zone->free_pages += order_size;
+       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct free_area *area;
@@ -631,7 +631,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               zone->free_pages -= 1UL << order;
+               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area);
                return page;
        }
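
The two hunks above move the free-page count into the zone vm_stat
machinery.  A minimal userspace model of how such a counter behaves,
with per-CPU deltas batched locally and folded into the shared total
only when they cross a threshold, is sketched below; the names and the
threshold value are illustrative, not the kernel's:

        #include <stdio.h>

        #define NR_CPUS   4
        #define THRESHOLD 8

        static long vm_stat_free;       /* shared total (atomic_long_t in the kernel) */
        static int  cpu_diff[NR_CPUS];  /* per-CPU deltas (s8 per item in the kernel) */

        /* Models __mod_zone_page_state(): batch locally, fold on overflow. */
        static void mod_state(int cpu, int delta)
        {
                int x = cpu_diff[cpu] + delta;

                if (x > THRESHOLD || x < -THRESHOLD) {
                        vm_stat_free += x;
                        x = 0;
                }
                cpu_diff[cpu] = x;
        }

        int main(void)
        {
                mod_state(0, 6);        /* stays in cpu 0's local delta */
                mod_state(1, -12);      /* crosses the threshold: folded */
                printf("global=%ld cpu0=%d cpu1=%d\n",
                       vm_stat_free, cpu_diff[0], cpu_diff[1]);
                return 0;
        }

The shared total can therefore lag the true value by up to roughly
NR_CPUS * THRESHOLD pages in either direction, which is why readers of
NR_FREE_PAGES must tolerate small, transiently negative errors.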
@@ -711,6 +711,9 @@ static void __drain_pages(unsigned int cpu)
        for_each_zone(zone) {
                struct per_cpu_pageset *pset;
 
+               if (!populated_zone(zone))
+                       continue;
+
                pset = zone_pcp(zone, cpu);
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;
@@ -986,8 +989,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
 {
        /* free_pages may go negative - that's OK */
-       unsigned long min = mark;
-       long free_pages = z->free_pages - (1 << order) + 1;
+       long min = mark;
+       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
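
Because a ZVC reading of NR_FREE_PAGES may be transiently negative (see
the model above), the watermark arithmetic moves to signed types, which
is presumably why min becomes a signed long here: left unsigned, the
usual arithmetic conversions would turn a negative free_pages into a
huge unsigned value.  A standalone illustration of that pitfall (the
values are made up):

        #include <stdio.h>

        int main(void)
        {
                long free_pages = -3;      /* transiently negative reading */
                unsigned long umin = 128;  /* the old 'unsigned long min' */
                long smin = 128;           /* the new 'long min' */

                /* free_pages converts to unsigned long: -3 becomes huge,
                 * so the check wrongly reports plenty of free memory. */
                printf("unsigned compare passes: %d\n", free_pages > umin);

                /* The signed compare gives the intended answer (0). */
                printf("signed compare passes:   %d\n", free_pages > smin);
                return 0;
        }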
@@ -1162,7 +1165,7 @@ zonelist_scan:
                        zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
-                       !cpuset_zone_allowed(zone, gfp_mask))
+                       !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
@@ -1437,35 +1440,6 @@ fastcall void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
-/*
- * Total amount of free (allocatable) RAM:
- */
-unsigned int nr_free_pages(void)
-{
-       unsigned int sum = 0;
-       struct zone *zone;
-
-       for_each_zone(zone)
-               sum += zone->free_pages;
-
-       return sum;
-}
-
-EXPORT_SYMBOL(nr_free_pages);
-
-#ifdef CONFIG_NUMA
-unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
-{
-       unsigned int sum = 0;
-       enum zone_type i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               sum += pgdat->node_zones[i].free_pages;
-
-       return sum;
-}
-#endif
-
 static unsigned int nr_free_zone_pages(int offset)
 {
        /* Just pick one node, since fallback list is circular */
@@ -1512,7 +1486,7 @@ void si_meminfo(struct sysinfo *val)
 {
        val->totalram = totalram_pages;
        val->sharedram = 0;
-       val->freeram = nr_free_pages();
+       val->freeram = global_page_state(NR_FREE_PAGES);
        val->bufferram = nr_blockdev_pages();
        val->totalhigh = totalhigh_pages;
        val->freehigh = nr_free_highpages();
@@ -1527,10 +1501,11 @@ void si_meminfo_node(struct sysinfo *val, int nid)
        pg_data_t *pgdat = NODE_DATA(nid);
 
        val->totalram = pgdat->node_present_pages;
-       val->freeram = nr_free_pages_pgdat(pgdat);
+       val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
-       val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+       val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+                       NR_FREE_PAGES);
 #else
        val->totalhigh = 0;
        val->freehigh = 0;
@@ -1577,18 +1552,19 @@ void show_free_areas(void)
 
        get_zone_counts(&active, &inactive, &free);
 
-       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
-               "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
+       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+               " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                active,
                inactive,
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
-               nr_free_pages(),
+               global_page_state(NR_FREE_PAGES),
                global_page_state(NR_SLAB_RECLAIMABLE) +
                        global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
-               global_page_state(NR_PAGETABLE));
+               global_page_state(NR_PAGETABLE),
+               global_page_state(NR_BOUNCE));
 
        for_each_zone(zone) {
                int i;
@@ -1609,12 +1585,12 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone->free_pages),
+                       K(zone_page_state(zone, NR_FREE_PAGES)),
                        K(zone->pages_min),
                        K(zone->pages_low),
                        K(zone->pages_high),
-                       K(zone->nr_active),
-                       K(zone->nr_inactive),
+                       K(zone_page_state(zone, NR_ACTIVE)),
+                       K(zone_page_state(zone, NR_INACTIVE)),
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone->all_unreclaimable ? "yes" : "no")
@@ -1953,17 +1929,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn)
+               unsigned long start_pfn, enum memmap_context context)
 {
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-               if (!early_pfn_valid(pfn))
-                       continue;
-               if (!early_pfn_in_nid(pfn, nid))
-                       continue;
+               /*
+                * There can be holes in boot-time mem_map[]s
+                * handed to this function.  They do not
+                * exist on hotplugged memory.
+                */
+               if (context == MEMMAP_EARLY) {
+                       if (!early_pfn_valid(pfn))
+                               continue;
+                       if (!early_pfn_in_nid(pfn, nid))
+                               continue;
+               }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
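
The new context argument separates boot-time initialisation, where
mem_map[] may contain holes that must be skipped, from memory hotplug,
where it cannot.  The two-value enum introduced for this in
include/linux/mmzone.h is, in sketch form:

        /* Sketch of enum memmap_context from this era's mmzone.h. */
        enum memmap_context {
                MEMMAP_EARLY,
                MEMMAP_HOTPLUG,
        };

Hotplug callers in mm/memory_hotplug.c pass MEMMAP_HOTPLUG, while the
boot path (via the memmap_init() macro below) passes MEMMAP_EARLY.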
@@ -1990,7 +1973,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-       memmap_init_zone((size), (nid), (zone), (start_pfn))
+       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2236,7 +2219,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
-                                       unsigned long size)
+                                       unsigned long size,
+                                       enum memmap_context context)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;
@@ -2664,7 +2648,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                spin_lock_init(&zone->lru_lock);
                zone_seqlock_init(zone);
                zone->zone_pgdat = pgdat;
-               zone->free_pages = 0;
 
                zone->prev_priority = DEF_PRIORITY;
 
@@ -2673,14 +2656,13 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                INIT_LIST_HEAD(&zone->inactive_list);
                zone->nr_scan_active = 0;
                zone->nr_scan_inactive = 0;
-               zone->nr_active = 0;
-               zone->nr_inactive = 0;
                zap_zone_vm_stats(zone);
                atomic_set(&zone->reclaim_in_progress, 0);
                if (!size)
                        continue;
 
-               ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+               ret = init_currently_empty_zone(zone, zone_start_pfn,
+                                               size, MEMMAP_EARLY);
                BUG_ON(ret);
                zone_start_pfn += size;
        }
@@ -2865,20 +2847,23 @@ static void __init sort_node_map(void)
                        cmp_node_active_region, NULL);
 }
 
-/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
+/* Find the lowest pfn for a node */
 unsigned long __init find_min_pfn_for_node(unsigned long nid)
 {
        int i;
-
-       /* Regions in the early_node_map can be in any order */
-       sort_node_map();
+       unsigned long min_pfn = ULONG_MAX;
 
-       /* Assuming a sorted map, the first range found has the starting pfn */
+       /* Regions in early_node_map can be in any order; take the lowest pfn */
        for_each_active_range_index_in_nid(i, nid)
-               return early_node_map[i].start_pfn;
+               min_pfn = min(min_pfn, early_node_map[i].start_pfn);
 
-       printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid);
-       return 0;
+       if (min_pfn == ULONG_MAX) {
+               printk(KERN_WARNING
+                       "Could not find start_pfn for node %lu\n", nid);
+               return 0;
+       }
+
+       return min_pfn;
 }
 
 /**
@@ -2927,6 +2912,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        unsigned long nid;
        enum zone_type i;
 
+       /* Sort early_node_map as initialisation assumes it is sorted */
+       sort_node_map();
+
        /* Record where the zone boundaries are */
        memset(arch_zone_lowest_possible_pfn, 0,
                                sizeof(arch_zone_lowest_possible_pfn));
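
Sorting early_node_map[] once up front also lets find_min_pfn_for_node()
drop its ordering assumption: a linear min() scan is correct whatever
the order of the entries.  A toy equivalent of the new scan, using
hypothetical ranges:

        #include <stdio.h>
        #include <limits.h>

        struct range { unsigned long start_pfn, end_pfn; };

        int main(void)
        {
                /* Deliberately unsorted ranges for one node. */
                struct range map[] = { {4096, 8192}, {0, 1024}, {2048, 4096} };
                unsigned long min_pfn = ULONG_MAX;
                int i;

                for (i = 0; i < 3; i++)
                        if (map[i].start_pfn < min_pfn)
                                min_pfn = map[i].start_pfn;

                /* Prints 0; the old "return first entry" needed a sorted map. */
                printf("min_pfn = %lu\n", min_pfn);
                return 0;
        }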
@@ -3321,6 +3309,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);
+
+               /* Make sure we've got at least a 0-order (one page) allocation */
+               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+                       numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
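
The clamp ensures the computed table fills at least one page before the
power-of-two rounding.  A worked example with assumed inputs (PAGE_SIZE
of 4096 and a 16-byte bucket):

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        /* Stand-in for the kernel's roundup_pow_of_two(). */
        static unsigned long roundup_pow2(unsigned long x)
        {
                unsigned long r = 1;

                while (r < x)
                        r <<= 1;
                return r;
        }

        int main(void)
        {
                unsigned long bucketsize = 16, numentries = 32;

                /* 32 * 16 = 512 bytes < PAGE_SIZE, so the clamp raises
                 * numentries to 4096 / 16 = 256. */
                if (numentries * bucketsize < PAGE_SIZE)
                        numentries = PAGE_SIZE / bucketsize;
                numentries = roundup_pow2(numentries);

                printf("numentries=%lu (%lu bytes)\n",
                       numentries, numentries * bucketsize);
                return 0;
        }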