Merge branches 'release', 'bugzilla-8570', 'bugzilla-9966', 'bugzilla-9998', 'bugzill...
[sfrench/cifs-2.6.git] / mm / page_alloc.c
index 5c7de8e959fc1485f7253972a922e90d5587ef1f..402a504f12283f23cb510ec9ebfce4d0d52eea29 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/interrupt.h>
 #include <linux/pagemap.h>
+#include <linux/jiffies.h>
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
@@ -43,6 +44,7 @@
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -220,13 +222,19 @@ static inline int bad_range(struct zone *zone, struct page *page)
 
 static void bad_page(struct page *page)
 {
-       printk(KERN_EMERG "Bad page state in process '%s'\n"
-               KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
-               KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
-               KERN_EMERG "Backtrace:\n",
+       void *pc = page_get_page_cgroup(page);
+
+       printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
+               "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
                current->comm, page, (int)(2*sizeof(unsigned long)),
                (unsigned long)page->flags, page->mapping,
                page_mapcount(page), page_count(page));
+       if (pc) {
+               printk(KERN_EMERG "cgroup:%p\n", pc);
+               page_reset_bad_cgroup(page);
+       }
+       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
+               KERN_EMERG "Backtrace:\n");
        dump_stack();
        page->flags &= ~(1 << PG_lru    |
                        1 << PG_private |
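
Note: the page_get_page_cgroup() checks added by this series (here and
in the free_pages_check()/prep_new_page() hunks below) cost nothing
when the memory controller is compiled out, because the disabled-case
stubs reduce to constants. A rough sketch of those stubs, modeled on
the era's <linux/memcontrol.h> (an assumption, not copied verbatim):

    /* Disabled-controller stubs: the new '(page_get_page_cgroup(page)
     * != NULL)' tests fold to '(NULL != NULL)' and compile away. */
    static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
    {
            return NULL;
    }

    static inline void page_reset_bad_cgroup(struct page *page)
    {
    }
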
@@ -452,6 +460,7 @@ static inline int free_pages_check(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
+               (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
@@ -537,7 +546,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
        if (order == 0) {
                __ClearPageReserved(page);
@@ -601,6 +610,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
+               (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
@@ -901,24 +911,21 @@ static void drain_pages(unsigned int cpu)
 {
        unsigned long flags;
        struct zone *zone;
-       int i;
 
        for_each_zone(zone) {
                struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
 
                if (!populated_zone(zone))
                        continue;
 
                pset = zone_pcp(zone, cpu);
-               for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-                       struct per_cpu_pages *pcp;
-
-                       pcp = &pset->pcp[i];
-                       local_irq_save(flags);
-                       free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-                       pcp->count = 0;
-                       local_irq_restore(flags);
-               }
+
+               pcp = &pset->pcp;
+               local_irq_save(flags);
+               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               pcp->count = 0;
+               local_irq_restore(flags);
        }
 }
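
Note: this hunk and most of the pcp hunks below follow from one
structural change: struct per_cpu_pageset now carries a single per-CPU
page list instead of a hot/cold pair, so draining walks one list per
zone. A minimal before/after sketch (statistics fields omitted):

    /* Before: two lists per CPU. */
    struct per_cpu_pageset {
            struct per_cpu_pages pcp[2];    /* 0: hot, 1: cold */
            /* ... per-CPU vm statistics fields omitted ... */
    };

    /* After: one list; temperature is encoded by list position
     * (head = hot, tail = cold) rather than by separate lists. */
    struct per_cpu_pageset {
            struct per_cpu_pages pcp;
            /* ... per-CPU vm statistics fields omitted ... */
    };
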
 
@@ -977,7 +984,7 @@ void mark_free_pages(struct zone *zone)
 /*
  * Free a 0-order page
  */
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
@@ -993,10 +1000,13 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
-       pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+       pcp = &zone_pcp(zone, get_cpu())->pcp;
        local_irq_save(flags);
        __count_vm_event(PGFREE);
-       list_add(&page->lru, &pcp->list);
+       if (cold)
+               list_add_tail(&page->lru, &pcp->list);
+       else
+               list_add(&page->lru, &pcp->list);
        set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
@@ -1007,12 +1017,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        put_cpu();
 }
 
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
 {
        free_hot_cold_page(page, 0);
 }
        
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
 {
        free_hot_cold_page(page, 1);
 }
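
Note: with the single list, a page's temperature is preserved purely by
insertion point: hot frees go to the head, cold frees to the tail. For
example (warm_page/cold_page are placeholder names):

    /* Head of pcp->list holds cache-warm pages, tail cache-cold. */
    free_hot_cold_page(warm_page, 0);   /* list_add: to the head      */
    free_hot_cold_page(cold_page, 1);   /* list_add_tail: to the tail */
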
@@ -1054,7 +1064,7 @@ again:
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
 
-               pcp = &zone_pcp(zone, cpu)->pcp[cold];
+               pcp = &zone_pcp(zone, cpu)->pcp;
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
@@ -1064,9 +1074,15 @@ again:
                }
 
                /* Find a page of the appropriate migrate type */
-               list_for_each_entry(page, &pcp->list, lru)
-                       if (page_private(page) == migratetype)
-                               break;
+               if (cold) {
+                       list_for_each_entry_reverse(page, &pcp->list, lru)
+                               if (page_private(page) == migratetype)
+                                       break;
+               } else {
+                       list_for_each_entry(page, &pcp->list, lru)
+                               if (page_private(page) == migratetype)
+                                       break;
+               }
 
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
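
Note: allocation mirrors the free side: cold requests scan from the
tail, hot requests from the head, while still matching the migrate
type stamped into page_private() at free time. The test at the end of
this hunk relies on a list_for_each_entry() property, sketched here:

    /* If neither loop hit 'break', the cursor ends up addressing the
     * list head itself (container_of(&pcp->list, ...)), so comparing
     * &page->lru against &pcp->list detects the miss and falls
     * through to the rmqueue_bulk() refill. */
    if (unlikely(&page->lru == &pcp->list)) {
            /* refill pcp->list, then retry the scan */
    }
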
@@ -1268,7 +1284,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
        if (!zlc)
                return NULL;
 
-       if (jiffies - zlc->last_full_zap > 1 * HZ) {
+       if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }
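
Note: the new <linux/jiffies.h> include at the top of the file exists
for this hunk. time_after() is the kernel's documented idiom for
comparing jiffies: it works on a signed difference, so it behaves
correctly across wraparound of the unsigned counter. Simplified shape
of the macro (the real one also typechecks both arguments as
unsigned long):

    #define time_after(a, b)        ((long)(b) - (long)(a) < 0)

    /* So the condition reads: more than HZ jiffies (one second)
     * have elapsed since zlc->last_full_zap, wrap-safe. */
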
@@ -1443,7 +1459,7 @@ try_next_zone:
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page * fastcall
+struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist)
 {
@@ -1635,7 +1651,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
        struct page * page;
        page = alloc_pages(gfp_mask, order);
@@ -1646,7 +1662,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
        struct page * page;
 
@@ -1672,7 +1688,7 @@ void __pagevec_free(struct pagevec *pvec)
                free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
                if (order == 0)
@@ -1684,7 +1700,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
 {
        if (addr != 0) {
                VM_BUG_ON(!virt_addr_valid((void *)addr));
@@ -1793,12 +1809,9 @@ void show_free_areas(void)
 
                        pageset = zone_pcp(zone, cpu);
 
-                       printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   "
-                              "Cold: hi:%5d, btch:%4d usd:%4d\n",
-                              cpu, pageset->pcp[0].high,
-                              pageset->pcp[0].batch, pageset->pcp[0].count,
-                              pageset->pcp[1].high, pageset->pcp[1].batch,
-                              pageset->pcp[1].count);
+                       printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
+                              cpu, pageset->pcp.high,
+                              pageset->pcp.batch, pageset->pcp.count);
                }
        }
 
@@ -1871,6 +1884,8 @@ void show_free_areas(void)
                printk("= %lukB\n", K(total));
        }
 
+       printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
+
        show_swap_cache_info();
 }
 
@@ -2543,8 +2558,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        }
 }
 
-static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
-                               struct zone *zone, unsigned long size)
+static void __meminit zone_init_free_lists(struct zone *zone)
 {
        int order, t;
        for_each_migratetype_order(order, t) {
@@ -2596,17 +2610,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 
        memset(p, 0, sizeof(*p));
 
-       pcp = &p->pcp[0];               /* hot */
+       pcp = &p->pcp;
        pcp->count = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
        INIT_LIST_HEAD(&pcp->list);
-
-       pcp = &p->pcp[1];               /* cold*/
-       pcp->count = 0;
-       pcp->high = 2 * batch;
-       pcp->batch = max(1UL, batch/2);
-       INIT_LIST_HEAD(&pcp->list);
 }
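
Note: with one list, the separate cold-list thresholds disappear and
the old hot-list tuning survives. As a worked example (the batch value
is illustrative; it is normally derived from the zone size):

    /* Suppose the computed batch is 31. Then: */
    pcp->batch = 31;        /* refill/drain granularity */
    pcp->high  = 6 * 31;    /* = 186; once pcp->count reaches this,
                             * free_hot_cold_page() trims one batch
                             * of pages back to the buddy lists */
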
 
 /*
@@ -2619,7 +2627,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 {
        struct per_cpu_pages *pcp;
 
-       pcp = &p->pcp[0]; /* hot list */
+       pcp = &p->pcp;
        pcp->high = high;
        pcp->batch = max(1UL, high/4);
        if ((high/4) > (PAGE_SHIFT * 8))
@@ -2823,7 +2831,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
        memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
 
-       zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+       zone_init_free_lists(zone);
 
        return 0;
 }
@@ -3313,7 +3321,7 @@ static inline int pageblock_default_order(unsigned int order)
  *   - mark all memory queues empty
  *   - clear the memory bitmaps
  */
-static void __meminit free_area_init_core(struct pglist_data *pgdat,
+static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long *zholes_size)
 {
        enum zone_type j;
@@ -3437,7 +3445,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
+void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long node_start_pfn,
                unsigned long *zholes_size)
 {
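
Note: __paginginit is new with this series. The intent is that under
CONFIG_SPARSEMEM (where memory hotplug can re-enter these init paths)
it behaves like __meminit, and otherwise like plain __init so the code
is discarded after boot. A sketch of the definition, per mm/internal.h
of this era (an assumption, quoted from memory):

    #ifdef CONFIG_SPARSEMEM
    #define __paginginit __meminit
    #else
    #define __paginginit __init
    #endif
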