mm: have zonelist contains structs with both a zone pointer and zone_idx
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 89e6286a7f57823e0427b7eeb84e6e39340764e1..4bced0d705caa0c60659c5445516afa6f3eb8863 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -71,7 +71,25 @@ static void enqueue_huge_page(struct page *page)
        free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+       int nid;
+       struct page *page = NULL;
+
+       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
+               if (!list_empty(&hugepage_freelists[nid])) {
+                       page = list_entry(hugepage_freelists[nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       free_huge_pages--;
+                       free_huge_pages_node[nid]--;
+                       break;
+               }
+       }
+       return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                                unsigned long address)
 {
        int nid;
@@ -79,11 +97,12 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
        struct mempolicy *mpol;
        struct zonelist *zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol);
-       struct zone **z;
+       struct zone *zone;
+       struct zoneref *z;
 
-       for (z = zonelist->zones; *z; z++) {
-               nid = zone_to_nid(*z);
-               if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+       for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1) {
+               nid = zone_to_nid(zone);
+               if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                                          struct page, lru);
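
For context, the loop above is the new zonelist API this series introduces: callers walk a cursor of struct zoneref entries instead of a bare struct zone ** array, and the final argument caps the walk at the highest allowed zone index. A minimal caller-side sketch of the same idiom (not from this patch; the per-node tally is purely illustrative):

	struct zoneref *z;	/* cursor into the zonelist's zoneref array */
	struct zone *zone;	/* zone the cursor currently points at */
	unsigned long nr_free = 0;

	/* Visit every zone in the zonelist at or below the given zone index. */
	for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1) {
		int nid = zone_to_nid(zone);

		nr_free += free_huge_pages_node[nid];	/* illustrative per-node work */
	}
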
@@ -268,6 +287,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 
        spin_lock(&hugetlb_lock);
        if (page) {
+               /*
+                * This page is now managed by the hugetlb allocator and has
+                * no users -- drop the buddy allocator's reference.
+                */
+               put_page_testzero(page);
+               VM_BUG_ON(page_count(page));
                nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
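
The put_page_testzero() added above is a refcount handoff: a page fresh from the buddy allocator carries exactly one reference, and dropping it leaves the hugetlb pool as the sole owner, with page_count() == 0 while the page sits on a free list. A hedged sketch of that handoff in isolation (the allocation call shown is illustrative, not the exact one alloc_buddy_huge_page() uses):

	struct page *page = alloc_pages(htlb_alloc_mask | __GFP_COMP,
					HUGETLB_PAGE_ORDER);

	if (page) {
		VM_BUG_ON(page_count(page) != 1);	/* only the buddy allocator's reference */
		put_page_testzero(page);		/* drop it: hugetlb now owns the page */
		VM_BUG_ON(page_count(page));		/* count must be zero on the free list */
	}
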
@@ -296,8 +321,10 @@ static int gather_surplus_pages(int delta)
        int needed, allocated;
 
        needed = (resv_huge_pages + delta) - free_huge_pages;
-       if (needed <= 0)
+       if (needed <= 0) {
+               resv_huge_pages += delta;
                return 0;
+       }
 
        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);
@@ -335,9 +362,12 @@ retry:
         * The surplus_list now contains _at_least_ the number of extra pages
          * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
-        * allocator.
+        * allocator.  Commit the entire reservation here to prevent another
+        * process from stealing the pages as they are added to the pool but
+        * before they are reserved.
         */
        needed += allocated;
+       resv_huge_pages += delta;
        ret = 0;
 free:
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
@@ -346,13 +376,14 @@ free:
                        enqueue_huge_page(page);
                else {
                        /*
-                        * Decrement the refcount and free the page using its
-                        * destructor.  This must be done with hugetlb_lock
+                        * The page has a reference count of zero already, so
+                        * call free_huge_page directly instead of using
+                        * put_page.  This must be done with hugetlb_lock
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
                        spin_unlock(&hugetlb_lock);
-                       put_page(page);
+                       free_huge_page(page);
                        spin_lock(&hugetlb_lock);
                }
        }
@@ -371,9 +402,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
        struct page *page;
        unsigned long nr_pages;
 
+       /*
+        * We want to release as many surplus pages as possible, spread
+        * evenly across all nodes. Iterate across all nodes until we
+        * can no longer free unreserved surplus pages. This occurs when
+        * the nodes with surplus pages have no free pages.
+        */
+       unsigned long remaining_iterations = num_online_nodes();
+
+       /* Uncommit the reservation */
+       resv_huge_pages -= unused_resv_pages;
+
        nr_pages = min(unused_resv_pages, surplus_huge_pages);
 
-       while (nr_pages) {
+       while (remaining_iterations-- && nr_pages) {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);
@@ -391,6 +433,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
                        surplus_huge_pages--;
                        surplus_huge_pages_node[nid]--;
                        nr_pages--;
+                       remaining_iterations = num_online_nodes();
                }
        }
 }
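
The remaining_iterations counter above is the subtle part: it is a budget of one full pass over the online nodes, reset whenever a page is actually freed, so the loop terminates only once a complete pass makes no progress, i.e. every node still holding surplus pages has no free pages to give back. A rough userspace analogue of that termination rule, with made-up node counts:

	#include <stdio.h>

	#define NNODES 4

	int main(void)
	{
		/* Freeable surplus pages per node; values are made up. */
		unsigned int freeable[NNODES] = { 2, 0, 1, 0 };
		unsigned long nr_pages = 5;		/* pages we would like to release */
		unsigned long remaining = NNODES;	/* budget: one full pass over the nodes */
		int nid = NNODES - 1;

		while (remaining-- && nr_pages) {
			nid = (nid + 1) % NNODES;	/* next_node() with wrap-around */
			if (freeable[nid]) {
				freeable[nid]--;
				nr_pages--;
				remaining = NNODES;	/* made progress: reset the budget */
			}
		}
		printf("pages left unreleased: %lu\n", nr_pages);
		return 0;
	}
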
@@ -402,7 +445,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
        struct page *page;
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page(vma, addr);
+       page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        return page ? page : ERR_PTR(-VM_FAULT_OOM);
 }
@@ -417,7 +460,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 
        spin_lock(&hugetlb_lock);
        if (free_huge_pages > resv_huge_pages)
-               page = dequeue_huge_page(vma, addr);
+               page = dequeue_huge_page_vma(vma, addr);
        spin_unlock(&hugetlb_lock);
        if (!page) {
                page = alloc_buddy_huge_page(vma, addr);
@@ -570,7 +613,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
        min_count = max(count, min_count);
        try_to_free_low(min_count);
        while (min_count < persistent_huge_pages) {
-               struct page *page = dequeue_huge_page(NULL, 0);
+               struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);
@@ -638,9 +681,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 {
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
-               "Node %d HugePages_Free:  %5u\n",
+               "Node %d HugePages_Free:  %5u\n"
+               "Node %d HugePages_Surp:  %5u\n",
                nid, nr_huge_pages_node[nid],
-               nid, free_huge_pages_node[nid]);
+               nid, free_huge_pages_node[nid],
+               nid, surplus_huge_pages_node[nid]);
 }
 
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
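
With the extra HugePages_Surp line, the per-node report this sprintf() produces (exposed via the node meminfo files under sysfs) now reads along these lines; the numbers are made up:

	Node 0 HugePages_Total:     8
	Node 0 HugePages_Free:      6
	Node 0 HugePages_Surp:      2
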
@@ -1205,12 +1250,13 @@ static int hugetlb_acct_memory(long delta)
                if (gather_surplus_pages(delta) < 0)
                        goto out;
 
-               if (delta > cpuset_mems_nr(free_huge_pages_node))
+               if (delta > cpuset_mems_nr(free_huge_pages_node)) {
+                       return_unused_surplus_pages(delta);
                        goto out;
+               }
        }
 
        ret = 0;
-       resv_huge_pages += delta;
        if (delta < 0)
                return_unused_surplus_pages((unsigned long) -delta);